diff --git a/cmd/testdata/example.js b/cmd/testdata/example.js
index f5d8fe5911e..7c6961ce4b2 100644
--- a/cmd/testdata/example.js
+++ b/cmd/testdata/example.js
@@ -138,7 +138,7 @@ export default function() {
},
"id": "${json.preview_payment_methods[0].id}",
"type": "${json.preview_payment_methods[0].type}"
- },
+ },
{
"data": {
"allow_saved_card": "${json.preview_payment_methods[1].data.allow_saved_card}",
@@ -234,7 +234,7 @@ export default function() {
},
"id": "${json.preview_payment_methods[0].id}",
"type": "${json.preview_payment_methods[0].type}"
- },
+ },
{
"data": {
"allow_saved_card": "${json.preview_payment_methods[1].data.allow_saved_card}",
diff --git a/go.mod b/go.mod
index b301a878294..693c4fd79db 100644
--- a/go.mod
+++ b/go.mod
@@ -3,15 +3,14 @@ module github.com/loadimpact/k6
go 1.14
require (
- github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4
+ github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c
github.com/DataDog/datadog-go v0.0.0-20180330214955-e67964b4021a
github.com/GeertJohan/go.rice v0.0.0-20170420135705-c02ca9a983da
- github.com/PuerkitoBio/goquery v1.3.0
+ github.com/PuerkitoBio/goquery v1.6.1
github.com/Shopify/sarama v1.16.0
github.com/Shopify/toxiproxy v2.1.4+incompatible // indirect
github.com/Soontao/goHttpDigestClient v0.0.0-20170320082612-6d28bb1415c5
- github.com/andybalholm/brotli v0.0.0-20190704151324-71eb68cc467c
- github.com/andybalholm/cascadia v1.0.0 // indirect
+ github.com/andybalholm/brotli v1.0.1
github.com/daaku/go.zipexe v0.0.0-20150329023125-a5fe2436ffcb // indirect
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect
github.com/dop251/goja v0.0.0-20210317175251-bb14c2267b76
@@ -37,12 +36,11 @@ require (
github.com/julienschmidt/httprouter v1.1.1-0.20180222160526-d18983907793
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect
github.com/kelseyhightower/envconfig v1.4.0
- github.com/klauspost/compress v1.7.2
- github.com/klauspost/cpuid v1.3.1 // indirect
+ github.com/klauspost/compress v1.11.13
github.com/kubernetes/helm v2.9.0+incompatible
github.com/labstack/echo v3.2.6+incompatible // indirect
github.com/labstack/gommon v0.2.2-0.20170925052817-57409ada9da0 // indirect
- github.com/mailru/easyjson v0.7.4-0.20200812114229-8ab5ff9cd8e4
+ github.com/mailru/easyjson v0.7.7
github.com/manyminds/api2go v0.0.0-20180125085803-95be7bd0455e
github.com/mattn/go-colorable v0.0.9
github.com/mattn/go-isatty v0.0.4
@@ -50,20 +48,20 @@ require (
github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d
github.com/onsi/ginkgo v1.14.0 // indirect
- github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2
+ github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c
github.com/pierrec/lz4 v1.0.2-0.20171218195038-2fcda4cb7018 // indirect
github.com/pierrec/xxHash v0.1.1 // indirect
github.com/pkg/errors v0.8.0
github.com/pmezard/go-difflib v1.0.0
github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 // indirect
- github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516
- github.com/sirupsen/logrus v1.6.0
+ github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e
+ github.com/sirupsen/logrus v1.8.1
github.com/spf13/afero v1.1.1
github.com/spf13/cobra v0.0.4-0.20180629152535-a114f312e075
github.com/spf13/pflag v1.0.1
- github.com/stretchr/testify v1.5.1
- github.com/tidwall/gjson v1.6.1
- github.com/tidwall/pretty v1.0.2
+ github.com/stretchr/testify v1.7.0
+ github.com/tidwall/gjson v1.7.4
+ github.com/tidwall/pretty v1.1.0
github.com/ugorji/go v1.1.7 // indirect
github.com/urfave/negroni v0.3.1-0.20180130044549-22c5532ea862
github.com/valyala/bytebufferpool v1.0.0 // indirect
diff --git a/go.sum b/go.sum
index b1ba2faa144..62422df62c0 100644
--- a/go.sum
+++ b/go.sum
@@ -1,23 +1,23 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4 h1:pSm8mp0T2OH2CPmPDPtwHPr3VAQaOwVF/JbllOPP4xA=
-github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
+github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28=
+github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/datadog-go v0.0.0-20180330214955-e67964b4021a h1:zpQSzEApXM0qkXcpdjeJ4OpnBWhD/X8zT/iT1wYLiVU=
github.com/DataDog/datadog-go v0.0.0-20180330214955-e67964b4021a/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/GeertJohan/go.rice v0.0.0-20170420135705-c02ca9a983da h1:UVU3a9pRUyLdnBtn60WjRl0s4SEyJc2ChCY56OAR6wI=
github.com/GeertJohan/go.rice v0.0.0-20170420135705-c02ca9a983da/go.mod h1:DgrzXonpdQbfN3uYaGz1EG4Sbhyum/MMIn6Cphlh2bw=
-github.com/PuerkitoBio/goquery v1.3.0 h1:2LzdaeRwZjIMW7iKEei51jiCPB33mou4AI7QCzS4NgE=
-github.com/PuerkitoBio/goquery v1.3.0/go.mod h1:T9ezsOHcCrDCgA8aF1Cqr3sSYbO/xgdy8/R/XiIMAhA=
+github.com/PuerkitoBio/goquery v1.6.1 h1:FgjbQZKl5HTmcn4sKBgvx8vv63nhyhIpv7lJpFGCWpk=
+github.com/PuerkitoBio/goquery v1.6.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
github.com/Shopify/sarama v1.16.0 h1:9pI5+ZN06jB3bu5kHXqzzaErMC5rimcIZBQL9IOiEQ0=
github.com/Shopify/sarama v1.16.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/Soontao/goHttpDigestClient v0.0.0-20170320082612-6d28bb1415c5 h1:k+1+doEm31k0rRjCjLnGG3YRkuO9ljaEyS2ajZd6GK8=
github.com/Soontao/goHttpDigestClient v0.0.0-20170320082612-6d28bb1415c5/go.mod h1:5Q4+CyR7+Q3VMG8f78ou+QSX/BNUNUx5W48eFRat8DQ=
-github.com/andybalholm/brotli v0.0.0-20190704151324-71eb68cc467c h1:pBKtfXLqKZ+GPHGjlBheGaXK2lddydUG3XhWGrYjxOA=
-github.com/andybalholm/brotli v0.0.0-20190704151324-71eb68cc467c/go.mod h1:+lx6/Aqd1kLJ1GQfkvOnaZ1WGmLpMpbprPuIOOZX30U=
-github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
-github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+github.com/andybalholm/brotli v1.0.1 h1:KqhlKozYbRtJvsPrrEeXcO+N2l6NYT5A2QAFmSULpEc=
+github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
+github.com/andybalholm/cascadia v1.1.0 h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5zzsLTo=
+github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -57,8 +57,6 @@ github.com/gin-gonic/gin v1.1.5-0.20170702092826-d459835d2b07 h1:Gm6bjW5SQ/sIib9
github.com/gin-gonic/gin v1.1.5-0.20170702092826-d459835d2b07/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
-github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721 h1:KRMr9A3qfbVM7iV/WcLY/rL5LICqwMHLhwRXKu99fXw=
-github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -105,12 +103,8 @@ github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALr
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.7.2 h1:liMOoeIvFpr9kEvalrZ7VVBA4wGf7zfOgwBjzz/5g2Y=
-github.com/klauspost/compress v1.7.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
-github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -122,8 +116,8 @@ github.com/labstack/echo v3.2.6+incompatible h1:28U/uXFFKBIP+VQIqq641LuYhMS7Br9Z
github.com/labstack/echo v3.2.6+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s=
github.com/labstack/gommon v0.2.2-0.20170925052817-57409ada9da0 h1:7AIW1qc9sYYTZLamTsRKSmVvJDXkZZrIWXHDK4Gq4X0=
github.com/labstack/gommon v0.2.2-0.20170925052817-57409ada9da0/go.mod h1:/tj9csK2iPSBvn+3NLM9e52usepMtrd5ilFYA+wQNJ4=
-github.com/mailru/easyjson v0.7.4-0.20200812114229-8ab5ff9cd8e4 h1:O+6Lz+j8x3wl6w4uwwNs62EFM0MfLSkRAqI9LUNlcq0=
-github.com/mailru/easyjson v0.7.4-0.20200812114229-8ab5ff9cd8e4/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/manyminds/api2go v0.0.0-20180125085803-95be7bd0455e h1:Jg9dDZCJYqtv9GHQ34yDBFgEJLl4Hi4VEkKUyzHXCV8=
github.com/manyminds/api2go v0.0.0-20180125085803-95be7bd0455e/go.mod h1:Z60vy0EZVSu0bOugCHdcN5ZxFMKSpjRgsnh0XKPFqqk=
github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
@@ -146,8 +140,8 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso=
-github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0=
+github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c h1:rp5dCmg/yLR3mgFuSOe4oEnDDmGLROTvMragMUXpTQw=
+github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c/go.mod h1:X07ZCGwUbLaax7L0S3Tw4hpejzu63ZrrQiUe6W0hcy0=
github.com/pierrec/lz4 v1.0.2-0.20171218195038-2fcda4cb7018 h1:z+gewaCBdXQRy5LXNOOKQU0gU93ro44uK92zqkB9KCc=
github.com/pierrec/lz4 v1.0.2-0.20171218195038-2fcda4cb7018/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/xxHash v0.1.1 h1:KP4NrV9023xp3M4FkTYfcXqWigsOCImL1ANJ7sh5vg4=
@@ -160,10 +154,10 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 h1:nkcn14uNmFEuGCb2mBZbBb24RdNRL08b/wb+xBOYpuk=
github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516 h1:ofR1ZdrNSkiWcMsRrubK9tb2/SlZVWttAfqUjJi6QYc=
-github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs=
-github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e h1:zWKUYT07mGmVBH+9UgnHXd/ekCK99C8EbDSAt5qsjXE=
+github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/spf13/afero v1.1.1 h1:Lt3ihYMlE+lreX1GS4Qw4ZsNpYQLxIXKBTEOXm3nt6I=
github.com/spf13/afero v1.1.1/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cobra v0.0.4-0.20180629152535-a114f312e075 h1:bfSj+hHTrZKbMJHEldrx659CxsvTHbAznKTfb42l2M4=
@@ -172,14 +166,16 @@ github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/tidwall/gjson v1.6.1 h1:LRbvNuNuvAiISWg6gxLEFuCe72UKy5hDqhxW/8183ws=
-github.com/tidwall/gjson v1.6.1/go.mod h1:BaHyNc5bjzYkPqgLq7mdVzeiRtULKULXLgZFKsxEHI0=
-github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
-github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
-github.com/tidwall/pretty v1.0.2 h1:Z7S3cePv9Jwm1KwS0513MRaoUe3S01WPbLNV40pwWZU=
-github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/tidwall/gjson v1.7.4 h1:19cchw8FOxkG5mdLRkGf9jqIqEyqdZhPqW60XfyFxk8=
+github.com/tidwall/gjson v1.7.4/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk=
+github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE=
+github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
+github.com/tidwall/pretty v1.1.0 h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8=
+github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
@@ -215,6 +211,7 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@@ -230,9 +227,9 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -305,6 +302,8 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
diff --git a/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go b/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go
index c6fbe444914..c8930680c5c 100644
--- a/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go
+++ b/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go
@@ -4,7 +4,9 @@ import (
"bytes"
"crypto/rand"
"encoding/binary"
+ "encoding/hex"
"errors"
+ "strings"
"time"
)
@@ -123,6 +125,59 @@ func ProcessChallenge(challengeMessageData []byte, user, password string) ([]byt
am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash,
cm.ServerChallenge[:], clientChallenge)
}
+ return am.MarshalBinary()
+}
+
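+// ProcessChallengeWithHash is a variant of ProcessChallenge that
+// authenticates with a precomputed NTLM hash instead of a clear-text
+// password. The hash may be a bare hex-encoded NT hash or an "LM:NT"
+// pair, in which case only the NT half after the colon is used.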
+func ProcessChallengeWithHash(challengeMessageData []byte, user, hash string) ([]byte, error) {
+ if user == "" && hash == "" {
+ return nil, errors.New("Anonymous authentication not supported")
+ }
+
+ var cm challengeMessage
+ if err := cm.UnmarshalBinary(challengeMessageData); err != nil {
+ return nil, err
+ }
+
+ if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) {
+ return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)")
+ }
+ if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) {
+ return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)")
+ }
+
+ am := authenicateMessage{
+ UserName: user,
+ TargetName: cm.TargetName,
+ NegotiateFlags: cm.NegotiateFlags,
+ }
+ timestamp := cm.TargetInfo[avIDMsvAvTimestamp]
+ if timestamp == nil { // no time sent, take current time
+ ft := uint64(time.Now().UnixNano()) / 100
+ ft += 116444736000000000 // offset between the Unix and Windows epochs, in 100ns ticks
+ timestamp = make([]byte, 8)
+ binary.LittleEndian.PutUint64(timestamp, ft)
+ }
+
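+ // Generate the 8-byte client challenge; as in ProcessChallenge, a read
+ // error from crypto/rand is silently ignored here.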
+ clientChallenge := make([]byte, 8)
+ rand.Reader.Read(clientChallenge)
+
+ hashParts := strings.Split(hash, ":")
+ if len(hashParts) > 1 {
+ hash = hashParts[1]
+ }
+ hashBytes, err := hex.DecodeString(hash)
+ if err != nil {
+ return nil, err
+ }
+ ntlmV2Hash := hmacMd5(hashBytes, toUnicode(strings.ToUpper(user)+cm.TargetName))
+
+ am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash,
+ cm.ServerChallenge[:], clientChallenge, timestamp, cm.TargetInfoRaw)
+
+ if cm.TargetInfoRaw == nil {
+ am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash,
+ cm.ServerChallenge[:], clientChallenge)
+ }
return am.MarshalBinary()
}
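A minimal, hypothetical sketch of driving this new entry point; the user name, hash value, and surrounding transport wiring are illustrative and not part of this diff:

```go
package ntlmauth

import (
	"encoding/base64"

	"github.com/Azure/go-ntlmssp"
)

// respond turns a decoded NTLM Type 2 (challenge) message into the
// base64 payload for the follow-up Authorization header.
func respond(challenge []byte) (string, error) {
	// Well-known empty-password LM:NT pair, used purely as a placeholder;
	// ProcessChallengeWithHash only consumes the NT half after the colon.
	hash := "aad3b435b51404eeaad3b435b51404ee:31d6cfe0d16ae931b73c59d7e0c089c0"
	msg, err := ntlmssp.ProcessChallengeWithHash(challenge, "user", hash)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(msg), nil
}
```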
diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiator.go b/vendor/github.com/Azure/go-ntlmssp/negotiator.go
index 6e304544bbe..7705eae4f87 100644
--- a/vendor/github.com/Azure/go-ntlmssp/negotiator.go
+++ b/vendor/github.com/Azure/go-ntlmssp/negotiator.go
@@ -137,7 +137,7 @@ func (l Negotiator) RoundTrip(req *http.Request) (res *http.Response, err error)
req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
- res, err = rt.RoundTrip(req)
+ return rt.RoundTrip(req)
}
return res, err
diff --git a/vendor/github.com/PuerkitoBio/goquery/README.md b/vendor/github.com/PuerkitoBio/goquery/README.md
index 40522062d0b..ac9b2c206ca 100644
--- a/vendor/github.com/PuerkitoBio/goquery/README.md
+++ b/vendor/github.com/PuerkitoBio/goquery/README.md
@@ -1,7 +1,6 @@
# goquery - a little like that j-thing, only in Go
[![build status](https://secure.travis-ci.org/PuerkitoBio/goquery.svg?branch=master)](http://travis-ci.org/PuerkitoBio/goquery) [![GoDoc](https://godoc.org/github.com/PuerkitoBio/goquery?status.png)](http://godoc.org/github.com/PuerkitoBio/goquery) [![Sourcegraph Badge](https://sourcegraph.com/github.com/PuerkitoBio/goquery/-/badge.svg)](https://sourcegraph.com/github.com/PuerkitoBio/goquery?badge)
-
goquery brings a syntax and a set of features similar to [jQuery][] to the [Go language][go]. It is based on Go's [net/html package][html] and the CSS Selector library [cascadia][]. Since the net/html parser returns nodes, and not a full-featured DOM tree, jQuery's stateful manipulation functions (like height(), css(), detach()) have been left off.
Also, because the net/html parser requires UTF-8 encoding, so does goquery: it is the caller's responsibility to ensure that the source document provides UTF-8 encoded HTML. See the [wiki][] for various options to do this.
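A hedged sketch of one such option, assuming the golang.org/x/net/html/charset package (not part of this diff) to transcode the response body to UTF-8 before handing it to goquery:

```go
package main

import (
	"log"
	"net/http"

	"github.com/PuerkitoBio/goquery"
	"golang.org/x/net/html/charset"
)

func main() {
	res, err := http.Get("http://example.com")
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()

	// Wrap the body in a reader that converts the declared charset to UTF-8.
	utf8Body, err := charset.NewReader(res.Body, res.Header.Get("Content-Type"))
	if err != nil {
		log.Fatal(err)
	}
	doc, err := goquery.NewDocumentFromReader(utf8Body)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(doc.Find("title").Text())
}
```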
@@ -15,6 +14,7 @@ Syntax-wise, it is as close as possible to jQuery, with the same function names
* [API](#api)
* [Examples](#examples)
* [Related Projects](#related-projects)
+* [Support](#support)
* [License](#license)
## Installation
@@ -37,6 +37,12 @@ Please note that because of the net/html dependency, goquery requires Go1.1+.
**Note that goquery's API is now stable, and will not break.**
+* **2021-01-11 (v1.6.1)** : Fix panic when calling `{Prepend,Append,Set}Html` on a `Selection` that contains non-Element nodes.
+* **2020-10-08 (v1.6.0)** : Parse html in context of the container node for all functions that deal with html strings (`AfterHtml`, `AppendHtml`, etc.). Thanks to [@thiemok][thiemok] and [@davidjwilkins][djw] for their work on this.
+* **2020-02-04 (v1.5.1)** : Update module dependencies.
+* **2018-11-15 (v1.5.0)** : Go module support (thanks @Zaba505).
+* **2018-06-07 (v1.4.1)** : Add `NewDocumentFromReader` examples.
+* **2018-03-24 (v1.4.0)** : Deprecate `NewDocument(url)` and `NewDocumentFromResponse(response)`.
* **2018-01-28 (v1.3.0)** : Add `ToEnd` constant to `Slice` until the end of the selection (thanks to @davidjwilkins for raising the issue).
* **2018-01-11 (v1.2.0)** : Add `AddBack*` and deprecate `AndSelf` (thanks to @davidjwilkins).
* **2017-02-12 (v1.1.0)** : Add `SetHtml` and `SetText` (thanks to @glebtv).
@@ -94,12 +100,24 @@ package main
import (
"fmt"
"log"
+ "net/http"
"github.com/PuerkitoBio/goquery"
)
func ExampleScrape() {
- doc, err := goquery.NewDocument("http://metalsucks.net")
+ // Request the HTML page.
+ res, err := http.Get("http://metalsucks.net")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ log.Fatalf("status code error: %d %s", res.StatusCode, res.Status)
+ }
+
+ // Load the HTML document
+ doc, err := goquery.NewDocumentFromReader(res.Body)
if err != nil {
log.Fatal(err)
}
@@ -123,7 +141,30 @@ func main() {
- [Goq][goq], an HTML deserialization and scraping library based on goquery and struct tags.
- [andybalholm/cascadia][cascadia], the CSS selector library used by goquery.
- [suntong/cascadia][cascadiacli], a command-line interface to the cascadia CSS selector library, useful to test selectors.
-- [asciimoo/colly](https://github.com/asciimoo/colly), a lightning fast and elegant Scraping Framework
+- [gocolly/colly](https://github.com/gocolly/colly), a lightning fast and elegant Scraping Framework
+- [gnulnx/goperf](https://github.com/gnulnx/goperf), a website performance test tool that also fetches static assets.
+- [MontFerret/ferret](https://github.com/MontFerret/ferret), declarative web scraping.
+- [tacusci/berrycms](https://github.com/tacusci/berrycms), a modern, simple-to-use CMS with easy-to-write plugins
+- [Dataflow kit](https://github.com/slotix/dataflowkit), Web Scraping framework for Gophers.
+- [Geziyor](https://github.com/geziyor/geziyor), a fast web crawling & scraping framework for Go. Supports JS rendering.
+- [Pagser](https://github.com/foolin/pagser), a simple, easy, extensible, configurable HTML parser to struct based on goquery and struct tags.
+- [stitcherd](https://github.com/vhodges/stitcherd), A server for doing server side includes using css selectors and DOM updates.
+
+## Support
+
+There are a number of ways you can support the project:
+
+* Use it, star it, build something with it, spread the word!
+ - If you do build something open-source or otherwise publicly-visible, let me know so I can add it to the [Related Projects](#related-projects) section!
+* Raise issues to improve the project (note: doc typos and clarifications are issues too!)
+ - Please search existing issues before opening a new one - it may have already been addressed.
+* Pull requests: please discuss new code in an issue first, unless the fix is really trivial.
+ - Make sure new code is tested.
+ - Be mindful of existing code - PRs that break existing code have a high probability of being declined, unless it fixes a serious issue.
+
+If you desperately want to send money my way, I have a BuyMeACoffee.com page:
+
## License
@@ -144,3 +185,5 @@ The [BSD 3-Clause license][bsd], the same as the [Go language][golic]. Cascadia'
[thatguystone]: https://github.com/thatguystone
[piotr]: https://github.com/piotrkowalczuk
[goq]: https://github.com/andrewstuart/goq
+[thiemok]: https://github.com/thiemok
+[djw]: https://github.com/davidjwilkins
diff --git a/vendor/github.com/PuerkitoBio/goquery/go.mod b/vendor/github.com/PuerkitoBio/goquery/go.mod
new file mode 100644
index 00000000000..95826ad366c
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/go.mod
@@ -0,0 +1,8 @@
+module github.com/PuerkitoBio/goquery
+
+require (
+ github.com/andybalholm/cascadia v1.1.0
+ golang.org/x/net v0.0.0-20200202094626-16171245cfb2
+)
+
+go 1.13
diff --git a/vendor/github.com/PuerkitoBio/goquery/go.sum b/vendor/github.com/PuerkitoBio/goquery/go.sum
new file mode 100644
index 00000000000..bc79107e59d
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/go.sum
@@ -0,0 +1,8 @@
+github.com/andybalholm/cascadia v1.1.0 h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5zzsLTo=
+github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/PuerkitoBio/goquery/manipulation.go b/vendor/github.com/PuerkitoBio/goquery/manipulation.go
index 34eb7570fbe..35febf11895 100644
--- a/vendor/github.com/PuerkitoBio/goquery/manipulation.go
+++ b/vendor/github.com/PuerkitoBio/goquery/manipulation.go
@@ -39,8 +39,15 @@ func (s *Selection) AfterSelection(sel *Selection) *Selection {
// AfterHtml parses the html and inserts it after the set of matched elements.
//
// This follows the same rules as Selection.Append.
-func (s *Selection) AfterHtml(html string) *Selection {
- return s.AfterNodes(parseHtml(html)...)
+func (s *Selection) AfterHtml(htmlStr string) *Selection {
+ return s.eachNodeHtml(htmlStr, true, func(node *html.Node, nodes []*html.Node) {
+ nextSibling := node.NextSibling
+ for _, n := range nodes {
+ if node.Parent != nil {
+ node.Parent.InsertBefore(n, nextSibling)
+ }
+ }
+ })
}
// AfterNodes inserts the nodes after each element in the set of matched elements.
@@ -85,8 +92,12 @@ func (s *Selection) AppendSelection(sel *Selection) *Selection {
}
// AppendHtml parses the html and appends it to the set of matched elements.
-func (s *Selection) AppendHtml(html string) *Selection {
- return s.AppendNodes(parseHtml(html)...)
+func (s *Selection) AppendHtml(htmlStr string) *Selection {
+ return s.eachNodeHtml(htmlStr, false, func(node *html.Node, nodes []*html.Node) {
+ for _, n := range nodes {
+ node.AppendChild(n)
+ }
+ })
}
// AppendNodes appends the specified nodes to each node in the set of matched elements.
@@ -123,8 +134,14 @@ func (s *Selection) BeforeSelection(sel *Selection) *Selection {
// BeforeHtml parses the html and inserts it before the set of matched elements.
//
// This follows the same rules as Selection.Append.
-func (s *Selection) BeforeHtml(html string) *Selection {
- return s.BeforeNodes(parseHtml(html)...)
+func (s *Selection) BeforeHtml(htmlStr string) *Selection {
+ return s.eachNodeHtml(htmlStr, true, func(node *html.Node, nodes []*html.Node) {
+ for _, n := range nodes {
+ if node.Parent != nil {
+ node.Parent.InsertBefore(n, node)
+ }
+ }
+ })
}
// BeforeNodes inserts the nodes before each element in the set of matched elements.
@@ -184,8 +201,13 @@ func (s *Selection) PrependSelection(sel *Selection) *Selection {
}
// PrependHtml parses the html and prepends it to the set of matched elements.
-func (s *Selection) PrependHtml(html string) *Selection {
- return s.PrependNodes(parseHtml(html)...)
+func (s *Selection) PrependHtml(htmlStr string) *Selection {
+ return s.eachNodeHtml(htmlStr, false, func(node *html.Node, nodes []*html.Node) {
+ firstChild := node.FirstChild
+ for _, n := range nodes {
+ node.InsertBefore(n, firstChild)
+ }
+ })
}
// PrependNodes prepends the specified nodes to each node in the set of
@@ -212,14 +234,19 @@ func (s *Selection) Remove() *Selection {
return s
}
-// RemoveFiltered removes the set of matched elements by selector.
-// It returns the Selection of removed nodes.
+// RemoveFiltered removes from the current set of matched elements those that
+// match the selector filter. It returns the Selection of removed nodes.
+//
+// For example if the selection s contains "<h1>", "<h2>" and "<h3>"
+// and s.RemoveFiltered("h2") is called, only the "<h2>" node is removed
+// (and returned), while "<h1>" and "<h3>" are kept in the document.
func (s *Selection) RemoveFiltered(selector string) *Selection {
return s.RemoveMatcher(compileMatcher(selector))
}
-// RemoveMatcher removes the set of matched elements.
-// It returns the Selection of removed nodes.
+// RemoveMatcher removes from the current set of matched elements those that
+// match the Matcher filter. It returns the Selection of removed nodes.
+// See RemoveFiltered for additional information.
func (s *Selection) RemoveMatcher(m Matcher) *Selection {
return s.FilterMatcher(m).Remove()
}
@@ -256,8 +283,16 @@ func (s *Selection) ReplaceWithSelection(sel *Selection) *Selection {
// It returns the removed elements.
//
// This follows the same rules as Selection.Append.
-func (s *Selection) ReplaceWithHtml(html string) *Selection {
- return s.ReplaceWithNodes(parseHtml(html)...)
+func (s *Selection) ReplaceWithHtml(htmlStr string) *Selection {
+ s.eachNodeHtml(htmlStr, true, func(node *html.Node, nodes []*html.Node) {
+ nextSibling := node.NextSibling
+ for _, n := range nodes {
+ if node.Parent != nil {
+ node.Parent.InsertBefore(n, nextSibling)
+ }
+ }
+ })
+ return s.Remove()
}
// ReplaceWithNodes replaces each element in the set of matched elements with
@@ -272,8 +307,17 @@ func (s *Selection) ReplaceWithNodes(ns ...*html.Node) *Selection {
// SetHtml sets the html content of each element in the selection to
// specified html string.
-func (s *Selection) SetHtml(html string) *Selection {
- return setHtmlNodes(s, parseHtml(html)...)
+func (s *Selection) SetHtml(htmlStr string) *Selection {
+ for _, context := range s.Nodes {
+ for c := context.FirstChild; c != nil; c = context.FirstChild {
+ context.RemoveChild(c)
+ }
+ }
+ return s.eachNodeHtml(htmlStr, false, func(node *html.Node, nodes []*html.Node) {
+ for _, n := range nodes {
+ node.AppendChild(n)
+ }
+ })
}
// SetText sets the content of each element in the selection to specified content.
@@ -329,8 +373,23 @@ func (s *Selection) WrapSelection(sel *Selection) *Selection {
// most child of the given HTML.
//
// It returns the original set of elements.
-func (s *Selection) WrapHtml(html string) *Selection {
- return s.wrapNodes(parseHtml(html)...)
+func (s *Selection) WrapHtml(htmlStr string) *Selection {
+ nodesMap := make(map[string][]*html.Node)
+ for _, context := range s.Nodes {
+ var parent *html.Node
+ if context.Parent != nil {
+ parent = context.Parent
+ } else {
+ parent = &html.Node{Type: html.ElementNode}
+ }
+ nodes, found := nodesMap[nodeName(parent)]
+ if !found {
+ nodes = parseHtmlWithContext(htmlStr, parent)
+ nodesMap[nodeName(parent)] = nodes
+ }
+ newSingleSelection(context, s.document).wrapAllNodes(cloneNodes(nodes)...)
+ }
+ return s
}
// WrapNode wraps each element in the set of matched elements inside the inner-
@@ -382,8 +441,18 @@ func (s *Selection) WrapAllSelection(sel *Selection) *Selection {
// document.
//
// It returns the original set of elements.
-func (s *Selection) WrapAllHtml(html string) *Selection {
- return s.wrapAllNodes(parseHtml(html)...)
+func (s *Selection) WrapAllHtml(htmlStr string) *Selection {
+ var context *html.Node
+ var nodes []*html.Node
+ if len(s.Nodes) > 0 {
+ context = s.Nodes[0]
+ if context.Parent != nil {
+ nodes = parseHtmlWithContext(htmlStr, context)
+ } else {
+ nodes = parseHtml(htmlStr)
+ }
+ }
+ return s.wrapAllNodes(nodes...)
}
func (s *Selection) wrapAllNodes(ns ...*html.Node) *Selection {
@@ -452,8 +521,17 @@ func (s *Selection) WrapInnerSelection(sel *Selection) *Selection {
// cloned before being inserted into the document.
//
// It returns the original set of elements.
-func (s *Selection) WrapInnerHtml(html string) *Selection {
- return s.wrapInnerNodes(parseHtml(html)...)
+func (s *Selection) WrapInnerHtml(htmlStr string) *Selection {
+ nodesMap := make(map[string][]*html.Node)
+ for _, context := range s.Nodes {
+ nodes, found := nodesMap[nodeName(context)]
+ if !found {
+ nodes = parseHtmlWithContext(htmlStr, context)
+ nodesMap[nodeName(context)] = nodes
+ }
+ newSingleSelection(context, s.document).wrapInnerNodes(cloneNodes(nodes)...)
+ }
+ return s
}
// WrapInnerNode wraps an HTML structure, matched by the given selector, around
@@ -493,16 +571,14 @@ func parseHtml(h string) []*html.Node {
return nodes
}
-func setHtmlNodes(s *Selection, ns ...*html.Node) *Selection {
- for _, n := range s.Nodes {
- for c := n.FirstChild; c != nil; c = n.FirstChild {
- n.RemoveChild(c)
- }
- for _, c := range ns {
- n.AppendChild(cloneNode(c))
- }
+func parseHtmlWithContext(h string, context *html.Node) []*html.Node {
+ // Errors are only returned when the io.Reader returns any error besides
+ // EOF, but strings.Reader never will
+ nodes, err := html.ParseFragment(strings.NewReader(h), context)
+ if err != nil {
+ panic("goquery: failed to parse HTML: " + err.Error())
}
- return s
+ return nodes
}
// Get the first child that is an ElementNode
@@ -572,3 +648,32 @@ func (s *Selection) manipulateNodes(ns []*html.Node, reverse bool,
return s
}
+
+// eachNodeHtml parses the given html string and inserts the resulting nodes in the dom with the mergeFn.
+// The parsed nodes are inserted for each element of the selection.
+// isParent can be used to indicate that the elements of the selection should be treated as the parent for the parsed html.
+// A cache is used to avoid parsing the html multiple times should the elements of the selection result in the same context.
+func (s *Selection) eachNodeHtml(htmlStr string, isParent bool, mergeFn func(n *html.Node, nodes []*html.Node)) *Selection {
+ // cache to avoid parsing the html for the same context multiple times
+ nodeCache := make(map[string][]*html.Node)
+ var context *html.Node
+ for _, n := range s.Nodes {
+ if isParent {
+ context = n.Parent
+ } else {
+ if n.Type != html.ElementNode {
+ continue
+ }
+ context = n
+ }
+ if context != nil {
+ nodes, found := nodeCache[nodeName(context)]
+ if !found {
+ nodes = parseHtmlWithContext(htmlStr, context)
+ nodeCache[nodeName(context)] = nodes
+ }
+ mergeFn(n, cloneNodes(nodes))
+ }
+ }
+ return s
+}
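A small sketch (hypothetical document and selectors) of the behavior this change enables: fragments such as table rows are now parsed in the context of their container node instead of being stripped by a standalone full-document parse:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/PuerkitoBio/goquery"
)

func main() {
	doc, err := goquery.NewDocumentFromReader(
		strings.NewReader(`<table><tbody id="t"></tbody></table>`))
	if err != nil {
		log.Fatal(err)
	}
	// With context-aware parsing, the <tr> fragment is parsed as a child
	// of the <tbody> element rather than being dropped by the HTML parser.
	doc.Find("#t").AppendHtml("<tr><td>cell</td></tr>")
	html, _ := doc.Find("table").Html()
	fmt.Println(html)
}
```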
diff --git a/vendor/github.com/PuerkitoBio/goquery/type.go b/vendor/github.com/PuerkitoBio/goquery/type.go
index e2169fa30d9..6ad51dbc538 100644
--- a/vendor/github.com/PuerkitoBio/goquery/type.go
+++ b/vendor/github.com/PuerkitoBio/goquery/type.go
@@ -31,6 +31,10 @@ func NewDocumentFromNode(root *html.Node) *Document {
// NewDocument is a Document constructor that takes a string URL as argument.
// It loads the specified document, parses it, and stores the root Document
// node, ready to be manipulated.
+//
+// Deprecated: Use the net/http standard library package to make the request
+// and validate the response before calling goquery.NewDocumentFromReader
+// with the response's body.
func NewDocument(url string) (*Document, error) {
// Load the URL
res, e := http.Get(url)
@@ -40,10 +44,10 @@ func NewDocument(url string) (*Document, error) {
return NewDocumentFromResponse(res)
}
-// NewDocumentFromReader returns a Document from a generic reader.
+// NewDocumentFromReader returns a Document from an io.Reader.
// It returns an error as second value if the reader's data cannot be parsed
-// as html. It does *not* check if the reader is also an io.Closer, so the
-// provided reader is never closed by this call, it is the responsibility
+// as html. It does not check if the reader is also an io.Closer, the
+// provided reader is never closed by this call. It is the responsibility
// of the caller to close it if required.
func NewDocumentFromReader(r io.Reader) (*Document, error) {
root, e := html.Parse(r)
@@ -56,6 +60,8 @@ func NewDocumentFromReader(r io.Reader) (*Document, error) {
// NewDocumentFromResponse is another Document constructor that takes an http response as argument.
// It loads the specified response's document, parses it, and stores the root Document
// node, ready to be manipulated. The response's body is closed on return.
+//
+// Deprecated: Use goquery.NewDocumentFromReader with the response's body.
func NewDocumentFromResponse(res *http.Response) (*Document, error) {
if res == nil {
return nil, errors.New("Response is nil")
diff --git a/vendor/github.com/PuerkitoBio/goquery/utilities.go b/vendor/github.com/PuerkitoBio/goquery/utilities.go
index b4c061a4d43..3e11b1db1b4 100644
--- a/vendor/github.com/PuerkitoBio/goquery/utilities.go
+++ b/vendor/github.com/PuerkitoBio/goquery/utilities.go
@@ -36,12 +36,22 @@ func NodeName(s *Selection) string {
if s.Length() == 0 {
return ""
}
- switch n := s.Get(0); n.Type {
+ return nodeName(s.Get(0))
+}
+
+// nodeName returns the node name of the given html node.
+// See NodeName for additional details on behaviour.
+func nodeName(node *html.Node) string {
+ if node == nil {
+ return ""
+ }
+
+ switch node.Type {
case html.ElementNode, html.DoctypeNode:
- return n.Data
+ return node.Data
default:
- if n.Type >= 0 && int(n.Type) < len(nodeNames) {
- return nodeNames[n.Type]
+ if node.Type >= 0 && int(node.Type) < len(nodeNames) {
+ return nodeNames[node.Type]
}
return ""
}
diff --git a/vendor/github.com/andybalholm/brotli/README.md b/vendor/github.com/andybalholm/brotli/README.md
index 02d115e55a3..1ea7fdb759d 100644
--- a/vendor/github.com/andybalholm/brotli/README.md
+++ b/vendor/github.com/andybalholm/brotli/README.md
@@ -3,3 +3,5 @@ It was translated from the reference implementation (https://github.com/google/b
with the `c2go` tool at https://github.com/andybalholm/c2go.
I am using it in production with https://github.com/andybalholm/redwood.
+
+API documentation is found at https://pkg.go.dev/github.com/andybalholm/brotli?tab=doc.
diff --git a/vendor/github.com/andybalholm/brotli/backward_references.go b/vendor/github.com/andybalholm/brotli/backward_references.go
index 1642e471ad0..008c054d1c0 100644
--- a/vendor/github.com/andybalholm/brotli/backward_references.go
+++ b/vendor/github.com/andybalholm/brotli/backward_references.go
@@ -1,5 +1,9 @@
package brotli
+import (
+ "sync"
+)
+
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
@@ -31,13 +35,10 @@ func computeDistanceCode(distance uint, max_distance uint, dist_cache []int) uin
return distance + numDistanceShortCodes - 1
}
-/* "commands" points to the next output command to write to, "*num_commands" is
- initially the total amount of commands output by previous
- CreateBackwardReferences calls, and must be incremented by the amount written
- by this call. */
-func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands []command, num_commands *uint, num_literals *uint) {
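+// hasherSearchResultPool recycles hasherSearchResult values across calls
+// to createBackwardReferences; both scratch results are returned to the
+// pool at the end of each call.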
+var hasherSearchResultPool sync.Pool
+
+func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
- var orig_commands []command = commands
var insert_length uint = *last_insert_len
var pos_end uint = position + num_bytes
var store_end uint
@@ -57,8 +58,14 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
/* Minimum score to accept a backward reference. */
hasher.PrepareDistanceCache(dist_cache)
- var sr2 hasherSearchResult
- var sr hasherSearchResult
+ sr2, _ := hasherSearchResultPool.Get().(*hasherSearchResult)
+ if sr2 == nil {
+ sr2 = &hasherSearchResult{}
+ }
+ sr, _ := hasherSearchResultPool.Get().(*hasherSearchResult)
+ if sr == nil {
+ sr = &hasherSearchResult{}
+ }
for position+hasher.HashTypeLength() < pos_end {
var max_length uint = pos_end - position
@@ -67,7 +74,7 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
sr.len_code_delta = 0
sr.distance = 0
sr.score = kMinScore
- hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, &sr)
+ hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, sr)
if sr.score > kMinScore {
/* Found a match. Let's look for something even better ahead. */
var delayed_backward_references_in_row int = 0
@@ -83,14 +90,14 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
sr2.distance = 0
sr2.score = kMinScore
max_distance = brotli_min_size_t(position+1, max_backward_limit)
- hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, &sr2)
+ hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, sr2)
if sr2.score >= sr.score+cost_diff_lazy {
/* Ok, let's just write one byte for now and start a match from the
next byte. */
position++
insert_length++
- sr = sr2
+ *sr = *sr2
delayed_backward_references_in_row++
if delayed_backward_references_in_row < 4 && position+hasher.HashTypeLength() < pos_end {
continue
@@ -114,8 +121,7 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
hasher.PrepareDistanceCache(dist_cache)
}
- initCommand(&commands[0], ¶ms.dist, insert_length, sr.len, sr.len_code_delta, distance_code)
- commands = commands[1:]
+ *commands = append(*commands, makeCommand(¶ms.dist, insert_length, sr.len, sr.len_code_delta, distance_code))
}
*num_literals += insert_length
@@ -173,5 +179,7 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
insert_length += pos_end - position
*last_insert_len = insert_length
- *num_commands += uint(-cap(commands) + cap(orig_commands))
+
+ hasherSearchResultPool.Put(sr)
+ hasherSearchResultPool.Put(sr2)
}
diff --git a/vendor/github.com/andybalholm/brotli/backward_references_hq.go b/vendor/github.com/andybalholm/brotli/backward_references_hq.go
index 5eac736133f..21629c1cdb7 100644
--- a/vendor/github.com/andybalholm/brotli/backward_references_hq.go
+++ b/vendor/github.com/andybalholm/brotli/backward_references_hq.go
@@ -123,14 +123,13 @@ func setCost(histogram []uint32, histogram_size uint, literal_histogram bool, co
}
}
-func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint, commands []command, num_commands uint, last_insert_len uint) {
+func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint, commands []command, last_insert_len uint) {
var histogram_literal [numLiteralSymbols]uint32
var histogram_cmd [numCommandSymbols]uint32
var histogram_dist [maxEffectiveDistanceAlphabetSize]uint32
var cost_literal [numLiteralSymbols]float32
var pos uint = position - last_insert_len
var min_cost_cmd float32 = kInfinity
- var i uint
var cost_cmd []float32 = self.cost_cmd_[:]
var literal_costs []float32
@@ -138,7 +137,7 @@ func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbu
histogram_cmd = [numCommandSymbols]uint32{}
histogram_dist = [maxEffectiveDistanceAlphabetSize]uint32{}
- for i = 0; i < num_commands; i++ {
+ for i := range commands {
var inslength uint = uint(commands[i].insert_len_)
var copylength uint = uint(commandCopyLen(&commands[i]))
var distcode uint = uint(commands[i].dist_prefix_) & 0x3FF
@@ -161,7 +160,7 @@ func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbu
setCost(histogram_cmd[:], numCommandSymbols, false, cost_cmd)
setCost(histogram_dist[:], uint(self.distance_histogram_size), false, self.cost_dist_)
- for i = 0; i < numCommandSymbols; i++ {
+ for i := 0; i < numCommandSymbols; i++ {
min_cost_cmd = brotli_min_float(min_cost_cmd, cost_cmd[i])
}
@@ -169,10 +168,10 @@ func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbu
{
literal_costs = self.literal_costs_
var literal_carry float32 = 0.0
- var num_bytes uint = self.num_bytes_
+ num_bytes := int(self.num_bytes_)
literal_costs[0] = 0.0
- for i = 0; i < num_bytes; i++ {
- literal_carry += cost_literal[ringbuffer[(position+i)&ringbuffer_mask]]
+ for i := 0; i < num_bytes; i++ {
+ literal_carry += cost_literal[ringbuffer[(position+uint(i))&ringbuffer_mask]]
literal_costs[i+1] = literal_costs[i] + literal_carry
literal_carry -= literal_costs[i+1] - literal_costs[i]
}
@@ -502,7 +501,9 @@ func updateNodes(num_bytes uint, block_start uint, pos uint, ringbuffer []byte,
var cost float32 = dist_cost + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode)
if cost < nodes[pos+len].u.cost {
updateZopfliNode(nodes, pos, start, uint(len), len_code, dist, 0, cost)
- result = brotli_max_size_t(result, uint(len))
+ if len > result {
+ result = len
+ }
}
}
}
@@ -530,7 +531,7 @@ func computeShortestPathFromNodes(num_bytes uint, nodes []zopfliNode) uint {
}
/* REQUIRES: nodes != NULL and len(nodes) >= num_bytes + 1 */
-func zopfliCreateCommands(num_bytes uint, block_start uint, nodes []zopfliNode, dist_cache []int, last_insert_len *uint, params *encoderParams, commands []command, num_literals *uint) {
+func zopfliCreateCommands(num_bytes uint, block_start uint, nodes []zopfliNode, dist_cache []int, last_insert_len *uint, params *encoderParams, commands *[]command, num_literals *uint) {
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
var pos uint = 0
var offset uint32 = nodes[0].u.next
@@ -552,7 +553,7 @@ func zopfliCreateCommands(num_bytes uint, block_start uint, nodes []zopfliNode,
var max_distance uint = brotli_min_size_t(block_start+pos, max_backward_limit)
var is_dictionary bool = (distance > max_distance+gap)
var dist_code uint = uint(zopfliNodeDistanceCode(next))
- initCommand(&commands[i], ¶ms.dist, insert_length, copy_length, int(len_code)-int(copy_length), dist_code)
+ *commands = append(*commands, makeCommand(¶ms.dist, insert_length, copy_length, int(len_code)-int(copy_length), dist_code))
if !is_dictionary && dist_code > 0 {
dist_cache[3] = dist_cache[2]
@@ -679,16 +680,16 @@ func zopfliComputeShortestPath(num_bytes uint, position uint, ringbuffer []byte,
return computeShortestPathFromNodes(num_bytes, nodes)
}
-func createZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher *h10, dist_cache []int, last_insert_len *uint, commands []command, num_commands *uint, num_literals *uint) {
+func createZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher *h10, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
var nodes []zopfliNode
nodes = make([]zopfliNode, (num_bytes + 1))
initZopfliNodes(nodes, num_bytes+1)
- *num_commands += zopfliComputeShortestPath(num_bytes, position, ringbuffer, ringbuffer_mask, params, dist_cache, hasher, nodes)
+ zopfliComputeShortestPath(num_bytes, position, ringbuffer, ringbuffer_mask, params, dist_cache, hasher, nodes)
zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals)
nodes = nil
}
-func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands []command, num_commands *uint, num_literals *uint) {
+func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
var num_matches []uint32 = make([]uint32, num_bytes)
var matches_size uint = 4 * num_bytes
@@ -703,7 +704,7 @@ func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer
var orig_num_literals uint
var orig_last_insert_len uint
var orig_dist_cache [4]int
- var orig_num_commands uint
+ var orig_num_commands int
var model zopfliCostModel
var nodes []zopfliNode
var matches []backwardMatch = make([]backwardMatch, matches_size)
@@ -769,7 +770,7 @@ func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer
orig_num_literals = *num_literals
orig_last_insert_len = *last_insert_len
copy(orig_dist_cache[:], dist_cache[:4])
- orig_num_commands = *num_commands
+ orig_num_commands = len(*commands)
nodes = make([]zopfliNode, (num_bytes + 1))
initZopfliCostModel(&model, ¶ms.dist, num_bytes)
for i = 0; i < 2; i++ {
@@ -777,14 +778,14 @@ func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer
if i == 0 {
zopfliCostModelSetFromLiteralCosts(&model, position, ringbuffer, ringbuffer_mask)
} else {
- zopfliCostModelSetFromCommands(&model, position, ringbuffer, ringbuffer_mask, commands, *num_commands-orig_num_commands, orig_last_insert_len)
+ zopfliCostModelSetFromCommands(&model, position, ringbuffer, ringbuffer_mask, (*commands)[orig_num_commands:], orig_last_insert_len)
}
- *num_commands = orig_num_commands
+ *commands = (*commands)[:orig_num_commands]
*num_literals = orig_num_literals
*last_insert_len = orig_last_insert_len
copy(dist_cache, orig_dist_cache[:4])
- *num_commands += zopfliIterate(num_bytes, position, ringbuffer, ringbuffer_mask, params, gap, dist_cache, &model, num_matches, matches, nodes)
+ zopfliIterate(num_bytes, position, ringbuffer, ringbuffer_mask, params, gap, dist_cache, &model, num_matches, matches, nodes)
zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals)
}
diff --git a/vendor/github.com/andybalholm/brotli/block_splitter.go b/vendor/github.com/andybalholm/brotli/block_splitter.go
index 2ccff45a3ea..978a1314748 100644
--- a/vendor/github.com/andybalholm/brotli/block_splitter.go
+++ b/vendor/github.com/andybalholm/brotli/block_splitter.go
@@ -33,23 +33,21 @@ const (
kMinItersForRefining uint = 100
)
-func countLiterals(cmds []command, num_commands uint) uint {
+func countLiterals(cmds []command) uint {
var total_length uint = 0
/* Count how many we have. */
- var i uint
- for i = 0; i < num_commands; i++ {
+ for i := range cmds {
total_length += uint(cmds[i].insert_len_)
}
return total_length
}
-func copyLiteralsToByteArray(cmds []command, num_commands uint, data []byte, offset uint, mask uint, literals []byte) {
+func copyLiteralsToByteArray(cmds []command, data []byte, offset uint, mask uint, literals []byte) {
var pos uint = 0
var from_pos uint = offset & mask
- var i uint
- for i = 0; i < num_commands; i++ {
+ for i := range cmds {
var insert_len uint = uint(cmds[i].insert_len_)
if from_pos+insert_len > mask {
var head_size uint = mask + 1 - from_pos
@@ -90,24 +88,19 @@ const clustersPerBatch = 16
func initBlockSplit(self *blockSplit) {
self.num_types = 0
self.num_blocks = 0
- self.types = nil
- self.lengths = nil
+ self.types = self.types[:0]
+ self.lengths = self.lengths[:0]
self.types_alloc_size = 0
self.lengths_alloc_size = 0
}
-func destroyBlockSplit(self *blockSplit) {
- self.types = nil
- self.lengths = nil
-}
-
-func splitBlock(cmds []command, num_commands uint, data []byte, pos uint, mask uint, params *encoderParams, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit) {
+func splitBlock(cmds []command, data []byte, pos uint, mask uint, params *encoderParams, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit) {
{
- var literals_count uint = countLiterals(cmds, num_commands)
+ var literals_count uint = countLiterals(cmds)
var literals []byte = make([]byte, literals_count)
/* Create a continuous array of literals. */
- copyLiteralsToByteArray(cmds, num_commands, data, pos, mask, literals)
+ copyLiteralsToByteArray(cmds, data, pos, mask, literals)
/* Create the block split on the array of literals.
Literal histograms have alphabet size 256. */
@@ -116,28 +109,26 @@ func splitBlock(cmds []command, num_commands uint, data []byte, pos uint, mask u
literals = nil
}
{
- var insert_and_copy_codes []uint16 = make([]uint16, num_commands)
+ var insert_and_copy_codes []uint16 = make([]uint16, len(cmds))
/* Compute prefix codes for commands. */
- var i uint
- for i = 0; i < num_commands; i++ {
+ for i := range cmds {
insert_and_copy_codes[i] = cmds[i].cmd_prefix_
}
/* Create the block split on the array of command prefixes. */
- splitByteVectorCommand(insert_and_copy_codes, num_commands, kSymbolsPerCommandHistogram, kMaxCommandHistograms, kCommandStrideLength, kCommandBlockSwitchCost, params, insert_and_copy_split)
+ splitByteVectorCommand(insert_and_copy_codes, kSymbolsPerCommandHistogram, kMaxCommandHistograms, kCommandStrideLength, kCommandBlockSwitchCost, params, insert_and_copy_split)
/* TODO: reuse for distances? */
insert_and_copy_codes = nil
}
{
- var distance_prefixes []uint16 = make([]uint16, num_commands)
+ var distance_prefixes []uint16 = make([]uint16, len(cmds))
var j uint = 0
/* Create a continuous array of distance prefixes. */
- var i uint
- for i = 0; i < num_commands; i++ {
+ for i := range cmds {
var cmd *command = &cmds[i]
if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 {
distance_prefixes[j] = cmd.dist_prefix_ & 0x3FF
diff --git a/vendor/github.com/andybalholm/brotli/block_splitter_command.go b/vendor/github.com/andybalholm/brotli/block_splitter_command.go
index e505fe13e73..9dec13e4d90 100644
--- a/vendor/github.com/andybalholm/brotli/block_splitter_command.go
+++ b/vendor/github.com/andybalholm/brotli/block_splitter_command.go
@@ -372,7 +372,8 @@ func clusterBlocksCommand(data []uint16, length uint, num_blocks uint, block_ids
histogram_symbols = nil
}
-func splitByteVectorCommand(data []uint16, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) {
+func splitByteVectorCommand(data []uint16, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) {
+ length := uint(len(data))
var data_size uint = histogramDataSizeCommand()
var num_histograms uint = length/literals_per_histogram + 1
var histograms []histogramCommand
diff --git a/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go b/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go
index 395f6049043..2470f84e4b0 100644
--- a/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go
+++ b/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go
@@ -1,6 +1,9 @@
package brotli
-import "math"
+import (
+ "math"
+ "sync"
+)
const maxHuffmanTreeSize = (2*numCommandSymbols + 1)
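This import hunk signals the two themes of the rest of the file: `sync` for the object pools introduced below, and, implicitly, the new bitWriter that replaces the `storage_ix *uint, storage []byte` pair threaded through every store function. The writer itself is defined outside this excerpt; what follows is only a sketch of the contract the `bw.writeBits` calls below rely on, assuming LSB-first bit packing as the Brotli format requires (the type and field names here are hypothetical):

	package main

	import "fmt"

	type bitWriterSketch struct {
		dst   []byte
		cache uint64 // pending bits, LSB-first
		n     uint   // number of valid bits in cache
	}

	func (w *bitWriterSketch) writeBits(nb uint, b uint64) {
		w.cache |= b << w.n
		w.n += nb
		for w.n >= 8 {
			w.dst = append(w.dst, byte(w.cache))
			w.cache >>= 8
			w.n -= 8
		}
	}

	func (w *bitWriterSketch) writeSingleBit(bit bool) {
		if bit {
			w.writeBits(1, 1)
		} else {
			w.writeBits(1, 0)
		}
	}

	func (w *bitWriterSketch) jumpToByteBoundary() {
		if w.n > 0 {
			w.dst = append(w.dst, byte(w.cache)) // zero-pad the partial byte
			w.cache = 0
			w.n = 0
		}
	}

	func main() {
		var w bitWriterSketch
		w.writeBits(1, 1) // e.g. an ISLAST bit
		w.writeBits(2, 3) // e.g. MNIBBLES
		w.jumpToByteBoundary()
		fmt.Printf("%08b\n", w.dst) // [00000111]
	}

Owning the buffer in one place is also what makes the later writeBytes, getPos, and rewind calls possible without the removed updateBits/rewindBitPosition1 helpers.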
@@ -118,7 +121,7 @@ func encodeMlen(length uint, bits *uint64, numbits *uint, nibblesbits *uint64) {
*bits = uint64(length) - 1
}
-func storeCommandExtra(cmd *command, storage_ix *uint, storage []byte) {
+func storeCommandExtra(cmd *command, bw *bitWriter) {
var copylen_code uint32 = commandCopyLenCode(cmd)
var inscode uint16 = getInsertLengthCode(uint(cmd.insert_len_))
var copycode uint16 = getCopyLengthCode(uint(copylen_code))
@@ -126,7 +129,7 @@ func storeCommandExtra(cmd *command, storage_ix *uint, storage []byte) {
var insextraval uint64 = uint64(cmd.insert_len_) - uint64(getInsertBase(inscode))
var copyextraval uint64 = uint64(copylen_code) - uint64(getCopyBase(copycode))
var bits uint64 = copyextraval<<insnumextra | insextraval
- writeBits(uint(insnumextra+copyLengthExtraBits(copycode)), bits, storage_ix, storage)
+ bw.writeBits(uint(insnumextra+copyLengthExtraBits(copycode)), bits)
}
/* Stores the compressed meta-block header.
REQUIRES: length > 0
REQUIRES: length <= (1 << 24) */
-func storeCompressedMetaBlockHeader(is_final_block bool, length uint, storage_ix *uint, storage []byte) {
+func storeCompressedMetaBlockHeader(is_final_block bool, length uint, bw *bitWriter) {
var lenbits uint64
var nlenbits uint
var nibblesbits uint64
@@ -166,41 +169,41 @@ func storeCompressedMetaBlockHeader(is_final_block bool, length uint, storage_ix
}
/* Write ISLAST bit. */
- writeBits(1, is_final, storage_ix, storage)
+ bw.writeBits(1, is_final)
/* Write ISEMPTY bit. */
if is_final_block {
- writeBits(1, 0, storage_ix, storage)
+ bw.writeBits(1, 0)
}
encodeMlen(length, &lenbits, &nlenbits, &nibblesbits)
- writeBits(2, nibblesbits, storage_ix, storage)
- writeBits(nlenbits, lenbits, storage_ix, storage)
+ bw.writeBits(2, nibblesbits)
+ bw.writeBits(nlenbits, lenbits)
if !is_final_block {
/* Write ISUNCOMPRESSED bit. */
- writeBits(1, 0, storage_ix, storage)
+ bw.writeBits(1, 0)
}
}
/* Stores the uncompressed meta-block header.
REQUIRES: length > 0
REQUIRES: length <= (1 << 24) */
-func storeUncompressedMetaBlockHeader(length uint, storage_ix *uint, storage []byte) {
+func storeUncompressedMetaBlockHeader(length uint, bw *bitWriter) {
var lenbits uint64
var nlenbits uint
var nibblesbits uint64
/* Write ISLAST bit.
Uncompressed block cannot be the last one, so set to 0. */
- writeBits(1, 0, storage_ix, storage)
+ bw.writeBits(1, 0)
encodeMlen(length, &lenbits, &nlenbits, &nibblesbits)
- writeBits(2, nibblesbits, storage_ix, storage)
- writeBits(nlenbits, lenbits, storage_ix, storage)
+ bw.writeBits(2, nibblesbits)
+ bw.writeBits(nlenbits, lenbits)
/* Write ISUNCOMPRESSED bit. */
- writeBits(1, 1, storage_ix, storage)
+ bw.writeBits(1, 1)
}
var storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15}
@@ -208,7 +211,7 @@ var storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder = [codeLengthCodes]byte
var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols = [6]byte{0, 7, 3, 2, 1, 15}
var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths = [6]byte{2, 4, 3, 2, 2, 4}
-func storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes int, code_length_bitdepth []byte, storage_ix *uint, storage []byte) {
+func storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes int, code_length_bitdepth []byte, bw *bitWriter) {
var skip_some uint = 0
var codes_to_store uint = codeLengthCodes
/* The bit lengths of the Huffman code over the code length alphabet
@@ -238,38 +241,38 @@ func storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes int, code_length_bitdepth
}
}
- writeBits(2, uint64(skip_some), storage_ix, storage)
+ bw.writeBits(2, uint64(skip_some))
{
var i uint
for i = skip_some; i < codes_to_store; i++ {
var l uint = uint(code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[i]])
- writeBits(uint(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths[l]), uint64(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols[l]), storage_ix, storage)
+ bw.writeBits(uint(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths[l]), uint64(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols[l]))
}
}
}
-func storeHuffmanTreeToBitMask(huffman_tree_size uint, huffman_tree []byte, huffman_tree_extra_bits []byte, code_length_bitdepth []byte, code_length_bitdepth_symbols []uint16, storage_ix *uint, storage []byte) {
+func storeHuffmanTreeToBitMask(huffman_tree_size uint, huffman_tree []byte, huffman_tree_extra_bits []byte, code_length_bitdepth []byte, code_length_bitdepth_symbols []uint16, bw *bitWriter) {
var i uint
for i = 0; i < huffman_tree_size; i++ {
var ix uint = uint(huffman_tree[i])
- writeBits(uint(code_length_bitdepth[ix]), uint64(code_length_bitdepth_symbols[ix]), storage_ix, storage)
+ bw.writeBits(uint(code_length_bitdepth[ix]), uint64(code_length_bitdepth_symbols[ix]))
/* Extra bits */
switch ix {
case repeatPreviousCodeLength:
- writeBits(2, uint64(huffman_tree_extra_bits[i]), storage_ix, storage)
+ bw.writeBits(2, uint64(huffman_tree_extra_bits[i]))
case repeatZeroCodeLength:
- writeBits(3, uint64(huffman_tree_extra_bits[i]), storage_ix, storage)
+ bw.writeBits(3, uint64(huffman_tree_extra_bits[i]))
}
}
}
-func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max_bits uint, storage_ix *uint, storage []byte) {
+func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max_bits uint, bw *bitWriter) {
/* value of 1 indicates a simple Huffman code */
- writeBits(2, 1, storage_ix, storage)
+ bw.writeBits(2, 1)
- writeBits(2, uint64(num_symbols)-1, storage_ix, storage) /* NSYM - 1 */
+ bw.writeBits(2, uint64(num_symbols)-1) /* NSYM - 1 */
{
/* Sort */
var i uint
@@ -286,17 +289,17 @@ func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max
}
if num_symbols == 2 {
- writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
- writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
+ bw.writeBits(max_bits, uint64(symbols[0]))
+ bw.writeBits(max_bits, uint64(symbols[1]))
} else if num_symbols == 3 {
- writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
- writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
- writeBits(max_bits, uint64(symbols[2]), storage_ix, storage)
+ bw.writeBits(max_bits, uint64(symbols[0]))
+ bw.writeBits(max_bits, uint64(symbols[1]))
+ bw.writeBits(max_bits, uint64(symbols[2]))
} else {
- writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
- writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
- writeBits(max_bits, uint64(symbols[2]), storage_ix, storage)
- writeBits(max_bits, uint64(symbols[3]), storage_ix, storage)
+ bw.writeBits(max_bits, uint64(symbols[0]))
+ bw.writeBits(max_bits, uint64(symbols[1]))
+ bw.writeBits(max_bits, uint64(symbols[2]))
+ bw.writeBits(max_bits, uint64(symbols[3]))
/* tree-select */
var tmp int
@@ -305,13 +308,13 @@ func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max
} else {
tmp = 0
}
- writeBits(1, uint64(tmp), storage_ix, storage)
+ bw.writeBits(1, uint64(tmp))
}
}
/* num = alphabet size
depths = symbol depths */
-func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
+func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, bw *bitWriter) {
var huffman_tree [numCommandSymbols]byte
var huffman_tree_extra_bits [numCommandSymbols]byte
var huffman_tree_size uint = 0
@@ -354,19 +357,19 @@ func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, storage_ix *u
convertBitDepthsToSymbols(code_length_bitdepth[:], codeLengthCodes, code_length_bitdepth_symbols[:])
/* Now, we have all the data, let's start storing it */
- storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes, code_length_bitdepth[:], storage_ix, storage)
+ storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes, code_length_bitdepth[:], bw)
if num_codes == 1 {
code_length_bitdepth[code] = 0
}
/* Store the real Huffman tree now. */
- storeHuffmanTreeToBitMask(huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:], code_length_bitdepth[:], code_length_bitdepth_symbols[:], storage_ix, storage)
+ storeHuffmanTreeToBitMask(huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:], code_length_bitdepth[:], code_length_bitdepth_symbols[:], bw)
}
/* Builds a Huffman tree from histogram[0:length] into depth[0:length] and
bits[0:length] and stores the encoded tree to the bit stream. */
-func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabet_size uint, tree []huffmanTree, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
+func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabet_size uint, tree []huffmanTree, depth []byte, bits []uint16, bw *bitWriter) {
var count uint = 0
var s4 = [4]uint{0}
var i uint
@@ -391,8 +394,8 @@ func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabe
}
if count <= 1 {
- writeBits(4, 1, storage_ix, storage)
- writeBits(max_bits, uint64(s4[0]), storage_ix, storage)
+ bw.writeBits(4, 1)
+ bw.writeBits(max_bits, uint64(s4[0]))
depth[s4[0]] = 0
bits[s4[0]] = 0
return
@@ -405,17 +408,19 @@ func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabe
convertBitDepthsToSymbols(depth, histogram_length, bits)
if count <= 4 {
- storeSimpleHuffmanTree(depth, s4[:], count, max_bits, storage_ix, storage)
+ storeSimpleHuffmanTree(depth, s4[:], count, max_bits, bw)
} else {
- storeHuffmanTree(depth, histogram_length, tree, storage_ix, storage)
+ storeHuffmanTree(depth, histogram_length, tree, bw)
}
}
-func sortHuffmanTree1(v0 *huffmanTree, v1 *huffmanTree) bool {
+func sortHuffmanTree1(v0 huffmanTree, v1 huffmanTree) bool {
return v0.total_count_ < v1.total_count_
}
-func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
+var huffmanTreePool sync.Pool
+
+func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, bw *bitWriter) {
var count uint = 0
var symbols = [4]uint{0}
var length uint = 0
@@ -434,8 +439,8 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
}
if count <= 1 {
- writeBits(4, 1, storage_ix, storage)
- writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
+ bw.writeBits(4, 1)
+ bw.writeBits(max_bits, uint64(symbols[0]))
depth[symbols[0]] = 0
bits[symbols[0]] = 0
return
@@ -446,7 +451,13 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
}
{
var max_tree_size uint = 2*length + 1
- var tree []huffmanTree = make([]huffmanTree, max_tree_size)
+ tree, _ := huffmanTreePool.Get().(*[]huffmanTree)
+ if tree == nil || cap(*tree) < int(max_tree_size) {
+ tmp := make([]huffmanTree, max_tree_size)
+ tree = &tmp
+ } else {
+ *tree = (*tree)[:max_tree_size]
+ }
var count_limit uint32
for count_limit = 1; ; count_limit *= 2 {
var node int = 0
@@ -455,9 +466,9 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
l--
if histogram[l] != 0 {
if histogram[l] >= count_limit {
- initHuffmanTree(&tree[node:][0], histogram[l], -1, int16(l))
+ initHuffmanTree(&(*tree)[node:][0], histogram[l], -1, int16(l))
} else {
- initHuffmanTree(&tree[node:][0], count_limit, -1, int16(l))
+ initHuffmanTree(&(*tree)[node:][0], count_limit, -1, int16(l))
}
node++
@@ -471,7 +482,7 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
var j int = n + 1
var k int
- sortHuffmanTreeItems(tree, uint(n), huffmanTreeComparator(sortHuffmanTree1))
+ sortHuffmanTreeItems(*tree, uint(n), huffmanTreeComparator(sortHuffmanTree1))
/* The nodes are:
[0, n): the sorted leaf nodes that we start with.
@@ -482,15 +493,15 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
There will be (2n+1) elements at the end. */
initHuffmanTree(&sentinel, math.MaxUint32, -1, -1)
- tree[node] = sentinel
+ (*tree)[node] = sentinel
node++
- tree[node] = sentinel
+ (*tree)[node] = sentinel
node++
for k = n - 1; k > 0; k-- {
var left int
var right int
- if tree[i].total_count_ <= tree[j].total_count_ {
+ if (*tree)[i].total_count_ <= (*tree)[j].total_count_ {
left = i
i++
} else {
@@ -498,7 +509,7 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
j++
}
- if tree[i].total_count_ <= tree[j].total_count_ {
+ if (*tree)[i].total_count_ <= (*tree)[j].total_count_ {
right = i
i++
} else {
@@ -507,17 +518,17 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
}
/* The sentinel node becomes the parent node. */
- tree[node-1].total_count_ = tree[left].total_count_ + tree[right].total_count_
+ (*tree)[node-1].total_count_ = (*tree)[left].total_count_ + (*tree)[right].total_count_
- tree[node-1].index_left_ = int16(left)
- tree[node-1].index_right_or_value_ = int16(right)
+ (*tree)[node-1].index_left_ = int16(left)
+ (*tree)[node-1].index_right_or_value_ = int16(right)
/* Add back the last sentinel node. */
- tree[node] = sentinel
+ (*tree)[node] = sentinel
node++
}
- if setDepth(2*n-1, tree, depth, 14) {
+ if setDepth(2*n-1, *tree, depth, 14) {
/* We need to pack the Huffman tree in 14 bits. If this was not
successful, add fake entities to the lowest values and retry. */
break
@@ -525,7 +536,7 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
}
}
- tree = nil
+ huffmanTreePool.Put(tree)
}
convertBitDepthsToSymbols(depth, length, bits)
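Note that the pool above stores `*[]huffmanTree` rather than `[]huffmanTree`: sync.Pool traffics in `interface{}`, and boxing a slice header on every Put would itself allocate, defeating the purpose. A condensed, runnable sketch of the Get/reuse/Put cycle this hunk introduces (field types match the vendored package; the resize rule is the same cap check as above):

	package main

	import "sync"

	type huffmanTree struct {
		total_count_          uint32
		index_left_           int16
		index_right_or_value_ int16
	}

	var huffmanTreePool sync.Pool

	// getTree returns a slice with len == n, reusing a pooled backing
	// array when its capacity suffices. Contents may be stale; callers
	// fully reinitialize every node before reading it.
	func getTree(n uint) *[]huffmanTree {
		tree, _ := huffmanTreePool.Get().(*[]huffmanTree)
		if tree == nil || cap(*tree) < int(n) {
			tmp := make([]huffmanTree, n)
			tree = &tmp
		} else {
			*tree = (*tree)[:n]
		}
		return tree
	}

	func main() {
		t := getTree(2*256 + 1) // 2n+1 nodes for n leaves, per the comment above
		huffmanTreePool.Put(t)  // hand the buffer to the next caller
		_ = getTree(2*256 + 1)  // typically served from the pool, no allocation
	}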
@@ -533,9 +544,9 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
var i uint
/* value of 1 indicates a simple Huffman code */
- writeBits(2, 1, storage_ix, storage)
+ bw.writeBits(2, 1)
- writeBits(2, uint64(count)-1, storage_ix, storage) /* NSYM - 1 */
+ bw.writeBits(2, uint64(count)-1) /* NSYM - 1 */
/* Sort */
for i = 0; i < count; i++ {
@@ -550,33 +561,27 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
}
if count == 2 {
- writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
- writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
+ bw.writeBits(max_bits, uint64(symbols[0]))
+ bw.writeBits(max_bits, uint64(symbols[1]))
} else if count == 3 {
- writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
- writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
- writeBits(max_bits, uint64(symbols[2]), storage_ix, storage)
+ bw.writeBits(max_bits, uint64(symbols[0]))
+ bw.writeBits(max_bits, uint64(symbols[1]))
+ bw.writeBits(max_bits, uint64(symbols[2]))
} else {
- writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
- writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
- writeBits(max_bits, uint64(symbols[2]), storage_ix, storage)
- writeBits(max_bits, uint64(symbols[3]), storage_ix, storage)
+ bw.writeBits(max_bits, uint64(symbols[0]))
+ bw.writeBits(max_bits, uint64(symbols[1]))
+ bw.writeBits(max_bits, uint64(symbols[2]))
+ bw.writeBits(max_bits, uint64(symbols[3]))
/* tree-select */
- var tmp int
- if depth[symbols[0]] == 1 {
- tmp = 1
- } else {
- tmp = 0
- }
- writeBits(1, uint64(tmp), storage_ix, storage)
+ bw.writeSingleBit(depth[symbols[0]] == 1)
}
} else {
var previous_value byte = 8
var i uint
/* Complex Huffman Tree */
- storeStaticCodeLengthCode(storage_ix, storage)
+ storeStaticCodeLengthCode(bw)
/* Actual RLE coding. */
for i = 0; i < length; {
@@ -589,21 +594,21 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
i += reps
if value == 0 {
- writeBits(uint(kZeroRepsDepth[reps]), kZeroRepsBits[reps], storage_ix, storage)
+ bw.writeBits(uint(kZeroRepsDepth[reps]), kZeroRepsBits[reps])
} else {
if previous_value != value {
- writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]), storage_ix, storage)
+ bw.writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]))
reps--
}
if reps < 3 {
for reps != 0 {
reps--
- writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]), storage_ix, storage)
+ bw.writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]))
}
} else {
reps -= 3
- writeBits(uint(kNonZeroRepsDepth[reps]), kNonZeroRepsBits[reps], storage_ix, storage)
+ bw.writeBits(uint(kNonZeroRepsDepth[reps]), kNonZeroRepsBits[reps])
}
previous_value = value
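For orientation, the loop above is run-length coding the depth array: it walks maximal runs of equal code lengths, sends zero runs straight to repeat-zero codes, and for non-zero runs emits the length itself once (only when it changed) followed by a repeat code for the remainder. The k*Reps tables that encode each run are package internals; this toy sketch shows only the run segmentation the loop consumes:

	package main

	import "fmt"

	// runLengths splits a code-length sequence into (value, reps) runs,
	// the unit the RLE loop above processes one at a time.
	func runLengths(depths []byte) (out [][2]int) {
		for i := 0; i < len(depths); {
			j := i + 1
			for j < len(depths) && depths[j] == depths[i] {
				j++
			}
			out = append(out, [2]int{int(depths[i]), j - i})
			i = j
		}
		return out
	}

	func main() {
		fmt.Println(runLengths([]byte{3, 3, 3, 3, 0, 0, 5}))
		// [[3 4] [0 2] [5 1]]
	}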
@@ -728,7 +733,7 @@ const symbolBits = 9
var encodeContextMap_kSymbolMask uint32 = (1 << symbolBits) - 1
-func encodeContextMap(context_map []uint32, context_map_size uint, num_clusters uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
+func encodeContextMap(context_map []uint32, context_map_size uint, num_clusters uint, tree []huffmanTree, bw *bitWriter) {
var i uint
var rle_symbols []uint32
var max_run_length_prefix uint32 = 6
@@ -737,7 +742,7 @@ func encodeContextMap(context_map []uint32, context_map_size uint, num_clusters
var depths [maxContextMapSymbols]byte
var bits [maxContextMapSymbols]uint16
- storeVarLenUint8(num_clusters-1, storage_ix, storage)
+ storeVarLenUint8(num_clusters-1, bw)
if num_clusters == 1 {
return
@@ -752,45 +757,45 @@ func encodeContextMap(context_map []uint32, context_map_size uint, num_clusters
}
{
var use_rle bool = (max_run_length_prefix > 0)
- writeSingleBit(use_rle, storage_ix, storage)
+ bw.writeSingleBit(use_rle)
if use_rle {
- writeBits(4, uint64(max_run_length_prefix)-1, storage_ix, storage)
+ bw.writeBits(4, uint64(max_run_length_prefix)-1)
}
}
- buildAndStoreHuffmanTree(histogram[:], uint(uint32(num_clusters)+max_run_length_prefix), uint(uint32(num_clusters)+max_run_length_prefix), tree, depths[:], bits[:], storage_ix, storage)
+ buildAndStoreHuffmanTree(histogram[:], uint(uint32(num_clusters)+max_run_length_prefix), uint(uint32(num_clusters)+max_run_length_prefix), tree, depths[:], bits[:], bw)
for i = 0; i < num_rle_symbols; i++ {
var rle_symbol uint32 = rle_symbols[i] & encodeContextMap_kSymbolMask
var extra_bits_val uint32 = rle_symbols[i] >> symbolBits
- writeBits(uint(depths[rle_symbol]), uint64(bits[rle_symbol]), storage_ix, storage)
+ bw.writeBits(uint(depths[rle_symbol]), uint64(bits[rle_symbol]))
if rle_symbol > 0 && rle_symbol <= max_run_length_prefix {
- writeBits(uint(rle_symbol), uint64(extra_bits_val), storage_ix, storage)
+ bw.writeBits(uint(rle_symbol), uint64(extra_bits_val))
}
}
- writeBits(1, 1, storage_ix, storage) /* use move-to-front */
+ bw.writeBits(1, 1) /* use move-to-front */
rle_symbols = nil
}
/* Stores the block switch command with index block_ix to the bit stream. */
-func storeBlockSwitch(code *blockSplitCode, block_len uint32, block_type byte, is_first_block bool, storage_ix *uint, storage []byte) {
+func storeBlockSwitch(code *blockSplitCode, block_len uint32, block_type byte, is_first_block bool, bw *bitWriter) {
var typecode uint = nextBlockTypeCode(&code.type_code_calculator, block_type)
var lencode uint
var len_nextra uint32
var len_extra uint32
if !is_first_block {
- writeBits(uint(code.type_depths[typecode]), uint64(code.type_bits[typecode]), storage_ix, storage)
+ bw.writeBits(uint(code.type_depths[typecode]), uint64(code.type_bits[typecode]))
}
getBlockLengthPrefixCode(block_len, &lencode, &len_nextra, &len_extra)
- writeBits(uint(code.length_depths[lencode]), uint64(code.length_bits[lencode]), storage_ix, storage)
- writeBits(uint(len_nextra), uint64(len_extra), storage_ix, storage)
+ bw.writeBits(uint(code.length_depths[lencode]), uint64(code.length_bits[lencode]))
+ bw.writeBits(uint(len_nextra), uint64(len_extra))
}
/* Builds a BlockSplitCode data structure from the block split given by the
vector of block types and block lengths and stores it to the bit stream. */
-func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint, num_types uint, tree []huffmanTree, code *blockSplitCode, storage_ix *uint, storage []byte) {
+func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint, num_types uint, tree []huffmanTree, code *blockSplitCode, bw *bitWriter) {
var type_histo [maxBlockTypeSymbols]uint32
var length_histo [numBlockLenSymbols]uint32
var i uint
@@ -808,17 +813,17 @@ func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint
length_histo[blockLengthPrefixCode(lengths[i])]++
}
- storeVarLenUint8(num_types-1, storage_ix, storage)
+ storeVarLenUint8(num_types-1, bw)
if num_types > 1 { /* TODO: else? could StoreBlockSwitch occur? */
- buildAndStoreHuffmanTree(type_histo[0:], num_types+2, num_types+2, tree, code.type_depths[0:], code.type_bits[0:], storage_ix, storage)
- buildAndStoreHuffmanTree(length_histo[0:], numBlockLenSymbols, numBlockLenSymbols, tree, code.length_depths[0:], code.length_bits[0:], storage_ix, storage)
- storeBlockSwitch(code, lengths[0], types[0], true, storage_ix, storage)
+ buildAndStoreHuffmanTree(type_histo[0:], num_types+2, num_types+2, tree, code.type_depths[0:], code.type_bits[0:], bw)
+ buildAndStoreHuffmanTree(length_histo[0:], numBlockLenSymbols, numBlockLenSymbols, tree, code.length_depths[0:], code.length_bits[0:], bw)
+ storeBlockSwitch(code, lengths[0], types[0], true, bw)
}
}
/* Stores a context map where the histogram type is always the block type. */
-func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
- storeVarLenUint8(num_types-1, storage_ix, storage)
+func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTree, bw *bitWriter) {
+ storeVarLenUint8(num_types-1, bw)
if num_types > 1 {
var repeat_code uint = context_bits - 1
var repeat_bits uint = (1 << repeat_code) - 1
@@ -832,16 +837,16 @@ func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTre
}
/* Write RLEMAX. */
- writeBits(1, 1, storage_ix, storage)
+ bw.writeBits(1, 1)
- writeBits(4, uint64(repeat_code)-1, storage_ix, storage)
+ bw.writeBits(4, uint64(repeat_code)-1)
histogram[repeat_code] = uint32(num_types)
histogram[0] = 1
for i = context_bits; i < alphabet_size; i++ {
histogram[i] = 1
}
- buildAndStoreHuffmanTree(histogram[:], alphabet_size, alphabet_size, tree, depths[:], bits[:], storage_ix, storage)
+ buildAndStoreHuffmanTree(histogram[:], alphabet_size, alphabet_size, tree, depths[:], bits[:], bw)
for i = 0; i < num_types; i++ {
var tmp uint
if i == 0 {
@@ -850,13 +855,13 @@ func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTre
tmp = i + context_bits - 1
}
var code uint = tmp
- writeBits(uint(depths[code]), uint64(bits[code]), storage_ix, storage)
- writeBits(uint(depths[repeat_code]), uint64(bits[repeat_code]), storage_ix, storage)
- writeBits(repeat_code, uint64(repeat_bits), storage_ix, storage)
+ bw.writeBits(uint(depths[code]), uint64(bits[code]))
+ bw.writeBits(uint(depths[repeat_code]), uint64(bits[repeat_code]))
+ bw.writeBits(repeat_code, uint64(repeat_bits))
}
/* Write IMTF (inverse-move-to-front) bit. */
- writeBits(1, 1, storage_ix, storage)
+ bw.writeBits(1, 1)
}
}
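A context map, for reference, is the table consulted by storeSymbolWithContext further down: it maps a (block type, context) pair to a histogram index. The trivial map stored here is the degenerate case where every context of block type t points at histogram t, which is why it RLE-compresses so well. A hypothetical standalone lookup mirroring that indexing:

	package main

	import "fmt"

	// histogramIndex mirrors the entropy_ix_ arithmetic used by
	// storeSymbolWithContext below.
	func histogramIndex(contextMap []uint32, blockType byte, contextBits, context uint) uint {
		entropyIx := uint(blockType) << contextBits
		return uint(contextMap[entropyIx+context])
	}

	func main() {
		const contextBits = 2 // distanceContextBits in this package
		m := []uint32{0, 0, 0, 0, 1, 1, 1, 1} // trivial map for 2 block types
		fmt.Println(histogramIndex(m, 1, contextBits, 3)) // prints 1
	}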
@@ -875,38 +880,48 @@ type blockEncoder struct {
bits_ []uint16
}
-func initBlockEncoder(self *blockEncoder, histogram_length uint, num_block_types uint, block_types []byte, block_lengths []uint32, num_blocks uint) {
+var blockEncoderPool sync.Pool
+
+func getBlockEncoder(histogram_length uint, num_block_types uint, block_types []byte, block_lengths []uint32, num_blocks uint) *blockEncoder {
+ self, _ := blockEncoderPool.Get().(*blockEncoder)
+
+ if self != nil {
+ self.block_ix_ = 0
+ self.entropy_ix_ = 0
+ self.depths_ = self.depths_[:0]
+ self.bits_ = self.bits_[:0]
+ } else {
+ self = &blockEncoder{}
+ }
+
self.histogram_length_ = histogram_length
self.num_block_types_ = num_block_types
self.block_types_ = block_types
self.block_lengths_ = block_lengths
self.num_blocks_ = num_blocks
initBlockTypeCodeCalculator(&self.block_split_code_.type_code_calculator)
- self.block_ix_ = 0
if num_blocks == 0 {
self.block_len_ = 0
} else {
self.block_len_ = uint(block_lengths[0])
}
- self.entropy_ix_ = 0
- self.depths_ = nil
- self.bits_ = nil
+
+ return self
}
func cleanupBlockEncoder(self *blockEncoder) {
- self.depths_ = nil
- self.bits_ = nil
+ blockEncoderPool.Put(self)
}
/* Creates entropy codes of block lengths and block types and stores them
to the bit stream. */
-func buildAndStoreBlockSwitchEntropyCodes(self *blockEncoder, tree []huffmanTree, storage_ix *uint, storage []byte) {
- buildAndStoreBlockSplitCode(self.block_types_, self.block_lengths_, self.num_blocks_, self.num_block_types_, tree, &self.block_split_code_, storage_ix, storage)
+func buildAndStoreBlockSwitchEntropyCodes(self *blockEncoder, tree []huffmanTree, bw *bitWriter) {
+ buildAndStoreBlockSplitCode(self.block_types_, self.block_lengths_, self.num_blocks_, self.num_block_types_, tree, &self.block_split_code_, bw)
}
/* Stores the next symbol with the entropy code of the current block type.
Updates the block type and block length at block boundaries. */
-func storeSymbol(self *blockEncoder, symbol uint, storage_ix *uint, storage []byte) {
+func storeSymbol(self *blockEncoder, symbol uint, bw *bitWriter) {
if self.block_len_ == 0 {
self.block_ix_++
var block_ix uint = self.block_ix_
@@ -914,20 +929,20 @@ func storeSymbol(self *blockEncoder, symbol uint, storage_ix *uint, storage []by
var block_type byte = self.block_types_[block_ix]
self.block_len_ = uint(block_len)
self.entropy_ix_ = uint(block_type) * self.histogram_length_
- storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, storage_ix, storage)
+ storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, bw)
}
self.block_len_--
{
var ix uint = self.entropy_ix_ + symbol
- writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]), storage_ix, storage)
+ bw.writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]))
}
}
/* Stores the next symbol with the entropy code of the current block type and
context value.
Updates the block type and block length at block boundaries. */
-func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, context_map []uint32, storage_ix *uint, storage []byte, context_bits uint) {
+func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, context_map []uint32, bw *bitWriter, context_bits uint) {
if self.block_len_ == 0 {
self.block_ix_++
var block_ix uint = self.block_ix_
@@ -935,119 +950,134 @@ func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, conte
var block_type byte = self.block_types_[block_ix]
self.block_len_ = uint(block_len)
self.entropy_ix_ = uint(block_type) << context_bits
- storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, storage_ix, storage)
+ storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, bw)
}
self.block_len_--
{
var histo_ix uint = uint(context_map[self.entropy_ix_+context])
var ix uint = histo_ix*self.histogram_length_ + symbol
- writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]), storage_ix, storage)
+ bw.writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]))
}
}
-func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogramLiteral, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
+func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogramLiteral, histograms_size uint, alphabet_size uint, tree []huffmanTree, bw *bitWriter) {
var table_size uint = histograms_size * self.histogram_length_
- self.depths_ = make([]byte, table_size)
- self.bits_ = make([]uint16, table_size)
+ if cap(self.depths_) < int(table_size) {
+ self.depths_ = make([]byte, table_size)
+ } else {
+ self.depths_ = self.depths_[:table_size]
+ }
+ if cap(self.bits_) < int(table_size) {
+ self.bits_ = make([]uint16, table_size)
+ } else {
+ self.bits_ = self.bits_[:table_size]
+ }
{
var i uint
for i = 0; i < histograms_size; i++ {
var ix uint = i * self.histogram_length_
- buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage)
+ buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], bw)
}
}
}
-func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogramCommand, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
+func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogramCommand, histograms_size uint, alphabet_size uint, tree []huffmanTree, bw *bitWriter) {
var table_size uint = histograms_size * self.histogram_length_
- self.depths_ = make([]byte, table_size)
- self.bits_ = make([]uint16, table_size)
+ if cap(self.depths_) < int(table_size) {
+ self.depths_ = make([]byte, table_size)
+ } else {
+ self.depths_ = self.depths_[:table_size]
+ }
+ if cap(self.bits_) < int(table_size) {
+ self.bits_ = make([]uint16, table_size)
+ } else {
+ self.bits_ = self.bits_[:table_size]
+ }
{
var i uint
for i = 0; i < histograms_size; i++ {
var ix uint = i * self.histogram_length_
- buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage)
+ buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], bw)
}
}
}
-func buildAndStoreEntropyCodesDistance(self *blockEncoder, histograms []histogramDistance, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
+func buildAndStoreEntropyCodesDistance(self *blockEncoder, histograms []histogramDistance, histograms_size uint, alphabet_size uint, tree []huffmanTree, bw *bitWriter) {
var table_size uint = histograms_size * self.histogram_length_
- self.depths_ = make([]byte, table_size)
- self.bits_ = make([]uint16, table_size)
+ if cap(self.depths_) < int(table_size) {
+ self.depths_ = make([]byte, table_size)
+ } else {
+ self.depths_ = self.depths_[:table_size]
+ }
+ if cap(self.bits_) < int(table_size) {
+ self.bits_ = make([]uint16, table_size)
+ } else {
+ self.bits_ = self.bits_[:table_size]
+ }
{
var i uint
for i = 0; i < histograms_size; i++ {
var ix uint = i * self.histogram_length_
- buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage)
+ buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], bw)
}
}
}
-func jumpToByteBoundary(storage_ix *uint, storage []byte) {
- *storage_ix = (*storage_ix + 7) &^ 7
- storage[*storage_ix>>3] = 0
-}
-
-func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_byte byte, prev_byte2 byte, is_last bool, params *encoderParams, literal_context_mode int, commands []command, n_commands uint, mb *metaBlockSplit, storage_ix *uint, storage []byte) {
+func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_byte byte, prev_byte2 byte, is_last bool, params *encoderParams, literal_context_mode int, commands []command, mb *metaBlockSplit, bw *bitWriter) {
var pos uint = start_pos
var i uint
var num_distance_symbols uint32 = params.dist.alphabet_size
var num_effective_distance_symbols uint32 = num_distance_symbols
var tree []huffmanTree
var literal_context_lut contextLUT = getContextLUT(literal_context_mode)
- var literal_enc blockEncoder
- var command_enc blockEncoder
- var distance_enc blockEncoder
var dist *distanceParams = ¶ms.dist
if params.large_window && num_effective_distance_symbols > numHistogramDistanceSymbols {
num_effective_distance_symbols = numHistogramDistanceSymbols
}
- storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage)
+ storeCompressedMetaBlockHeader(is_last, length, bw)
tree = make([]huffmanTree, maxHuffmanTreeSize)
- initBlockEncoder(&literal_enc, numLiteralSymbols, mb.literal_split.num_types, mb.literal_split.types, mb.literal_split.lengths, mb.literal_split.num_blocks)
- initBlockEncoder(&command_enc, numCommandSymbols, mb.command_split.num_types, mb.command_split.types, mb.command_split.lengths, mb.command_split.num_blocks)
- initBlockEncoder(&distance_enc, uint(num_effective_distance_symbols), mb.distance_split.num_types, mb.distance_split.types, mb.distance_split.lengths, mb.distance_split.num_blocks)
+ literal_enc := getBlockEncoder(numLiteralSymbols, mb.literal_split.num_types, mb.literal_split.types, mb.literal_split.lengths, mb.literal_split.num_blocks)
+ command_enc := getBlockEncoder(numCommandSymbols, mb.command_split.num_types, mb.command_split.types, mb.command_split.lengths, mb.command_split.num_blocks)
+ distance_enc := getBlockEncoder(uint(num_effective_distance_symbols), mb.distance_split.num_types, mb.distance_split.types, mb.distance_split.lengths, mb.distance_split.num_blocks)
- buildAndStoreBlockSwitchEntropyCodes(&literal_enc, tree, storage_ix, storage)
- buildAndStoreBlockSwitchEntropyCodes(&command_enc, tree, storage_ix, storage)
- buildAndStoreBlockSwitchEntropyCodes(&distance_enc, tree, storage_ix, storage)
+ buildAndStoreBlockSwitchEntropyCodes(literal_enc, tree, bw)
+ buildAndStoreBlockSwitchEntropyCodes(command_enc, tree, bw)
+ buildAndStoreBlockSwitchEntropyCodes(distance_enc, tree, bw)
- writeBits(2, uint64(dist.distance_postfix_bits), storage_ix, storage)
- writeBits(4, uint64(dist.num_direct_distance_codes)>>dist.distance_postfix_bits, storage_ix, storage)
+ bw.writeBits(2, uint64(dist.distance_postfix_bits))
+ bw.writeBits(4, uint64(dist.num_direct_distance_codes)>>dist.distance_postfix_bits)
for i = 0; i < mb.literal_split.num_types; i++ {
- writeBits(2, uint64(literal_context_mode), storage_ix, storage)
+ bw.writeBits(2, uint64(literal_context_mode))
}
if mb.literal_context_map_size == 0 {
- storeTrivialContextMap(mb.literal_histograms_size, literalContextBits, tree, storage_ix, storage)
+ storeTrivialContextMap(mb.literal_histograms_size, literalContextBits, tree, bw)
} else {
- encodeContextMap(mb.literal_context_map, mb.literal_context_map_size, mb.literal_histograms_size, tree, storage_ix, storage)
+ encodeContextMap(mb.literal_context_map, mb.literal_context_map_size, mb.literal_histograms_size, tree, bw)
}
if mb.distance_context_map_size == 0 {
- storeTrivialContextMap(mb.distance_histograms_size, distanceContextBits, tree, storage_ix, storage)
+ storeTrivialContextMap(mb.distance_histograms_size, distanceContextBits, tree, bw)
} else {
- encodeContextMap(mb.distance_context_map, mb.distance_context_map_size, mb.distance_histograms_size, tree, storage_ix, storage)
+ encodeContextMap(mb.distance_context_map, mb.distance_context_map_size, mb.distance_histograms_size, tree, bw)
}
- buildAndStoreEntropyCodesLiteral(&literal_enc, mb.literal_histograms, mb.literal_histograms_size, numLiteralSymbols, tree, storage_ix, storage)
- buildAndStoreEntropyCodesCommand(&command_enc, mb.command_histograms, mb.command_histograms_size, numCommandSymbols, tree, storage_ix, storage)
- buildAndStoreEntropyCodesDistance(&distance_enc, mb.distance_histograms, mb.distance_histograms_size, uint(num_distance_symbols), tree, storage_ix, storage)
+ buildAndStoreEntropyCodesLiteral(literal_enc, mb.literal_histograms, mb.literal_histograms_size, numLiteralSymbols, tree, bw)
+ buildAndStoreEntropyCodesCommand(command_enc, mb.command_histograms, mb.command_histograms_size, numCommandSymbols, tree, bw)
+ buildAndStoreEntropyCodesDistance(distance_enc, mb.distance_histograms, mb.distance_histograms_size, uint(num_distance_symbols), tree, bw)
tree = nil
- for i = 0; i < n_commands; i++ {
- var cmd command = commands[i]
+ for _, cmd := range commands {
var cmd_code uint = uint(cmd.cmd_prefix_)
- storeSymbol(&command_enc, cmd_code, storage_ix, storage)
- storeCommandExtra(&cmd, storage_ix, storage)
+ storeSymbol(command_enc, cmd_code, bw)
+ storeCommandExtra(&cmd, bw)
if mb.literal_context_map_size == 0 {
var j uint
for j = uint(cmd.insert_len_); j != 0; j-- {
- storeSymbol(&literal_enc, uint(input[pos&mask]), storage_ix, storage)
+ storeSymbol(literal_enc, uint(input[pos&mask]), bw)
pos++
}
} else {
@@ -1055,7 +1085,7 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
for j = uint(cmd.insert_len_); j != 0; j-- {
var context uint = uint(getContext(prev_byte, prev_byte2, literal_context_lut))
var literal byte = input[pos&mask]
- storeSymbolWithContext(&literal_enc, uint(literal), context, mb.literal_context_map, storage_ix, storage, literalContextBits)
+ storeSymbolWithContext(literal_enc, uint(literal), context, mb.literal_context_map, bw, literalContextBits)
prev_byte2 = prev_byte
prev_byte = literal
pos++
@@ -1071,30 +1101,28 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10
var distextra uint64 = uint64(cmd.dist_extra_)
if mb.distance_context_map_size == 0 {
- storeSymbol(&distance_enc, dist_code, storage_ix, storage)
+ storeSymbol(distance_enc, dist_code, bw)
} else {
var context uint = uint(commandDistanceContext(&cmd))
- storeSymbolWithContext(&distance_enc, dist_code, context, mb.distance_context_map, storage_ix, storage, distanceContextBits)
+ storeSymbolWithContext(distance_enc, dist_code, context, mb.distance_context_map, bw, distanceContextBits)
}
- writeBits(uint(distnumextra), distextra, storage_ix, storage)
+ bw.writeBits(uint(distnumextra), distextra)
}
}
}
- cleanupBlockEncoder(&distance_enc)
- cleanupBlockEncoder(&command_enc)
- cleanupBlockEncoder(&literal_enc)
+ cleanupBlockEncoder(distance_enc)
+ cleanupBlockEncoder(command_enc)
+ cleanupBlockEncoder(literal_enc)
if is_last {
- jumpToByteBoundary(storage_ix, storage)
+ bw.jumpToByteBoundary()
}
}
-func buildHistograms(input []byte, start_pos uint, mask uint, commands []command, n_commands uint, lit_histo *histogramLiteral, cmd_histo *histogramCommand, dist_histo *histogramDistance) {
+func buildHistograms(input []byte, start_pos uint, mask uint, commands []command, lit_histo *histogramLiteral, cmd_histo *histogramCommand, dist_histo *histogramDistance) {
var pos uint = start_pos
- var i uint
- for i = 0; i < n_commands; i++ {
- var cmd command = commands[i]
+ for _, cmd := range commands {
var j uint
histogramAddCommand(cmd_histo, uint(cmd.cmd_prefix_))
for j = uint(cmd.insert_len_); j != 0; j-- {
@@ -1109,18 +1137,16 @@ func buildHistograms(input []byte, start_pos uint, mask uint, commands []command
}
}
-func storeDataWithHuffmanCodes(input []byte, start_pos uint, mask uint, commands []command, n_commands uint, lit_depth []byte, lit_bits []uint16, cmd_depth []byte, cmd_bits []uint16, dist_depth []byte, dist_bits []uint16, storage_ix *uint, storage []byte) {
+func storeDataWithHuffmanCodes(input []byte, start_pos uint, mask uint, commands []command, lit_depth []byte, lit_bits []uint16, cmd_depth []byte, cmd_bits []uint16, dist_depth []byte, dist_bits []uint16, bw *bitWriter) {
var pos uint = start_pos
- var i uint
- for i = 0; i < n_commands; i++ {
- var cmd command = commands[i]
+ for _, cmd := range commands {
var cmd_code uint = uint(cmd.cmd_prefix_)
var j uint
- writeBits(uint(cmd_depth[cmd_code]), uint64(cmd_bits[cmd_code]), storage_ix, storage)
- storeCommandExtra(&cmd, storage_ix, storage)
+ bw.writeBits(uint(cmd_depth[cmd_code]), uint64(cmd_bits[cmd_code]))
+ storeCommandExtra(&cmd, bw)
for j = uint(cmd.insert_len_); j != 0; j-- {
var literal byte = input[pos&mask]
- writeBits(uint(lit_depth[literal]), uint64(lit_bits[literal]), storage_ix, storage)
+ bw.writeBits(uint(lit_depth[literal]), uint64(lit_bits[literal]))
pos++
}
@@ -1129,13 +1155,13 @@ func storeDataWithHuffmanCodes(input []byte, start_pos uint, mask uint, commands
var dist_code uint = uint(cmd.dist_prefix_) & 0x3FF
var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10
var distextra uint32 = cmd.dist_extra_
- writeBits(uint(dist_depth[dist_code]), uint64(dist_bits[dist_code]), storage_ix, storage)
- writeBits(uint(distnumextra), uint64(distextra), storage_ix, storage)
+ bw.writeBits(uint(dist_depth[dist_code]), uint64(dist_bits[dist_code]))
+ bw.writeBits(uint(distnumextra), uint64(distextra))
}
}
}
-func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, n_commands uint, storage_ix *uint, storage []byte) {
+func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, bw *bitWriter) {
var lit_histo histogramLiteral
var cmd_histo histogramCommand
var dist_histo histogramDistance
@@ -1148,44 +1174,42 @@ func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint,
var tree []huffmanTree
var num_distance_symbols uint32 = params.dist.alphabet_size
- storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage)
+ storeCompressedMetaBlockHeader(is_last, length, bw)
histogramClearLiteral(&lit_histo)
histogramClearCommand(&cmd_histo)
histogramClearDistance(&dist_histo)
- buildHistograms(input, start_pos, mask, commands, n_commands, &lit_histo, &cmd_histo, &dist_histo)
+ buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo)
- writeBits(13, 0, storage_ix, storage)
+ bw.writeBits(13, 0)
tree = make([]huffmanTree, maxHuffmanTreeSize)
- buildAndStoreHuffmanTree(lit_histo.data_[:], numLiteralSymbols, numLiteralSymbols, tree, lit_depth[:], lit_bits[:], storage_ix, storage)
- buildAndStoreHuffmanTree(cmd_histo.data_[:], numCommandSymbols, numCommandSymbols, tree, cmd_depth[:], cmd_bits[:], storage_ix, storage)
- buildAndStoreHuffmanTree(dist_histo.data_[:], maxSimpleDistanceAlphabetSize, uint(num_distance_symbols), tree, dist_depth[:], dist_bits[:], storage_ix, storage)
+ buildAndStoreHuffmanTree(lit_histo.data_[:], numLiteralSymbols, numLiteralSymbols, tree, lit_depth[:], lit_bits[:], bw)
+ buildAndStoreHuffmanTree(cmd_histo.data_[:], numCommandSymbols, numCommandSymbols, tree, cmd_depth[:], cmd_bits[:], bw)
+ buildAndStoreHuffmanTree(dist_histo.data_[:], maxSimpleDistanceAlphabetSize, uint(num_distance_symbols), tree, dist_depth[:], dist_bits[:], bw)
tree = nil
- storeDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], storage_ix, storage)
+ storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], bw)
if is_last {
- jumpToByteBoundary(storage_ix, storage)
+ bw.jumpToByteBoundary()
}
}
-func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, n_commands uint, storage_ix *uint, storage []byte) {
+func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, bw *bitWriter) {
var num_distance_symbols uint32 = params.dist.alphabet_size
var distance_alphabet_bits uint32 = log2FloorNonZero(uint(num_distance_symbols-1)) + 1
- storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage)
+ storeCompressedMetaBlockHeader(is_last, length, bw)
- writeBits(13, 0, storage_ix, storage)
+ bw.writeBits(13, 0)
- if n_commands <= 128 {
+ if len(commands) <= 128 {
var histogram = [numLiteralSymbols]uint32{0}
var pos uint = start_pos
var num_literals uint = 0
- var i uint
var lit_depth [numLiteralSymbols]byte
var lit_bits [numLiteralSymbols]uint16
- for i = 0; i < n_commands; i++ {
- var cmd command = commands[i]
+ for _, cmd := range commands {
var j uint
for j = uint(cmd.insert_len_); j != 0; j-- {
histogram[input[pos&mask]]++
@@ -1197,11 +1221,11 @@ func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is
}
buildAndStoreHuffmanTreeFast(histogram[:], num_literals, /* max_bits = */
- 8, lit_depth[:], lit_bits[:], storage_ix, storage)
+ 8, lit_depth[:], lit_bits[:], bw)
- storeStaticCommandHuffmanTree(storage_ix, storage)
- storeStaticDistanceHuffmanTree(storage_ix, storage)
- storeDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth[:], lit_bits[:], kStaticCommandCodeDepth[:], kStaticCommandCodeBits[:], kStaticDistanceCodeDepth[:], kStaticDistanceCodeBits[:], storage_ix, storage)
+ storeStaticCommandHuffmanTree(bw)
+ storeStaticDistanceHuffmanTree(bw)
+ storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], kStaticCommandCodeDepth[:], kStaticCommandCodeBits[:], kStaticDistanceCodeDepth[:], kStaticDistanceCodeBits[:], bw)
} else {
var lit_histo histogramLiteral
var cmd_histo histogramCommand
@@ -1215,51 +1239,45 @@ func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is
histogramClearLiteral(&lit_histo)
histogramClearCommand(&cmd_histo)
histogramClearDistance(&dist_histo)
- buildHistograms(input, start_pos, mask, commands, n_commands, &lit_histo, &cmd_histo, &dist_histo)
+ buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo)
buildAndStoreHuffmanTreeFast(lit_histo.data_[:], lit_histo.total_count_, /* max_bits = */
- 8, lit_depth[:], lit_bits[:], storage_ix, storage)
+ 8, lit_depth[:], lit_bits[:], bw)
buildAndStoreHuffmanTreeFast(cmd_histo.data_[:], cmd_histo.total_count_, /* max_bits = */
- 10, cmd_depth[:], cmd_bits[:], storage_ix, storage)
+ 10, cmd_depth[:], cmd_bits[:], bw)
buildAndStoreHuffmanTreeFast(dist_histo.data_[:], dist_histo.total_count_, /* max_bits = */
- uint(distance_alphabet_bits), dist_depth[:], dist_bits[:], storage_ix, storage)
+ uint(distance_alphabet_bits), dist_depth[:], dist_bits[:], bw)
- storeDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], storage_ix, storage)
+ storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], bw)
}
if is_last {
- jumpToByteBoundary(storage_ix, storage)
+ bw.jumpToByteBoundary()
}
}
/* This is for storing uncompressed blocks (simple raw storage of
bytes-as-bytes). */
-func storeUncompressedMetaBlock(is_final_block bool, input []byte, position uint, mask uint, len uint, storage_ix *uint, storage []byte) {
+func storeUncompressedMetaBlock(is_final_block bool, input []byte, position uint, mask uint, len uint, bw *bitWriter) {
var masked_pos uint = position & mask
- storeUncompressedMetaBlockHeader(uint(len), storage_ix, storage)
- jumpToByteBoundary(storage_ix, storage)
+ storeUncompressedMetaBlockHeader(uint(len), bw)
+ bw.jumpToByteBoundary()
if masked_pos+len > mask+1 {
var len1 uint = mask + 1 - masked_pos
- copy(storage[*storage_ix>>3:], input[masked_pos:][:len1])
- *storage_ix += len1 << 3
+ bw.writeBytes(input[masked_pos:][:len1])
len -= len1
masked_pos = 0
}
- copy(storage[*storage_ix>>3:], input[masked_pos:][:len])
- *storage_ix += uint(len << 3)
-
- /* We need to clear the next 4 bytes to continue to be
- compatible with BrotliWriteBits. */
- writeBitsPrepareStorage(*storage_ix, storage)
+ bw.writeBytes(input[masked_pos:][:len])
/* Since the uncompressed block itself may not be the final block, add an
empty one after this. */
if is_final_block {
- writeBits(1, 1, storage_ix, storage) /* islast */
- writeBits(1, 1, storage_ix, storage) /* isempty */
- jumpToByteBoundary(storage_ix, storage)
+ bw.writeBits(1, 1) /* islast */
+ bw.writeBits(1, 1) /* isempty */
+ bw.jumpToByteBoundary()
}
}
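The capacity-check-then-reslice dance repeated in all three buildAndStoreEntropyCodes variants above is the standard Go buffer-reuse idiom; together with blockEncoderPool it means a long-running encoder stops allocating fresh depth/bit tables for every meta-block. Isolated as a sketch (ensureLen is illustrative, not a helper that exists in the package):

	package main

	import "fmt"

	// ensureLen reslices when the old backing array is big enough and
	// allocates otherwise. Contents are not zeroed; this is safe here
	// because the Huffman builder overwrites every element before use.
	func ensureLen(b []byte, n int) []byte {
		if cap(b) < n {
			return make([]byte, n)
		}
		return b[:n]
	}

	func main() {
		buf := make([]byte, 0, 1024)
		buf = ensureLen(buf, 256)
		fmt.Println(len(buf), cap(buf)) // 256 1024 (backing array reused)
		buf = ensureLen(buf, 4096)
		fmt.Println(len(buf), cap(buf)) // 4096 4096 (fresh allocation)
	}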
diff --git a/vendor/github.com/andybalholm/brotli/command.go b/vendor/github.com/andybalholm/brotli/command.go
index e93ccdfa1e6..b1662a55552 100644
--- a/vendor/github.com/andybalholm/brotli/command.go
+++ b/vendor/github.com/andybalholm/brotli/command.go
@@ -194,26 +194,28 @@ type command struct {
}
/* distance_code is e.g. 0 for same-as-last short code, or 16 for offset 1. */
-func initCommand(self *command, dist *distanceParams, insertlen uint, copylen uint, copylen_code_delta int, distance_code uint) {
+func makeCommand(dist *distanceParams, insertlen uint, copylen uint, copylen_code_delta int, distance_code uint) (cmd command) {
/* Don't rely on signed int representation, use honest casts. */
var delta uint32 = uint32(byte(int8(copylen_code_delta)))
- self.insert_len_ = uint32(insertlen)
- self.copy_len_ = uint32(uint32(copylen) | delta<<25)
+ cmd.insert_len_ = uint32(insertlen)
+ cmd.copy_len_ = uint32(uint32(copylen) | delta<<25)
/* The distance prefix and extra bits are stored in this Command as if
npostfix and ndirect were 0, they are only recomputed later after the
clustering if needed. */
- prefixEncodeCopyDistance(distance_code, uint(dist.num_direct_distance_codes), uint(dist.distance_postfix_bits), &self.dist_prefix_, &self.dist_extra_)
+ prefixEncodeCopyDistance(distance_code, uint(dist.num_direct_distance_codes), uint(dist.distance_postfix_bits), &cmd.dist_prefix_, &cmd.dist_extra_)
+ getLengthCode(insertlen, uint(int(copylen)+copylen_code_delta), (cmd.dist_prefix_&0x3FF == 0), &cmd.cmd_prefix_)
- getLengthCode(insertlen, uint(int(copylen)+copylen_code_delta), (self.dist_prefix_&0x3FF == 0), &self.cmd_prefix_)
+ return cmd
}
-func initInsertCommand(self *command, insertlen uint) {
- self.insert_len_ = uint32(insertlen)
- self.copy_len_ = 4 << 25
- self.dist_extra_ = 0
- self.dist_prefix_ = numDistanceShortCodes
- getLengthCode(insertlen, 4, false, &self.cmd_prefix_)
+func makeInsertCommand(insertlen uint) (cmd command) {
+ cmd.insert_len_ = uint32(insertlen)
+ cmd.copy_len_ = 4 << 25
+ cmd.dist_extra_ = 0
+ cmd.dist_prefix_ = numDistanceShortCodes
+ getLengthCode(insertlen, 4, false, &cmd.cmd_prefix_)
+ return cmd
}
func commandRestoreDistanceCode(self *command, dist *distanceParams) uint32 {
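The rename from initCommand/initInsertCommand to makeCommand/makeInsertCommand tracks a calling-convention change: instead of zero-initializing a command and handing the constructor a pointer to fill, callers now receive the struct by value, which composes directly with append and slice literals. A toy analogue of the two shapes (command itself has more fields than this):

	package main

	import "fmt"

	type point struct{ x, y int }

	// Out-parameter style, as initCommand/initInsertCommand had it:
	func initPoint(p *point, x, y int) { p.x, p.y = x, y }

	// Value-returning style, as makeCommand/makeInsertCommand now have it:
	func makePoint(x, y int) (p point) { p.x, p.y = x, y; return p }

	func main() {
		var a point
		initPoint(&a, 1, 2)
		pts := []point{makePoint(1, 2)} // composes directly with literals/append
		fmt.Println(a, pts)
	}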
diff --git a/vendor/github.com/andybalholm/brotli/compress_fragment.go b/vendor/github.com/andybalholm/brotli/compress_fragment.go
index 435898e1651..dbf0c43bf26 100644
--- a/vendor/github.com/andybalholm/brotli/compress_fragment.go
+++ b/vendor/github.com/andybalholm/brotli/compress_fragment.go
@@ -33,14 +33,8 @@ func hashBytesAtOffset5(v uint64, offset int, shift uint) uint32 {
}
func isMatch5(p1 []byte, p2 []byte) bool {
- var i int
- for i = 0; i < 5; i++ {
- if p1[i] != p2[i] {
- return false
- }
- }
-
- return true
+ return binary.LittleEndian.Uint32(p1) == binary.LittleEndian.Uint32(p2) &&
+ p1[4] == p2[4]
}
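The rewritten isMatch5 compares the first four bytes as a single 32-bit load plus one byte compare instead of a five-iteration loop. Endianness is irrelevant for an equality test as long as both sides decode the same way; note it relies on an `encoding/binary` import elsewhere in compress_fragment.go, outside this excerpt. A self-contained copy:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// isMatch5, same shape as above: one 32-bit compare plus one byte.
	// Requires len(p1) >= 5 and len(p2) >= 5, which the call sites ensure.
	func isMatch5(p1, p2 []byte) bool {
		return binary.LittleEndian.Uint32(p1) == binary.LittleEndian.Uint32(p2) &&
			p1[4] == p2[4]
	}

	func main() {
		fmt.Println(isMatch5([]byte("hello"), []byte("hellx"))) // false
		fmt.Println(isMatch5([]byte("hello"), []byte("hello"))) // true
	}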
/* Builds a literal prefix code into "depths" and "bits" based on the statistics
@@ -51,7 +45,7 @@ func isMatch5(p1 []byte, p2 []byte) bool {
and thus have to assign a non-zero depth for each literal.
Returns estimated compression ratio millibytes/char for encoding given input
with generated code. */
-func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, storage_ix *uint, storage []byte) uint {
+func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, bw *bitWriter) uint {
var histogram = [256]uint32{0}
var histogram_total uint
var i uint
@@ -88,7 +82,7 @@ func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte
}
buildAndStoreHuffmanTreeFast(histogram[:], histogram_total, /* max_bits = */
- 8, depths, bits, storage_ix, storage)
+ 8, depths, bits, bw)
{
var literal_ratio uint = 0
for i = 0; i < 256; i++ {
@@ -104,7 +98,7 @@ func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
"bits" based on "histogram" and stores it into the bit stream. */
-func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
+func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, bw *bitWriter) {
var tree [129]huffmanTree
var cmd_depth = [numCommandSymbols]byte{0}
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
@@ -151,141 +145,141 @@ func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []ui
cmd_depth[448+8*i] = depth[56+i]
}
- storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage)
+ storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], bw)
}
- storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage)
+ storeHuffmanTree(depth[64:], 64, tree[:], bw)
}
/* REQUIRES: insertlen < 6210 */
-func emitInsertLen1(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
+func emitInsertLen1(insertlen uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
if insertlen < 6 {
var code uint = insertlen + 40
- writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
+ bw.writeBits(uint(depth[code]), uint64(bits[code]))
histo[code]++
} else if insertlen < 130 {
var tail uint = insertlen - 2
var nbits uint32 = log2FloorNonZero(tail) - 1
var prefix uint = tail >> nbits
var inscode uint = uint((nbits << 1) + uint32(prefix) + 42)
- writeBits(uint(depth[inscode]), uint64(bits[inscode]), storage_ix, storage)
- writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
+ bw.writeBits(uint(depth[inscode]), uint64(bits[inscode]))
+ bw.writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits))
var prefix uint = tail >> nbits
var code uint = uint((nbits << 1) + uint32(prefix) + 20)
- writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
- writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
+ bw.writeBits(uint(depth[code]), uint64(bits[code]))
+ bw.writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits))
var prefix uint = tail >> nbits
var code uint = uint((nbits << 1) + uint32(prefix) + 4)
- writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
- writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
+ bw.writeBits(uint(depth[code]), uint64(bits[code]))
+ bw.writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits))
var code uint = (tail >> 5) + 30
- writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
- writeBits(5, uint64(tail)&31, storage_ix, storage)
- writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
+ bw.writeBits(uint(depth[code]), uint64(bits[code]))
+ bw.writeBits(5, uint64(tail)&31)
+ bw.writeBits(uint(depth[64]), uint64(bits[64]))
histo[code]++
histo[64]++
} else if copylen < 2120 {
var tail uint = copylen - 72
var nbits uint32 = log2FloorNonZero(tail)
var code uint = uint(nbits + 28)
- writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
- writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
+ bw.writeBits(uint(depth[code]), uint64(bits[code]))
+ bw.writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits))
var prefix uint = (d >> nbits) & 1
var offset uint = (2 + prefix) << nbits
var distcode uint = uint(2*(nbits-1) + uint32(prefix) + 80)
- writeBits(uint(depth[distcode]), uint64(bits[distcode]), storage_ix, storage)
- writeBits(uint(nbits), uint64(d)-uint64(offset), storage_ix, storage)
+ bw.writeBits(uint(depth[distcode]), uint64(bits[distcode]))
+ bw.writeBits(uint(nbits), uint64(d)-uint64(offset))
histo[distcode]++
}
-func emitLiterals(input []byte, len uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
+func emitLiterals(input []byte, len uint, depth []byte, bits []uint16, bw *bitWriter) {
var j uint
for j = 0; j < len; j++ {
var lit byte = input[j]
- writeBits(uint(depth[lit]), uint64(bits[lit]), storage_ix, storage)
+ bw.writeBits(uint(depth[lit]), uint64(bits[lit]))
}
}
/* REQUIRES: len <= 1 << 24. */
-func storeMetaBlockHeader1(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) {
+func storeMetaBlockHeader1(len uint, is_uncompressed bool, bw *bitWriter) {
var nibbles uint = 6
/* ISLAST */
- writeBits(1, 0, storage_ix, storage)
+ bw.writeBits(1, 0)
if len <= 1<<16 {
nibbles = 4
@@ -293,34 +287,11 @@ func storeMetaBlockHeader1(len uint, is_uncompressed bool, storage_ix *uint, sto
nibbles = 5
}
- writeBits(2, uint64(nibbles)-4, storage_ix, storage)
- writeBits(nibbles*4, uint64(len)-1, storage_ix, storage)
+ bw.writeBits(2, uint64(nibbles)-4)
+ bw.writeBits(nibbles*4, uint64(len)-1)
/* ISUNCOMPRESSED */
- writeSingleBit(is_uncompressed, storage_ix, storage)
-}
-
-func updateBits(n_bits uint, bits uint32, pos uint, array []byte) {
- for n_bits > 0 {
- var byte_pos uint = pos >> 3
- var n_unchanged_bits uint = pos & 7
- var n_changed_bits uint = brotli_min_size_t(n_bits, 8-n_unchanged_bits)
- var total_bits uint = n_unchanged_bits + n_changed_bits
- var mask uint32 = (^((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1)
- var unchanged_bits uint32 = uint32(array[byte_pos]) & mask
- var changed_bits uint32 = bits & ((1 << n_changed_bits) - 1)
- array[byte_pos] = byte(changed_bits<<n_unchanged_bits | unchanged_bits)
- n_bits -= n_changed_bits
- bits >>= n_changed_bits
- pos += n_changed_bits
- }
-}
-
-func rewindBitPosition1(new_storage_ix uint, storage_ix *uint, storage []byte) {
- var bitpos uint = new_storage_ix & 7
- var mask uint = (1 << bitpos) - 1
- storage[new_storage_ix>>3] &= byte(mask)
- *storage_ix = new_storage_ix
+ bw.writeSingleBit(is_uncompressed)
}
var shouldMergeBlock_kSampleRate uint = 43
@@ -351,151 +322,26 @@ func shouldUseUncompressedMode(metablock_start []byte, next_emit []byte, insertl
}
}
-func emitUncompressedMetaBlock1(begin []byte, end []byte, storage_ix_start uint, storage_ix *uint, storage []byte) {
- var len uint = uint(-cap(end) + cap(begin))
- rewindBitPosition1(storage_ix_start, storage_ix, storage)
- storeMetaBlockHeader1(uint(len), true, storage_ix, storage)
- *storage_ix = (*storage_ix + 7) &^ 7
- copy(storage[*storage_ix>>3:], begin[:len])
- *storage_ix += uint(len << 3)
- storage[*storage_ix>>3] = 0
+func emitUncompressedMetaBlock1(data []byte, storage_ix_start uint, bw *bitWriter) {
+ bw.rewind(storage_ix_start)
+ storeMetaBlockHeader1(uint(len(data)), true, bw)
+ bw.jumpToByteBoundary()
+ bw.writeBytes(data)
}
var kCmdHistoSeed = [128]uint32{
- 0,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 0,
- 0,
- 0,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 0,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 0,
- 0,
- 0,
- 0,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 0, 0, 0, 0,
}
var compressFragmentFastImpl_kFirstBlockSize uint = 3 << 15
var compressFragmentFastImpl_kMergeBlockSize uint = 1 << 16
-func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []int, table_bits uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) {
+func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []int, table_bits uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, bw *bitWriter) {
var cmd_histo [128]uint32
var ip_end int
var next_emit int = 0
@@ -506,7 +352,7 @@ func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []
var metablock_start int = input
var block_size uint = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize)
var total_block_size uint = block_size
- var mlen_storage_ix uint = *storage_ix + 3
+ var mlen_storage_ix uint = bw.getPos() + 3
var lit_depth [256]byte
var lit_bits [256]uint16
var literal_ratio uint
@@ -523,21 +369,21 @@ func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []
/* Save the bit position of the MLEN field of the meta-block header, so that
we can update it later if we decide to extend this meta-block. */
- storeMetaBlockHeader1(block_size, false, storage_ix, storage)
+ storeMetaBlockHeader1(block_size, false, bw)
/* No block splits, no contexts. */
- writeBits(13, 0, storage_ix, storage)
+ bw.writeBits(13, 0)
- literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage)
+ literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], bw)
{
/* Store the pre-compressed command and distance prefix codes. */
var i uint
for i = 0; i+7 < *cmd_code_numbits; i += 8 {
- writeBits(8, uint64(cmd_code[i>>3]), storage_ix, storage)
+ bw.writeBits(8, uint64(cmd_code[i>>3]))
}
}
- writeBits(*cmd_code_numbits&7, uint64(cmd_code[*cmd_code_numbits>>3]), storage_ix, storage)
+ bw.writeBits(*cmd_code_numbits&7, uint64(cmd_code[*cmd_code_numbits>>3]))
/* Initialize the command and distance histograms. We will gather
statistics of command and distance codes during the processing
@@ -636,27 +482,27 @@ emit_commands:
var insert uint = uint(base - next_emit)
ip += int(matched)
if insert < 6210 {
- emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
+ emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
- emitUncompressedMetaBlock1(in[metablock_start:], in[base:], mlen_storage_ix-3, storage_ix, storage)
+ emitUncompressedMetaBlock1(in[metablock_start:base], mlen_storage_ix-3, bw)
input_size -= uint(base - input)
input = base
next_emit = input
goto next_block
} else {
- emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
+ emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
}
- emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
+ emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], bw)
if distance == last_distance {
- writeBits(uint(cmd_depth[64]), uint64(cmd_bits[64]), storage_ix, storage)
+ bw.writeBits(uint(cmd_depth[64]), uint64(cmd_bits[64]))
cmd_histo[64]++
} else {
- emitDistance1(uint(distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
+ emitDistance1(uint(distance), cmd_depth, cmd_bits, cmd_histo[:], bw)
last_distance = distance
}
- emitCopyLenLastDistance1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
+ emitCopyLenLastDistance1(matched, cmd_depth, cmd_bits, cmd_histo[:], bw)
next_emit = ip
if ip >= ip_limit {
@@ -692,8 +538,8 @@ emit_commands:
}
ip += int(matched)
last_distance = int(base - candidate) /* > 0 */
- emitCopyLen1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
- emitDistance1(uint(last_distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
+ emitCopyLen1(matched, cmd_depth, cmd_bits, cmd_histo[:], bw)
+ emitDistance1(uint(last_distance), cmd_depth, cmd_bits, cmd_histo[:], bw)
next_emit = ip
if ip >= ip_limit {
@@ -739,7 +585,7 @@ emit_remainder:
nibbles. */
total_block_size += block_size
- updateBits(20, uint32(total_block_size-1), mlen_storage_ix, storage)
+ bw.updateBits(20, uint32(total_block_size-1), mlen_storage_ix)
goto emit_commands
}
@@ -747,13 +593,13 @@ emit_remainder:
if next_emit < ip_end {
var insert uint = uint(ip_end - next_emit)
if insert < 6210 {
- emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
- emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
+ emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
+ emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], bw)
} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
- emitUncompressedMetaBlock1(in[metablock_start:], in[ip_end:], mlen_storage_ix-3, storage_ix, storage)
+ emitUncompressedMetaBlock1(in[metablock_start:ip_end], mlen_storage_ix-3, bw)
} else {
- emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
- emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
+ emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
+ emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], bw)
}
}
@@ -769,30 +615,29 @@ next_block:
/* Save the bit position of the MLEN field of the meta-block header, so that
we can update it later if we decide to extend this meta-block. */
- mlen_storage_ix = *storage_ix + 3
+ mlen_storage_ix = bw.getPos() + 3
- storeMetaBlockHeader1(block_size, false, storage_ix, storage)
+ storeMetaBlockHeader1(block_size, false, bw)
/* No block splits, no contexts. */
- writeBits(13, 0, storage_ix, storage)
+ bw.writeBits(13, 0)
- literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage)
- buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, storage_ix, storage)
+ literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], bw)
+ buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, bw)
goto emit_commands
}
if !is_last {
/* If this is not the last block, update the command and distance prefix
codes for the next block and store the compressed forms. */
- cmd_code[0] = 0
-
- *cmd_code_numbits = 0
- buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, cmd_code_numbits, cmd_code)
+ var bw bitWriter
+ bw.dst = cmd_code
+ buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, &bw)
+ *cmd_code_numbits = bw.getPos()
}
}
-/* Compresses "input" string to the "*storage" buffer as one or more complete
- meta-blocks, and updates the "*storage_ix" bit position.
+/* Compresses "input" string to bw as one or more complete meta-blocks.
If "is_last" is 1, emits an additional empty last meta-block.
@@ -813,28 +658,28 @@ next_block:
REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two
OUTPUT: maximal copy distance <= |input_size|
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
-func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) {
- var initial_storage_ix uint = *storage_ix
+func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, bw *bitWriter) {
+ var initial_storage_ix uint = bw.getPos()
var table_bits uint = uint(log2FloorNonZero(table_size))
if input_size == 0 {
assert(is_last)
- writeBits(1, 1, storage_ix, storage) /* islast */
- writeBits(1, 1, storage_ix, storage) /* isempty */
- *storage_ix = (*storage_ix + 7) &^ 7
+ bw.writeBits(1, 1) /* islast */
+ bw.writeBits(1, 1) /* isempty */
+ bw.jumpToByteBoundary()
return
}
- compressFragmentFastImpl(input, input_size, is_last, table, table_bits, cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, storage_ix, storage)
+ compressFragmentFastImpl(input, input_size, is_last, table, table_bits, cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, bw)
/* If output is larger than single uncompressed block, rewrite it. */
- if *storage_ix-initial_storage_ix > 31+(input_size<<3) {
- emitUncompressedMetaBlock1(input, input[input_size:], initial_storage_ix, storage_ix, storage)
+ if bw.getPos()-initial_storage_ix > 31+(input_size<<3) {
+ emitUncompressedMetaBlock1(input[:input_size], initial_storage_ix, bw)
}
if is_last {
- writeBits(1, 1, storage_ix, storage) /* islast */
- writeBits(1, 1, storage_ix, storage) /* isempty */
- *storage_ix = (*storage_ix + 7) &^ 7
+ bw.writeBits(1, 1) /* islast */
+ bw.writeBits(1, 1) /* isempty */
+ bw.jumpToByteBoundary()
}
}
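
Everything in compress_fragment.go now threads a *bitWriter where the old code passed the (storage_ix *uint, storage []byte) pair. The type itself is introduced elsewhere in this vendor update; below is a minimal sketch of the interface the hunks above rely on, reconstructed only from the call sites. It is illustrative, not the vendored implementation (the real one stages bits more efficiently and also has the updateBits(nbits, value, pos) method used above to patch the MLEN field in place).

// Illustrative sketch only, derived from the calls in this diff.
type bitWriter struct {
	dst   []byte // finished bytes of the stream
	bits  uint64 // pending bits; the low nbits are valid
	nbits uint   // number of pending bits (< 8 between calls)
}

// writeBits appends the low nBits of b to the stream, LSB first.
// REQUIRES: b has no bits set at or above position nBits.
func (w *bitWriter) writeBits(nBits uint, b uint64) {
	w.bits |= b << w.nbits
	w.nbits += nBits
	for w.nbits >= 8 {
		w.dst = append(w.dst, byte(w.bits))
		w.bits >>= 8
		w.nbits -= 8
	}
}

func (w *bitWriter) writeSingleBit(bit bool) {
	if bit {
		w.writeBits(1, 1)
	} else {
		w.writeBits(1, 0)
	}
}

// writeBytes appends whole bytes; the stream must be byte-aligned.
func (w *bitWriter) writeBytes(b []byte) {
	w.dst = append(w.dst, b...)
}

// getPos reports the current length of the stream in bits.
func (w *bitWriter) getPos() uint {
	return uint(len(w.dst))*8 + w.nbits
}

// jumpToByteBoundary zero-pads to the next byte boundary.
func (w *bitWriter) jumpToByteBoundary() {
	if w.nbits > 0 {
		w.dst = append(w.dst, byte(w.bits))
		w.bits = 0
		w.nbits = 0
	}
}

// rewind truncates the stream back to bit position pos <= getPos().
func (w *bitWriter) rewind(pos uint) {
	if pos >= uint(len(w.dst))*8 {
		w.nbits = pos - uint(len(w.dst))*8
		w.bits &= (1 << w.nbits) - 1
		return
	}
	w.bits = uint64(w.dst[pos>>3]) & ((1 << (pos & 7)) - 1)
	w.nbits = pos & 7
	w.dst = w.dst[:pos>>3]
}

The key behavioral change: dst grows on demand, so callers no longer pre-size an output buffer and no longer carry the partial last byte between meta-blocks by hand (which is what the removed last_bytes_/last_bytes_bits_ bookkeeping in encode.go did).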
diff --git a/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go b/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go
index ffeb321644b..2473aca3fec 100644
--- a/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go
+++ b/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go
@@ -30,19 +30,18 @@ func hashBytesAtOffset(v uint64, offset uint, shift uint, length uint) uint32 {
}
func isMatch1(p1 []byte, p2 []byte, length uint) bool {
- var i uint
- for i = 0; i < length && i < 6; i++ {
- if p1[i] != p2[i] {
- return false
- }
+ if binary.LittleEndian.Uint32(p1) != binary.LittleEndian.Uint32(p2) {
+ return false
}
-
- return true
+ if length == 4 {
+ return true
+ }
+ return p1[4] == p2[4] && p1[5] == p2[5]
}
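
isMatch1 now compares the first four bytes as a single little-endian word load; this relies on the encoding/binary import (added elsewhere in this file) and on callers guaranteeing at least 4 readable bytes, or 6 when length == 6. A self-contained copy to illustrate the behavior:

package main

import (
	"encoding/binary"
	"fmt"
)

// Same logic as the vendored isMatch1 above.
func isMatch1(p1, p2 []byte, length uint) bool {
	if binary.LittleEndian.Uint32(p1) != binary.LittleEndian.Uint32(p2) {
		return false
	}
	if length == 4 {
		return true
	}
	return p1[4] == p2[4] && p1[5] == p2[5]
}

func main() {
	a := []byte("abcdXzz")
	b := []byte("abcdYzz")
	fmt.Println(isMatch1(a, b, 4)) // true: the first 4-byte word matches
	fmt.Println(isMatch1(a, b, 6)) // false: byte 4 differs
}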
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
"bits" based on "histogram" and stores it into the bit stream. */
-func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
+func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, bw *bitWriter) {
var tree [129]huffmanTree
var cmd_depth = [numCommandSymbols]byte{0}
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
@@ -88,10 +87,10 @@ func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uin
cmd_depth[448+8*i] = depth[16+i]
}
- storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage)
+ storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], bw)
}
- storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage)
+ storeHuffmanTree(depth[64:], 64, tree[:], bw)
}
func emitInsertLen(insertlen uint32, commands *[]uint32) {
@@ -198,11 +197,11 @@ func emitDistance(distance uint32, commands *[]uint32) {
}
/* REQUIRES: len <= 1 << 24. */
-func storeMetaBlockHeader(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) {
+func storeMetaBlockHeader(len uint, is_uncompressed bool, bw *bitWriter) {
var nibbles uint = 6
/* ISLAST */
- writeBits(1, 0, storage_ix, storage)
+ bw.writeBits(1, 0)
if len <= 1<<16 {
nibbles = 4
@@ -210,11 +209,11 @@ func storeMetaBlockHeader(len uint, is_uncompressed bool, storage_ix *uint, stor
nibbles = 5
}
- writeBits(2, uint64(nibbles)-4, storage_ix, storage)
- writeBits(nibbles*4, uint64(len)-1, storage_ix, storage)
+ bw.writeBits(2, uint64(nibbles)-4)
+ bw.writeBits(nibbles*4, uint64(len)-1)
/* ISUNCOMPRESSED */
- writeSingleBit(is_uncompressed, storage_ix, storage)
+ bw.writeSingleBit(is_uncompressed)
}
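
The meta-block header that storeMetaBlockHeader (and storeMetaBlockHeader1 above) emits is: ISLAST (1 bit), MNIBBLES-4 (2 bits), MLEN-1 (4, 5, or 6 nibbles), ISUNCOMPRESSED (1 bit). A hypothetical trace for a 100000-byte compressed block:

// storeMetaBlockHeader(100000, false, bw) emits:
//   bw.writeBits(1, 0)       // ISLAST = 0
//   bw.writeBits(2, 1)       // nibbles-4: 100000 <= 1<<20, so 5 nibbles
//   bw.writeBits(20, 99999)  // len-1 in 5 nibbles
//   bw.writeSingleBit(false) // ISUNCOMPRESSED
// 24 bits in total, then the block's prefix codes follow.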
func createCommands(input []byte, block_size uint, input_size uint, base_ip_ptr []byte, table []int, table_bits uint, min_match uint, literals *[]byte, commands *[]uint32) {
@@ -441,163 +440,20 @@ emit_remainder:
}
var storeCommands_kNumExtraBits = [128]uint32{
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 1,
- 1,
- 2,
- 2,
- 3,
- 3,
- 4,
- 4,
- 5,
- 5,
- 6,
- 7,
- 8,
- 9,
- 10,
- 12,
- 14,
- 24,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 1,
- 1,
- 2,
- 2,
- 3,
- 3,
- 4,
- 4,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 1,
- 1,
- 2,
- 2,
- 3,
- 3,
- 4,
- 4,
- 5,
- 5,
- 6,
- 7,
- 8,
- 9,
- 10,
- 24,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 1,
- 1,
- 2,
- 2,
- 3,
- 3,
- 4,
- 4,
- 5,
- 5,
- 6,
- 6,
- 7,
- 7,
- 8,
- 8,
- 9,
- 9,
- 10,
- 10,
- 11,
- 11,
- 12,
- 12,
- 13,
- 13,
- 14,
- 14,
- 15,
- 15,
- 16,
- 16,
- 17,
- 17,
- 18,
- 18,
- 19,
- 19,
- 20,
- 20,
- 21,
- 21,
- 22,
- 22,
- 23,
- 23,
- 24,
- 24,
+ 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 12, 14, 24,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 24,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16,
+ 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
}
var storeCommands_kInsertOffset = [24]uint32{
- 0,
- 1,
- 2,
- 3,
- 4,
- 5,
- 6,
- 8,
- 10,
- 14,
- 18,
- 26,
- 34,
- 50,
- 66,
- 98,
- 130,
- 194,
- 322,
- 578,
- 1090,
- 2114,
- 6210,
- 22594,
+ 0, 1, 2, 3, 4, 5, 6, 8, 10, 14, 18, 26, 34, 50, 66, 98, 130, 194, 322, 578,
+ 1090, 2114, 6210, 22594,
}
-func storeCommands(literals []byte, num_literals uint, commands []uint32, num_commands uint, storage_ix *uint, storage []byte) {
+func storeCommands(literals []byte, num_literals uint, commands []uint32, num_commands uint, bw *bitWriter) {
var lit_depths [256]byte
var lit_bits [256]uint16
var lit_histo = [256]uint32{0}
@@ -610,7 +466,7 @@ func storeCommands(literals []byte, num_literals uint, commands []uint32, num_co
}
buildAndStoreHuffmanTreeFast(lit_histo[:], num_literals, /* max_bits = */
- 8, lit_depths[:], lit_bits[:], storage_ix, storage)
+ 8, lit_depths[:], lit_bits[:], bw)
for i = 0; i < num_commands; i++ {
var code uint32 = commands[i] & 0xFF
@@ -622,21 +478,21 @@ func storeCommands(literals []byte, num_literals uint, commands []uint32, num_co
cmd_histo[2] += 1
cmd_histo[64] += 1
cmd_histo[84] += 1
- buildAndStoreCommandPrefixCode(cmd_histo[:], cmd_depths[:], cmd_bits[:], storage_ix, storage)
+ buildAndStoreCommandPrefixCode(cmd_histo[:], cmd_depths[:], cmd_bits[:], bw)
for i = 0; i < num_commands; i++ {
var cmd uint32 = commands[i]
var code uint32 = cmd & 0xFF
var extra uint32 = cmd >> 8
assert(code < 128)
- writeBits(uint(cmd_depths[code]), uint64(cmd_bits[code]), storage_ix, storage)
- writeBits(uint(storeCommands_kNumExtraBits[code]), uint64(extra), storage_ix, storage)
+ bw.writeBits(uint(cmd_depths[code]), uint64(cmd_bits[code]))
+ bw.writeBits(uint(storeCommands_kNumExtraBits[code]), uint64(extra))
if code < 24 {
var insert uint32 = storeCommands_kInsertOffset[code] + extra
var j uint32
for j = 0; j < insert; j++ {
var lit byte = literals[0]
- writeBits(uint(lit_depths[lit]), uint64(lit_bits[lit]), storage_ix, storage)
+ bw.writeBits(uint(lit_depths[lit]), uint64(lit_bits[lit]))
literals = literals[1:]
}
}
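
Each uint32 in commands packs a command-prefix symbol in the low byte and its raw extra bits above it, which is why the loop above splits cmd into code and extra and, for insert codes (code < 24), recovers the literal count from the offset table. A small self-contained example:

package main

import "fmt"

// Mirrors storeCommands_kInsertOffset above.
var kInsertOffset = [24]uint32{
	0, 1, 2, 3, 4, 5, 6, 8, 10, 14, 18, 26, 34, 50, 66, 98,
	130, 194, 322, 578, 1090, 2114, 6210, 22594,
}

func main() {
	var cmd uint32 = 8 | 3<<8 // insert code 8 with extra-bits value 3
	code := cmd & 0xFF        // 8
	extra := cmd >> 8         // 3
	// storeCommands would emit the Huffman symbol for code 8, then the
	// 2 extra bits, then this many literals:
	fmt.Println(kInsertOffset[code] + extra) // 13
}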
@@ -664,22 +520,13 @@ func shouldCompress(input []byte, input_size uint, num_literals uint) bool {
}
}
-func rewindBitPosition(new_storage_ix uint, storage_ix *uint, storage []byte) {
- var bitpos uint = new_storage_ix & 7
- var mask uint = (1 << bitpos) - 1
- storage[new_storage_ix>>3] &= byte(mask)
- *storage_ix = new_storage_ix
-}
-
-func emitUncompressedMetaBlock(input []byte, input_size uint, storage_ix *uint, storage []byte) {
- storeMetaBlockHeader(input_size, true, storage_ix, storage)
- *storage_ix = (*storage_ix + 7) &^ 7
- copy(storage[*storage_ix>>3:], input[:input_size])
- *storage_ix += input_size << 3
- storage[*storage_ix>>3] = 0
+func emitUncompressedMetaBlock(input []byte, input_size uint, bw *bitWriter) {
+ storeMetaBlockHeader(input_size, true, bw)
+ bw.jumpToByteBoundary()
+ bw.writeBytes(input[:input_size])
}
-func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_bits uint, min_match uint, storage_ix *uint, storage []byte) {
+func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_bits uint, min_match uint, bw *bitWriter) {
/* Save the start of the first block for position and distance computations.
*/
var base_ip []byte = input
@@ -693,17 +540,17 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
num_literals = uint(-cap(literals) + cap(literal_buf))
if shouldCompress(input, block_size, num_literals) {
var num_commands uint = uint(-cap(commands) + cap(command_buf))
- storeMetaBlockHeader(block_size, false, storage_ix, storage)
+ storeMetaBlockHeader(block_size, false, bw)
/* No block splits, no contexts. */
- writeBits(13, 0, storage_ix, storage)
+ bw.writeBits(13, 0)
- storeCommands(literal_buf, num_literals, command_buf, num_commands, storage_ix, storage)
+ storeCommands(literal_buf, num_literals, command_buf, num_commands, bw)
} else {
/* Since we did not find many backward references and the entropy of
the data is close to 8 bits, we can simply emit an uncompressed block.
This makes compression speed of uncompressible data about 3x faster. */
- emitUncompressedMetaBlock(input, block_size, storage_ix, storage)
+ emitUncompressedMetaBlock(input, block_size, bw)
}
input = input[block_size:]
@@ -711,8 +558,7 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
}
}
-/* Compresses "input" string to the "*storage" buffer as one or more complete
- meta-blocks, and updates the "*storage_ix" bit position.
+/* Compresses "input" string to bw as one or more complete meta-blocks.
If "is_last" is 1, emits an additional empty last meta-block.
@@ -724,8 +570,8 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
REQUIRES: "table_size" is a power of two
OUTPUT: maximal copy distance <= |input_size|
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
-func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, storage_ix *uint, storage []byte) {
- var initial_storage_ix uint = *storage_ix
+func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, bw *bitWriter) {
+ var initial_storage_ix uint = bw.getPos()
var table_bits uint = uint(log2FloorNonZero(table_size))
var min_match uint
if table_bits <= 15 {
@@ -733,17 +579,17 @@ func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, comman
} else {
min_match = 6
}
- compressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, storage_ix, storage)
+ compressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, bw)
/* If output is larger than single uncompressed block, rewrite it. */
- if *storage_ix-initial_storage_ix > 31+(input_size<<3) {
- rewindBitPosition(initial_storage_ix, storage_ix, storage)
- emitUncompressedMetaBlock(input, input_size, storage_ix, storage)
+ if bw.getPos()-initial_storage_ix > 31+(input_size<<3) {
+ bw.rewind(initial_storage_ix)
+ emitUncompressedMetaBlock(input, input_size, bw)
}
if is_last {
- writeBits(1, 1, storage_ix, storage) /* islast */
- writeBits(1, 1, storage_ix, storage) /* isempty */
- *storage_ix = (*storage_ix + 7) &^ 7
+ bw.writeBits(1, 1) /* islast */
+ bw.writeBits(1, 1) /* isempty */
+ bw.jumpToByteBoundary()
}
}
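
Both fragment compressors end with the same guard: if the compressed stream grew past the input size plus 31 bits of worst-case header overhead, the writer is rewound and the whole block is re-emitted uncompressed. A sketch of the shared pattern, assuming the bitWriter sketch shown after compress_fragment.go; compress is a hypothetical stand-in for the *Impl call:

// Sketch, not the vendored code.
func compressOrStore(input []byte, bw *bitWriter, compress func([]byte, *bitWriter)) {
	initial := bw.getPos()
	compress(input, bw)
	// Compression expanded the data: drop everything written since
	// initial and store the block uncompressed instead.
	if bw.getPos()-initial > 31+uint(len(input))<<3 {
		bw.rewind(initial)
		emitUncompressedMetaBlock(input, uint(len(input)), bw)
	}
}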
diff --git a/vendor/github.com/andybalholm/brotli/encode.go b/vendor/github.com/andybalholm/brotli/encode.go
index c01322bf147..3abaf571aff 100644
--- a/vendor/github.com/andybalholm/brotli/encode.go
+++ b/vendor/github.com/andybalholm/brotli/encode.go
@@ -74,26 +74,22 @@ const (
type Writer struct {
dst io.Writer
options WriterOptions
+ err error
params encoderParams
hasher_ hasherHandle
input_pos_ uint64
ringbuffer_ ringBuffer
- cmd_alloc_size_ uint
- commands_ []command
- num_commands_ uint
+ commands []command
num_literals_ uint
last_insert_len_ uint
last_flush_pos_ uint64
last_processed_pos_ uint64
dist_cache_ [numDistanceShortCodes]int
saved_dist_cache_ [4]int
- last_bytes_ uint16
- last_bytes_bits_ byte
prev_byte_ byte
prev_byte2_ byte
- storage_size_ uint
- storage_ []byte
+ bw bitWriter
small_table_ [1 << 10]int
large_table_ []int
large_table_size_ uint
@@ -103,9 +99,6 @@ type Writer struct {
cmd_code_numbits_ uint
command_buf_ []uint32
literal_buf_ []byte
- next_out_ []byte
- available_out_ uint
- total_out_ uint
tiny_buf_ struct {
u64 [2]uint64
u8 [16]byte
@@ -146,16 +139,6 @@ func wrapPosition(position uint64) uint32 {
return result
}
-func getBrotliStorage(s *Writer, size uint) []byte {
- if s.storage_size_ < size {
- s.storage_ = nil
- s.storage_ = make([]byte, size)
- s.storage_size_ = size
- }
-
- return s.storage_
-}
-
func hashTableSize(max_table_size uint, input_size uint) uint {
var htsize uint = 256
for htsize < max_table_size && htsize < input_size {
@@ -201,353 +184,22 @@ func getHashTable(s *Writer, quality int, input_size uint, table_size *uint) []i
return table
}
-func encodeWindowBits(lgwin int, large_window bool, last_bytes *uint16, last_bytes_bits *byte) {
+func encodeWindowBits(lgwin int, large_window bool, bw *bitWriter) {
if large_window {
- *last_bytes = uint16((lgwin&0x3F)<<8 | 0x11)
- *last_bytes_bits = 14
+ bw.writeBits(14, uint64((lgwin&0x3F)<<8|0x11))
} else {
if lgwin == 16 {
- *last_bytes = 0
- *last_bytes_bits = 1
+ bw.writeBits(1, 0)
} else if lgwin == 17 {
- *last_bytes = 1
- *last_bytes_bits = 7
+ bw.writeBits(7, 1)
} else if lgwin > 17 {
- *last_bytes = uint16((lgwin-17)<<1 | 0x01)
- *last_bytes_bits = 4
+ bw.writeBits(4, uint64((lgwin-17)<<1|0x01))
} else {
- *last_bytes = uint16((lgwin-8)<<4 | 0x01)
- *last_bytes_bits = 7
+ bw.writeBits(7, uint64((lgwin-8)<<4|0x01))
}
}
}
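
encodeWindowBits now writes the stream header directly instead of parking it in last_bytes_. For reference, the non-large-window encodings it produces, matching the branches above:

// lgwin = 16 -> 1 bit:  0
// lgwin = 17 -> 7 bits: 0000001
// lgwin > 17 -> 4 bits: (lgwin-17)<<1 | 1   e.g. lgwin=22 -> 1011
// lgwin < 16 -> 7 bits: (lgwin-8)<<4 | 1    e.g. lgwin=10 -> 0100001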
-/* Initializes the command and distance prefix codes for the first block. */
-
-var initCommandPrefixCodes_kDefaultCommandDepths = [128]byte{
- 0,
- 4,
- 4,
- 5,
- 6,
- 6,
- 7,
- 7,
- 7,
- 7,
- 7,
- 8,
- 8,
- 8,
- 8,
- 8,
- 0,
- 0,
- 0,
- 4,
- 4,
- 4,
- 4,
- 4,
- 5,
- 5,
- 6,
- 6,
- 6,
- 6,
- 7,
- 7,
- 7,
- 7,
- 10,
- 10,
- 10,
- 10,
- 10,
- 10,
- 0,
- 4,
- 4,
- 5,
- 5,
- 5,
- 6,
- 6,
- 7,
- 8,
- 8,
- 9,
- 10,
- 10,
- 10,
- 10,
- 10,
- 10,
- 10,
- 10,
- 10,
- 10,
- 10,
- 10,
- 5,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 6,
- 6,
- 6,
- 6,
- 6,
- 6,
- 5,
- 5,
- 5,
- 5,
- 5,
- 5,
- 4,
- 4,
- 4,
- 4,
- 4,
- 4,
- 4,
- 5,
- 5,
- 5,
- 5,
- 5,
- 5,
- 6,
- 6,
- 7,
- 7,
- 7,
- 8,
- 10,
- 12,
- 12,
- 12,
- 12,
- 12,
- 12,
- 12,
- 12,
- 12,
- 12,
- 12,
- 12,
-}
-var initCommandPrefixCodes_kDefaultCommandBits = [128]uint16{
- 0,
- 0,
- 8,
- 9,
- 3,
- 35,
- 7,
- 71,
- 39,
- 103,
- 23,
- 47,
- 175,
- 111,
- 239,
- 31,
- 0,
- 0,
- 0,
- 4,
- 12,
- 2,
- 10,
- 6,
- 13,
- 29,
- 11,
- 43,
- 27,
- 59,
- 87,
- 55,
- 15,
- 79,
- 319,
- 831,
- 191,
- 703,
- 447,
- 959,
- 0,
- 14,
- 1,
- 25,
- 5,
- 21,
- 19,
- 51,
- 119,
- 159,
- 95,
- 223,
- 479,
- 991,
- 63,
- 575,
- 127,
- 639,
- 383,
- 895,
- 255,
- 767,
- 511,
- 1023,
- 14,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 27,
- 59,
- 7,
- 39,
- 23,
- 55,
- 30,
- 1,
- 17,
- 9,
- 25,
- 5,
- 0,
- 8,
- 4,
- 12,
- 2,
- 10,
- 6,
- 21,
- 13,
- 29,
- 3,
- 19,
- 11,
- 15,
- 47,
- 31,
- 95,
- 63,
- 127,
- 255,
- 767,
- 2815,
- 1791,
- 3839,
- 511,
- 2559,
- 1535,
- 3583,
- 1023,
- 3071,
- 2047,
- 4095,
-}
-var initCommandPrefixCodes_kDefaultCommandCode = []byte{
- 0xff,
- 0x77,
- 0xd5,
- 0xbf,
- 0xe7,
- 0xde,
- 0xea,
- 0x9e,
- 0x51,
- 0x5d,
- 0xde,
- 0xc6,
- 0x70,
- 0x57,
- 0xbc,
- 0x58,
- 0x58,
- 0x58,
- 0xd8,
- 0xd8,
- 0x58,
- 0xd5,
- 0xcb,
- 0x8c,
- 0xea,
- 0xe0,
- 0xc3,
- 0x87,
- 0x1f,
- 0x83,
- 0xc1,
- 0x60,
- 0x1c,
- 0x67,
- 0xb2,
- 0xaa,
- 0x06,
- 0x83,
- 0xc1,
- 0x60,
- 0x30,
- 0x18,
- 0xcc,
- 0xa1,
- 0xce,
- 0x88,
- 0x54,
- 0x94,
- 0x46,
- 0xe1,
- 0xb0,
- 0xd0,
- 0x4e,
- 0xb2,
- 0xf7,
- 0x04,
- 0x00,
-}
-var initCommandPrefixCodes_kDefaultCommandCodeNumBits uint = 448
-
-func initCommandPrefixCodes(cmd_depths []byte, cmd_bits []uint16, cmd_code []byte, cmd_code_numbits *uint) {
- copy(cmd_depths, initCommandPrefixCodes_kDefaultCommandDepths[:])
- copy(cmd_bits, initCommandPrefixCodes_kDefaultCommandBits[:])
-
- /* Initialize the pre-compressed form of the command and distance prefix
- codes. */
- copy(cmd_code, initCommandPrefixCodes_kDefaultCommandCode)
-
- *cmd_code_numbits = initCommandPrefixCodes_kDefaultCommandCodeNumBits
-}
-
/* Decide about the context map based on the ability of the prediction
ability of the previous byte UTF8-prefix on the next byte. The
prediction ability is calculated as Shannon entropy. Here we need
@@ -557,136 +209,16 @@ func initCommandPrefixCodes(cmd_depths []byte, cmd_bits []uint16, cmd_code []byt
coding. */
var kStaticContextMapContinuation = [64]uint32{
- 1,
- 1,
- 2,
- 2,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
+ 1, 1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
}
var kStaticContextMapSimpleUTF8 = [64]uint32{
- 0,
- 0,
- 1,
- 1,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
+ 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
}
func chooseContextMap(quality int, bigram_histo []uint32, num_literal_contexts *uint, literal_context_map *[]uint32) {
@@ -738,70 +270,22 @@ func chooseContextMap(quality int, bigram_histo []uint32, num_literal_contexts *
first 5 bits of literals. */
var kStaticContextMapComplexUTF8 = [64]uint32{
- 11,
- 11,
- 12,
- 12,
- 0,
- 0,
- 0,
- 0,
- 1,
- 1,
- 9,
- 9,
- 2,
- 2,
- 2,
- 2,
- 1,
- 1,
- 1,
- 1,
- 8,
- 3,
- 3,
- 3,
- 1,
- 1,
- 1,
- 1,
- 2,
- 2,
- 2,
- 2,
- 8,
- 4,
- 4,
- 4,
- 8,
- 7,
- 4,
- 4,
- 8,
- 0,
- 0,
- 0,
- 3,
- 3,
- 3,
- 3,
- 5,
- 5,
- 10,
- 5,
- 5,
- 5,
- 10,
- 5,
- 6,
- 6,
- 6,
- 6,
- 6,
- 6,
- 6,
- 6,
+ 11, 11, 12, 12, /* 0 special */
+ 0, 0, 0, 0, /* 4 lf */
+ 1, 1, 9, 9, /* 8 space */
+ 2, 2, 2, 2, /* !, first after space/lf and after something else. */
+ 1, 1, 1, 1, /* " */
+ 8, 3, 3, 3, /* % */
+ 1, 1, 1, 1, /* ({[ */
+ 2, 2, 2, 2, /* }]) */
+ 8, 4, 4, 4, /* :; */
+ 8, 7, 4, 4, /* . */
+ 8, 0, 0, 0, /* > */
+ 3, 3, 3, 3, /* [0..9] */
+ 5, 5, 10, 5, /* [A-Z] */
+ 5, 5, 10, 5,
+ 6, 6, 6, 6, /* [a-z] */
+ 6, 6, 6, 6,
}
func shouldUseComplexStaticContextMap(input []byte, start_pos uint, length uint, mask uint, quality int, size_hint uint, num_literal_contexts *uint, literal_context_map *[]uint32) bool {
@@ -933,40 +417,34 @@ func chooseContextMode(params *encoderParams, data []byte, pos uint, mask uint,
return contextUTF8
}
-func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes uint, is_last bool, literal_context_mode int, params *encoderParams, prev_byte byte, prev_byte2 byte, num_literals uint, num_commands uint, commands []command, saved_dist_cache []int, dist_cache []int, storage_ix *uint, storage []byte) {
+func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes uint, is_last bool, literal_context_mode int, params *encoderParams, prev_byte byte, prev_byte2 byte, num_literals uint, commands []command, saved_dist_cache []int, dist_cache []int, bw *bitWriter) {
var wrapped_last_flush_pos uint32 = wrapPosition(last_flush_pos)
- var last_bytes uint16
- var last_bytes_bits byte
var literal_context_lut contextLUT = getContextLUT(literal_context_mode)
var block_params encoderParams = *params
if bytes == 0 {
/* Write the ISLAST and ISEMPTY bits. */
- writeBits(2, 3, storage_ix, storage)
-
- *storage_ix = (*storage_ix + 7) &^ 7
+ bw.writeBits(2, 3)
+ bw.jumpToByteBoundary()
return
}
- if !shouldCompress_encode(data, mask, last_flush_pos, bytes, num_literals, num_commands) {
+ if !shouldCompress_encode(data, mask, last_flush_pos, bytes, num_literals, uint(len(commands))) {
/* Restore the distance cache, as its last update by
CreateBackwardReferences is now unused. */
copy(dist_cache, saved_dist_cache[:4])
- storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, storage_ix, storage)
+ storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, bw)
return
}
- assert(*storage_ix <= 14)
- last_bytes = uint16(storage[1])<<8 | uint16(storage[0])
- last_bytes_bits = byte(*storage_ix)
+ savedPos := bw.getPos()
if params.quality <= maxQualityForStaticEntropyCodes {
- storeMetaBlockFast(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, num_commands, storage_ix, storage)
+ storeMetaBlockFast(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, bw)
} else if params.quality < minQualityForBlockSplit {
- storeMetaBlockTrivial(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, num_commands, storage_ix, storage)
+ storeMetaBlockTrivial(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, bw)
} else {
- var mb metaBlockSplit
- initMetaBlockSplit(&mb)
+ mb := getMetaBlockSplit()
if params.quality < minQualityForHqBlockSplitting {
var num_literal_contexts uint = 1
var literal_context_map []uint32 = nil
@@ -974,9 +452,9 @@ func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes
decideOverLiteralContextModeling(data, uint(wrapped_last_flush_pos), bytes, mask, params.quality, params.size_hint, &num_literal_contexts, &literal_context_map)
}
- buildMetaBlockGreedy(data, uint(wrapped_last_flush_pos), mask, prev_byte, prev_byte2, literal_context_lut, num_literal_contexts, literal_context_map, commands, num_commands, &mb)
+ buildMetaBlockGreedy(data, uint(wrapped_last_flush_pos), mask, prev_byte, prev_byte2, literal_context_lut, num_literal_contexts, literal_context_map, commands, mb)
} else {
- buildMetaBlock(data, uint(wrapped_last_flush_pos), mask, &block_params, prev_byte, prev_byte2, commands, num_commands, literal_context_mode, &mb)
+ buildMetaBlock(data, uint(wrapped_last_flush_pos), mask, &block_params, prev_byte, prev_byte2, commands, literal_context_mode, mb)
}
if params.quality >= minQualityForOptimizeHistograms {
@@ -988,21 +466,18 @@ func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes
num_effective_dist_codes = numHistogramDistanceSymbols
}
- optimizeHistograms(num_effective_dist_codes, &mb)
+ optimizeHistograms(num_effective_dist_codes, mb)
}
- storeMetaBlock(data, uint(wrapped_last_flush_pos), bytes, mask, prev_byte, prev_byte2, is_last, &block_params, literal_context_mode, commands, num_commands, &mb, storage_ix, storage)
- destroyMetaBlockSplit(&mb)
+ storeMetaBlock(data, uint(wrapped_last_flush_pos), bytes, mask, prev_byte, prev_byte2, is_last, &block_params, literal_context_mode, commands, mb, bw)
+ freeMetaBlockSplit(mb)
}
- if bytes+4 < *storage_ix>>3 {
+ if bytes+4 < bw.getPos()>>3 {
/* Restore the distance cache and last byte. */
copy(dist_cache, saved_dist_cache[:4])
-
- storage[0] = byte(last_bytes)
- storage[1] = byte(last_bytes >> 8)
- *storage_ix = uint(last_bytes_bits)
- storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, storage_ix, storage)
+ bw.rewind(savedPos)
+ storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, bw)
}
}
@@ -1035,8 +510,10 @@ func ensureInitialized(s *Writer) bool {
return true
}
- s.last_bytes_bits_ = 0
- s.last_bytes_ = 0
+ s.bw.bits = 0
+ s.bw.nbits = 0
+ s.bw.dst = s.bw.dst[:0]
+
s.remaining_metadata_bytes_ = math.MaxUint32
sanitizeParams(&s.params)
@@ -1052,11 +529,42 @@ func ensureInitialized(s *Writer) bool {
lgwin = brotli_max_int(lgwin, 18)
}
- encodeWindowBits(lgwin, s.params.large_window, &s.last_bytes_, &s.last_bytes_bits_)
+ encodeWindowBits(lgwin, s.params.large_window, &s.bw)
}
if s.params.quality == fastOnePassCompressionQuality {
- initCommandPrefixCodes(s.cmd_depths_[:], s.cmd_bits_[:], s.cmd_code_[:], &s.cmd_code_numbits_)
+ s.cmd_depths_ = [128]byte{
+ 0, 4, 4, 5, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8,
+ 0, 0, 0, 4, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7,
+ 7, 7, 10, 10, 10, 10, 10, 10, 0, 4, 4, 5, 5, 5, 6, 6,
+ 7, 8, 8, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4,
+ 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7, 7, 8, 10,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ }
+ s.cmd_bits_ = [128]uint16{
+ 0, 0, 8, 9, 3, 35, 7, 71,
+ 39, 103, 23, 47, 175, 111, 239, 31,
+ 0, 0, 0, 4, 12, 2, 10, 6,
+ 13, 29, 11, 43, 27, 59, 87, 55,
+ 15, 79, 319, 831, 191, 703, 447, 959,
+ 0, 14, 1, 25, 5, 21, 19, 51,
+ 119, 159, 95, 223, 479, 991, 63, 575,
+ 127, 639, 383, 895, 255, 767, 511, 1023,
+ 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 27, 59, 7, 39, 23, 55, 30, 1, 17, 9, 25, 5, 0, 8, 4, 12,
+ 2, 10, 6, 21, 13, 29, 3, 19, 11, 15, 47, 31, 95, 63, 127, 255,
+ 767, 2815, 1791, 3839, 511, 2559, 1535, 3583, 1023, 3071, 2047, 4095,
+ }
+ s.cmd_code_ = [512]byte{
+ 0xff, 0x77, 0xd5, 0xbf, 0xe7, 0xde, 0xea, 0x9e, 0x51, 0x5d, 0xde, 0xc6,
+ 0x70, 0x57, 0xbc, 0x58, 0x58, 0x58, 0xd8, 0xd8, 0x58, 0xd5, 0xcb, 0x8c,
+ 0xea, 0xe0, 0xc3, 0x87, 0x1f, 0x83, 0xc1, 0x60, 0x1c, 0x67, 0xb2, 0xaa,
+ 0x06, 0x83, 0xc1, 0x60, 0x30, 0x18, 0xcc, 0xa1, 0xce, 0x88, 0x54, 0x94,
+ 0x46, 0xe1, 0xb0, 0xd0, 0x4e, 0xb2, 0xf7, 0x04, 0x00,
+ }
+ s.cmd_code_numbits_ = 448
}
s.is_initialized_ = true
@@ -1081,33 +589,23 @@ func encoderInitParams(params *encoderParams) {
func encoderInitState(s *Writer) {
encoderInitParams(&s.params)
s.input_pos_ = 0
- s.num_commands_ = 0
+ s.commands = s.commands[:0]
s.num_literals_ = 0
s.last_insert_len_ = 0
s.last_flush_pos_ = 0
s.last_processed_pos_ = 0
s.prev_byte_ = 0
s.prev_byte2_ = 0
- s.storage_size_ = 0
- s.storage_ = nil
- s.hasher_ = nil
- s.large_table_ = nil
- s.large_table_size_ = 0
+ if s.hasher_ != nil {
+ s.hasher_.Common().is_prepared_ = false
+ }
s.cmd_code_numbits_ = 0
- s.command_buf_ = nil
- s.literal_buf_ = nil
- s.next_out_ = nil
- s.available_out_ = 0
- s.total_out_ = 0
s.stream_state_ = streamProcessing
s.is_last_block_emitted_ = false
s.is_initialized_ = false
ringBufferInit(&s.ringbuffer_)
- s.commands_ = nil
- s.cmd_alloc_size_ = 0
-
/* Initialize distance cache. */
s.dist_cache_[0] = 4
@@ -1190,7 +688,7 @@ func updateLastProcessedPos(s *Writer) bool {
}
func extendLastCommand(s *Writer, bytes *uint32, wrapped_last_processed_pos *uint32) {
- var last_command *command = &s.commands_[s.num_commands_-1]
+ var last_command *command = &s.commands[len(s.commands)-1]
var data []byte = s.ringbuffer_.buffer_
var mask uint32 = s.ringbuffer_.mask_
var max_backward_distance uint64 = ((uint64(1)) << s.params.lgwin) - windowGap
@@ -1219,18 +717,17 @@ func extendLastCommand(s *Writer, bytes *uint32, wrapped_last_processed_pos *uin
}
/*
- Processes the accumulated input data and sets |*out_size| to the length of
- the new output meta-block, or to zero if no new output meta-block has been
- created (in this case the processed input data is buffered internally).
- If |*out_size| is positive, |*output| points to the start of the output
- data. If |is_last| or |force_flush| is true, an output meta-block is
+ Processes the accumulated input data and writes
+ the new output meta-block to s.dest, if one has been
+ created (otherwise the processed input data is buffered internally).
+ If |is_last| or |force_flush| is true, an output meta-block is
always created. However, until |is_last| is true encoder may retain up
to 7 bits of the last byte of output. To force encoder to dump the remaining
bits use WriteMetadata() to append an empty meta-data block.
Returns false if the size of the input data is larger than
input_block_size().
*/
-func encodeData(s *Writer, is_last bool, force_flush bool, out_size *uint, output *[]byte) bool {
+func encodeData(s *Writer, is_last bool, force_flush bool) bool {
var delta uint64 = unprocessedInputSize(s)
var bytes uint32 = uint32(delta)
var wrapped_last_processed_pos uint32 = wrapPosition(s.last_processed_pos_)
@@ -1253,60 +750,52 @@ func encodeData(s *Writer, is_last bool, force_flush bool, out_size *uint, outpu
return false
}
- if s.params.quality == fastTwoPassCompressionQuality && s.command_buf_ == nil {
- s.command_buf_ = make([]uint32, kCompressFragmentTwoPassBlockSize)
- s.literal_buf_ = make([]byte, kCompressFragmentTwoPassBlockSize)
+ if s.params.quality == fastTwoPassCompressionQuality {
+ if s.command_buf_ == nil || cap(s.command_buf_) < int(kCompressFragmentTwoPassBlockSize) {
+ s.command_buf_ = make([]uint32, kCompressFragmentTwoPassBlockSize)
+ s.literal_buf_ = make([]byte, kCompressFragmentTwoPassBlockSize)
+ } else {
+ s.command_buf_ = s.command_buf_[:kCompressFragmentTwoPassBlockSize]
+ s.literal_buf_ = s.literal_buf_[:kCompressFragmentTwoPassBlockSize]
+ }
}
if s.params.quality == fastOnePassCompressionQuality || s.params.quality == fastTwoPassCompressionQuality {
- var storage []byte
- var storage_ix uint = uint(s.last_bytes_bits_)
var table_size uint
var table []int
if delta == 0 && !is_last {
/* We have no new input data and we don't have to finish the stream, so
nothing to do. */
- *out_size = 0
-
return true
}
- storage = getBrotliStorage(s, uint(2*bytes+503))
- storage[0] = byte(s.last_bytes_)
- storage[1] = byte(s.last_bytes_ >> 8)
table = getHashTable(s, s.params.quality, uint(bytes), &table_size)
if s.params.quality == fastOnePassCompressionQuality {
- compressFragmentFast(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &storage_ix, storage)
+ compressFragmentFast(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &s.bw)
} else {
- compressFragmentTwoPass(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, s.command_buf_, s.literal_buf_, table, table_size, &storage_ix, storage)
+ compressFragmentTwoPass(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, s.command_buf_, s.literal_buf_, table, table_size, &s.bw)
}
- s.last_bytes_ = uint16(storage[storage_ix>>3])
- s.last_bytes_bits_ = byte(storage_ix & 7)
updateLastProcessedPos(s)
- *output = storage[0:]
- *out_size = storage_ix >> 3
+ s.writeOutput(s.bw.dst)
+ s.bw.dst = s.bw.dst[:0]
return true
}
{
/* Theoretical max number of commands is 1 per 2 bytes. */
- var newsize uint = uint(uint32(s.num_commands_) + bytes/2 + 1)
- if newsize > s.cmd_alloc_size_ {
- var new_commands []command
-
+ newsize := len(s.commands) + int(bytes)/2 + 1
+ if newsize > cap(s.commands) {
/* Reserve a bit more memory to allow merging with a next block
without reallocation: that would impact speed. */
- newsize += uint((bytes / 4) + 16)
+ newsize += int(bytes/4) + 16
- s.cmd_alloc_size_ = newsize
- new_commands = make([]command, newsize)
- if s.commands_ != nil {
- copy(new_commands, s.commands_[:s.num_commands_])
- s.commands_ = nil
+ new_commands := make([]command, len(s.commands), newsize)
+ if s.commands != nil {
+ copy(new_commands, s.commands)
}
- s.commands_ = new_commands
+ s.commands = new_commands
}
}
@@ -1314,46 +803,44 @@ func encodeData(s *Writer, is_last bool, force_flush bool, out_size *uint, outpu
literal_context_mode = chooseContextMode(&s.params, data, uint(wrapPosition(s.last_flush_pos_)), uint(mask), uint(s.input_pos_-s.last_flush_pos_))
- if s.num_commands_ != 0 && s.last_insert_len_ == 0 {
+ if len(s.commands) != 0 && s.last_insert_len_ == 0 {
extendLastCommand(s, &bytes, &wrapped_last_processed_pos)
}
if s.params.quality == zopflificationQuality {
assert(s.params.hasher.type_ == 10)
- createZopfliBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_.(*h10), s.dist_cache_[:], &s.last_insert_len_, s.commands_[s.num_commands_:], &s.num_commands_, &s.num_literals_)
+ createZopfliBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_.(*h10), s.dist_cache_[:], &s.last_insert_len_, &s.commands, &s.num_literals_)
} else if s.params.quality == hqZopflificationQuality {
assert(s.params.hasher.type_ == 10)
- createHqZopfliBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_, s.dist_cache_[:], &s.last_insert_len_, s.commands_[s.num_commands_:], &s.num_commands_, &s.num_literals_)
+ createHqZopfliBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_, s.dist_cache_[:], &s.last_insert_len_, &s.commands, &s.num_literals_)
} else {
- createBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_, s.dist_cache_[:], &s.last_insert_len_, s.commands_[s.num_commands_:], &s.num_commands_, &s.num_literals_)
+ createBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_, s.dist_cache_[:], &s.last_insert_len_, &s.commands, &s.num_literals_)
}
{
var max_length uint = maxMetablockSize(&s.params)
var max_literals uint = max_length / 8
- var max_commands uint = max_length / 8
+ max_commands := int(max_length / 8)
var processed_bytes uint = uint(s.input_pos_ - s.last_flush_pos_)
var next_input_fits_metablock bool = (processed_bytes+inputBlockSize(s) <= max_length)
- var should_flush bool = (s.params.quality < minQualityForBlockSplit && s.num_literals_+s.num_commands_ >= maxNumDelayedSymbols)
+ var should_flush bool = (s.params.quality < minQualityForBlockSplit && s.num_literals_+uint(len(s.commands)) >= maxNumDelayedSymbols)
/* If maximal possible additional block doesn't fit metablock, flush now. */
/* TODO: Postpone decision until next block arrives? */
/* If block splitting is not used, then flush as soon as there is some
amount of commands / literals produced. */
- if !is_last && !force_flush && !should_flush && next_input_fits_metablock && s.num_literals_ < max_literals && s.num_commands_ < max_commands {
+ if !is_last && !force_flush && !should_flush && next_input_fits_metablock && s.num_literals_ < max_literals && len(s.commands) < max_commands {
/* Merge with next input block. Everything will happen later. */
if updateLastProcessedPos(s) {
hasherReset(s.hasher_)
}
- *out_size = 0
return true
}
}
/* Create the last insert-only command. */
if s.last_insert_len_ > 0 {
- initInsertCommand(&s.commands_[s.num_commands_], s.last_insert_len_)
- s.num_commands_++
+ s.commands = append(s.commands, makeInsertCommand(s.last_insert_len_))
s.num_literals_ += s.last_insert_len_
s.last_insert_len_ = 0
}
@@ -1361,8 +848,6 @@ func encodeData(s *Writer, is_last bool, force_flush bool, out_size *uint, outpu
if !is_last && s.input_pos_ == s.last_flush_pos_ {
/* We have no new input data and we don't have to finish the stream, so
nothing to do. */
- *out_size = 0
-
return true
}
@@ -1371,13 +856,7 @@ func encodeData(s *Writer, is_last bool, force_flush bool, out_size *uint, outpu
assert(s.input_pos_-s.last_flush_pos_ <= 1<<24)
{
var metablock_size uint32 = uint32(s.input_pos_ - s.last_flush_pos_)
- var storage []byte = getBrotliStorage(s, uint(2*metablock_size+503))
- var storage_ix uint = uint(s.last_bytes_bits_)
- storage[0] = byte(s.last_bytes_)
- storage[1] = byte(s.last_bytes_ >> 8)
- writeMetaBlockInternal(data, uint(mask), s.last_flush_pos_, uint(metablock_size), is_last, literal_context_mode, &s.params, s.prev_byte_, s.prev_byte2_, s.num_literals_, s.num_commands_, s.commands_, s.saved_dist_cache_[:], s.dist_cache_[:], &storage_ix, storage)
- s.last_bytes_ = uint16(storage[storage_ix>>3])
- s.last_bytes_bits_ = byte(storage_ix & 7)
+ writeMetaBlockInternal(data, uint(mask), s.last_flush_pos_, uint(metablock_size), is_last, literal_context_mode, &s.params, s.prev_byte_, s.prev_byte2_, s.num_literals_, s.commands, s.saved_dist_cache_[:], s.dist_cache_[:], &s.bw)
s.last_flush_pos_ = s.input_pos_
if updateLastProcessedPos(s) {
hasherReset(s.hasher_)
@@ -1391,36 +870,29 @@ func encodeData(s *Writer, is_last bool, force_flush bool, out_size *uint, outpu
s.prev_byte2_ = data[uint32(s.last_flush_pos_-2)&mask]
}
- s.num_commands_ = 0
+ s.commands = s.commands[:0]
s.num_literals_ = 0
/* Save the state of the distance cache in case we need to restore it for
emitting an uncompressed block. */
copy(s.saved_dist_cache_[:], s.dist_cache_[:])
- *output = storage[0:]
- *out_size = storage_ix >> 3
+ s.writeOutput(s.bw.dst)
+ s.bw.dst = s.bw.dst[:0]
return true
}
}
-/* Dumps remaining output bits and metadata header to |header|.
- Returns number of produced bytes.
- REQUIRED: |header| should be 8-byte aligned and at least 16 bytes long.
+/* Dumps remaining output bits and metadata header to s.bw.
REQUIRED: |block_size| <= (1 << 24). */
-func writeMetadataHeader(s *Writer, block_size uint, header []byte) uint {
- var storage_ix uint
- storage_ix = uint(s.last_bytes_bits_)
- header[0] = byte(s.last_bytes_)
- header[1] = byte(s.last_bytes_ >> 8)
- s.last_bytes_ = 0
- s.last_bytes_bits_ = 0
-
- writeBits(1, 0, &storage_ix, header)
- writeBits(2, 3, &storage_ix, header)
- writeBits(1, 0, &storage_ix, header)
+func writeMetadataHeader(s *Writer, block_size uint) {
+ bw := &s.bw
+
+ bw.writeBits(1, 0)
+ bw.writeBits(2, 3)
+ bw.writeBits(1, 0)
if block_size == 0 {
- writeBits(2, 0, &storage_ix, header)
+ bw.writeBits(2, 0)
} else {
var nbits uint32
if block_size == 1 {
@@ -1429,95 +901,62 @@ func writeMetadataHeader(s *Writer, block_size uint, header []byte) uint {
nbits = log2FloorNonZero(uint(uint32(block_size)-1)) + 1
}
var nbytes uint32 = (nbits + 7) / 8
- writeBits(2, uint64(nbytes), &storage_ix, header)
- writeBits(uint(8*nbytes), uint64(block_size)-1, &storage_ix, header)
+ bw.writeBits(2, uint64(nbytes))
+ bw.writeBits(uint(8*nbytes), uint64(block_size)-1)
}
- return (storage_ix + 7) >> 3
+ bw.jumpToByteBoundary()
}
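
writeMetadataHeader now pads to the byte boundary itself, since the caller no longer counts produced bytes. A hypothetical trace for a 1000-byte metadata block:

// writeMetadataHeader(s, 1000) emits:
//   bw.writeBits(1, 0)    // ISLAST = 0
//   bw.writeBits(2, 3)    // MNIBBLES = 0 marker: metadata block
//   bw.writeBits(1, 0)    // reserved
//   bw.writeBits(2, 2)    // MSKIPBYTES: 999 needs 10 bits -> 2 bytes
//   bw.writeBits(16, 999) // MSKIPLEN = block_size - 1
//   bw.jumpToByteBoundary()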
func injectBytePaddingBlock(s *Writer) {
- var seal uint32 = uint32(s.last_bytes_)
- var seal_bits uint = uint(s.last_bytes_bits_)
- var destination []byte
- s.last_bytes_ = 0
- s.last_bytes_bits_ = 0
-
/* is_last = 0, data_nibbles = 11, reserved = 0, meta_nibbles = 00 */
- seal |= 0x6 << seal_bits
-
- seal_bits += 6
-
- /* If we have already created storage, then append to it.
- Storage is valid until next block is being compressed. */
- if s.next_out_ != nil {
- destination = s.next_out_[s.available_out_:]
- } else {
- destination = s.tiny_buf_.u8[:]
- s.next_out_ = destination
- }
-
- destination[0] = byte(seal)
- if seal_bits > 8 {
- destination[1] = byte(seal >> 8)
- }
- if seal_bits > 16 {
- destination[2] = byte(seal >> 16)
- }
- s.available_out_ += (seal_bits + 7) >> 3
+ s.bw.writeBits(6, 0x6)
+ s.bw.jumpToByteBoundary()
+ s.writeOutput(s.bw.dst)
+ s.bw.dst = s.bw.dst[:0]
}
func checkFlushComplete(s *Writer) {
- if s.stream_state_ == streamFlushRequested && s.available_out_ == 0 {
+ if s.stream_state_ == streamFlushRequested && s.err == nil {
s.stream_state_ = streamProcessing
- s.next_out_ = nil
}
}
func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[]byte) bool {
var block_size_limit uint = uint(1) << s.params.lgwin
var buf_size uint = brotli_min_size_t(kCompressFragmentTwoPassBlockSize, brotli_min_size_t(*available_in, block_size_limit))
- var tmp_command_buf []uint32 = nil
var command_buf []uint32 = nil
- var tmp_literal_buf []byte = nil
var literal_buf []byte = nil
if s.params.quality != fastOnePassCompressionQuality && s.params.quality != fastTwoPassCompressionQuality {
return false
}
if s.params.quality == fastTwoPassCompressionQuality {
- if s.command_buf_ == nil && buf_size == kCompressFragmentTwoPassBlockSize {
- s.command_buf_ = make([]uint32, kCompressFragmentTwoPassBlockSize)
- s.literal_buf_ = make([]byte, kCompressFragmentTwoPassBlockSize)
- }
-
- if s.command_buf_ != nil {
- command_buf = s.command_buf_
- literal_buf = s.literal_buf_
+ if s.command_buf_ == nil || cap(s.command_buf_) < int(buf_size) {
+ s.command_buf_ = make([]uint32, buf_size)
+ s.literal_buf_ = make([]byte, buf_size)
} else {
- tmp_command_buf = make([]uint32, buf_size)
- tmp_literal_buf = make([]byte, buf_size)
- command_buf = tmp_command_buf
- literal_buf = tmp_literal_buf
+ s.command_buf_ = s.command_buf_[:buf_size]
+ s.literal_buf_ = s.literal_buf_[:buf_size]
}
+
+ command_buf = s.command_buf_
+ literal_buf = s.literal_buf_
}
for {
- if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 {
+ if s.stream_state_ == streamFlushRequested && s.bw.nbits&7 != 0 {
injectBytePaddingBlock(s)
continue
}
- /* Compress block only when internal output buffer is empty, stream is not
+ /* Compress block only when stream is not
finished, there is no pending flush request, and there is either
additional input or pending operation. */
- if s.available_out_ == 0 && s.stream_state_ == streamProcessing && (*available_in != 0 || op != int(operationProcess)) {
+ if s.stream_state_ == streamProcessing && (*available_in != 0 || op != int(operationProcess)) {
var block_size uint = brotli_min_size_t(block_size_limit, *available_in)
var is_last bool = (*available_in == block_size) && (op == int(operationFinish))
var force_flush bool = (*available_in == block_size) && (op == int(operationFlush))
- var max_out_size uint = 2*block_size + 503
- var storage []byte = nil
- var storage_ix uint = uint(s.last_bytes_bits_)
var table_size uint
var table []int
@@ -1526,26 +965,18 @@ func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[
continue
}
- storage = getBrotliStorage(s, max_out_size)
-
- storage[0] = byte(s.last_bytes_)
- storage[1] = byte(s.last_bytes_ >> 8)
table = getHashTable(s, s.params.quality, block_size, &table_size)
if s.params.quality == fastOnePassCompressionQuality {
- compressFragmentFast(*next_in, block_size, is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &storage_ix, storage)
+ compressFragmentFast(*next_in, block_size, is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &s.bw)
} else {
- compressFragmentTwoPass(*next_in, block_size, is_last, command_buf, literal_buf, table, table_size, &storage_ix, storage)
+ compressFragmentTwoPass(*next_in, block_size, is_last, command_buf, literal_buf, table, table_size, &s.bw)
}
*next_in = (*next_in)[block_size:]
*available_in -= block_size
- var out_bytes uint = storage_ix >> 3
- s.next_out_ = storage
- s.available_out_ = out_bytes
-
- s.last_bytes_ = uint16(storage[storage_ix>>3])
- s.last_bytes_bits_ = byte(storage_ix & 7)
+ s.writeOutput(s.bw.dst)
+ s.bw.dst = s.bw.dst[:0]
if force_flush {
s.stream_state_ = streamFlushRequested
@@ -1559,8 +990,6 @@ func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[
break
}
- tmp_command_buf = nil
- tmp_literal_buf = nil
checkFlushComplete(s)
return true
}
@@ -1581,17 +1010,13 @@ func processMetadata(s *Writer, available_in *uint, next_in *[]byte) bool {
}
for {
- if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 {
+ if s.stream_state_ == streamFlushRequested && s.bw.nbits&7 != 0 {
injectBytePaddingBlock(s)
continue
}
- if s.available_out_ != 0 {
- break
- }
-
if s.input_pos_ != s.last_flush_pos_ {
- var result bool = encodeData(s, false, true, &s.available_out_, &s.next_out_)
+ var result bool = encodeData(s, false, true)
if !result {
return false
}
@@ -1599,8 +1024,9 @@ func processMetadata(s *Writer, available_in *uint, next_in *[]byte) bool {
}
if s.stream_state_ == streamMetadataHead {
- s.next_out_ = s.tiny_buf_.u8[:]
- s.available_out_ = writeMetadataHeader(s, uint(s.remaining_metadata_bytes_), s.next_out_)
+ writeMetadataHeader(s, uint(s.remaining_metadata_bytes_))
+ s.writeOutput(s.bw.dst)
+ s.bw.dst = s.bw.dst[:0]
s.stream_state_ = streamMetadataBody
continue
} else {
@@ -1614,12 +1040,11 @@ func processMetadata(s *Writer, available_in *uint, next_in *[]byte) bool {
/* This guarantees progress in "TakeOutput" workflow. */
var c uint32 = brotli_min_uint32_t(s.remaining_metadata_bytes_, 16)
- s.next_out_ = s.tiny_buf_.u8[:]
- copy(s.next_out_, (*next_in)[:c])
+ copy(s.tiny_buf_.u8[:], (*next_in)[:c])
*next_in = (*next_in)[c:]
*available_in -= uint(c)
s.remaining_metadata_bytes_ -= c
- s.available_out_ = uint(c)
+ s.writeOutput(s.tiny_buf_.u8[:c])
continue
}
@@ -1687,20 +1112,20 @@ func encoderCompressStream(s *Writer, op int, available_in *uint, next_in *[]byt
continue
}
- if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 {
+ if s.stream_state_ == streamFlushRequested && s.bw.nbits&7 != 0 {
injectBytePaddingBlock(s)
continue
}
- /* Compress data only when internal output buffer is empty, stream is not
+ /* Compress data only when stream is not
finished and there is no pending flush request. */
- if s.available_out_ == 0 && s.stream_state_ == streamProcessing {
+ if s.stream_state_ == streamProcessing {
if remaining_block_size == 0 || op != int(operationProcess) {
var is_last bool = ((*available_in == 0) && op == int(operationFinish))
var force_flush bool = ((*available_in == 0) && op == int(operationFlush))
var result bool
updateSizeHint(s, *available_in)
- result = encodeData(s, is_last, force_flush, &s.available_out_, &s.next_out_)
+ result = encodeData(s, is_last, force_flush)
if !result {
return false
}
@@ -1721,18 +1146,13 @@ func encoderCompressStream(s *Writer, op int, available_in *uint, next_in *[]byt
return true
}
-func encoderHasMoreOutput(s *Writer) bool {
- return s.available_out_ != 0
-}
-
-func encoderTakeOutput(s *Writer) []byte {
- if s.available_out_ == 0 {
- return nil
+func (w *Writer) writeOutput(data []byte) {
+ if w.err != nil {
+ return
}
- result := s.next_out_[:s.available_out_]
- s.total_out_ += s.available_out_
- s.available_out_ = 0
- checkFlushComplete(s)
- return result
+ _, w.err = w.dst.Write(data)
+ if w.err == nil {
+ checkFlushComplete(w)
+ }
}
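With encoderHasMoreOutput and encoderTakeOutput gone, the encoder now streams straight into the destination io.Writer and latches the first write error on the Writer. A minimal standalone sketch of that error-latching idiom; the `sink` type and its names are illustrative, not the library's:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// sink latches the first write error; later writes become no-ops,
// so callers can check err once at the end instead of per call.
type sink struct {
	dst io.Writer
	err error
}

func (s *sink) write(p []byte) {
	if s.err != nil {
		return
	}
	_, s.err = s.dst.Write(p)
}

func main() {
	var buf bytes.Buffer
	s := &sink{dst: &buf}
	s.write([]byte("compressed bytes"))
	fmt.Println(buf.String(), s.err) // compressed bytes <nil>
}
```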
diff --git a/vendor/github.com/andybalholm/brotli/entropy_encode.go b/vendor/github.com/andybalholm/brotli/entropy_encode.go
index d0c1dca250a..3f469a3dd94 100644
--- a/vendor/github.com/andybalholm/brotli/entropy_encode.go
+++ b/vendor/github.com/andybalholm/brotli/entropy_encode.go
@@ -24,7 +24,7 @@ func initHuffmanTree(self *huffmanTree, count uint32, left int16, right int16) {
}
/* Input size optimized Shell sort. */
-type huffmanTreeComparator func(*huffmanTree, *huffmanTree) bool
+type huffmanTreeComparator func(huffmanTree, huffmanTree) bool
var sortHuffmanTreeItems_gaps = []uint{132, 57, 23, 10, 4, 1}
@@ -36,14 +36,13 @@ func sortHuffmanTreeItems(items []huffmanTree, n uint, comparator huffmanTreeCom
var tmp huffmanTree = items[i]
var k uint = i
var j uint = i - 1
- for comparator(&tmp, &items[j]) {
+ for comparator(tmp, items[j]) {
items[k] = items[j]
k = j
- tmp10 := j
- j--
- if tmp10 == 0 {
+ if j == 0 {
break
}
+ j--
}
items[k] = tmp
@@ -63,7 +62,7 @@ func sortHuffmanTreeItems(items []huffmanTree, n uint, comparator huffmanTreeCom
for i = gap; i < n; i++ {
var j uint = i
var tmp huffmanTree = items[i]
- for ; j >= gap && comparator(&tmp, &items[j-gap]); j -= gap {
+ for ; j >= gap && comparator(tmp, items[j-gap]); j -= gap {
items[j] = items[j-gap]
}
@@ -105,7 +104,7 @@ func setDepth(p0 int, pool []huffmanTree, depth []byte, max_depth int) bool {
}
/* Sort the root nodes, least popular first. */
-func sortHuffmanTree(v0 *huffmanTree, v1 *huffmanTree) bool {
+func sortHuffmanTree(v0 huffmanTree, v1 huffmanTree) bool {
if v0.total_count_ != v1.total_count_ {
return v0.total_count_ < v1.total_count_
}
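The comparator now takes huffmanTree values rather than pointers: the struct is only a few words wide, so copies are cheap and the compiler need not assume the two arguments alias. A rough illustration under that assumption (`node` is a stand-in, not the library's type):

```go
package main

import "fmt"

// node stands in for huffmanTree: small enough that passing copies
// beats a pointer dereference per comparison in the sort's inner loop.
type node struct {
	count       uint32
	left, right int16
}

func byCount(a, b node) bool { return a.count < b.count }

func main() {
	fmt.Println(byCount(node{count: 1}, node{count: 2})) // true
}
```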
diff --git a/vendor/github.com/andybalholm/brotli/entropy_encode_static.go b/vendor/github.com/andybalholm/brotli/entropy_encode_static.go
index 5ddf3fcbaef..2543f8f07d0 100644
--- a/vendor/github.com/andybalholm/brotli/entropy_encode_static.go
+++ b/vendor/github.com/andybalholm/brotli/entropy_encode_static.go
@@ -778,8 +778,9 @@ var kStaticDistanceCodeDepth = [64]byte{
var kCodeLengthBits = [18]uint32{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 15, 31, 0, 11, 7}
-func storeStaticCodeLengthCode(storage_ix *uint, storage []byte) {
- writeBits(40, 0x0000FF55555554, storage_ix, storage)
+func storeStaticCodeLengthCode(bw *bitWriter) {
+ bw.writeBits(32, 0x55555554)
+ bw.writeBits(8, 0xFF)
}
var kZeroRepsBits = [numCommandSymbols]uint64{
@@ -4317,9 +4318,10 @@ var kStaticCommandCodeBits = [numCommandSymbols]uint16{
2047,
}
-func storeStaticCommandHuffmanTree(storage_ix *uint, storage []byte) {
- writeBits(56, 0x92624416307003, storage_ix, storage)
- writeBits(3, 0x00000000, storage_ix, storage)
+func storeStaticCommandHuffmanTree(bw *bitWriter) {
+ bw.writeBits(32, 0x16307003)
+ bw.writeBits(24, 0x926244)
+ bw.writeBits(3, 0x00000000)
}
var kStaticDistanceCodeBits = [64]uint16{
@@ -4389,6 +4391,6 @@ var kStaticDistanceCodeBits = [64]uint16{
63,
}
-func storeStaticDistanceHuffmanTree(storage_ix *uint, storage []byte) {
- writeBits(28, 0x0369DC03, storage_ix, storage)
+func storeStaticDistanceHuffmanTree(bw *bitWriter) {
+ bw.writeBits(28, 0x0369DC03)
}
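The constants are split because a single writeBits call on the new bitWriter carries at most 32 bits: it flushes whenever 32 or more bits are pending, so nbits stays below 32 between calls and 32 new bits always fit the 64-bit accumulator. Bits are emitted LSB-first, so the low word goes first; a quick check that the splits preserve the original patterns:

```go
package main

import "fmt"

func main() {
	// 40-bit code-length pattern: low 32 bits, then the top 8.
	fmt.Println(uint64(0x0000FF55555554) == uint64(0x55555554)|uint64(0xFF)<<32) // true
	// 56-bit command-tree pattern: low 32 bits, then the next 24.
	fmt.Println(uint64(0x92624416307003) == uint64(0x16307003)|uint64(0x926244)<<32) // true
}
```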
diff --git a/vendor/github.com/andybalholm/brotli/find_match_length.go b/vendor/github.com/andybalholm/brotli/find_match_length.go
index 14d350aa59a..09d2ae67268 100644
--- a/vendor/github.com/andybalholm/brotli/find_match_length.go
+++ b/vendor/github.com/andybalholm/brotli/find_match_length.go
@@ -1,5 +1,11 @@
package brotli
+import (
+ "encoding/binary"
+ "math/bits"
+ "runtime"
+)
+
/* Copyright 2010 Google Inc. All Rights Reserved.
Distributed under MIT license.
@@ -9,6 +15,29 @@ package brotli
/* Function to find maximal matching prefixes of strings. */
func findMatchLengthWithLimit(s1 []byte, s2 []byte, limit uint) uint {
var matched uint = 0
+ _, _ = s1[limit-1], s2[limit-1] // bounds check
+ switch runtime.GOARCH {
+ case "amd64":
+ // Compare 8 bytes at a time.
+ for matched+8 <= limit {
+ w1 := binary.LittleEndian.Uint64(s1[matched:])
+ w2 := binary.LittleEndian.Uint64(s2[matched:])
+ if w1 != w2 {
+ return matched + uint(bits.TrailingZeros64(w1^w2)>>3)
+ }
+ matched += 8
+ }
+ case "386":
+ // Compare 4 bytes at a time.
+ for matched+4 <= limit {
+ w1 := binary.LittleEndian.Uint32(s1[matched:])
+ w2 := binary.LittleEndian.Uint32(s2[matched:])
+ if w1 != w2 {
+ return matched + uint(bits.TrailingZeros32(w1^w2)>>3)
+ }
+ matched += 4
+ }
+ }
for matched < limit && s1[matched] == s2[matched] {
matched++
}
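The word-at-a-time path XORs eight (or four) bytes and uses the trailing-zero count to locate the first mismatch: on a little-endian load, byte i occupies bits 8i..8i+7, so TrailingZeros>>3 is exactly the index of the first differing byte. A worked example:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func main() {
	s1 := []byte("brotli!!")
	s2 := []byte("broTli!!") // first difference at byte index 3
	w1 := binary.LittleEndian.Uint64(s1)
	w2 := binary.LittleEndian.Uint64(s2)
	// 't'^'T' == 0x20, i.e. bit 5 of byte 3, so the xor's lowest
	// set bit is at position 24+5 = 29, and 29>>3 == 3.
	fmt.Println(bits.TrailingZeros64(w1^w2) >> 3) // 3
}
```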
diff --git a/vendor/github.com/andybalholm/brotli/go.mod b/vendor/github.com/andybalholm/brotli/go.mod
index cb505fa05ab..8e609842f33 100644
--- a/vendor/github.com/andybalholm/brotli/go.mod
+++ b/vendor/github.com/andybalholm/brotli/go.mod
@@ -1,5 +1,3 @@
module github.com/andybalholm/brotli
go 1.12
-
-require github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721
diff --git a/vendor/github.com/andybalholm/brotli/go.sum b/vendor/github.com/andybalholm/brotli/go.sum
index 2ed61f3836d..e69de29bb2d 100644
--- a/vendor/github.com/andybalholm/brotli/go.sum
+++ b/vendor/github.com/andybalholm/brotli/go.sum
@@ -1,2 +0,0 @@
-github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721 h1:KRMr9A3qfbVM7iV/WcLY/rL5LICqwMHLhwRXKu99fXw=
-github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4=
diff --git a/vendor/github.com/andybalholm/brotli/histogram.go b/vendor/github.com/andybalholm/brotli/histogram.go
index f208ff74b97..0346622beb3 100644
--- a/vendor/github.com/andybalholm/brotli/histogram.go
+++ b/vendor/github.com/andybalholm/brotli/histogram.go
@@ -163,7 +163,7 @@ func initBlockSplitIterator(self *blockSplitIterator, split *blockSplit) {
self.split_ = split
self.idx_ = 0
self.type_ = 0
- if split.lengths != nil {
+ if len(split.lengths) > 0 {
self.length_ = uint(split.lengths[0])
} else {
self.length_ = 0
@@ -180,17 +180,16 @@ func blockSplitIteratorNext(self *blockSplitIterator) {
self.length_--
}
-func buildHistogramsWithContext(cmds []command, num_commands uint, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit, ringbuffer []byte, start_pos uint, mask uint, prev_byte byte, prev_byte2 byte, context_modes []int, literal_histograms []histogramLiteral, insert_and_copy_histograms []histogramCommand, copy_dist_histograms []histogramDistance) {
+func buildHistogramsWithContext(cmds []command, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit, ringbuffer []byte, start_pos uint, mask uint, prev_byte byte, prev_byte2 byte, context_modes []int, literal_histograms []histogramLiteral, insert_and_copy_histograms []histogramCommand, copy_dist_histograms []histogramDistance) {
var pos uint = start_pos
var literal_it blockSplitIterator
var insert_and_copy_it blockSplitIterator
var dist_it blockSplitIterator
- var i uint
initBlockSplitIterator(&literal_it, literal_split)
initBlockSplitIterator(&insert_and_copy_it, insert_and_copy_split)
initBlockSplitIterator(&dist_it, dist_split)
- for i = 0; i < num_commands; i++ {
+ for i := range cmds {
var cmd *command = &cmds[i]
var j uint
blockSplitIteratorNext(&insert_and_copy_it)
diff --git a/vendor/github.com/andybalholm/brotli/http.go b/vendor/github.com/andybalholm/brotli/http.go
new file mode 100644
index 00000000000..af58670f2cf
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/http.go
@@ -0,0 +1,192 @@
+package brotli
+
+import (
+ "compress/gzip"
+ "io"
+ "net/http"
+ "strings"
+)
+
+// HTTPCompressor chooses a compression method (brotli, gzip, or none) based on
+// the Accept-Encoding header, sets the Content-Encoding header, and returns a
+// WriteCloser that implements that compression. The Close method must be called
+// before the current HTTP handler returns.
+//
+// Due to https://github.com/golang/go/issues/31753, the response will not be
+// compressed unless you set a Content-Type header before you call
+// HTTPCompressor.
+func HTTPCompressor(w http.ResponseWriter, r *http.Request) io.WriteCloser {
+ if w.Header().Get("Content-Type") == "" {
+ return nopCloser{w}
+ }
+
+ if w.Header().Get("Vary") == "" {
+ w.Header().Set("Vary", "Accept-Encoding")
+ }
+
+ encoding := negotiateContentEncoding(r, []string{"br", "gzip"})
+ switch encoding {
+ case "br":
+ w.Header().Set("Content-Encoding", "br")
+ return NewWriter(w)
+ case "gzip":
+ w.Header().Set("Content-Encoding", "gzip")
+ return gzip.NewWriter(w)
+ }
+ return nopCloser{w}
+}
+
+// negotiateContentEncoding returns the best offered content encoding for the
+// request's Accept-Encoding header. If two offers match with equal weight,
+// then the offer earlier in the list is preferred. If no offers are
+// acceptable, then "" is returned.
+func negotiateContentEncoding(r *http.Request, offers []string) string {
+ bestOffer := "identity"
+ bestQ := -1.0
+ specs := parseAccept(r.Header, "Accept-Encoding")
+ for _, offer := range offers {
+ for _, spec := range specs {
+ if spec.Q > bestQ &&
+ (spec.Value == "*" || spec.Value == offer) {
+ bestQ = spec.Q
+ bestOffer = offer
+ }
+ }
+ }
+ if bestQ == 0 {
+ bestOffer = ""
+ }
+ return bestOffer
+}
+
+// acceptSpec describes an Accept* header.
+type acceptSpec struct {
+ Value string
+ Q float64
+}
+
+// parseAccept parses Accept* headers.
+func parseAccept(header http.Header, key string) (specs []acceptSpec) {
+loop:
+ for _, s := range header[key] {
+ for {
+ var spec acceptSpec
+ spec.Value, s = expectTokenSlash(s)
+ if spec.Value == "" {
+ continue loop
+ }
+ spec.Q = 1.0
+ s = skipSpace(s)
+ if strings.HasPrefix(s, ";") {
+ s = skipSpace(s[1:])
+ if !strings.HasPrefix(s, "q=") {
+ continue loop
+ }
+ spec.Q, s = expectQuality(s[2:])
+ if spec.Q < 0.0 {
+ continue loop
+ }
+ }
+ specs = append(specs, spec)
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ",") {
+ continue loop
+ }
+ s = skipSpace(s[1:])
+ }
+ }
+ return
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func expectTokenSlash(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ b := s[i]
+ if (octetTypes[b]&isToken == 0) && b != '/' {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func expectQuality(s string) (q float64, rest string) {
+ switch {
+ case len(s) == 0:
+ return -1, ""
+ case s[0] == '0':
+ q = 0
+ case s[0] == '1':
+ q = 1
+ default:
+ return -1, ""
+ }
+ s = s[1:]
+ if !strings.HasPrefix(s, ".") {
+ return q, s
+ }
+ s = s[1:]
+ i := 0
+ n := 0
+ d := 1
+ for ; i < len(s); i++ {
+ b := s[i]
+ if b < '0' || b > '9' {
+ break
+ }
+ n = n*10 + int(b) - '0'
+ d *= 10
+ }
+ return q + float64(n)/float64(d), s[i:]
+}
+
+// Octet types from RFC 2616.
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+ isToken octetType = 1 << iota
+ isSpace
+)
+
+func init() {
+ // OCTET      = <any 8-bit sequence of data>
+ // CHAR       = <any US-ASCII character (octets 0 - 127)>
+ // CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ // CR         = <US-ASCII CR, carriage return (13)>
+ // LF         = <US-ASCII LF, linefeed (10)>
+ // SP         = <US-ASCII SP, space (32)>
+ // HT         = <US-ASCII HT, horizontal-tab (9)>
+ // <">        = <US-ASCII double-quote mark (34)>
+ // CRLF       = CR LF
+ // LWS        = [CRLF] 1*( SP | HT )
+ // TEXT       = <any OCTET except CTLs, but including LWS>
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ //              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token      = 1*<any CHAR except CTLs or separators>
+ // qdtext     = <any TEXT except <">>
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+ if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
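A hedged usage sketch for the relocated HTTPCompressor (handler body and address are illustrative): per the doc comment above, Content-Type must be set before the call, and Close must run before the handler returns.

```go
package main

import (
	"net/http"

	"github.com/andybalholm/brotli"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		cw := brotli.HTTPCompressor(w, r) // br, gzip, or identity
		defer cw.Close()
		cw.Write([]byte("hello, negotiated compression"))
	})
	http.ListenAndServe(":8080", nil)
}
```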
diff --git a/vendor/github.com/andybalholm/brotli/memory.go b/vendor/github.com/andybalholm/brotli/memory.go
index 7208a3bbf6f..a07c7050a07 100644
--- a/vendor/github.com/andybalholm/brotli/memory.go
+++ b/vendor/github.com/andybalholm/brotli/memory.go
@@ -23,12 +23,18 @@ func brotli_ensure_capacity_uint8_t(a *[]byte, c *uint, r uint) {
for new_size < r {
new_size *= 2
}
- var new_array []byte = make([]byte, new_size)
- if *c != 0 {
- copy(new_array, (*a)[:*c])
+
+ if cap(*a) < int(new_size) {
+ var new_array []byte = make([]byte, new_size)
+ if *c != 0 {
+ copy(new_array, (*a)[:*c])
+ }
+
+ *a = new_array
+ } else {
+ *a = (*a)[:new_size]
}
- *a = new_array
*c = new_size
}
}
@@ -45,12 +51,16 @@ func brotli_ensure_capacity_uint32_t(a *[]uint32, c *uint, r uint) {
new_size *= 2
}
- new_array = make([]uint32, new_size)
- if *c != 0 {
- copy(new_array, (*a)[:*c])
- }
+ if cap(*a) < int(new_size) {
+ new_array = make([]uint32, new_size)
+ if *c != 0 {
+ copy(new_array, (*a)[:*c])
+ }
- *a = new_array
+ *a = new_array
+ } else {
+ *a = (*a)[:new_size]
+ }
*c = new_size
}
}
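Both ensure-capacity helpers now reslice in place when the backing array is already big enough, allocating (and copying) only on real growth, which cuts garbage when an encoder is reused. The pattern in isolation (`ensureLen` is an illustrative name):

```go
// Grow a byte slice to length n, reusing the backing array when the
// capacity suffices and allocating + copying only when it does not.
func ensureLen(a []byte, n int) []byte {
	if cap(a) >= n {
		return a[:n] // no allocation; prior contents kept
	}
	b := make([]byte, n)
	copy(b, a)
	return b
}
```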
diff --git a/vendor/github.com/andybalholm/brotli/metablock.go b/vendor/github.com/andybalholm/brotli/metablock.go
index 4a412cf4e24..3014df8cdf1 100644
--- a/vendor/github.com/andybalholm/brotli/metablock.go
+++ b/vendor/github.com/andybalholm/brotli/metablock.go
@@ -1,5 +1,9 @@
package brotli
+import (
+ "sync"
+)
+
/* Copyright 2014 Google Inc. All Rights Reserved.
Distributed under MIT license.
@@ -25,31 +29,30 @@ type metaBlockSplit struct {
distance_histograms_size uint
}
-func initMetaBlockSplit(mb *metaBlockSplit) {
- initBlockSplit(&mb.literal_split)
- initBlockSplit(&mb.command_split)
- initBlockSplit(&mb.distance_split)
- mb.literal_context_map = nil
- mb.literal_context_map_size = 0
- mb.distance_context_map = nil
- mb.distance_context_map_size = 0
- mb.literal_histograms = nil
- mb.literal_histograms_size = 0
- mb.command_histograms = nil
- mb.command_histograms_size = 0
- mb.distance_histograms = nil
- mb.distance_histograms_size = 0
+var metaBlockPool sync.Pool
+
+func getMetaBlockSplit() *metaBlockSplit {
+ mb, _ := metaBlockPool.Get().(*metaBlockSplit)
+
+ if mb == nil {
+ mb = &metaBlockSplit{}
+ } else {
+ initBlockSplit(&mb.literal_split)
+ initBlockSplit(&mb.command_split)
+ initBlockSplit(&mb.distance_split)
+ mb.literal_context_map = mb.literal_context_map[:0]
+ mb.literal_context_map_size = 0
+ mb.distance_context_map = mb.distance_context_map[:0]
+ mb.distance_context_map_size = 0
+ mb.literal_histograms = mb.literal_histograms[:0]
+ mb.command_histograms = mb.command_histograms[:0]
+ mb.distance_histograms = mb.distance_histograms[:0]
+ }
+ return mb
}
-func destroyMetaBlockSplit(mb *metaBlockSplit) {
- destroyBlockSplit(&mb.literal_split)
- destroyBlockSplit(&mb.command_split)
- destroyBlockSplit(&mb.distance_split)
- mb.literal_context_map = nil
- mb.distance_context_map = nil
- mb.literal_histograms = nil
- mb.command_histograms = nil
- mb.distance_histograms = nil
+func freeMetaBlockSplit(mb *metaBlockSplit) {
+ metaBlockPool.Put(mb)
}
func initDistanceParams(params *encoderParams, npostfix uint32, ndirect uint32) {
@@ -84,14 +87,12 @@ func initDistanceParams(params *encoderParams, npostfix uint32, ndirect uint32)
dist_params.max_distance = uint(max_distance)
}
-func recomputeDistancePrefixes(cmds []command, num_commands uint, orig_params *distanceParams, new_params *distanceParams) {
- var i uint
-
+func recomputeDistancePrefixes(cmds []command, orig_params *distanceParams, new_params *distanceParams) {
if orig_params.distance_postfix_bits == new_params.distance_postfix_bits && orig_params.num_direct_distance_codes == new_params.num_direct_distance_codes {
return
}
- for i = 0; i < num_commands; i++ {
+ for i := range cmds {
var cmd *command = &cmds[i]
if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 {
prefixEncodeCopyDistance(uint(commandRestoreDistanceCode(cmd, orig_params)), uint(new_params.num_direct_distance_codes), uint(new_params.distance_postfix_bits), &cmd.dist_prefix_, &cmd.dist_extra_)
@@ -99,8 +100,7 @@ func recomputeDistancePrefixes(cmds []command, num_commands uint, orig_params *d
}
}
-func computeDistanceCost(cmds []command, num_commands uint, orig_params *distanceParams, new_params *distanceParams, cost *float64) bool {
- var i uint
+func computeDistanceCost(cmds []command, orig_params *distanceParams, new_params *distanceParams, cost *float64) bool {
var equal_params bool = false
var dist_prefix uint16
var dist_extra uint32
@@ -112,8 +112,8 @@ func computeDistanceCost(cmds []command, num_commands uint, orig_params *distanc
equal_params = true
}
- for i = 0; i < num_commands; i++ {
- var cmd *command = &cmds[i]
+ for i := range cmds {
+ cmd := &cmds[i]
if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 {
if equal_params {
dist_prefix = cmd.dist_prefix_
@@ -137,7 +137,7 @@ func computeDistanceCost(cmds []command, num_commands uint, orig_params *distanc
var buildMetaBlock_kMaxNumberOfHistograms uint = 256
-func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParams, prev_byte byte, prev_byte2 byte, cmds []command, num_commands uint, literal_context_mode int, mb *metaBlockSplit) {
+func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParams, prev_byte byte, prev_byte2 byte, cmds []command, literal_context_mode int, mb *metaBlockSplit) {
var distance_histograms []histogramDistance
var literal_histograms []histogramLiteral
var literal_context_modes []int = nil
@@ -164,7 +164,7 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
check_orig = false
}
- skip = !computeDistanceCost(cmds, num_commands, &orig_params.dist, &new_params.dist, &dist_cost)
+ skip = !computeDistanceCost(cmds, &orig_params.dist, &new_params.dist, &dist_cost)
if skip || (dist_cost > best_dist_cost) {
break
}
@@ -181,7 +181,7 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
if check_orig {
var dist_cost float64
- computeDistanceCost(cmds, num_commands, &orig_params.dist, &orig_params.dist, &dist_cost)
+ computeDistanceCost(cmds, &orig_params.dist, &orig_params.dist, &dist_cost)
if dist_cost < best_dist_cost {
/* NB: currently unused; uncomment when more param tuning is added. */
/* best_dist_cost = dist_cost; */
@@ -189,9 +189,9 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
}
}
- recomputeDistancePrefixes(cmds, num_commands, &orig_params.dist, &params.dist)
+ recomputeDistancePrefixes(cmds, &orig_params.dist, &params.dist)
- splitBlock(cmds, num_commands, ringbuffer, pos, mask, params, &mb.literal_split, &mb.command_split, &mb.distance_split)
+ splitBlock(cmds, ringbuffer, pos, mask, params, &mb.literal_split, &mb.command_split, &mb.distance_split)
if !params.disable_literal_context_modeling {
literal_context_multiplier = 1 << literalContextBits
@@ -209,21 +209,30 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
distance_histograms = make([]histogramDistance, distance_histograms_size)
clearHistogramsDistance(distance_histograms, distance_histograms_size)
- assert(mb.command_histograms == nil)
mb.command_histograms_size = mb.command_split.num_types
- mb.command_histograms = make([]histogramCommand, (mb.command_histograms_size))
+ if cap(mb.command_histograms) < int(mb.command_histograms_size) {
+ mb.command_histograms = make([]histogramCommand, (mb.command_histograms_size))
+ } else {
+ mb.command_histograms = mb.command_histograms[:mb.command_histograms_size]
+ }
clearHistogramsCommand(mb.command_histograms, mb.command_histograms_size)
- buildHistogramsWithContext(cmds, num_commands, &mb.literal_split, &mb.command_split, &mb.distance_split, ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_modes, literal_histograms, mb.command_histograms, distance_histograms)
+ buildHistogramsWithContext(cmds, &mb.literal_split, &mb.command_split, &mb.distance_split, ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_modes, literal_histograms, mb.command_histograms, distance_histograms)
literal_context_modes = nil
- assert(mb.literal_context_map == nil)
mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits
- mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
+ if cap(mb.literal_context_map) < int(mb.literal_context_map_size) {
+ mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
+ } else {
+ mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size]
+ }
- assert(mb.literal_histograms == nil)
mb.literal_histograms_size = mb.literal_context_map_size
- mb.literal_histograms = make([]histogramLiteral, (mb.literal_histograms_size))
+ if cap(mb.literal_histograms) < int(mb.literal_histograms_size) {
+ mb.literal_histograms = make([]histogramLiteral, (mb.literal_histograms_size))
+ } else {
+ mb.literal_histograms = mb.literal_histograms[:mb.literal_histograms_size]
+ }
clusterHistogramsLiteral(literal_histograms, literal_histograms_size, buildMetaBlock_kMaxNumberOfHistograms, mb.literal_histograms, &mb.literal_histograms_size, mb.literal_context_map)
literal_histograms = nil
@@ -239,13 +248,19 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
}
}
- assert(mb.distance_context_map == nil)
mb.distance_context_map_size = mb.distance_split.num_types << distanceContextBits
- mb.distance_context_map = make([]uint32, (mb.distance_context_map_size))
+ if cap(mb.distance_context_map) < int(mb.distance_context_map_size) {
+ mb.distance_context_map = make([]uint32, (mb.distance_context_map_size))
+ } else {
+ mb.distance_context_map = mb.distance_context_map[:mb.distance_context_map_size]
+ }
- assert(mb.distance_histograms == nil)
mb.distance_histograms_size = mb.distance_context_map_size
- mb.distance_histograms = make([]histogramDistance, (mb.distance_histograms_size))
+ if cap(mb.distance_histograms) < int(mb.distance_histograms_size) {
+ mb.distance_histograms = make([]histogramDistance, (mb.distance_histograms_size))
+ } else {
+ mb.distance_histograms = mb.distance_histograms[:mb.distance_histograms_size]
+ }
clusterHistogramsDistance(distance_histograms, mb.distance_context_map_size, buildMetaBlock_kMaxNumberOfHistograms, mb.distance_histograms, &mb.distance_histograms_size, mb.distance_context_map)
distance_histograms = nil
@@ -298,9 +313,12 @@ func initContextBlockSplitter(self *contextBlockSplitter, alphabet_size uint, nu
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
split.num_blocks = max_num_blocks
- assert(*histograms == nil)
*histograms_size = max_num_types * num_contexts
- *histograms = make([]histogramLiteral, (*histograms_size))
+ if histograms == nil || cap(*histograms) < int(*histograms_size) {
+ *histograms = make([]histogramLiteral, (*histograms_size))
+ } else {
+ *histograms = (*histograms)[:*histograms_size]
+ }
self.histograms_ = *histograms
/* Clear only current histogram. */
@@ -453,9 +471,12 @@ func contextBlockSplitterAddSymbol(self *contextBlockSplitter, symbol uint, cont
func mapStaticContexts(num_contexts uint, static_context_map []uint32, mb *metaBlockSplit) {
var i uint
- assert(mb.literal_context_map == nil)
mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits
- mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
+ if cap(mb.literal_context_map) < int(mb.literal_context_map_size) {
+ mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
+ } else {
+ mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size]
+ }
for i = 0; i < mb.literal_split.num_types; i++ {
var offset uint32 = uint32(i * num_contexts)
@@ -466,7 +487,7 @@ func mapStaticContexts(num_contexts uint, static_context_map []uint32, mb *metaB
}
}
-func buildMetaBlockGreedyInternal(ringbuffer []byte, pos uint, mask uint, prev_byte byte, prev_byte2 byte, literal_context_lut contextLUT, num_contexts uint, static_context_map []uint32, commands []command, n_commands uint, mb *metaBlockSplit) {
+func buildMetaBlockGreedyInternal(ringbuffer []byte, pos uint, mask uint, prev_byte byte, prev_byte2 byte, literal_context_lut contextLUT, num_contexts uint, static_context_map []uint32, commands []command, mb *metaBlockSplit) {
var lit_blocks struct {
plain blockSplitterLiteral
ctx contextBlockSplitter
@@ -474,8 +495,7 @@ func buildMetaBlockGreedyInternal(ringbuffer []byte, pos uint, mask uint, prev_b
var cmd_blocks blockSplitterCommand
var dist_blocks blockSplitterDistance
var num_literals uint = 0
- var i uint
- for i = 0; i < n_commands; i++ {
+ for i := range commands {
num_literals += uint(commands[i].insert_len_)
}
@@ -485,11 +505,10 @@ func buildMetaBlockGreedyInternal(ringbuffer []byte, pos uint, mask uint, prev_b
initContextBlockSplitter(&lit_blocks.ctx, 256, num_contexts, 512, 400.0, num_literals, &mb.literal_split, &mb.literal_histograms, &mb.literal_histograms_size)
}
- initBlockSplitterCommand(&cmd_blocks, numCommandSymbols, 1024, 500.0, n_commands, &mb.command_split, &mb.command_histograms, &mb.command_histograms_size)
- initBlockSplitterDistance(&dist_blocks, 64, 512, 100.0, n_commands, &mb.distance_split, &mb.distance_histograms, &mb.distance_histograms_size)
+ initBlockSplitterCommand(&cmd_blocks, numCommandSymbols, 1024, 500.0, uint(len(commands)), &mb.command_split, &mb.command_histograms, &mb.command_histograms_size)
+ initBlockSplitterDistance(&dist_blocks, 64, 512, 100.0, uint(len(commands)), &mb.distance_split, &mb.distance_histograms, &mb.distance_histograms_size)
- for i = 0; i < n_commands; i++ {
- var cmd command = commands[i]
+ for _, cmd := range commands {
var j uint
blockSplitterAddSymbolCommand(&cmd_blocks, uint(cmd.cmd_prefix_))
for j = uint(cmd.insert_len_); j != 0; j-- {
@@ -530,11 +549,11 @@ func buildMetaBlockGreedyInternal(ringbuffer []byte, pos uint, mask uint, prev_b
}
}
-func buildMetaBlockGreedy(ringbuffer []byte, pos uint, mask uint, prev_byte byte, prev_byte2 byte, literal_context_lut contextLUT, num_contexts uint, static_context_map []uint32, commands []command, n_commands uint, mb *metaBlockSplit) {
+func buildMetaBlockGreedy(ringbuffer []byte, pos uint, mask uint, prev_byte byte, prev_byte2 byte, literal_context_lut contextLUT, num_contexts uint, static_context_map []uint32, commands []command, mb *metaBlockSplit) {
if num_contexts == 1 {
- buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, 1, nil, commands, n_commands, mb)
+ buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, 1, nil, commands, mb)
} else {
- buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, num_contexts, static_context_map, commands, n_commands, mb)
+ buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, num_contexts, static_context_map, commands, mb)
}
}
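destroyMetaBlockSplit becomes freeMetaBlockSplit plus a sync.Pool: instead of nil-ing every slice, getMetaBlockSplit truncates the recycled slices to length zero so their capacity survives for the next meta-block. The same pattern on a toy type (`scratch` is illustrative; note the real pool above has no New function and handles the nil case by hand):

```go
package main

import "sync"

type scratch struct{ buf []byte }

var pool = sync.Pool{New: func() interface{} { return new(scratch) }}

func get() *scratch {
	s := pool.Get().(*scratch)
	s.buf = s.buf[:0] // drop stale contents, keep capacity
	return s
}

func put(s *scratch) { pool.Put(s) }
```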
diff --git a/vendor/github.com/andybalholm/brotli/metablock_command.go b/vendor/github.com/andybalholm/brotli/metablock_command.go
index d47541c5e6a..14c7b77135d 100644
--- a/vendor/github.com/andybalholm/brotli/metablock_command.go
+++ b/vendor/github.com/andybalholm/brotli/metablock_command.go
@@ -43,9 +43,12 @@ func initBlockSplitterCommand(self *blockSplitterCommand, alphabet_size uint, mi
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
self.split_.num_blocks = max_num_blocks
- assert(*histograms == nil)
*histograms_size = max_num_types
- *histograms = make([]histogramCommand, (*histograms_size))
+ if histograms == nil || cap(*histograms) < int(*histograms_size) {
+ *histograms = make([]histogramCommand, (*histograms_size))
+ } else {
+ *histograms = (*histograms)[:*histograms_size]
+ }
self.histograms_ = *histograms
/* Clear only current histogram. */
diff --git a/vendor/github.com/andybalholm/brotli/metablock_distance.go b/vendor/github.com/andybalholm/brotli/metablock_distance.go
index 95923127a6d..5110a810e96 100644
--- a/vendor/github.com/andybalholm/brotli/metablock_distance.go
+++ b/vendor/github.com/andybalholm/brotli/metablock_distance.go
@@ -43,9 +43,12 @@ func initBlockSplitterDistance(self *blockSplitterDistance, alphabet_size uint,
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
self.split_.num_blocks = max_num_blocks
- assert(*histograms == nil)
*histograms_size = max_num_types
- *histograms = make([]histogramDistance, (*histograms_size))
+ if histograms == nil || cap(*histograms) < int(*histograms_size) {
+ *histograms = make([]histogramDistance, *histograms_size)
+ } else {
+ *histograms = (*histograms)[:*histograms_size]
+ }
self.histograms_ = *histograms
/* Clear only current histogram. */
diff --git a/vendor/github.com/andybalholm/brotli/metablock_literal.go b/vendor/github.com/andybalholm/brotli/metablock_literal.go
index d7e8a7c9233..307f8da88f4 100644
--- a/vendor/github.com/andybalholm/brotli/metablock_literal.go
+++ b/vendor/github.com/andybalholm/brotli/metablock_literal.go
@@ -43,9 +43,12 @@ func initBlockSplitterLiteral(self *blockSplitterLiteral, alphabet_size uint, mi
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
self.split_.num_blocks = max_num_blocks
- assert(*histograms == nil)
*histograms_size = max_num_types
- *histograms = make([]histogramLiteral, (*histograms_size))
+ if histograms == nil || cap(*histograms) < int(*histograms_size) {
+ *histograms = make([]histogramLiteral, *histograms_size)
+ } else {
+ *histograms = (*histograms)[:*histograms_size]
+ }
self.histograms_ = *histograms
/* Clear only current histogram. */
diff --git a/vendor/github.com/andybalholm/brotli/ringbuffer.go b/vendor/github.com/andybalholm/brotli/ringbuffer.go
index 693a3f65d3a..1c8f86feece 100644
--- a/vendor/github.com/andybalholm/brotli/ringbuffer.go
+++ b/vendor/github.com/andybalholm/brotli/ringbuffer.go
@@ -27,10 +27,7 @@ type ringBuffer struct {
}
func ringBufferInit(rb *ringBuffer) {
- rb.cur_size_ = 0
rb.pos_ = 0
- rb.data_ = nil
- rb.buffer_ = nil
}
func ringBufferSetup(params *encoderParams, rb *ringBuffer) {
@@ -47,11 +44,16 @@ const kSlackForEightByteHashingEverywhere uint = 7
/* Allocates or re-allocates data_ to the given length + plus some slack
region before and after. Fills the slack regions with zeros. */
func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) {
- var new_data []byte = make([]byte, (2 + uint(buflen) + kSlackForEightByteHashingEverywhere))
+ var new_data []byte
var i uint
+ size := 2 + int(buflen) + int(kSlackForEightByteHashingEverywhere)
+ if cap(rb.data_) < size {
+ new_data = make([]byte, size)
+ } else {
+ new_data = rb.data_[:size]
+ }
if rb.data_ != nil {
copy(new_data, rb.data_[:2+rb.cur_size_+uint32(kSlackForEightByteHashingEverywhere)])
- rb.data_ = nil
}
rb.data_ = new_data
diff --git a/vendor/github.com/andybalholm/brotli/write_bits.go b/vendor/github.com/andybalholm/brotli/write_bits.go
index 8f15c20257f..2d216d7ccd9 100644
--- a/vendor/github.com/andybalholm/brotli/write_bits.go
+++ b/vendor/github.com/andybalholm/brotli/write_bits.go
@@ -8,49 +8,87 @@ package brotli
/* Write bits into a byte array. */
-/* This function writes bits into bytes in increasing addresses, and within
- a byte least-significant-bit first.
-
- The function can write up to 56 bits in one go with WriteBits
- Example: let's assume that 3 bits (Rs below) have been written already:
-
- BYTE-0 BYTE+1 BYTE+2
-
- 0000 0RRR 0000 0000 0000 0000
-
- Now, we could write 5 or less bits in MSB by just sifting by 3
- and OR'ing to BYTE-0.
-
- For n bits, we take the last 5 bits, OR that with high bits in BYTE-0,
- and locate the rest in BYTE+1, BYTE+2, etc. */
-func writeBits(n_bits uint, bits uint64, pos *uint, array []byte) {
- var array_pos []byte = array[*pos>>3:]
- var bits_reserved_in_first_byte uint = (*pos & 7)
- /* implicit & 0xFF is assumed for uint8_t arithmetics */
-
- var bits_left_to_write uint
- bits <<= bits_reserved_in_first_byte
- array_pos[0] |= byte(bits)
- array_pos = array_pos[1:]
- for bits_left_to_write = n_bits + bits_reserved_in_first_byte; bits_left_to_write >= 9; bits_left_to_write -= 8 {
- bits >>= 8
- array_pos[0] = byte(bits)
- array_pos = array_pos[1:]
- }
+type bitWriter struct {
+ dst []byte
+
+ // Data waiting to be written is the low nbits of bits.
+ bits uint64
+ nbits uint
+}
- array_pos[0] = 0
- *pos += n_bits
+func (w *bitWriter) writeBits(nb uint, b uint64) {
+ w.bits |= b << w.nbits
+ w.nbits += nb
+ if w.nbits >= 32 {
+ bits := w.bits
+ w.bits >>= 32
+ w.nbits -= 32
+ w.dst = append(w.dst,
+ byte(bits),
+ byte(bits>>8),
+ byte(bits>>16),
+ byte(bits>>24),
+ )
+ }
}
-func writeSingleBit(bit bool, pos *uint, array []byte) {
+func (w *bitWriter) writeSingleBit(bit bool) {
if bit {
- writeBits(1, 1, pos, array)
+ w.writeBits(1, 1)
} else {
- writeBits(1, 0, pos, array)
+ w.writeBits(1, 0)
+ }
+}
+
+func (w *bitWriter) jumpToByteBoundary() {
+ dst := w.dst
+ for w.nbits != 0 {
+ dst = append(dst, byte(w.bits))
+ w.bits >>= 8
+ if w.nbits > 8 { // Avoid underflow
+ w.nbits -= 8
+ } else {
+ w.nbits = 0
+ }
+ }
+ w.bits = 0
+ w.dst = dst
+}
+
+func (w *bitWriter) writeBytes(b []byte) {
+ if w.nbits&7 != 0 {
+ panic("writeBytes with unfinished bits")
}
+ for w.nbits != 0 {
+ w.dst = append(w.dst, byte(w.bits))
+ w.bits >>= 8
+ w.nbits -= 8
+ }
+ w.dst = append(w.dst, b...)
+}
+
+func (w *bitWriter) getPos() uint {
+ return uint(len(w.dst)<<3) + w.nbits
+}
+
+func (w *bitWriter) rewind(p uint) {
+ w.bits = uint64(w.dst[p>>3] & byte((1<<(p&7))-1))
+ w.nbits = p & 7
+ w.dst = w.dst[:p>>3]
}
-func writeBitsPrepareStorage(pos uint, array []byte) {
- assert(pos&7 == 0)
- array[pos>>3] = 0
+func (w *bitWriter) updateBits(n_bits uint, bits uint32, pos uint) {
+ for n_bits > 0 {
+ var byte_pos uint = pos >> 3
+ var n_unchanged_bits uint = pos & 7
+ var n_changed_bits uint = brotli_min_size_t(n_bits, 8-n_unchanged_bits)
+ var total_bits uint = n_unchanged_bits + n_changed_bits
+ var mask uint32 = (^((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1)
+ var unchanged_bits uint32 = uint32(w.dst[byte_pos]) & mask
+ var changed_bits uint32 = bits & ((1 << n_changed_bits) - 1)
+ w.dst[byte_pos] = byte(changed_bits<<n_unchanged_bits | unchanged_bits)
+ n_bits -= n_changed_bits
+ bits >>= n_changed_bits
+ pos += n_changed_bits
+ }
}
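The free functions over (*pos, storage) are replaced by a bitWriter that packs LSB-first into a 64-bit accumulator and appends four bytes whenever 32 or more bits are pending, which is what keeps nbits below 32 between calls. A worked example against the type as defined above:

```go
var w bitWriter
w.writeBits(3, 0b101)
w.writeBits(3, 0b101)
w.writeBits(3, 0b101)
// w.bits == 0b101101101, w.nbits == 9; below 32, so nothing
// has been flushed to w.dst yet.
w.jumpToByteBoundary()
// w.dst == []byte{0x6d, 0x01}: the low byte 0b01101101 first,
// then the remaining high bit padded out to a byte.
```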
diff --git a/vendor/github.com/andybalholm/brotli/writer.go b/vendor/github.com/andybalholm/brotli/writer.go
index 22f0faf0b5d..63676b46737 100644
--- a/vendor/github.com/andybalholm/brotli/writer.go
+++ b/vendor/github.com/andybalholm/brotli/writer.go
@@ -1,12 +1,8 @@
package brotli
import (
- "compress/gzip"
"errors"
"io"
- "net/http"
-
- "github.com/golang/gddo/httputil"
)
const (
@@ -71,6 +67,9 @@ func (w *Writer) writeChunk(p []byte, op int) (n int, err error) {
if w.dst == nil {
return 0, errWriterClosed
}
+ if w.err != nil {
+ return 0, w.err
+ }
for {
availableIn := uint(len(p))
@@ -83,16 +82,8 @@ func (w *Writer) writeChunk(p []byte, op int) (n int, err error) {
return n, errEncode
}
- outputData := encoderTakeOutput(w)
-
- if len(outputData) > 0 {
- _, err = w.dst.Write(outputData)
- if err != nil {
- return n, err
- }
- }
- if len(p) == 0 {
- return n, nil
+ if len(p) == 0 || w.err != nil {
+ return n, w.err
}
}
}
@@ -125,32 +116,3 @@ type nopCloser struct {
}
func (nopCloser) Close() error { return nil }
-
-// HTTPCompressor chooses a compression method (brotli, gzip, or none) based on
-// the Accept-Encoding header, sets the Content-Encoding header, and returns a
-// WriteCloser that implements that compression. The Close method must be called
-// before the current HTTP handler returns.
-//
-// Due to https://github.com/golang/go/issues/31753, the response will not be
-// compressed unless you set a Content-Type header before you call
-// HTTPCompressor.
-func HTTPCompressor(w http.ResponseWriter, r *http.Request) io.WriteCloser {
- if w.Header().Get("Content-Type") == "" {
- return nopCloser{w}
- }
-
- if w.Header().Get("Vary") == "" {
- w.Header().Set("Vary", "Accept-Encoding")
- }
-
- encoding := httputil.NegotiateContentEncoding(r, []string{"br", "gzip"})
- switch encoding {
- case "br":
- w.Header().Set("Content-Encoding", "br")
- return NewWriter(w)
- case "gzip":
- w.Header().Set("Content-Encoding", "gzip")
- return gzip.NewWriter(w)
- }
- return nopCloser{w}
-}
diff --git a/vendor/github.com/andybalholm/cascadia/README.md b/vendor/github.com/andybalholm/cascadia/README.md
index 9021cb92aa3..26f4c37b360 100644
--- a/vendor/github.com/andybalholm/cascadia/README.md
+++ b/vendor/github.com/andybalholm/cascadia/README.md
@@ -5,3 +5,5 @@
The Cascadia package implements CSS selectors for use with the parse trees produced by the html package.
To test CSS selectors without writing Go code, check out [cascadia](https://github.com/suntong/cascadia) the command line tool, a thin wrapper around this package.
+
+[Refer to godoc here](https://godoc.org/github.com/andybalholm/cascadia).
diff --git a/vendor/github.com/andybalholm/cascadia/parser.go b/vendor/github.com/andybalholm/cascadia/parser.go
index 495db9ccfc9..4f8810e3ef9 100644
--- a/vendor/github.com/andybalholm/cascadia/parser.go
+++ b/vendor/github.com/andybalholm/cascadia/parser.go
@@ -7,8 +7,6 @@ import (
"regexp"
"strconv"
"strings"
-
- "golang.org/x/net/html"
)
// a parser for CSS selectors
@@ -56,6 +54,26 @@ func (p *parser) parseEscape() (result string, err error) {
return result, nil
}
+// toLowerASCII returns s with all ASCII capital letters lowercased.
+func toLowerASCII(s string) string {
+ var b []byte
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; 'A' <= c && c <= 'Z' {
+ if b == nil {
+ b = make([]byte, len(s))
+ copy(b, s)
+ }
+ b[i] = s[i] + ('a' - 'A')
+ }
+ }
+
+ if b == nil {
+ return s
+ }
+
+ return string(b)
+}
+
func hexDigit(c byte) bool {
return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F'
}
@@ -280,92 +298,92 @@ func (p *parser) consumeClosingParenthesis() bool {
}
// parseTypeSelector parses a type selector (one that matches by tag name).
-func (p *parser) parseTypeSelector() (result Selector, err error) {
+func (p *parser) parseTypeSelector() (result tagSelector, err error) {
tag, err := p.parseIdentifier()
if err != nil {
- return nil, err
+ return
}
-
- return typeSelector(tag), nil
+ return tagSelector{tag: toLowerASCII(tag)}, nil
}
// parseIDSelector parses a selector that matches by id attribute.
-func (p *parser) parseIDSelector() (Selector, error) {
+func (p *parser) parseIDSelector() (idSelector, error) {
if p.i >= len(p.s) {
- return nil, fmt.Errorf("expected id selector (#id), found EOF instead")
+ return idSelector{}, fmt.Errorf("expected id selector (#id), found EOF instead")
}
if p.s[p.i] != '#' {
- return nil, fmt.Errorf("expected id selector (#id), found '%c' instead", p.s[p.i])
+ return idSelector{}, fmt.Errorf("expected id selector (#id), found '%c' instead", p.s[p.i])
}
p.i++
id, err := p.parseName()
if err != nil {
- return nil, err
+ return idSelector{}, err
}
- return attributeEqualsSelector("id", id), nil
+ return idSelector{id: id}, nil
}
// parseClassSelector parses a selector that matches by class attribute.
-func (p *parser) parseClassSelector() (Selector, error) {
+func (p *parser) parseClassSelector() (classSelector, error) {
if p.i >= len(p.s) {
- return nil, fmt.Errorf("expected class selector (.class), found EOF instead")
+ return classSelector{}, fmt.Errorf("expected class selector (.class), found EOF instead")
}
if p.s[p.i] != '.' {
- return nil, fmt.Errorf("expected class selector (.class), found '%c' instead", p.s[p.i])
+ return classSelector{}, fmt.Errorf("expected class selector (.class), found '%c' instead", p.s[p.i])
}
p.i++
class, err := p.parseIdentifier()
if err != nil {
- return nil, err
+ return classSelector{}, err
}
- return attributeIncludesSelector("class", class), nil
+ return classSelector{class: class}, nil
}
// parseAttributeSelector parses a selector that matches by attribute value.
-func (p *parser) parseAttributeSelector() (Selector, error) {
+func (p *parser) parseAttributeSelector() (attrSelector, error) {
if p.i >= len(p.s) {
- return nil, fmt.Errorf("expected attribute selector ([attribute]), found EOF instead")
+ return attrSelector{}, fmt.Errorf("expected attribute selector ([attribute]), found EOF instead")
}
if p.s[p.i] != '[' {
- return nil, fmt.Errorf("expected attribute selector ([attribute]), found '%c' instead", p.s[p.i])
+ return attrSelector{}, fmt.Errorf("expected attribute selector ([attribute]), found '%c' instead", p.s[p.i])
}
p.i++
p.skipWhitespace()
key, err := p.parseIdentifier()
if err != nil {
- return nil, err
+ return attrSelector{}, err
}
+ key = toLowerASCII(key)
p.skipWhitespace()
if p.i >= len(p.s) {
- return nil, errors.New("unexpected EOF in attribute selector")
+ return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
if p.s[p.i] == ']' {
p.i++
- return attributeExistsSelector(key), nil
+ return attrSelector{key: key, operation: ""}, nil
}
if p.i+2 >= len(p.s) {
- return nil, errors.New("unexpected EOF in attribute selector")
+ return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
op := p.s[p.i : p.i+2]
if op[0] == '=' {
op = "="
} else if op[1] != '=' {
- return nil, fmt.Errorf(`expected equality operator, found "%s" instead`, op)
+ return attrSelector{}, fmt.Errorf(`expected equality operator, found "%s" instead`, op)
}
p.i += len(op)
p.skipWhitespace()
if p.i >= len(p.s) {
- return nil, errors.New("unexpected EOF in attribute selector")
+ return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
var val string
var rx *regexp.Regexp
@@ -380,46 +398,32 @@ func (p *parser) parseAttributeSelector() (Selector, error) {
}
}
if err != nil {
- return nil, err
+ return attrSelector{}, err
}
p.skipWhitespace()
if p.i >= len(p.s) {
- return nil, errors.New("unexpected EOF in attribute selector")
+ return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
if p.s[p.i] != ']' {
- return nil, fmt.Errorf("expected ']', found '%c' instead", p.s[p.i])
+ return attrSelector{}, fmt.Errorf("expected ']', found '%c' instead", p.s[p.i])
}
p.i++
switch op {
- case "=":
- return attributeEqualsSelector(key, val), nil
- case "!=":
- return attributeNotEqualSelector(key, val), nil
- case "~=":
- return attributeIncludesSelector(key, val), nil
- case "|=":
- return attributeDashmatchSelector(key, val), nil
- case "^=":
- return attributePrefixSelector(key, val), nil
- case "$=":
- return attributeSuffixSelector(key, val), nil
- case "*=":
- return attributeSubstringSelector(key, val), nil
- case "#=":
- return attributeRegexSelector(key, rx), nil
- }
-
- return nil, fmt.Errorf("attribute operator %q is not supported", op)
+ case "=", "!=", "~=", "|=", "^=", "$=", "*=", "#=":
+ return attrSelector{key: key, val: val, operation: op, regexp: rx}, nil
+ default:
+ return attrSelector{}, fmt.Errorf("attribute operator %q is not supported", op)
+ }
}
var errExpectedParenthesis = errors.New("expected '(' but didn't find it")
var errExpectedClosingParenthesis = errors.New("expected ')' but didn't find it")
var errUnmatchedParenthesis = errors.New("unmatched '('")
-// parsePseudoclassSelector parses a pseudoclass selector like :not(p).
-func (p *parser) parsePseudoclassSelector() (Selector, error) {
+// parsePseudoclassSelector parses a pseudoclass selector like :not(p)
+func (p *parser) parsePseudoclassSelector() (out Sel, err error) {
if p.i >= len(p.s) {
return nil, fmt.Errorf("expected pseudoclass selector (:pseudoclass), found EOF instead")
}
@@ -428,40 +432,36 @@ func (p *parser) parsePseudoclassSelector() (Selector, error) {
}
p.i++
+ if p.s[p.i] == ':' { // we found a pseudo-element
+ p.i++
+ }
+
name, err := p.parseIdentifier()
if err != nil {
- return nil, err
+ return
}
name = toLowerASCII(name)
-
switch name {
case "not", "has", "haschild":
if !p.consumeParenthesis() {
- return nil, errExpectedParenthesis
+ return out, errExpectedParenthesis
}
sel, parseErr := p.parseSelectorGroup()
if parseErr != nil {
- return nil, parseErr
+ return out, parseErr
}
if !p.consumeClosingParenthesis() {
- return nil, errExpectedClosingParenthesis
+ return out, errExpectedClosingParenthesis
}
- switch name {
- case "not":
- return negatedSelector(sel), nil
- case "has":
- return hasDescendantSelector(sel), nil
- case "haschild":
- return hasChildSelector(sel), nil
- }
+ out = relativePseudoClassSelector{name: name, match: sel}
case "contains", "containsown":
if !p.consumeParenthesis() {
- return nil, errExpectedParenthesis
+ return out, errExpectedParenthesis
}
if p.i == len(p.s) {
- return nil, errUnmatchedParenthesis
+ return out, errUnmatchedParenthesis
}
var val string
switch p.s[p.i] {
@@ -471,95 +471,75 @@ func (p *parser) parsePseudoclassSelector() (Selector, error) {
val, err = p.parseIdentifier()
}
if err != nil {
- return nil, err
+ return out, err
}
val = strings.ToLower(val)
p.skipWhitespace()
if p.i >= len(p.s) {
- return nil, errors.New("unexpected EOF in pseudo selector")
+ return out, errors.New("unexpected EOF in pseudo selector")
}
if !p.consumeClosingParenthesis() {
- return nil, errExpectedClosingParenthesis
+ return out, errExpectedClosingParenthesis
}
- switch name {
- case "contains":
- return textSubstrSelector(val), nil
- case "containsown":
- return ownTextSubstrSelector(val), nil
- }
+ out = containsPseudoClassSelector{own: name == "containsown", value: val}
case "matches", "matchesown":
if !p.consumeParenthesis() {
- return nil, errExpectedParenthesis
+ return out, errExpectedParenthesis
}
rx, err := p.parseRegex()
if err != nil {
- return nil, err
+ return out, err
}
if p.i >= len(p.s) {
- return nil, errors.New("unexpected EOF in pseudo selector")
+ return out, errors.New("unexpected EOF in pseudo selector")
}
if !p.consumeClosingParenthesis() {
- return nil, errExpectedClosingParenthesis
+ return out, errExpectedClosingParenthesis
}
- switch name {
- case "matches":
- return textRegexSelector(rx), nil
- case "matchesown":
- return ownTextRegexSelector(rx), nil
- }
+ out = regexpPseudoClassSelector{own: name == "matchesown", regexp: rx}
case "nth-child", "nth-last-child", "nth-of-type", "nth-last-of-type":
if !p.consumeParenthesis() {
- return nil, errExpectedParenthesis
+ return out, errExpectedParenthesis
}
a, b, err := p.parseNth()
if err != nil {
- return nil, err
+ return out, err
}
if !p.consumeClosingParenthesis() {
- return nil, errExpectedClosingParenthesis
- }
- if a == 0 {
- switch name {
- case "nth-child":
- return simpleNthChildSelector(b, false), nil
- case "nth-of-type":
- return simpleNthChildSelector(b, true), nil
- case "nth-last-child":
- return simpleNthLastChildSelector(b, false), nil
- case "nth-last-of-type":
- return simpleNthLastChildSelector(b, true), nil
- }
+ return out, errExpectedClosingParenthesis
}
- return nthChildSelector(a, b,
- name == "nth-last-child" || name == "nth-last-of-type",
- name == "nth-of-type" || name == "nth-last-of-type"),
- nil
+ last := name == "nth-last-child" || name == "nth-last-of-type"
+ ofType := name == "nth-of-type" || name == "nth-last-of-type"
+ out = nthPseudoClassSelector{a: a, b: b, last: last, ofType: ofType}
case "first-child":
- return simpleNthChildSelector(1, false), nil
+ out = nthPseudoClassSelector{a: 0, b: 1, ofType: false, last: false}
case "last-child":
- return simpleNthLastChildSelector(1, false), nil
+ out = nthPseudoClassSelector{a: 0, b: 1, ofType: false, last: true}
case "first-of-type":
- return simpleNthChildSelector(1, true), nil
+ out = nthPseudoClassSelector{a: 0, b: 1, ofType: true, last: false}
case "last-of-type":
- return simpleNthLastChildSelector(1, true), nil
+ out = nthPseudoClassSelector{a: 0, b: 1, ofType: true, last: true}
case "only-child":
- return onlyChildSelector(false), nil
+ out = onlyChildPseudoClassSelector{ofType: false}
case "only-of-type":
- return onlyChildSelector(true), nil
+ out = onlyChildPseudoClassSelector{ofType: true}
case "input":
- return inputSelector, nil
+ out = inputPseudoClassSelector{}
case "empty":
- return emptyElementSelector, nil
+ out = emptyElementPseudoClassSelector{}
case "root":
- return rootSelector, nil
+ out = rootPseudoClassSelector{}
+ case "after", "backdrop", "before", "cue", "first-letter", "first-line", "grammar-error", "marker", "placeholder", "selection", "spelling-error":
+ return out, errors.New("pseudo-elements are not yet supported")
+ default:
+ return out, fmt.Errorf("unknown pseudoclass or pseudoelement :%s", name)
}
-
- return nil, fmt.Errorf("unknown pseudoclass :%s", name)
+ return
}
// parseInteger parses a decimal integer.
@@ -705,8 +685,8 @@ invalid:
// parseSimpleSelectorSequence parses a selector sequence that applies to
// a single element.
-func (p *parser) parseSimpleSelectorSequence() (Selector, error) {
- var result Selector
+func (p *parser) parseSimpleSelectorSequence() (Sel, error) {
+ var selectors []Sel
if p.i >= len(p.s) {
return nil, errors.New("expected selector, found EOF instead")
@@ -723,13 +703,15 @@ func (p *parser) parseSimpleSelectorSequence() (Selector, error) {
if err != nil {
return nil, err
}
- result = r
+ selectors = append(selectors, r)
}
loop:
for p.i < len(p.s) {
- var ns Selector
- var err error
+ var (
+ ns Sel
+ err error
+ )
switch p.s[p.i] {
case '#':
ns, err = p.parseIDSelector()
@@ -745,37 +727,33 @@ loop:
if err != nil {
return nil, err
}
- if result == nil {
- result = ns
- } else {
- result = intersectionSelector(result, ns)
- }
- }
- if result == nil {
- result = func(n *html.Node) bool {
- return n.Type == html.ElementNode
- }
+ selectors = append(selectors, ns)
}
-
- return result, nil
+ if len(selectors) == 1 { // no need to wrap a single selector in compoundSelector
+ return selectors[0], nil
+ }
+ return compoundSelector{selectors: selectors}, nil
}
// parseSelector parses a selector that may include combinators.
-func (p *parser) parseSelector() (result Selector, err error) {
+func (p *parser) parseSelector() (Sel, error) {
p.skipWhitespace()
- result, err = p.parseSimpleSelectorSequence()
+ result, err := p.parseSimpleSelectorSequence()
if err != nil {
- return
+ return nil, err
}
for {
- var combinator byte
+ var (
+ combinator byte
+ c Sel
+ )
if p.skipWhitespace() {
combinator = ' '
}
if p.i >= len(p.s) {
- return
+ return result, nil
}
switch p.s[p.i] {
@@ -785,51 +763,39 @@ func (p *parser) parseSelector() (result Selector, err error) {
p.skipWhitespace()
case ',', ')':
// These characters can't begin a selector, but they can legally occur after one.
- return
+ return result, nil
}
if combinator == 0 {
- return
+ return result, nil
}
- c, err := p.parseSimpleSelectorSequence()
+ c, err = p.parseSimpleSelectorSequence()
if err != nil {
return nil, err
}
-
- switch combinator {
- case ' ':
- result = descendantSelector(result, c)
- case '>':
- result = childSelector(result, c)
- case '+':
- result = siblingSelector(result, c, true)
- case '~':
- result = siblingSelector(result, c, false)
- }
+ result = combinedSelector{first: result, combinator: combinator, second: c}
}
-
- panic("unreachable")
}
// parseSelectorGroup parses a group of selectors, separated by commas.
-func (p *parser) parseSelectorGroup() (result Selector, err error) {
- result, err = p.parseSelector()
+func (p *parser) parseSelectorGroup() (SelectorGroup, error) {
+ current, err := p.parseSelector()
if err != nil {
- return
+ return nil, err
}
+ result := SelectorGroup{current}
for p.i < len(p.s) {
if p.s[p.i] != ',' {
- return result, nil
+ break
}
p.i++
c, err := p.parseSelector()
if err != nil {
return nil, err
}
- result = unionSelector(result, c)
+ result = append(result, c)
}
-
- return
+ return result, nil
}
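The parser now builds concrete selector values (tagSelector, compoundSelector, combinedSelector, SelectorGroup) instead of composing closures, which is what lets every compiled selector report a Specificity. A short sketch of the reworked API from the caller's side, using Parse together with the Query helpers added in selector.go below:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/andybalholm/cascadia"
	"golang.org/x/net/html"
)

func main() {
	doc, err := html.Parse(strings.NewReader(
		`<div><p class="x">one</p><p>two</p></div>`))
	if err != nil {
		panic(err)
	}
	sel, err := cascadia.Parse("p.x") // a Sel: Matcher + Specificity
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.Specificity())                // [0 1 1]
	fmt.Println(len(cascadia.QueryAll(doc, sel))) // 1
}
```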
diff --git a/vendor/github.com/andybalholm/cascadia/selector.go b/vendor/github.com/andybalholm/cascadia/selector.go
index 9fb05ccb777..18ce1164402 100644
--- a/vendor/github.com/andybalholm/cascadia/selector.go
+++ b/vendor/github.com/andybalholm/cascadia/selector.go
@@ -9,36 +9,37 @@ import (
"golang.org/x/net/html"
)
-// the Selector type, and functions for creating them
+// Matcher is the interface for basic selector functionality.
+// Match returns whether a selector matches n.
+type Matcher interface {
+ Match(n *html.Node) bool
+}
-// A Selector is a function which tells whether a node matches or not.
-type Selector func(*html.Node) bool
+// Sel is the interface for all the functionality provided by selectors.
+// It is currently the same as Matcher, but other methods may be added in the
+// future.
+type Sel interface {
+ Matcher
+ Specificity() Specificity
+}
-// hasChildMatch returns whether n has any child that matches a.
-func hasChildMatch(n *html.Node, a Selector) bool {
- for c := n.FirstChild; c != nil; c = c.NextSibling {
- if a(c) {
- return true
- }
+// Parse parses a selector.
+func Parse(sel string) (Sel, error) {
+ p := &parser{s: sel}
+ compiled, err := p.parseSelector()
+ if err != nil {
+ return nil, err
}
- return false
-}
-// hasDescendantMatch performs a depth-first search of n's descendants,
-// testing whether any of them match a. It returns true as soon as a match is
-// found, or false if no match is found.
-func hasDescendantMatch(n *html.Node, a Selector) bool {
- for c := n.FirstChild; c != nil; c = c.NextSibling {
- if a(c) || (c.Type == html.ElementNode && hasDescendantMatch(c, a)) {
- return true
- }
+ if p.i < len(sel) {
+ return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
}
- return false
+
+ return compiled, nil
}
-// Compile parses a selector and returns, if successful, a Selector object
-// that can be used to match against html.Node objects.
-func Compile(sel string) (Selector, error) {
+// ParseGroup parses a selector, or a group of selectors separated by commas.
+func ParseGroup(sel string) (SelectorGroup, error) {
p := &parser{s: sel}
compiled, err := p.parseSelectorGroup()
if err != nil {
@@ -52,6 +53,23 @@ func Compile(sel string) (Selector, error) {
return compiled, nil
}
+// A Selector is a function which tells whether a node matches or not.
+//
+// This type is maintained for compatibility; I recommend using the newer and
+// more idiomatic interfaces Sel and Matcher.
+type Selector func(*html.Node) bool
+
+// Compile parses a selector and returns, if successful, a Selector object
+// that can be used to match against html.Node objects.
+func Compile(sel string) (Selector, error) {
+ compiled, err := ParseGroup(sel)
+ if err != nil {
+ return nil, err
+ }
+
+ return Selector(compiled.Match), nil
+}
+
// MustCompile is like Compile, but panics instead of returning an error.
func MustCompile(sel string) Selector {
compiled, err := Compile(sel)
@@ -79,6 +97,23 @@ func (s Selector) matchAllInto(n *html.Node, storage []*html.Node) []*html.Node
return storage
}
+func queryInto(n *html.Node, m Matcher, storage []*html.Node) []*html.Node {
+ for child := n.FirstChild; child != nil; child = child.NextSibling {
+ if m.Match(child) {
+ storage = append(storage, child)
+ }
+ storage = queryInto(child, m, storage)
+ }
+
+ return storage
+}
+
+// QueryAll returns a slice of all the nodes that match m, from the descendants
+// of n.
+func QueryAll(n *html.Node, m Matcher) []*html.Node {
+ return queryInto(n, m, nil)
+}
+
// Match returns true if the node matches the selector.
func (s Selector) Match(n *html.Node) bool {
return s(n)
@@ -99,6 +134,21 @@ func (s Selector) MatchFirst(n *html.Node) *html.Node {
return nil
}
+// Query returns the first node that matches m, from the descendants of n.
+// If none matches, it returns nil.
+func Query(n *html.Node, m Matcher) *html.Node {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if m.Match(c) {
+ return c
+ }
+ if matched := Query(c, m); matched != nil {
+ return matched
+ }
+ }
+
+ return nil
+}
+
// Filter returns the nodes in nodes that match the selector.
func (s Selector) Filter(nodes []*html.Node) (result []*html.Node) {
for _, n := range nodes {
@@ -109,106 +159,136 @@ func (s Selector) Filter(nodes []*html.Node) (result []*html.Node) {
return result
}
-// typeSelector returns a Selector that matches elements with a given tag name.
-func typeSelector(tag string) Selector {
- tag = toLowerASCII(tag)
- return func(n *html.Node) bool {
- return n.Type == html.ElementNode && n.Data == tag
+// Filter returns the nodes that match m.
+func Filter(nodes []*html.Node, m Matcher) (result []*html.Node) {
+ for _, n := range nodes {
+ if m.Match(n) {
+ result = append(result, n)
+ }
}
+ return result
}
-// toLowerASCII returns s with all ASCII capital letters lowercased.
-func toLowerASCII(s string) string {
- var b []byte
- for i := 0; i < len(s); i++ {
- if c := s[i]; 'A' <= c && c <= 'Z' {
- if b == nil {
- b = make([]byte, len(s))
- copy(b, s)
- }
- b[i] = s[i] + ('a' - 'A')
- }
- }
+type tagSelector struct {
+ tag string
+}
- if b == nil {
- return s
- }
+// Matches elements with a given tag name.
+func (t tagSelector) Match(n *html.Node) bool {
+ return n.Type == html.ElementNode && n.Data == t.tag
+}
- return string(b)
+func (c tagSelector) Specificity() Specificity {
+ return Specificity{0, 0, 1}
}
-// attributeSelector returns a Selector that matches elements
-// where the attribute named key satisifes the function f.
-func attributeSelector(key string, f func(string) bool) Selector {
- key = toLowerASCII(key)
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
- }
- for _, a := range n.Attr {
- if a.Key == key && f(a.Val) {
- return true
- }
- }
- return false
- }
+type classSelector struct {
+ class string
}
-// attributeExistsSelector returns a Selector that matches elements that have
-// an attribute named key.
-func attributeExistsSelector(key string) Selector {
- return attributeSelector(key, func(string) bool { return true })
+// Matches elements by class attribute.
+func (t classSelector) Match(n *html.Node) bool {
+ return matchAttribute(n, "class", func(s string) bool {
+ return matchInclude(t.class, s)
+ })
}
-// attributeEqualsSelector returns a Selector that matches elements where
-// the attribute named key has the value val.
-func attributeEqualsSelector(key, val string) Selector {
- return attributeSelector(key,
- func(s string) bool {
- return s == val
- })
+func (c classSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
+}
+
+type idSelector struct {
+ id string
+}
+
+// Matches elements by id attribute.
+func (t idSelector) Match(n *html.Node) bool {
+ return matchAttribute(n, "id", func(s string) bool {
+ return s == t.id
+ })
+}
+
+func (c idSelector) Specificity() Specificity {
+ return Specificity{1, 0, 0}
+}
+
+type attrSelector struct {
+ key, val, operation string
+ regexp *regexp.Regexp
+}
+
+// Matches elements by attribute value.
+func (t attrSelector) Match(n *html.Node) bool {
+ switch t.operation {
+ case "":
+ return matchAttribute(n, t.key, func(string) bool { return true })
+ case "=":
+ return matchAttribute(n, t.key, func(s string) bool { return s == t.val })
+ case "!=":
+ return attributeNotEqualMatch(t.key, t.val, n)
+ case "~=":
+ // matches elements where the value of the attribute named key is a whitespace-separated list that includes val.
+ return matchAttribute(n, t.key, func(s string) bool { return matchInclude(t.val, s) })
+ case "|=":
+ return attributeDashMatch(t.key, t.val, n)
+ case "^=":
+ return attributePrefixMatch(t.key, t.val, n)
+ case "$=":
+ return attributeSuffixMatch(t.key, t.val, n)
+ case "*=":
+ return attributeSubstringMatch(t.key, t.val, n)
+ case "#=":
+ return attributeRegexMatch(t.key, t.regexp, n)
+ default:
+ panic(fmt.Sprintf("unsuported operation : %s", t.operation))
+ }
}
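+
+// For reference, the operations above correspond to the CSS attribute
+// selector forms [key], [key=val], [key!=val], [key~=val], [key|=val],
+// [key^=val], [key$=val], [key*=val], plus the non-standard regular
+// expression form [key#=(regexp)].
+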
-// attributeNotEqualSelector returns a Selector that matches elements where
+// matchAttribute matches elements where the attribute named key satisfies the function f.
+func matchAttribute(n *html.Node, key string, f func(string) bool) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ for _, a := range n.Attr {
+ if a.Key == key && f(a.Val) {
+ return true
+ }
+ }
+ return false
+}
+
+// attributeNotEqualMatch matches elements where
// the attribute named key does not have the value val.
-func attributeNotEqualSelector(key, val string) Selector {
- key = toLowerASCII(key)
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
+func attributeNotEqualMatch(key, val string, n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ for _, a := range n.Attr {
+ if a.Key == key && a.Val == val {
return false
}
- for _, a := range n.Attr {
- if a.Key == key && a.Val == val {
- return false
- }
- }
- return true
}
+ return true
}
-// attributeIncludesSelector returns a Selector that matches elements where
-// the attribute named key is a whitespace-separated list that includes val.
-func attributeIncludesSelector(key, val string) Selector {
- return attributeSelector(key,
- func(s string) bool {
- for s != "" {
- i := strings.IndexAny(s, " \t\r\n\f")
- if i == -1 {
- return s == val
- }
- if s[:i] == val {
- return true
- }
- s = s[i+1:]
- }
- return false
- })
+// matchInclude returns true if s is a whitespace-separated list that includes val.
+func matchInclude(val, s string) bool {
+ for s != "" {
+ i := strings.IndexAny(s, " \t\r\n\f")
+ if i == -1 {
+ return s == val
+ }
+ if s[:i] == val {
+ return true
+ }
+ s = s[i+1:]
+ }
+ return false
}
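+
+// For example, matchInclude("btn", "btn btn-primary") is true, while
+// matchInclude("btn", "btn-primary") is false.
+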
-// attributeDashmatchSelector returns a Selector that matches elements where
-// the attribute named key equals val or starts with val plus a hyphen.
-func attributeDashmatchSelector(key, val string) Selector {
- return attributeSelector(key,
+// matches elements where the attribute named key equals val or starts with val plus a hyphen.
+func attributeDashMatch(key, val string, n *html.Node) bool {
+ return matchAttribute(n, key,
func(s string) bool {
if s == val {
return true
@@ -223,10 +303,10 @@ func attributeDashmatchSelector(key, val string) Selector {
})
}
-// attributePrefixSelector returns a Selector that matches elements where
+// attributePrefixMatch matches elements where
// the attribute named key starts with val.
-func attributePrefixSelector(key, val string) Selector {
- return attributeSelector(key,
+func attributePrefixMatch(key, val string, n *html.Node) bool {
+ return matchAttribute(n, key,
func(s string) bool {
if strings.TrimSpace(s) == "" {
return false
@@ -235,10 +315,10 @@ func attributePrefixSelector(key, val string) Selector {
})
}
-// attributeSuffixSelector returns a Selector that matches elements where
+// attributeSuffixMatch matches elements where
// the attribute named key ends with val.
-func attributeSuffixSelector(key, val string) Selector {
- return attributeSelector(key,
+func attributeSuffixMatch(key, val string, n *html.Node) bool {
+ return matchAttribute(n, key,
func(s string) bool {
if strings.TrimSpace(s) == "" {
return false
@@ -247,10 +327,10 @@ func attributeSuffixSelector(key, val string) Selector {
})
}
-// attributeSubstringSelector returns a Selector that matches nodes where
+// attributeSubstringMatch matches nodes where
// the attribute named key contains val.
-func attributeSubstringSelector(key, val string) Selector {
- return attributeSelector(key,
+func attributeSubstringMatch(key, val string, n *html.Node) bool {
+ return matchAttribute(n, key,
func(s string) bool {
if strings.TrimSpace(s) == "" {
return false
@@ -259,39 +339,118 @@ func attributeSubstringSelector(key, val string) Selector {
})
}
-// attributeRegexSelector returns a Selector that matches nodes where
+// attributeRegexMatch matches nodes where
// the attribute named key matches the regular expression rx
-func attributeRegexSelector(key string, rx *regexp.Regexp) Selector {
- return attributeSelector(key,
+func attributeRegexMatch(key string, rx *regexp.Regexp, n *html.Node) bool {
+ return matchAttribute(n, key,
func(s string) bool {
return rx.MatchString(s)
})
}
-// intersectionSelector returns a selector that matches nodes that match
-// both a and b.
-func intersectionSelector(a, b Selector) Selector {
- return func(n *html.Node) bool {
- return a(n) && b(n)
+func (c attrSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
+}
+
+// ---------------- Pseudo class selectors ----------------
+// We use several concrete types of pseudo-class selectors.
+
+type relativePseudoClassSelector struct {
+ name string // one of "not", "has", "haschild"
+ match SelectorGroup
+}
+
+func (s relativePseudoClassSelector) Match(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ switch s.name {
+ case "not":
+ // matches elements that do not match a.
+ return !s.match.Match(n)
+ case "has":
+ // matches elements with any descendant that matches a.
+ return hasDescendantMatch(n, s.match)
+ case "haschild":
+ // matches elements with a child that matches a.
+ return hasChildMatch(n, s.match)
+ default:
+ panic(fmt.Sprintf("unsupported relative pseudo class selector : %s", s.name))
}
}
-// unionSelector returns a selector that matches elements that match
-// either a or b.
-func unionSelector(a, b Selector) Selector {
- return func(n *html.Node) bool {
- return a(n) || b(n)
+// hasChildMatch returns whether n has any child that matches a.
+func hasChildMatch(n *html.Node, a Matcher) bool {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if a.Match(c) {
+ return true
+ }
}
+ return false
}
-// negatedSelector returns a selector that matches elements that do not match a.
-func negatedSelector(a Selector) Selector {
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
+// hasDescendantMatch performs a depth-first search of n's descendants,
+// testing whether any of them match a. It returns true as soon as a match is
+// found, or false if no match is found.
+func hasDescendantMatch(n *html.Node, a Matcher) bool {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if a.Match(c) || (c.Type == html.ElementNode && hasDescendantMatch(c, a)) {
+ return true
+ }
+ }
+ return false
+}
+
+// Specificity returns the specificity of the most specific selector
+// in the pseudo-class arguments.
+// See https://www.w3.org/TR/selectors/#specificity-rules
+func (s relativePseudoClassSelector) Specificity() Specificity {
+ var max Specificity
+ for _, sel := range s.match {
+ newSpe := sel.Specificity()
+ if max.Less(newSpe) {
+ max = newSpe
}
- return !a(n)
}
+ return max
+}
+
+type containsPseudoClassSelector struct {
+ own bool
+ value string
+}
+
+func (s containsPseudoClassSelector) Match(n *html.Node) bool {
+ var text string
+ if s.own {
+ // matches nodes that directly contain the given text
+ text = strings.ToLower(nodeOwnText(n))
+ } else {
+ // matches nodes that contain the given text.
+ text = strings.ToLower(nodeText(n))
+ }
+ return strings.Contains(text, s.value)
+}
+
+func (s containsPseudoClassSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
+}
+
+type regexpPseudoClassSelector struct {
+ own bool
+ regexp *regexp.Regexp
+}
+
+func (s regexpPseudoClassSelector) Match(n *html.Node) bool {
+ var text string
+ if s.own {
+ // matches nodes whose text directly matches the specified regular expression
+ text = nodeOwnText(n)
+ } else {
+ // matches nodes whose text matches the specified regular expression
+ text = nodeText(n)
+ }
+ return s.regexp.MatchString(text)
}
// writeNodeText writes the text contained in n and its descendants to b.
@@ -325,221 +484,198 @@ func nodeOwnText(n *html.Node) string {
return b.String()
}
-// textSubstrSelector returns a selector that matches nodes that
-// contain the given text.
-func textSubstrSelector(val string) Selector {
- return func(n *html.Node) bool {
- text := strings.ToLower(nodeText(n))
- return strings.Contains(text, val)
- }
+func (s regexpPseudoClassSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
}
-// ownTextSubstrSelector returns a selector that matches nodes that
-// directly contain the given text
-func ownTextSubstrSelector(val string) Selector {
- return func(n *html.Node) bool {
- text := strings.ToLower(nodeOwnText(n))
- return strings.Contains(text, val)
- }
+type nthPseudoClassSelector struct {
+ a, b int
+ last, ofType bool
}
-// textRegexSelector returns a selector that matches nodes whose text matches
-// the specified regular expression
-func textRegexSelector(rx *regexp.Regexp) Selector {
- return func(n *html.Node) bool {
- return rx.MatchString(nodeText(n))
+func (s nthPseudoClassSelector) Match(n *html.Node) bool {
+ if s.a == 0 {
+ if s.last {
+ return simpleNthLastChildMatch(s.b, s.ofType, n)
+ } else {
+ return simpleNthChildMatch(s.b, s.ofType, n)
+ }
}
+ return nthChildMatch(s.a, s.b, s.last, s.ofType, n)
}
-// ownTextRegexSelector returns a selector that matches nodes whose text
-// directly matches the specified regular expression
-func ownTextRegexSelector(rx *regexp.Regexp) Selector {
- return func(n *html.Node) bool {
- return rx.MatchString(nodeOwnText(n))
+// nthChildMatch implements :nth-child(an+b).
+// If last is true, implements :nth-last-child instead.
+// If ofType is true, implements :nth-of-type instead.
+func nthChildMatch(a, b int, last, ofType bool, n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
}
-}
-// hasChildSelector returns a selector that matches elements
-// with a child that matches a.
-func hasChildSelector(a Selector) Selector {
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
- }
- return hasChildMatch(n, a)
+ parent := n.Parent
+ if parent == nil {
+ return false
}
-}
-// hasDescendantSelector returns a selector that matches elements
-// with any descendant that matches a.
-func hasDescendantSelector(a Selector) Selector {
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
+ if parent.Type == html.DocumentNode {
+ return false
+ }
+
+ i := -1
+ count := 0
+ for c := parent.FirstChild; c != nil; c = c.NextSibling {
+ if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) {
+ continue
+ }
+ count++
+ if c == n {
+ i = count
+ if !last {
+ break
+ }
}
- return hasDescendantMatch(n, a)
}
+
+ if i == -1 {
+ // This shouldn't happen, since n should always be one of its parent's children.
+ return false
+ }
+
+ if last {
+ i = count - i + 1
+ }
+
+ i -= b
+ if a == 0 {
+ return i == 0
+ }
+
+ return i%a == 0 && i/a >= 0
}
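+
+// Worked example: for :nth-child(2n+1) (a=2, b=1), an element that is its
+// parent's 5th element child has i = 5-1 = 4; 4%2 == 0 and 4/2 >= 0, so it
+// matches, as expected for an odd position.
+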
-// nthChildSelector returns a selector that implements :nth-child(an+b).
-// If last is true, implements :nth-last-child instead.
+// simpleNthChildMatch implements :nth-child(b).
// If ofType is true, implements :nth-of-type instead.
-func nthChildSelector(a, b int, last, ofType bool) Selector {
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
- }
+func simpleNthChildMatch(b int, ofType bool, n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
- parent := n.Parent
- if parent == nil {
- return false
- }
+ parent := n.Parent
+ if parent == nil {
+ return false
+ }
- if parent.Type == html.DocumentNode {
- return false
- }
+ if parent.Type == html.DocumentNode {
+ return false
+ }
- i := -1
- count := 0
- for c := parent.FirstChild; c != nil; c = c.NextSibling {
- if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) {
- continue
- }
- count++
- if c == n {
- i = count
- if !last {
- break
- }
- }
+ count := 0
+ for c := parent.FirstChild; c != nil; c = c.NextSibling {
+ if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
+ continue
}
-
- if i == -1 {
- // This shouldn't happen, since n should always be one of its parent's children.
+ count++
+ if c == n {
+ return count == b
+ }
+ if count >= b {
return false
}
+ }
+ return false
+}
- if last {
- i = count - i + 1
- }
+// simpleNthLastChildMatch implements :nth-last-child(b).
+// If ofType is true, implements :nth-last-of-type instead.
+func simpleNthLastChildMatch(b int, ofType bool, n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
- i -= b
- if a == 0 {
- return i == 0
- }
+ parent := n.Parent
+ if parent == nil {
+ return false
+ }
- return i%a == 0 && i/a >= 0
+ if parent.Type == html.DocumentNode {
+ return false
}
-}
-// simpleNthChildSelector returns a selector that implements :nth-child(b).
-// If ofType is true, implements :nth-of-type instead.
-func simpleNthChildSelector(b int, ofType bool) Selector {
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
+ count := 0
+ for c := parent.LastChild; c != nil; c = c.PrevSibling {
+ if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
+ continue
}
-
- parent := n.Parent
- if parent == nil {
- return false
+ count++
+ if c == n {
+ return count == b
}
-
- if parent.Type == html.DocumentNode {
+ if count >= b {
return false
}
-
- count := 0
- for c := parent.FirstChild; c != nil; c = c.NextSibling {
- if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
- continue
- }
- count++
- if c == n {
- return count == b
- }
- if count >= b {
- return false
- }
- }
- return false
}
+ return false
}
-// simpleNthLastChildSelector returns a selector that implements
-// :nth-last-child(b). If ofType is true, implements :nth-last-of-type
-// instead.
-func simpleNthLastChildSelector(b int, ofType bool) Selector {
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
- }
+// Specificity for nth-child pseudo-class.
+// Does not support a list of selectors
+func (s nthPseudoClassSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
+}
- parent := n.Parent
- if parent == nil {
- return false
- }
+type onlyChildPseudoClassSelector struct {
+ ofType bool
+}
- if parent.Type == html.DocumentNode {
- return false
- }
+// Match implements :only-child.
+// If `ofType` is true, it implements :only-of-type instead.
+func (s onlyChildPseudoClassSelector) Match(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
- count := 0
- for c := parent.LastChild; c != nil; c = c.PrevSibling {
- if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
- continue
- }
- count++
- if c == n {
- return count == b
- }
- if count >= b {
- return false
- }
- }
+ parent := n.Parent
+ if parent == nil {
return false
}
-}
-// onlyChildSelector returns a selector that implements :only-child.
-// If ofType is true, it implements :only-of-type instead.
-func onlyChildSelector(ofType bool) Selector {
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
- }
+ if parent.Type == html.DocumentNode {
+ return false
+ }
- parent := n.Parent
- if parent == nil {
- return false
+ count := 0
+ for c := parent.FirstChild; c != nil; c = c.NextSibling {
+ if (c.Type != html.ElementNode) || (s.ofType && c.Data != n.Data) {
+ continue
}
-
- if parent.Type == html.DocumentNode {
+ count++
+ if count > 1 {
return false
}
+ }
- count := 0
- for c := parent.FirstChild; c != nil; c = c.NextSibling {
- if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) {
- continue
- }
- count++
- if count > 1 {
- return false
- }
- }
+ return count == 1
+}
- return count == 1
- }
+func (s onlyChildPseudoClassSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
}
-// inputSelector is a Selector that matches input, select, textarea and button elements.
-func inputSelector(n *html.Node) bool {
+type inputPseudoClassSelector struct{}
+
+// Matches input, select, textarea and button elements.
+func (s inputPseudoClassSelector) Match(n *html.Node) bool {
return n.Type == html.ElementNode && (n.Data == "input" || n.Data == "select" || n.Data == "textarea" || n.Data == "button")
}
-// emptyElementSelector is a Selector that matches empty elements.
-func emptyElementSelector(n *html.Node) bool {
+func (s inputPseudoClassSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
+}
+
+type emptyElementPseudoClassSelector struct{}
+
+// Matches empty elements.
+func (s emptyElementPseudoClassSelector) Match(n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
@@ -554,69 +690,144 @@ func emptyElementSelector(n *html.Node) bool {
return true
}
-// descendantSelector returns a Selector that matches an element if
-// it matches d and has an ancestor that matches a.
-func descendantSelector(a, d Selector) Selector {
- return func(n *html.Node) bool {
- if !d(n) {
- return false
- }
+func (s emptyElementPseudoClassSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
+}
- for p := n.Parent; p != nil; p = p.Parent {
- if a(p) {
- return true
- }
- }
+type rootPseudoClassSelector struct{}
+// Match implements :root
+func (s rootPseudoClassSelector) Match(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ if n.Parent == nil {
return false
}
+ return n.Parent.Type == html.DocumentNode
}
-// childSelector returns a Selector that matches an element if
-// it matches d and its parent matches a.
-func childSelector(a, d Selector) Selector {
- return func(n *html.Node) bool {
- return d(n) && n.Parent != nil && a(n.Parent)
- }
+func (s rootPseudoClassSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
}
-// siblingSelector returns a Selector that matches an element
-// if it matches s2 and in is preceded by an element that matches s1.
-// If adjacent is true, the sibling must be immediately before the element.
-func siblingSelector(s1, s2 Selector, adjacent bool) Selector {
- return func(n *html.Node) bool {
- if !s2(n) {
- return false
- }
+type compoundSelector struct {
+ selectors []Sel
+}
- if adjacent {
- for n = n.PrevSibling; n != nil; n = n.PrevSibling {
- if n.Type == html.TextNode || n.Type == html.CommentNode {
- continue
- }
- return s1(n)
- }
+// Matches elements if each sub-selector matches.
+func (t compoundSelector) Match(n *html.Node) bool {
+ if len(t.selectors) == 0 {
+ return n.Type == html.ElementNode
+ }
+
+ for _, sel := range t.selectors {
+ if !sel.Match(n) {
return false
}
+ }
+ return true
+}
- // Walk backwards looking for element that matches s1
- for c := n.PrevSibling; c != nil; c = c.PrevSibling {
- if s1(c) {
- return true
- }
- }
+func (s compoundSelector) Specificity() Specificity {
+ var out Specificity
+ for _, sel := range s.selectors {
+ out = out.Add(sel.Specificity())
+ }
+ return out
+}
+
+type combinedSelector struct {
+ first Sel
+ combinator byte
+ second Sel
+}
+
+func (t combinedSelector) Match(n *html.Node) bool {
+ if t.first == nil {
+ return false // maybe we should panic
+ }
+ switch t.combinator {
+ case 0:
+ return t.first.Match(n)
+ case ' ':
+ return descendantMatch(t.first, t.second, n)
+ case '>':
+ return childMatch(t.first, t.second, n)
+ case '+':
+ return siblingMatch(t.first, t.second, true, n)
+ case '~':
+ return siblingMatch(t.first, t.second, false, n)
+ default:
+ panic("unknown combinator")
+ }
+}
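+
+// The combinator bytes correspond to the CSS forms: ' ' to "a d"
+// (descendant), '>' to "a > d" (child), '+' to "a + d" (adjacent sibling),
+// '~' to "a ~ d" (general sibling); 0 marks a lone selector.
+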
+// matches an element if it matches d and has an ancestor that matches a.
+func descendantMatch(a, d Matcher, n *html.Node) bool {
+ if !d.Match(n) {
return false
}
+
+ for p := n.Parent; p != nil; p = p.Parent {
+ if a.Match(p) {
+ return true
+ }
+ }
+
+ return false
}
-// rootSelector implements :root
-func rootSelector(n *html.Node) bool {
- if n.Type != html.ElementNode {
+// matches an element if it matches d and its parent matches a.
+func childMatch(a, d Matcher, n *html.Node) bool {
+ return d.Match(n) && n.Parent != nil && a.Match(n.Parent)
+}
+
+// matches an element if it matches s2 and is preceded by an element that matches s1.
+// If adjacent is true, the sibling must be immediately before the element.
+func siblingMatch(s1, s2 Matcher, adjacent bool, n *html.Node) bool {
+ if !s2.Match(n) {
return false
}
- if n.Parent == nil {
+
+ if adjacent {
+ for n = n.PrevSibling; n != nil; n = n.PrevSibling {
+ if n.Type == html.TextNode || n.Type == html.CommentNode {
+ continue
+ }
+ return s1.Match(n)
+ }
return false
}
- return n.Parent.Type == html.DocumentNode
+
+ // Walk backwards looking for element that matches s1
+ for c := n.PrevSibling; c != nil; c = c.PrevSibling {
+ if s1.Match(c) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (s combinedSelector) Specificity() Specificity {
+ spec := s.first.Specificity()
+ if s.second != nil {
+ spec = spec.Add(s.second.Specificity())
+ }
+ return spec
+}
+
+// A SelectorGroup is a list of selectors, which matches if any of the
+// individual selectors matches.
+type SelectorGroup []Sel
+
+// Match returns true if the node matches one of the single selectors.
+func (s SelectorGroup) Match(n *html.Node) bool {
+ for _, sel := range s {
+ if sel.Match(n) {
+ return true
+ }
+ }
+ return false
}
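+
+// For instance, ParseGroup("div, span") yields a two-element SelectorGroup
+// that matches any div or span element.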
diff --git a/vendor/github.com/andybalholm/cascadia/specificity.go b/vendor/github.com/andybalholm/cascadia/specificity.go
new file mode 100644
index 00000000000..8db864f9beb
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/specificity.go
@@ -0,0 +1,26 @@
+package cascadia
+
+// Specificity is the CSS specificity as defined in
+// https://www.w3.org/TR/selectors/#specificity-rules
+// with the convention Specificity = [A,B,C].
+type Specificity [3]int
+
+// Less returns true if s < other (strictly), false otherwise.
+func (s Specificity) Less(other Specificity) bool {
+ for i := range s {
+ if s[i] < other[i] {
+ return true
+ }
+ if s[i] > other[i] {
+ return false
+ }
+ }
+ return false
+}
+
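+// Add returns the component-wise sum of s and other.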
+func (s Specificity) Add(other Specificity) Specificity {
+ for i, sp := range other {
+ s[i] += sp
+ }
+ return s
+}
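+
+// A short illustration of how these compose: for "#nav li.item", the id
+// contributes Specificity{1, 0, 0}, the class {0, 1, 0} and the tag name
+// {0, 0, 1}; Add yields the total {1, 1, 1}, and Less orders totals
+// lexicographically, so Specificity{1, 0, 0}.Less(Specificity{1, 1, 1})
+// is true.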
diff --git a/vendor/github.com/golang/gddo/LICENSE b/vendor/github.com/golang/gddo/LICENSE
deleted file mode 100644
index 65d761bc9f2..00000000000
--- a/vendor/github.com/golang/gddo/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2013 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/gddo/httputil/buster.go b/vendor/github.com/golang/gddo/httputil/buster.go
deleted file mode 100644
index beab151a4bd..00000000000
--- a/vendor/github.com/golang/gddo/httputil/buster.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-package httputil
-
-import (
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strings"
- "sync"
-)
-
-type busterWriter struct {
- headerMap http.Header
- status int
- io.Writer
-}
-
-func (bw *busterWriter) Header() http.Header {
- return bw.headerMap
-}
-
-func (bw *busterWriter) WriteHeader(status int) {
- bw.status = status
-}
-
-// CacheBusters maintains a cache of cache busting tokens for static resources served by Handler.
-type CacheBusters struct {
- Handler http.Handler
-
- mu sync.Mutex
- tokens map[string]string
-}
-
-func sanitizeTokenRune(r rune) rune {
- if r <= ' ' || r >= 127 {
- return -1
- }
- // Convert percent encoding reserved characters to '-'.
- if strings.ContainsRune("!#$&'()*+,/:;=?@[]", r) {
- return '-'
- }
- return r
-}
-
-// Get returns the cache busting token for path. If the token is not already
-// cached, Get issues a HEAD request on handler and uses the response ETag and
-// Last-Modified headers to compute a token.
-func (cb *CacheBusters) Get(path string) string {
- cb.mu.Lock()
- if cb.tokens == nil {
- cb.tokens = make(map[string]string)
- }
- token, ok := cb.tokens[path]
- cb.mu.Unlock()
- if ok {
- return token
- }
-
- w := busterWriter{
- Writer: ioutil.Discard,
- headerMap: make(http.Header),
- }
- r := &http.Request{URL: &url.URL{Path: path}, Method: "HEAD"}
- cb.Handler.ServeHTTP(&w, r)
-
- if w.status == 200 {
- token = w.headerMap.Get("Etag")
- if token == "" {
- token = w.headerMap.Get("Last-Modified")
- }
- token = strings.Trim(token, `" `)
- token = strings.Map(sanitizeTokenRune, token)
- }
-
- cb.mu.Lock()
- cb.tokens[path] = token
- cb.mu.Unlock()
-
- return token
-}
-
-// AppendQueryParam appends the token as a query parameter to path.
-func (cb *CacheBusters) AppendQueryParam(path string, name string) string {
- token := cb.Get(path)
- if token == "" {
- return path
- }
- return path + "?" + name + "=" + token
-}
diff --git a/vendor/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/golang/gddo/httputil/header/header.go
deleted file mode 100644
index 0f1572e3f95..00000000000
--- a/vendor/github.com/golang/gddo/httputil/header/header.go
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-// Package header provides functions for parsing HTTP headers.
-package header
-
-import (
- "net/http"
- "strings"
- "time"
-)
-
-// Octet types from RFC 2616.
-var octetTypes [256]octetType
-
-type octetType byte
-
-const (
- isToken octetType = 1 << iota
- isSpace
-)
-
-func init() {
- // OCTET =
- // CHAR =
- // CTL =
- // CR =
- // LF =
- // SP =
- // HT =
- // <"> =
- // CRLF = CR LF
- // LWS = [CRLF] 1*( SP | HT )
- // TEXT =
- // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
- // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
- // token = 1*
- // qdtext = >
-
- for c := 0; c < 256; c++ {
- var t octetType
- isCtl := c <= 31 || c == 127
- isChar := 0 <= c && c <= 127
- isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
- if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
- t |= isSpace
- }
- if isChar && !isCtl && !isSeparator {
- t |= isToken
- }
- octetTypes[c] = t
- }
-}
-
-// Copy returns a shallow copy of the header.
-func Copy(header http.Header) http.Header {
- h := make(http.Header)
- for k, vs := range header {
- h[k] = vs
- }
- return h
-}
-
-var timeLayouts = []string{"Mon, 02 Jan 2006 15:04:05 GMT", time.RFC850, time.ANSIC}
-
-// ParseTime parses the header as time. The zero value is returned if the
-// header is not present or there is an error parsing the
-// header.
-func ParseTime(header http.Header, key string) time.Time {
- if s := header.Get(key); s != "" {
- for _, layout := range timeLayouts {
- if t, err := time.Parse(layout, s); err == nil {
- return t.UTC()
- }
- }
- }
- return time.Time{}
-}
-
-// ParseList parses a comma separated list of values. Commas are ignored in
-// quoted strings. Quoted values are not unescaped or unquoted. Whitespace is
-// trimmed.
-func ParseList(header http.Header, key string) []string {
- var result []string
- for _, s := range header[http.CanonicalHeaderKey(key)] {
- begin := 0
- end := 0
- escape := false
- quote := false
- for i := 0; i < len(s); i++ {
- b := s[i]
- switch {
- case escape:
- escape = false
- end = i + 1
- case quote:
- switch b {
- case '\\':
- escape = true
- case '"':
- quote = false
- }
- end = i + 1
- case b == '"':
- quote = true
- end = i + 1
- case octetTypes[b]&isSpace != 0:
- if begin == end {
- begin = i + 1
- end = begin
- }
- case b == ',':
- if begin < end {
- result = append(result, s[begin:end])
- }
- begin = i + 1
- end = begin
- default:
- end = i + 1
- }
- }
- if begin < end {
- result = append(result, s[begin:end])
- }
- }
- return result
-}
-
-// ParseValueAndParams parses a comma separated list of values with optional
-// semicolon separated name-value pairs. Content-Type and Content-Disposition
-// headers are in this format.
-func ParseValueAndParams(header http.Header, key string) (value string, params map[string]string) {
- params = make(map[string]string)
- s := header.Get(key)
- value, s = expectTokenSlash(s)
- if value == "" {
- return
- }
- value = strings.ToLower(value)
- s = skipSpace(s)
- for strings.HasPrefix(s, ";") {
- var pkey string
- pkey, s = expectToken(skipSpace(s[1:]))
- if pkey == "" {
- return
- }
- if !strings.HasPrefix(s, "=") {
- return
- }
- var pvalue string
- pvalue, s = expectTokenOrQuoted(s[1:])
- if pvalue == "" {
- return
- }
- pkey = strings.ToLower(pkey)
- params[pkey] = pvalue
- s = skipSpace(s)
- }
- return
-}
-
-// AcceptSpec describes an Accept* header.
-type AcceptSpec struct {
- Value string
- Q float64
-}
-
-// ParseAccept parses Accept* headers.
-func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
-loop:
- for _, s := range header[key] {
- for {
- var spec AcceptSpec
- spec.Value, s = expectTokenSlash(s)
- if spec.Value == "" {
- continue loop
- }
- spec.Q = 1.0
- s = skipSpace(s)
- if strings.HasPrefix(s, ";") {
- s = skipSpace(s[1:])
- if !strings.HasPrefix(s, "q=") {
- continue loop
- }
- spec.Q, s = expectQuality(s[2:])
- if spec.Q < 0.0 {
- continue loop
- }
- }
- specs = append(specs, spec)
- s = skipSpace(s)
- if !strings.HasPrefix(s, ",") {
- continue loop
- }
- s = skipSpace(s[1:])
- }
- }
- return
-}
-
-func skipSpace(s string) (rest string) {
- i := 0
- for ; i < len(s); i++ {
- if octetTypes[s[i]]&isSpace == 0 {
- break
- }
- }
- return s[i:]
-}
-
-func expectToken(s string) (token, rest string) {
- i := 0
- for ; i < len(s); i++ {
- if octetTypes[s[i]]&isToken == 0 {
- break
- }
- }
- return s[:i], s[i:]
-}
-
-func expectTokenSlash(s string) (token, rest string) {
- i := 0
- for ; i < len(s); i++ {
- b := s[i]
- if (octetTypes[b]&isToken == 0) && b != '/' {
- break
- }
- }
- return s[:i], s[i:]
-}
-
-func expectQuality(s string) (q float64, rest string) {
- switch {
- case len(s) == 0:
- return -1, ""
- case s[0] == '0':
- q = 0
- case s[0] == '1':
- q = 1
- default:
- return -1, ""
- }
- s = s[1:]
- if !strings.HasPrefix(s, ".") {
- return q, s
- }
- s = s[1:]
- i := 0
- n := 0
- d := 1
- for ; i < len(s); i++ {
- b := s[i]
- if b < '0' || b > '9' {
- break
- }
- n = n*10 + int(b) - '0'
- d *= 10
- }
- return q + float64(n)/float64(d), s[i:]
-}
-
-func expectTokenOrQuoted(s string) (value string, rest string) {
- if !strings.HasPrefix(s, "\"") {
- return expectToken(s)
- }
- s = s[1:]
- for i := 0; i < len(s); i++ {
- switch s[i] {
- case '"':
- return s[:i], s[i+1:]
- case '\\':
- p := make([]byte, len(s)-1)
- j := copy(p, s[:i])
- escape := true
- for i = i + 1; i < len(s); i++ {
- b := s[i]
- switch {
- case escape:
- escape = false
- p[j] = b
- j++
- case b == '\\':
- escape = true
- case b == '"':
- return string(p[:j]), s[i+1:]
- default:
- p[j] = b
- j++
- }
- }
- return "", ""
- }
- }
- return "", ""
-}
diff --git a/vendor/github.com/golang/gddo/httputil/httputil.go b/vendor/github.com/golang/gddo/httputil/httputil.go
deleted file mode 100644
index a03717c073f..00000000000
--- a/vendor/github.com/golang/gddo/httputil/httputil.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-// Package httputil is a toolkit for the Go net/http package.
-package httputil
-
-import (
- "net"
- "net/http"
-)
-
-// StripPort removes the port specification from an address.
-func StripPort(s string) string {
- if h, _, err := net.SplitHostPort(s); err == nil {
- s = h
- }
- return s
-}
-
-// Error defines a type for a function that accepts a ResponseWriter for
-// a Request with the HTTP status code and error.
-type Error func(w http.ResponseWriter, r *http.Request, status int, err error)
diff --git a/vendor/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/golang/gddo/httputil/negotiate.go
deleted file mode 100644
index 6af3e4ca614..00000000000
--- a/vendor/github.com/golang/gddo/httputil/negotiate.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-package httputil
-
-import (
- "github.com/golang/gddo/httputil/header"
- "net/http"
- "strings"
-)
-
-// NegotiateContentEncoding returns the best offered content encoding for the
-// request's Accept-Encoding header. If two offers match with equal weight and
-// then the offer earlier in the list is preferred. If no offers are
-// acceptable, then "" is returned.
-func NegotiateContentEncoding(r *http.Request, offers []string) string {
- bestOffer := "identity"
- bestQ := -1.0
- specs := header.ParseAccept(r.Header, "Accept-Encoding")
- for _, offer := range offers {
- for _, spec := range specs {
- if spec.Q > bestQ &&
- (spec.Value == "*" || spec.Value == offer) {
- bestQ = spec.Q
- bestOffer = offer
- }
- }
- }
- if bestQ == 0 {
- bestOffer = ""
- }
- return bestOffer
-}
-
-// NegotiateContentType returns the best offered content type for the request's
-// Accept header. If two offers match with equal weight, then the more specific
-// offer is preferred. For example, text/* trumps */*. If two offers match
-// with equal weight and specificity, then the offer earlier in the list is
-// preferred. If no offers match, then defaultOffer is returned.
-func NegotiateContentType(r *http.Request, offers []string, defaultOffer string) string {
- bestOffer := defaultOffer
- bestQ := -1.0
- bestWild := 3
- specs := header.ParseAccept(r.Header, "Accept")
- for _, offer := range offers {
- for _, spec := range specs {
- switch {
- case spec.Q == 0.0:
- // ignore
- case spec.Q < bestQ:
- // better match found
- case spec.Value == "*/*":
- if spec.Q > bestQ || bestWild > 2 {
- bestQ = spec.Q
- bestWild = 2
- bestOffer = offer
- }
- case strings.HasSuffix(spec.Value, "/*"):
- if strings.HasPrefix(offer, spec.Value[:len(spec.Value)-1]) &&
- (spec.Q > bestQ || bestWild > 1) {
- bestQ = spec.Q
- bestWild = 1
- bestOffer = offer
- }
- default:
- if spec.Value == offer &&
- (spec.Q > bestQ || bestWild > 0) {
- bestQ = spec.Q
- bestWild = 0
- bestOffer = offer
- }
- }
- }
- }
- return bestOffer
-}
diff --git a/vendor/github.com/golang/gddo/httputil/respbuf.go b/vendor/github.com/golang/gddo/httputil/respbuf.go
deleted file mode 100644
index 051af2114b1..00000000000
--- a/vendor/github.com/golang/gddo/httputil/respbuf.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-package httputil
-
-import (
- "bytes"
- "net/http"
- "strconv"
-)
-
-// ResponseBuffer is the current response being composed by its owner.
-// It implements http.ResponseWriter and io.WriterTo.
-type ResponseBuffer struct {
- buf bytes.Buffer
- status int
- header http.Header
-}
-
-// Write implements the http.ResponseWriter interface.
-func (rb *ResponseBuffer) Write(p []byte) (int, error) {
- return rb.buf.Write(p)
-}
-
-// WriteHeader implements the http.ResponseWriter interface.
-func (rb *ResponseBuffer) WriteHeader(status int) {
- rb.status = status
-}
-
-// Header implements the http.ResponseWriter interface.
-func (rb *ResponseBuffer) Header() http.Header {
- if rb.header == nil {
- rb.header = make(http.Header)
- }
- return rb.header
-}
-
-// WriteTo implements the io.WriterTo interface.
-func (rb *ResponseBuffer) WriteTo(w http.ResponseWriter) error {
- for k, v := range rb.header {
- w.Header()[k] = v
- }
- if rb.buf.Len() > 0 {
- w.Header().Set("Content-Length", strconv.Itoa(rb.buf.Len()))
- }
- if rb.status != 0 {
- w.WriteHeader(rb.status)
- }
- if rb.buf.Len() > 0 {
- if _, err := w.Write(rb.buf.Bytes()); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/golang/gddo/httputil/static.go b/vendor/github.com/golang/gddo/httputil/static.go
deleted file mode 100644
index 6610dde0da8..00000000000
--- a/vendor/github.com/golang/gddo/httputil/static.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-package httputil
-
-import (
- "bytes"
- "crypto/sha1"
- "errors"
- "fmt"
- "github.com/golang/gddo/httputil/header"
- "io"
- "io/ioutil"
- "mime"
- "net/http"
- "os"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
- "time"
-)
-
-// StaticServer serves static files.
-type StaticServer struct {
- // Dir specifies the location of the directory containing the files to serve.
- Dir string
-
- // MaxAge specifies the maximum age for the cache control and expiration
- // headers.
- MaxAge time.Duration
-
- // Error specifies the function used to generate error responses. If Error
- // is nil, then http.Error is used to generate error responses.
- Error Error
-
- // MIMETypes is a map from file extensions to MIME types.
- MIMETypes map[string]string
-
- mu sync.Mutex
- etags map[string]string
-}
-
-func (ss *StaticServer) resolve(fname string) string {
- if path.IsAbs(fname) {
- panic("Absolute path not allowed when creating a StaticServer handler")
- }
- dir := ss.Dir
- if dir == "" {
- dir = "."
- }
- fname = filepath.FromSlash(fname)
- return filepath.Join(dir, fname)
-}
-
-func (ss *StaticServer) mimeType(fname string) string {
- ext := path.Ext(fname)
- var mimeType string
- if ss.MIMETypes != nil {
- mimeType = ss.MIMETypes[ext]
- }
- if mimeType == "" {
- mimeType = mime.TypeByExtension(ext)
- }
- if mimeType == "" {
- mimeType = "application/octet-stream"
- }
- return mimeType
-}
-
-func (ss *StaticServer) openFile(fname string) (io.ReadCloser, int64, string, error) {
- f, err := os.Open(fname)
- if err != nil {
- return nil, 0, "", err
- }
- fi, err := f.Stat()
- if err != nil {
- f.Close()
- return nil, 0, "", err
- }
- const modeType = os.ModeDir | os.ModeSymlink | os.ModeNamedPipe | os.ModeSocket | os.ModeDevice
- if fi.Mode()&modeType != 0 {
- f.Close()
- return nil, 0, "", errors.New("not a regular file")
- }
- return f, fi.Size(), ss.mimeType(fname), nil
-}
-
-// FileHandler returns a handler that serves a single file. The file is
-// specified by a slash separated path relative to the static server's Dir
-// field.
-func (ss *StaticServer) FileHandler(fileName string) http.Handler {
- id := fileName
- fileName = ss.resolve(fileName)
- return &staticHandler{
- ss: ss,
- id: func(_ string) string { return id },
- open: func(_ string) (io.ReadCloser, int64, string, error) { return ss.openFile(fileName) },
- }
-}
-
-// DirectoryHandler returns a handler that serves files from a directory tree.
-// The directory is specified by a slash separated path relative to the static
-// server's Dir field.
-func (ss *StaticServer) DirectoryHandler(prefix, dirName string) http.Handler {
- if !strings.HasSuffix(prefix, "/") {
- prefix += "/"
- }
- idBase := dirName
- dirName = ss.resolve(dirName)
- return &staticHandler{
- ss: ss,
- id: func(p string) string {
- if !strings.HasPrefix(p, prefix) {
- return "."
- }
- return path.Join(idBase, p[len(prefix):])
- },
- open: func(p string) (io.ReadCloser, int64, string, error) {
- if !strings.HasPrefix(p, prefix) {
- return nil, 0, "", errors.New("request url does not match directory prefix")
- }
- p = p[len(prefix):]
- return ss.openFile(filepath.Join(dirName, filepath.FromSlash(p)))
- },
- }
-}
-
-// FilesHandler returns a handler that serves the concatentation of the
-// specified files. The files are specified by slash separated paths relative
-// to the static server's Dir field.
-func (ss *StaticServer) FilesHandler(fileNames ...string) http.Handler {
-
- // todo: cache concatenated files on disk and serve from there.
-
- mimeType := ss.mimeType(fileNames[0])
- var buf []byte
- var openErr error
-
- for _, fileName := range fileNames {
- p, err := ioutil.ReadFile(ss.resolve(fileName))
- if err != nil {
- openErr = err
- buf = nil
- break
- }
- buf = append(buf, p...)
- }
-
- id := strings.Join(fileNames, " ")
-
- return &staticHandler{
- ss: ss,
- id: func(_ string) string { return id },
- open: func(p string) (io.ReadCloser, int64, string, error) {
- return ioutil.NopCloser(bytes.NewReader(buf)), int64(len(buf)), mimeType, openErr
- },
- }
-}
-
-type staticHandler struct {
- id func(fname string) string
- open func(p string) (io.ReadCloser, int64, string, error)
- ss *StaticServer
-}
-
-func (h *staticHandler) error(w http.ResponseWriter, r *http.Request, status int, err error) {
- http.Error(w, http.StatusText(status), status)
-}
-
-func (h *staticHandler) etag(p string) (string, error) {
- id := h.id(p)
-
- h.ss.mu.Lock()
- if h.ss.etags == nil {
- h.ss.etags = make(map[string]string)
- }
- etag := h.ss.etags[id]
- h.ss.mu.Unlock()
-
- if etag != "" {
- return etag, nil
- }
-
- // todo: if a concurrent goroutine is calculating the hash, then wait for
- // it instead of computing it again here.
-
- rc, _, _, err := h.open(p)
- if err != nil {
- return "", err
- }
-
- defer rc.Close()
-
- w := sha1.New()
- _, err = io.Copy(w, rc)
- if err != nil {
- return "", err
- }
-
- etag = fmt.Sprintf(`"%x"`, w.Sum(nil))
-
- h.ss.mu.Lock()
- h.ss.etags[id] = etag
- h.ss.mu.Unlock()
-
- return etag, nil
-}
-
-func (h *staticHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- p := path.Clean(r.URL.Path)
- if p != r.URL.Path {
- http.Redirect(w, r, p, 301)
- return
- }
-
- etag, err := h.etag(p)
- if err != nil {
- h.error(w, r, http.StatusNotFound, err)
- return
- }
-
- maxAge := h.ss.MaxAge
- if maxAge == 0 {
- maxAge = 24 * time.Hour
- }
- if r.FormValue("v") != "" {
- maxAge = 365 * 24 * time.Hour
- }
-
- cacheControl := fmt.Sprintf("public, max-age=%d", maxAge/time.Second)
-
- for _, e := range header.ParseList(r.Header, "If-None-Match") {
- if e == etag {
- w.Header().Set("Cache-Control", cacheControl)
- w.Header().Set("Etag", etag)
- w.WriteHeader(http.StatusNotModified)
- return
- }
- }
-
- rc, cl, ct, err := h.open(p)
- if err != nil {
- h.error(w, r, http.StatusNotFound, err)
- return
- }
- defer rc.Close()
-
- w.Header().Set("Cache-Control", cacheControl)
- w.Header().Set("Etag", etag)
- if ct != "" {
- w.Header().Set("Content-Type", ct)
- }
- if cl != 0 {
- w.Header().Set("Content-Length", strconv.FormatInt(cl, 10))
- }
- w.WriteHeader(http.StatusOK)
- if r.Method != "HEAD" {
- io.Copy(w, rc)
- }
-}
diff --git a/vendor/github.com/golang/gddo/httputil/transport.go b/vendor/github.com/golang/gddo/httputil/transport.go
deleted file mode 100644
index fdad3b47809..00000000000
--- a/vendor/github.com/golang/gddo/httputil/transport.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-// This file implements a http.RoundTripper that authenticates
-// requests issued against api.github.com endpoint.
-
-package httputil
-
-import (
- "net/http"
- "net/url"
-)
-
-// AuthTransport is an implementation of http.RoundTripper that authenticates
-// with the GitHub API.
-//
-// When both a token and client credentials are set, the latter is preferred.
-type AuthTransport struct {
- UserAgent string
- GithubToken string
- GithubClientID string
- GithubClientSecret string
- Base http.RoundTripper
-}
-
-// RoundTrip implements the http.RoundTripper interface.
-func (t *AuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- var reqCopy *http.Request
- if t.UserAgent != "" {
- reqCopy = copyRequest(req)
- reqCopy.Header.Set("User-Agent", t.UserAgent)
- }
- if req.URL.Host == "api.github.com" && req.URL.Scheme == "https" {
- switch {
- case t.GithubClientID != "" && t.GithubClientSecret != "":
- if reqCopy == nil {
- reqCopy = copyRequest(req)
- }
- if reqCopy.URL.RawQuery == "" {
- reqCopy.URL.RawQuery = "client_id=" + t.GithubClientID + "&client_secret=" + t.GithubClientSecret
- } else {
- reqCopy.URL.RawQuery += "&client_id=" + t.GithubClientID + "&client_secret=" + t.GithubClientSecret
- }
- case t.GithubToken != "":
- if reqCopy == nil {
- reqCopy = copyRequest(req)
- }
- reqCopy.Header.Set("Authorization", "token "+t.GithubToken)
- }
- }
- if reqCopy != nil {
- return t.base().RoundTrip(reqCopy)
- }
- return t.base().RoundTrip(req)
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t *AuthTransport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(req *http.Request)
- }
- if cr, ok := t.base().(canceler); ok {
- cr.CancelRequest(req)
- }
-}
-
-func (t *AuthTransport) base() http.RoundTripper {
- if t.Base != nil {
- return t.Base
- }
- return http.DefaultTransport
-}
-
-func copyRequest(req *http.Request) *http.Request {
- req2 := new(http.Request)
- *req2 = *req
- req2.URL = new(url.URL)
- *req2.URL = *req.URL
- req2.Header = make(http.Header, len(req.Header))
- for k, s := range req.Header {
- req2.Header[k] = append([]string(nil), s...)
- }
- return req2
-}
diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE
index 74487567632..1eb75ef68e4 100644
--- a/vendor/github.com/klauspost/compress/LICENSE
+++ b/vendor/github.com/klauspost/compress/LICENSE
@@ -1,4 +1,5 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2019 Klaus Post. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go
index b9db204f59d..f65eb3909cf 100644
--- a/vendor/github.com/klauspost/compress/fse/bitreader.go
+++ b/vendor/github.com/klauspost/compress/fse/bitreader.go
@@ -6,6 +6,7 @@
package fse
import (
+ "encoding/binary"
"errors"
"io"
)
@@ -34,8 +35,12 @@ func (b *bitReader) init(in []byte) error {
}
b.bitsRead = 64
b.value = 0
- b.fill()
- b.fill()
+ if len(in) >= 8 {
+ b.fillFastStart()
+ } else {
+ b.fill()
+ b.fill()
+ }
b.bitsRead += 8 - uint8(highBits(uint32(v)))
return nil
}
@@ -63,8 +68,9 @@ func (b *bitReader) fillFast() {
if b.bitsRead < 32 {
return
}
- // Do single re-slice to avoid bounds checks.
- v := b.in[b.off-4 : b.off]
+ // 2 bounds checks.
+ v := b.in[b.off-4:]
+ v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.bitsRead -= 32
@@ -77,7 +83,8 @@ func (b *bitReader) fill() {
return
}
if b.off > 4 {
- v := b.in[b.off-4 : b.off]
+ v := b.in[b.off-4:]
+ v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.bitsRead -= 32
@@ -91,9 +98,17 @@ func (b *bitReader) fill() {
}
}
+// fillFastStart assumes the bitReader is empty and there are at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+ // Do single re-slice to avoid bounds checks.
+ b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.bitsRead = 0
+ b.off -= 8
+}
+
// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
- return b.off == 0 && b.bitsRead >= 64
+ return b.bitsRead >= 64 && b.off == 0
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go
index f228a46cdf6..abade2d6052 100644
--- a/vendor/github.com/klauspost/compress/fse/bytereader.go
+++ b/vendor/github.com/klauspost/compress/fse/bytereader.go
@@ -25,19 +25,10 @@ func (b *byteReader) advance(n uint) {
b.off += int(n)
}
-// Int32 returns a little endian int32 starting at current offset.
-func (b byteReader) Int32() int32 {
- b2 := b.b[b.off : b.off+4 : b.off+4]
- v3 := int32(b2[3])
- v2 := int32(b2[2])
- v1 := int32(b2[1])
- v0 := int32(b2[0])
- return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
-}
-
// Uint32 returns a little endian uint32 starting at current offset.
func (b byteReader) Uint32() uint32 {
- b2 := b.b[b.off : b.off+4 : b.off+4]
+ b2 := b.b[b.off:]
+ b2 = b2[:4]
v3 := uint32(b2[3])
v2 := uint32(b2[2])
v1 := uint32(b2[1])
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go
index b69237c9b8f..0d31f5ebc9a 100644
--- a/vendor/github.com/klauspost/compress/fse/compress.go
+++ b/vendor/github.com/klauspost/compress/fse/compress.go
@@ -301,7 +301,7 @@ func (s *Scratch) writeCount() error {
out[outP+1] = byte(bitStream >> 8)
outP += (bitCount + 7) / 8
- if uint16(charnum) > s.symbolLen {
+ if charnum > s.symbolLen {
return errors.New("internal error: charnum > s.symbolLen")
}
s.Out = out[:outP]
@@ -331,7 +331,7 @@ type cTable struct {
func (s *Scratch) allocCtable() {
tableSize := 1 << s.actualTableLog
// get tableSymbol that is big enough.
- if cap(s.ct.tableSymbol) < int(tableSize) {
+ if cap(s.ct.tableSymbol) < tableSize {
s.ct.tableSymbol = make([]byte, tableSize)
}
s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
@@ -565,8 +565,8 @@ func (s *Scratch) normalizeCount2() error {
distributed uint32
total = uint32(s.br.remain())
tableLog = s.actualTableLog
- lowThreshold = uint32(total >> tableLog)
- lowOne = uint32((total * 3) >> (tableLog + 1))
+ lowThreshold = total >> tableLog
+ lowOne = (total * 3) >> (tableLog + 1)
)
for i, cnt := range s.count[:s.symbolLen] {
if cnt == 0 {
@@ -591,7 +591,7 @@ func (s *Scratch) normalizeCount2() error {
if (total / toDistribute) > lowOne {
// risk of rounding to zero
- lowOne = uint32((total * 3) / (toDistribute * 2))
+ lowOne = (total * 3) / (toDistribute * 2)
for i, cnt := range s.count[:s.symbolLen] {
if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
s.norm[i] = 1
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
index 202f36a99b3..926f5f15356 100644
--- a/vendor/github.com/klauspost/compress/fse/decompress.go
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
@@ -172,7 +172,7 @@ type decSymbol struct {
// allocDtable will allocate decoding tables if they are not big enough.
func (s *Scratch) allocDtable() {
tableSize := 1 << s.actualTableLog
- if cap(s.decTable) < int(tableSize) {
+ if cap(s.decTable) < tableSize {
s.decTable = make([]decSymbol, tableSize)
}
s.decTable = s.decTable[:tableSize]
@@ -243,7 +243,7 @@ func (s *Scratch) buildDtable() error {
nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
s.decTable[u].nbBits = nBits
newState := (nextState << nBits) - tableSize
- if newState > tableSize {
+ if newState >= tableSize {
return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
}
if newState == uint16(u) && nBits == 0 {
@@ -281,8 +281,12 @@ func (s *Scratch) decompress() error {
tmp[off+2] = s1.nextFast()
tmp[off+3] = s2.nextFast()
off += 4
+ // When off is 0, we have overflowed and should write.
if off == 0 {
s.Out = append(s.Out, tmp...)
+ if len(s.Out) >= s.DecompressLimit {
+ return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
+ }
}
}
} else {
@@ -296,7 +300,7 @@ func (s *Scratch) decompress() error {
off += 4
if off == 0 {
s.Out = append(s.Out, tmp...)
- off = 0
+ // When off is 0, we have overflowed and should write.
if len(s.Out) >= s.DecompressLimit {
return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
}
@@ -336,7 +340,7 @@ type decoder struct {
func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) {
d.dt = dt
d.br = in
- d.state = uint16(in.getBits(tableLog))
+ d.state = in.getBits(tableLog)
}
// next returns the next symbol and sets the next state.
diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go
index 075357b5b16..535cbadfdea 100644
--- a/vendor/github.com/klauspost/compress/fse/fse.go
+++ b/vendor/github.com/klauspost/compress/fse/fse.go
@@ -44,18 +44,14 @@ var (
// Scratch provides temporary storage for compression and decompression.
type Scratch struct {
// Private
- count [maxSymbolValue + 1]uint32
- norm [maxSymbolValue + 1]int16
- symbolLen uint16 // Length of active part of the symbol table.
- actualTableLog uint8 // Selected tablelog.
- br byteReader
- bits bitReader
- bw bitWriter
- ct cTable // Compression tables.
- decTable []decSymbol // Decompression table.
- zeroBits bool // no bits has prob > 50%.
- clearCount bool // clear count
- maxCount int // count of the most probable symbol
+ count [maxSymbolValue + 1]uint32
+ norm [maxSymbolValue + 1]int16
+ br byteReader
+ bits bitReader
+ bw bitWriter
+ ct cTable // Compression tables.
+ decTable []decSymbol // Decompression table.
+ maxCount int // count of the most probable symbol
// Per block parameters.
// These can be used to override compression parameters of the block.
@@ -68,17 +64,22 @@ type Scratch struct {
// and allocation will be avoided.
Out []byte
- // MaxSymbolValue will override the maximum symbol value of the next block.
- MaxSymbolValue uint8
-
- // TableLog will attempt to override the tablelog for the next block.
- TableLog uint8
-
// DecompressLimit limits the maximum decoded size acceptable.
// If > 0 decompression will stop when approximately this many bytes
// has been decoded.
// If 0, maximum size will be 2GB.
DecompressLimit int
+
+ symbolLen uint16 // Length of active part of the symbol table.
+ actualTableLog uint8 // Selected tablelog.
+ zeroBits bool // no bits has prob > 50%.
+ clearCount bool // clear count
+
+ // MaxSymbolValue will override the maximum symbol value of the next block.
+ MaxSymbolValue uint8
+
+ // TableLog will attempt to override the tablelog for the next block.
+ TableLog uint8
}
// Histogram allows to populate the histogram and skip that step in the compression,
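
The hunk above mostly regroups Scratch fields. One plausible motivation for such grouping (an assumption, not stated in the diff) is Go struct padding: byte-sized fields scattered between word-sized ones waste space. A standalone illustration with made-up types:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Byte-sized fields scattered between slice headers force padding.
type scattered struct {
	a uint8
	b []byte
	c uint8
	d []byte
	e uint8
}

// Grouping the byte-sized fields lets them share one word.
type grouped struct {
	b []byte
	d []byte
	a uint8
	c uint8
	e uint8
}

func main() {
	// On 64-bit platforms this prints 72 56.
	fmt.Println(unsafe.Sizeof(scattered{}), unsafe.Sizeof(grouped{}))
}
```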
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
index 0a8448ce9f9..8b6e5c66383 100644
--- a/vendor/github.com/klauspost/compress/huff0/README.md
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -12,11 +12,11 @@ but it can be used as a secondary step to compressors (like Snappy) that does no
* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
-THIS PACKAGE IS NOT CONSIDERED STABLE AND API OR ENCODING MAY CHANGE IN THE FUTURE.
-
## News
- * Mar 2018: First implementation released. Consider this beta software for now.
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
# Usage
@@ -75,6 +75,8 @@ which can be given to the decompressor.
Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested; it remains correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
your input was likely corrupted.
@@ -84,4 +86,4 @@ There are no integrity checks, so relying on errors from the decompressor does n
# Contributing
Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
-changes will likely not be accepted. If in doubt open an issue before writing the PR.
\ No newline at end of file
+changes will likely not be accepted. If in doubt open an issue before writing the PR.
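
Tying the README's steps together, a plausible 1X round trip looks like this (a sketch: the sample payload is made up, and ErrIncompressible/ErrUseRLE handling is reduced to a panic):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	in := bytes.Repeat([]byte("huff0 example payload "), 64)

	var comp huff0.Scratch
	out, _, err := huff0.Compress1X(in, &comp)
	if err != nil {
		panic(err) // ErrIncompressible and ErrUseRLE also land here.
	}

	// The output starts with the table; ReadTable consumes it and
	// returns the remaining compressed data.
	dec, data, err := huff0.ReadTable(out, nil)
	if err != nil {
		panic(err)
	}
	got, err := dec.Decompress1X(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(in, got)) // true
}
```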
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index 7d0903c7010..a4979e8868a 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -6,6 +6,7 @@
package huff0
import (
+ "encoding/binary"
"errors"
"io"
)
@@ -34,29 +35,16 @@ func (b *bitReader) init(in []byte) error {
}
b.bitsRead = 64
b.value = 0
- b.fill()
- b.fill()
+ if len(in) >= 8 {
+ b.fillFastStart()
+ } else {
+ b.fill()
+ b.fill()
+ }
b.bitsRead += 8 - uint8(highBit32(uint32(v)))
return nil
}
-// getBits will return n bits. n can be 0.
-func (b *bitReader) getBits(n uint8) uint16 {
- if n == 0 || b.bitsRead >= 64 {
- return 0
- }
- return b.getBitsFast(n)
-}
-
-// getBitsFast requires that at least one bit is requested every time.
-// There are no checks if the buffer is filled.
-func (b *bitReader) getBitsFast(n uint8) uint16 {
- const regMask = 64 - 1
- v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
- b.bitsRead += n
- return v
-}
-
// peekBitsFast requires that at least one bit is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReader) peekBitsFast(n uint8) uint16 {
@@ -71,21 +59,36 @@ func (b *bitReader) fillFast() {
if b.bitsRead < 32 {
return
}
- // Do single re-slice to avoid bounds checks.
+
+ // 2 bounds checks.
v := b.in[b.off-4 : b.off]
+ v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.bitsRead -= 32
b.off -= 4
}
+func (b *bitReader) advance(n uint8) {
+ b.bitsRead += n
+}
+
+// fillFastStart() assumes the bitreader is empty and there are at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+ // Do single re-slice to avoid bounds checks.
+ b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.bitsRead = 0
+ b.off -= 8
+}
+
// fill() will make sure at least 32 bits are available.
func (b *bitReader) fill() {
if b.bitsRead < 32 {
return
}
if b.off > 4 {
- v := b.in[b.off-4 : b.off]
+ v := b.in[b.off-4:]
+ v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.bitsRead -= 32
@@ -113,3 +116,214 @@ func (b *bitReader) close() error {
}
return nil
}
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderBytes struct {
+ in []byte
+ off uint // next byte to read is at in[off - 1]
+ value uint64
+ bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderBytes) init(in []byte) error {
+ if len(in) < 1 {
+ return errors.New("corrupt stream: too short")
+ }
+ b.in = in
+ b.off = uint(len(in))
+ // The highest bit of the last byte indicates where to start
+ v := in[len(in)-1]
+ if v == 0 {
+ return errors.New("corrupt stream, did not find end of stream")
+ }
+ b.bitsRead = 64
+ b.value = 0
+ if len(in) >= 8 {
+ b.fillFastStart()
+ } else {
+ b.fill()
+ b.fill()
+ }
+ b.advance(8 - uint8(highBit32(uint32(v))))
+ return nil
+}
+
+// peekByteFast requires that at least one byte is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReaderBytes) peekByteFast() uint8 {
+ got := uint8(b.value >> 56)
+ return got
+}
+
+func (b *bitReaderBytes) advance(n uint8) {
+ b.bitsRead += n
+ b.value <<= n & 63
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReaderBytes) fillFast() {
+ if b.bitsRead < 32 {
+ return
+ }
+
+ // 2 bounds checks.
+ v := b.in[b.off-4 : b.off]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value |= uint64(low) << (b.bitsRead - 32)
+ b.bitsRead -= 32
+ b.off -= 4
+}
+
+// fillFastStart() assumes the bitReaderBytes is empty and there are at least 8 bytes to read.
+func (b *bitReaderBytes) fillFastStart() {
+ // Do single re-slice to avoid bounds checks.
+ b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.bitsRead = 0
+ b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReaderBytes) fill() {
+ if b.bitsRead < 32 {
+ return
+ }
+ if b.off > 4 {
+ v := b.in[b.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value |= uint64(low) << (b.bitsRead - 32)
+ b.bitsRead -= 32
+ b.off -= 4
+ return
+ }
+ for b.off > 0 {
+ b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8)
+ b.bitsRead -= 8
+ b.off--
+ }
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReaderBytes) finished() bool {
+ return b.off == 0 && b.bitsRead >= 64
+}
+
+// close the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReaderBytes) close() error {
+ // Release reference.
+ b.in = nil
+ if b.bitsRead > 64 {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+
+// bitReaderShifted reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderShifted struct {
+ in []byte
+ off uint // next byte to read is at in[off - 1]
+ value uint64
+ bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderShifted) init(in []byte) error {
+ if len(in) < 1 {
+ return errors.New("corrupt stream: too short")
+ }
+ b.in = in
+ b.off = uint(len(in))
+ // The highest bit of the last byte indicates where to start
+ v := in[len(in)-1]
+ if v == 0 {
+ return errors.New("corrupt stream, did not find end of stream")
+ }
+ b.bitsRead = 64
+ b.value = 0
+ if len(in) >= 8 {
+ b.fillFastStart()
+ } else {
+ b.fill()
+ b.fill()
+ }
+ b.advance(8 - uint8(highBit32(uint32(v))))
+ return nil
+}
+
+// peekBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
+ return uint16(b.value >> ((64 - n) & 63))
+}
+
+func (b *bitReaderShifted) advance(n uint8) {
+ b.bitsRead += n
+ b.value <<= n & 63
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReaderShifted) fillFast() {
+ if b.bitsRead < 32 {
+ return
+ }
+
+ // 2 bounds checks.
+ v := b.in[b.off-4 : b.off]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
+ b.bitsRead -= 32
+ b.off -= 4
+}
+
+// fillFastStart() assumes the bitReaderShifted is empty and there are at least 8 bytes to read.
+func (b *bitReaderShifted) fillFastStart() {
+ // Do single re-slice to avoid bounds checks.
+ b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.bitsRead = 0
+ b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReaderShifted) fill() {
+ if b.bitsRead < 32 {
+ return
+ }
+ if b.off > 4 {
+ v := b.in[b.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
+ b.bitsRead -= 32
+ b.off -= 4
+ return
+ }
+ for b.off > 0 {
+ b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63)
+ b.bitsRead -= 8
+ b.off--
+ }
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReaderShifted) finished() bool {
+ return b.off == 0 && b.bitsRead >= 64
+}
+
+// close the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReaderShifted) close() error {
+ // Release reference.
+ b.in = nil
+ if b.bitsRead > 64 {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
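
All three readers now share the fillFastStart pattern: when at least 8 bytes remain, one little-endian 64-bit load replaces two 32-bit fills. The core of that trick in isolation (illustrative only):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	in := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	off := len(in)
	// The stream is read backwards, so take the last 8 bytes as one
	// little-endian word; bitsRead starts at 0 because the container
	// is completely full.
	value := binary.LittleEndian.Uint64(in[off-8:])
	off -= 8
	fmt.Printf("%#x, %d bytes left\n", value, off)
}
```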
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
index ec0c3fc53b5..6bce4e87d4f 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -38,14 +38,38 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
b.nBits += bits
}
-// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// encSymbol will add up to 16 bits; the table entry may not contain more set bits than its bit count indicates.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) encSymbol(ct cTable, symbol byte) {
enc := ct[symbol]
b.bitContainer |= uint64(enc.val) << (b.nBits & 63)
+ if false {
+ if enc.nBits == 0 {
+ panic("nbits 0")
+ }
+ }
b.nBits += enc.nBits
}
+// encTwoSymbols will add up to 32 bits; the table entries may not contain more set bits than their bit counts indicate.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
+ encA := ct[av]
+ encB := ct[bv]
+ sh := b.nBits & 63
+ combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63))
+ b.bitContainer |= combined << sh
+ if false {
+ if encA.nBits == 0 {
+ panic("nbitsA 0")
+ }
+ if encB.nBits == 0 {
+ panic("nbitsB 0")
+ }
+ }
+ b.nBits += encA.nBits + encB.nBits
+}
+
// addBits16ZeroNC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
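
encTwoSymbols merges two table lookups into a single shift into the bit container. A self-contained sketch of the combining arithmetic, using made-up code values rather than a real cTable:

```go
package main

import "fmt"

func main() {
	// Hypothetical Huffman codes: symbol A = 0b101 (3 bits),
	// symbol B = 0b01 (2 bits).
	valA, bitsA := uint64(0b101), uint8(3)
	valB, bitsB := uint64(0b01), uint8(2)

	var container uint64
	var nBits uint8

	// B is placed directly above A, then both go in with one shift.
	combined := valA | (valB << (bitsA & 63))
	container |= combined << (nBits & 63)
	nBits += bitsA + bitsB

	fmt.Printf("%05b, %d bits\n", container, nBits) // 01101, 5 bits
}
```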
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index dd4f7fefb9f..dea115b3341 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -54,6 +54,12 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
canReuse = s.canUseTable(s.prevTable)
}
+ // We want the output size to be less than this:
+ wantSize := len(in)
+ if s.WantLogLess > 0 {
+ wantSize -= wantSize >> s.WantLogLess
+ }
+
// Reset for next run.
s.clearCount = true
s.maxCount = 0
@@ -71,22 +77,30 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
// Each symbol present maximum once or too well distributed.
return nil, false, ErrIncompressible
}
-
- if s.Reuse == ReusePolicyPrefer && canReuse {
+ if s.Reuse == ReusePolicyMust && !canReuse {
+ // We must reuse, but we can't.
+ return nil, false, ErrIncompressible
+ }
+ if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse {
keepTable := s.cTable
+ keepTL := s.actualTableLog
s.cTable = s.prevTable
+ s.actualTableLog = s.prevTableLog
s.Out, err = compressor(in)
s.cTable = keepTable
- if err == nil && len(s.Out) < len(in) {
+ s.actualTableLog = keepTL
+ if err == nil && len(s.Out) < wantSize {
s.OutData = s.Out
return s.Out, true, nil
}
+ if s.Reuse == ReusePolicyMust {
+ return nil, false, ErrIncompressible
+ }
// Do not attempt to re-use later.
s.prevTable = s.prevTable[:0]
}
// Calculate new table.
- s.optimalTableLog()
err = s.buildCTable()
if err != nil {
return nil, false, err
@@ -100,13 +114,22 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
hSize := len(s.Out)
oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen])
newSize := s.cTable.estimateSize(s.count[:s.symbolLen])
- if oldSize <= hSize+newSize || hSize+12 >= len(in) {
+ if oldSize <= hSize+newSize || hSize+12 >= wantSize {
// Retain cTable even if we re-use.
keepTable := s.cTable
+ keepTL := s.actualTableLog
+
s.cTable = s.prevTable
+ s.actualTableLog = s.prevTableLog
s.Out, err = compressor(in)
+
+ // Restore ctable.
s.cTable = keepTable
- if len(s.Out) >= len(in) {
+ s.actualTableLog = keepTL
+ if err != nil {
+ return nil, false, err
+ }
+ if len(s.Out) >= wantSize {
return nil, false, ErrIncompressible
}
s.OutData = s.Out
@@ -128,12 +151,12 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
s.OutTable = nil
return nil, false, err
}
- if len(s.Out) >= len(in) {
+ if len(s.Out) >= wantSize {
s.OutTable = nil
return nil, false, ErrIncompressible
}
// Move current table into previous.
- s.prevTable, s.cTable = s.cTable, s.prevTable[:0]
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0]
s.OutData = s.Out[len(s.OutTable):]
return s.Out, false, nil
}
@@ -154,28 +177,23 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
for i := len(src) & 3; i > 0; i-- {
bw.encSymbol(cTable, src[n+i-1])
}
+ n -= 4
if s.actualTableLog <= 8 {
- n -= 4
for ; n >= 0; n -= 4 {
tmp := src[n : n+4]
// tmp should be len 4
bw.flush32()
- bw.encSymbol(cTable, tmp[3])
- bw.encSymbol(cTable, tmp[2])
- bw.encSymbol(cTable, tmp[1])
- bw.encSymbol(cTable, tmp[0])
+ bw.encTwoSymbols(cTable, tmp[3], tmp[2])
+ bw.encTwoSymbols(cTable, tmp[1], tmp[0])
}
} else {
- n -= 4
for ; n >= 0; n -= 4 {
tmp := src[n : n+4]
// tmp should be len 4
bw.flush32()
- bw.encSymbol(cTable, tmp[3])
- bw.encSymbol(cTable, tmp[2])
+ bw.encTwoSymbols(cTable, tmp[3], tmp[2])
bw.flush32()
- bw.encSymbol(cTable, tmp[1])
- bw.encSymbol(cTable, tmp[0])
+ bw.encTwoSymbols(cTable, tmp[1], tmp[0])
}
}
err := bw.close()
@@ -313,9 +331,26 @@ func (s *Scratch) canUseTable(c cTable) bool {
return true
}
+func (s *Scratch) validateTable(c cTable) bool {
+ if len(c) < int(s.symbolLen) {
+ return false
+ }
+ for i, v := range s.count[:s.symbolLen] {
+ if v != 0 {
+ if c[i].nBits == 0 {
+ return false
+ }
+ if c[i].nBits > s.actualTableLog {
+ return false
+ }
+ }
+ }
+ return true
+}
+
// minTableLog provides the minimum logSize to safely represent a distribution.
func (s *Scratch) minTableLog() uint8 {
- minBitsSrc := highBit32(uint32(s.br.remain()-1)) + 1
+ minBitsSrc := highBit32(uint32(s.br.remain())) + 1
minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
if minBitsSrc < minBitsSymbols {
return uint8(minBitsSrc)
@@ -327,7 +362,7 @@ func (s *Scratch) minTableLog() uint8 {
func (s *Scratch) optimalTableLog() {
tableLog := s.TableLog
minBits := s.minTableLog()
- maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 2
+ maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
if maxBitsSrc < tableLog {
// Accuracy can be reduced
tableLog = maxBitsSrc
@@ -354,6 +389,7 @@ type cTableEntry struct {
const huffNodesMask = huffNodesLen - 1
func (s *Scratch) buildCTable() error {
+ s.optimalTableLog()
s.huffSort()
if cap(s.cTable) < maxSymbolValue+1 {
s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1)
@@ -367,7 +403,7 @@ func (s *Scratch) buildCTable() error {
var startNode = int16(s.symbolLen)
nonNullRank := s.symbolLen - 1
- nodeNb := int16(startNode)
+ nodeNb := startNode
huffNode := s.nodes[1 : huffNodesLen+1]
// This overlays the slice above, but allows "-1" index lookups.
@@ -430,7 +466,7 @@ func (s *Scratch) buildCTable() error {
return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
}
var nbPerRank [tableLogMax + 1]uint16
- var valPerRank [tableLogMax + 1]uint16
+ var valPerRank [16]uint16
for _, v := range huffNode[:nonNullRank+1] {
nbPerRank[v.nbBits]++
}
@@ -446,16 +482,17 @@ func (s *Scratch) buildCTable() error {
}
// push nbBits per symbol, symbol order
- // TODO: changed `s.symbolLen` -> `nonNullRank+1` (micro-opt)
for _, v := range huffNode[:nonNullRank+1] {
s.cTable[v.symbol].nBits = v.nbBits
}
// assign value within rank, symbol order
- for n, val := range s.cTable[:s.symbolLen] {
- v := valPerRank[val.nBits]
- s.cTable[n].val = v
- valPerRank[val.nBits] = v + 1
+ t := s.cTable[:s.symbolLen]
+ for n, val := range t {
+ nbits := val.nBits & 15
+ v := valPerRank[nbits]
+ t[n].val = v
+ valPerRank[nbits] = v + 1
}
return nil
@@ -479,10 +516,12 @@ func (s *Scratch) huffSort() {
r := highBit32(v+1) & 31
rank[r].base++
}
- for n := 30; n > 0; n-- {
+ // maxBitLength is log2(BlockSizeMax) + 1
+ const maxBitLength = 18 + 1
+ for n := maxBitLength; n > 0; n-- {
rank[n-1].base += rank[n].base
}
- for n := range rank[:] {
+ for n := range rank[:maxBitLength] {
rank[n].current = rank[n].base
}
for n, c := range s.count[:s.symbolLen] {
@@ -501,7 +540,7 @@ func (s *Scratch) huffSort() {
}
func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
- maxNbBits := s.TableLog
+ maxNbBits := s.actualTableLog
huffNode := s.nodes[1 : huffNodesLen+1]
//huffNode = huffNode[: huffNodesLen]
@@ -541,7 +580,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
// Get pos of last (smallest) symbol per rank
{
- currentNbBits := uint8(maxNbBits)
+ currentNbBits := maxNbBits
for pos := int(n); pos >= 0; pos-- {
if huffNode[pos].nbBits >= currentNbBits {
continue
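
The wantSize logic threaded through this file is small enough to show on its own: with WantLogLess set, the output must shrink by at least input>>WantLogLess bytes or the block is declared incompressible. A quick sketch with hypothetical numbers:

```go
package main

import "fmt"

func main() {
	inSize := 4096
	wantLogLess := uint8(4) // require at least a 1/16 reduction

	wantSize := inSize
	if wantLogLess > 0 {
		wantSize -= wantSize >> wantLogLess
	}
	// Any output of wantSize bytes or more is reported incompressible.
	fmt.Println(wantSize) // 3840
}
```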
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 261c54274c0..41703bba4d6 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -15,8 +15,7 @@ type dTable struct {
// single-symbols decoding
type dEntrySingle struct {
- byte uint8
- nBits uint8
+ entry uint16
}
// double-symbols decoding
@@ -26,11 +25,14 @@ type dEntryDouble struct {
len uint8
}
+// Uses special code for all tables that are <= 8 bits.
+const use8BitTables = true
+
// ReadTable will read a table from the input.
// The size of the input may be larger than the table definition.
// Any content remaining after the table definition will be returned.
// If no Scratch is provided a new one is allocated.
-// The returned Scratch can be used for decoding input using this table.
+// The returned Scratch can be used for encoding or decoding input using this table.
func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
s, err = s.prepare(in)
if err != nil {
@@ -56,8 +58,8 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
s.symbolLen = uint16(oSize)
in = in[iSize:]
} else {
- if len(in) <= int(iSize) {
- return s, nil, errors.New("input too small for table")
+ if len(in) < int(iSize) {
+ return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in))
}
// FSE compressed weights
s.fse.DecompressLimit = 255
@@ -76,14 +78,16 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
}
// collect weight stats
- var rankStats [tableLogMax + 1]uint32
+ var rankStats [16]uint32
weightTotal := uint32(0)
for _, v := range s.huffWeight[:s.symbolLen] {
if v > tableLogMax {
return s, nil, errors.New("corrupt input: weight too large")
}
- rankStats[v]++
- weightTotal += (1 << (v & 15)) >> 1
+ v2 := v & 15
+ rankStats[v2]++
+ // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0.
+ weightTotal += (1 << v2) >> 1
}
if weightTotal == 0 {
return s, nil, errors.New("corrupt input: weights zero")
@@ -134,18 +138,40 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
if len(s.dt.single) != tSize {
s.dt.single = make([]dEntrySingle, tSize)
}
+ cTable := s.prevTable
+ if cap(cTable) < maxSymbolValue+1 {
+ cTable = make([]cTableEntry, 0, maxSymbolValue+1)
+ }
+ cTable = cTable[:maxSymbolValue+1]
+ s.prevTable = cTable[:s.symbolLen]
+ s.prevTableLog = s.actualTableLog
for n, w := range s.huffWeight[:s.symbolLen] {
+ if w == 0 {
+ cTable[n] = cTableEntry{
+ val: 0,
+ nBits: 0,
+ }
+ continue
+ }
length := (uint32(1) << w) >> 1
d := dEntrySingle{
- byte: uint8(n),
- nBits: s.actualTableLog + 1 - w,
+ entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8),
}
- for u := rankStats[w]; u < rankStats[w]+length; u++ {
- s.dt.single[u] = d
+
+ rank := &rankStats[w]
+ cTable[n] = cTableEntry{
+ val: uint16(*rank >> (w - 1)),
+ nBits: uint8(d.entry),
+ }
+
+ single := s.dt.single[*rank : *rank+length]
+ for i := range single {
+ single[i] = d
}
- rankStats[w] += length
+ *rank += length
}
+
return s, in, nil
}
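
The reworked table entries pack everything a decode step needs into one uint16: bit count in the low byte, decoded symbol in the high byte. A sketch of that convention:

```go
package main

import "fmt"

func main() {
	nBits := uint8(5)    // low byte: bits consumed by this symbol
	symbol := uint8('k') // high byte: the decoded symbol

	entry := uint16(nBits) | (uint16(symbol) << 8)

	fmt.Println(uint8(entry))      // 5   -> fed to br.advance
	fmt.Println(uint8(entry >> 8)) // 107 -> appended to the output
}
```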
@@ -153,176 +179,917 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
// The length of the supplied input must match the end of a block exactly.
// Before this is called, the table must be initialized with ReadTable unless
// the encoder re-used the table.
+// Deprecated: Use the stateless Decoder() to get a concurrent version.
func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
- if len(s.dt.single) == 0 {
- return nil, errors.New("no table loaded")
+ if cap(s.Out) < s.MaxDecodedSize {
+ s.Out = make([]byte, s.MaxDecodedSize)
}
- var br bitReader
- err = br.init(in)
- if err != nil {
- return nil, err
+ s.Out = s.Out[:0:s.MaxDecodedSize]
+ s.Out, err = s.Decoder().Decompress1X(s.Out, in)
+ return s.Out, err
+}
+
+// Decompress4X will decompress a 4X encoded stream.
+// Before this is called, the table must be initialized with ReadTable unless
+// the encoder re-used the table.
+// The length of the supplied input must match the end of a block exactly.
+// The destination size of the uncompressed data must be known and provided.
+// Deprecated: Use the stateless Decoder() to get a concurrent version.
+func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
+ if dstSize > s.MaxDecodedSize {
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ if cap(s.Out) < dstSize {
+ s.Out = make([]byte, s.MaxDecodedSize)
}
- s.Out = s.Out[:0]
+ s.Out = s.Out[:0:dstSize]
+ s.Out, err = s.Decoder().Decompress4X(s.Out, in)
+ return s.Out, err
+}
- decode := func() byte {
- val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
- v := s.dt.single[val]
- br.bitsRead += v.nBits
- return v.byte
+// Decoder will return a stateless decoder that can be used by multiple
+// decompressors concurrently.
+// Before this is called, the table must be initialized with ReadTable.
+// The Decoder is still linked to the scratch buffer so that cannot be reused.
+// However, it is safe to discard the scratch.
+func (s *Scratch) Decoder() *Decoder {
+ return &Decoder{
+ dt: s.dt,
+ actualTableLog: s.actualTableLog,
}
- hasDec := func(v dEntrySingle) byte {
- br.bitsRead += v.nBits
- return v.byte
+}
+
+// Decoder provides stateless decoding.
+type Decoder struct {
+ dt dTable
+ actualTableLog uint8
+}
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
}
+ if use8BitTables && d.actualTableLog <= 8 {
+ return d.decompress1X8Bit(dst, src)
+ }
+ var br bitReaderShifted
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:0]
// Avoid bounds check by always having full sized table.
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
- dt := s.dt.single[:tlSize]
+ dt := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
- var tmp = s.huffWeight[:256]
+ var buf [256]byte
var off uint8
for br.off >= 8 {
br.fillFast()
- tmp[off+0] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
- tmp[off+1] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
+ v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ // Refill
br.fillFast()
- tmp[off+2] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
- tmp[off+3] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
off += 4
if off == 0 {
- s.Out = append(s.Out, tmp...)
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
}
}
- s.Out = append(s.Out, tmp[:off]...)
+ if len(dst)+int(off) > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:off]...)
- for !br.finished() {
+ // br < 8, so uint8 is fine
+ bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+ for bitsLeft > 0 {
br.fill()
- s.Out = append(s.Out, decode())
+ if false && br.bitsRead >= 32 {
+ if br.off >= 4 {
+ v := br.in[br.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ br.value = (br.value << 32) | uint64(low)
+ br.bitsRead -= 32
+ br.off -= 4
+ } else {
+ for br.off > 0 {
+ br.value = (br.value << 8) | uint64(br.in[br.off-1])
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ }
+ if len(dst) >= maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= nBits
+ dst = append(dst, uint8(v.entry>>8))
+ }
+ return dst, br.close()
+}
+
+// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
+ if d.actualTableLog == 8 {
+ return d.decompress1X8BitExactly(dst, src)
+ }
+ var br bitReaderBytes
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:0]
+
+ // Avoid bounds check by always having full sized table.
+ dt := d.dt.single[:256]
+
+ // Use temp table to avoid bound checks/append penalty.
+ var buf [256]byte
+ var off uint8
+
+ shift := (8 - d.actualTableLog) & 7
+
+ //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[br.peekByteFast()>>shift]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[br.peekByteFast()>>shift]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[br.peekByteFast()>>shift]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[br.peekByteFast()>>shift]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+
+ if len(dst)+int(off) > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:off]...)
+
+ // br < 4, so uint8 is fine
+ bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
+ for bitsLeft > 0 {
+ if br.bitsRead >= 64-8 {
+ for br.off > 0 {
+ br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ if len(dst) >= maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := dt[br.peekByteFast()>>shift]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= int8(nBits)
+ dst = append(dst, uint8(v.entry>>8))
}
- return s.Out, br.close()
+ return dst, br.close()
+}
+
+// decompress1X8BitExactly will decompress a 1X encoded stream with tablelog == 8.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
+ var br bitReaderBytes
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:0]
+
+ // Avoid bounds check by always having full sized table.
+ dt := d.dt.single[:256]
+
+ // Use temp table to avoid bound checks/append penalty.
+ var buf [256]byte
+ var off uint8
+
+ const shift = 0
+
+ //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[br.peekByteFast()>>shift]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[br.peekByteFast()>>shift]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[br.peekByteFast()>>shift]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[br.peekByteFast()>>shift]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+
+ if len(dst)+int(off) > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:off]...)
+
+ // br < 4, so uint8 is fine
+ bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
+ for bitsLeft > 0 {
+ if br.bitsRead >= 64-8 {
+ for br.off > 0 {
+ br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ if len(dst) >= maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := dt[br.peekByteFast()>>shift]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= int8(nBits)
+ dst = append(dst, uint8(v.entry>>8))
+ }
+ return dst, br.close()
}
// Decompress4X will decompress a 4X encoded stream.
-// Before this is called, the table must be initialized with ReadTable unless
-// the encoder re-used the table.
// The length of the supplied input must match the end of a block exactly.
-// The destination size of the uncompressed data must be known and provided.
-func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
- if len(s.dt.single) == 0 {
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
- if len(in) < 6+(4*1) {
+ if len(src) < 6+(4*1) {
return nil, errors.New("input too small")
}
- // TODO: We do not detect when we overrun a buffer, except if the last one does.
+ if use8BitTables && d.actualTableLog <= 8 {
+ return d.decompress4X8bit(dst, src)
+ }
- var br [4]bitReader
+ var br [4]bitReaderShifted
start := 6
for i := 0; i < 3; i++ {
- length := int(in[i*2]) | (int(in[i*2+1]) << 8)
- if start+length >= len(in) {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
}
- err = br[i].init(in[start : start+length])
+ err := br[i].init(src[start : start+length])
if err != nil {
return nil, err
}
start += length
}
- err = br[3].init(in[start:])
+ err := br[3].init(src[start:])
if err != nil {
return nil, err
}
- // Prepare output
- if cap(s.Out) < dstSize {
- s.Out = make([]byte, 0, dstSize)
- }
- s.Out = s.Out[:dstSize]
// destination, offset to match first output
- dstOut := s.Out
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
dstEvery := (dstSize + 3) / 4
- decode := func(br *bitReader) byte {
- val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
- v := s.dt.single[val]
- br.bitsRead += v.nBits
- return v.byte
- }
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
- var tmp = s.huffWeight[:256]
+ var buf [256]byte
var off uint8
+ var decoded int
// Decode 2 values from each decoder/loop.
const bufoff = 256 / 4
-bigloop:
for {
- for i := range br {
- if br[i].off < 4 {
- break bigloop
- }
- br[i].fillFast()
- }
- tmp[off] = decode(&br[0])
- tmp[off+bufoff] = decode(&br[1])
- tmp[off+bufoff*2] = decode(&br[2])
- tmp[off+bufoff*3] = decode(&br[3])
- tmp[off+1] = decode(&br[0])
- tmp[off+1+bufoff] = decode(&br[1])
- tmp[off+1+bufoff*2] = decode(&br[2])
- tmp[off+1+bufoff*3] = decode(&br[3])
+ if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+ break
+ }
+
+ {
+ const stream = 0
+ const stream2 = 1
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ val := br[stream].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
+ br[stream].advance(uint8(v.entry))
+ buf[off+bufoff*stream] = uint8(v.entry >> 8)
+
+ val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v2 := single[val2&tlMask]
+ br[stream2].advance(uint8(v2.entry))
+ buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
+
+ val = br[stream].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
+ br[stream].advance(uint8(v.entry))
+ buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
+
+ val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v2 = single[val2&tlMask]
+ br[stream2].advance(uint8(v2.entry))
+ buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
+ }
+
+ {
+ const stream = 2
+ const stream2 = 3
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ val := br[stream].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
+ br[stream].advance(uint8(v.entry))
+ buf[off+bufoff*stream] = uint8(v.entry >> 8)
+
+ val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v2 := single[val2&tlMask]
+ br[stream2].advance(uint8(v2.entry))
+ buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
+
+ val = br[stream].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
+ br[stream].advance(uint8(v.entry))
+ buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
+
+ val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v2 = single[val2&tlMask]
+ br[stream2].advance(uint8(v2.entry))
+ buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
+ }
+
off += 2
+
if off == bufoff {
if bufoff > dstEvery {
- return nil, errors.New("corruption detected: stream overrun")
+ return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(dstOut, tmp[:bufoff])
- copy(dstOut[dstEvery:], tmp[bufoff:bufoff*2])
- copy(dstOut[dstEvery*2:], tmp[bufoff*2:bufoff*3])
- copy(dstOut[dstEvery*3:], tmp[bufoff*3:bufoff*4])
+ copy(out, buf[:bufoff])
+ copy(out[dstEvery:], buf[bufoff:bufoff*2])
+ copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
+ copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
off = 0
- dstOut = dstOut[bufoff:]
+ out = out[bufoff:]
+ decoded += 256
// There must at least be 3 buffers left.
- if len(dstOut) < dstEvery*3+3 {
- return nil, errors.New("corruption detected: stream overrun")
+ if len(out) < dstEvery*3 {
+ return nil, errors.New("corruption detected: stream overrun 2")
}
}
}
if off > 0 {
ioff := int(off)
- if len(dstOut) < dstEvery*3+ioff {
- return nil, errors.New("corruption detected: stream overrun")
+ if len(out) < dstEvery*3+ioff {
+ return nil, errors.New("corruption detected: stream overrun 3")
}
- copy(dstOut, tmp[:off])
- copy(dstOut[dstEvery:dstEvery+ioff], tmp[bufoff:bufoff*2])
- copy(dstOut[dstEvery*2:dstEvery*2+ioff], tmp[bufoff*2:bufoff*3])
- copy(dstOut[dstEvery*3:dstEvery*3+ioff], tmp[bufoff*3:bufoff*4])
- dstOut = dstOut[off:]
+ copy(out, buf[:off])
+ copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+ copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+ copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+ decoded += int(off) * 4
+ out = out[off:]
}
+ // Decode remaining.
for i := range br {
offset := dstEvery * i
br := &br[i]
- for !br.finished() {
+ bitsLeft := br.off*8 + uint(64-br.bitsRead)
+ for bitsLeft > 0 {
br.fill()
- if offset >= len(dstOut) {
- return nil, errors.New("corruption detected: stream overrun")
+ if false && br.bitsRead >= 32 {
+ if br.off >= 4 {
+ v := br.in[br.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ br.value = (br.value << 32) | uint64(low)
+ br.bitsRead -= 32
+ br.off -= 4
+ } else {
+ for br.off > 0 {
+ br.value = (br.value << 8) | uint64(br.in[br.off-1])
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ }
+ // end inline...
+ if offset >= len(out) {
+ return nil, errors.New("corruption detected: stream overrun 4")
}
- dstOut[offset] = decode(br)
+
+ // Read value and increment offset.
+ val := br.peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
offset++
}
+ decoded += offset - dstEvery*i
err = br.close()
if err != nil {
return nil, err
}
}
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
+
+// decompress4X8bit will decompress a 4X encoded stream with tablelog <= 8.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
+ if d.actualTableLog == 8 {
+ return d.decompress4X8bitExactly(dst, src)
+ }
- return s.Out, nil
+ var br [4]bitReaderBytes
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ shift := (8 - d.actualTableLog) & 7
+
+ const tlSize = 1 << 8
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ var buf [256]byte
+ var off uint8
+ var decoded int
+
+ // Decode 4 values from each decoder/loop.
+ const bufoff = 256 / 4
+ for {
+ if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+ break
+ }
+
+ {
+ // Interleave 2 decodes.
+ const stream = 0
+ const stream2 = 1
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ v := single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 := single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+
+ v = single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream+1] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 = single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+
+ v = single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream+2] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 = single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+
+ v = single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream+3] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 = single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+ }
+
+ {
+ const stream = 2
+ const stream2 = 3
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ v := single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 := single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+
+ v = single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream+1] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 = single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+
+ v = single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream+2] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 = single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+
+ v = single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream+3] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 = single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+ }
+
+ off += 4
+
+ if off == bufoff {
+ if bufoff > dstEvery {
+ return nil, errors.New("corruption detected: stream overrun 1")
+ }
+ copy(out, buf[:bufoff])
+ copy(out[dstEvery:], buf[bufoff:bufoff*2])
+ copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
+ copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
+ off = 0
+ out = out[bufoff:]
+ decoded += 256
+ // There must at least be 3 buffers left.
+ if len(out) < dstEvery*3 {
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[:off])
+ copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+ copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+ copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ for i := range br {
+ offset := dstEvery * i
+ br := &br[i]
+ bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+ for bitsLeft > 0 {
+ if br.finished() {
+ return nil, io.ErrUnexpectedEOF
+ }
+ if br.bitsRead >= 56 {
+ if br.off >= 4 {
+ v := br.in[br.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ br.value |= uint64(low) << (br.bitsRead - 32)
+ br.bitsRead -= 32
+ br.off -= 4
+ } else {
+ for br.off > 0 {
+ br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ }
+ // end inline...
+ if offset >= len(out) {
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ v := single[br.peekByteFast()>>shift].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= int(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
+
+// decompress4X8bitExactly will decompress a 4X encoded stream with tablelog == 8.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
+ var br [4]bitReaderBytes
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ const shift = 0
+ const tlSize = 1 << 8
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ var buf [256]byte
+ var off uint8
+ var decoded int
+
+ // Decode 4 values from each decoder/loop.
+ const bufoff = 256 / 4
+ for {
+ if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+ break
+ }
+
+ {
+ // Interleave 2 decodes.
+ const stream = 0
+ const stream2 = 1
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ v := single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 := single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+
+ v = single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream+1] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 = single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+
+ v = single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream+2] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 = single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+
+ v = single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream+3] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 = single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+ }
+
+ {
+ const stream = 2
+ const stream2 = 3
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ v := single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 := single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+
+ v = single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream+1] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 = single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+
+ v = single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream+2] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 = single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+
+ v = single[br[stream].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream+3] = uint8(v >> 8)
+ br[stream].advance(uint8(v))
+
+ v2 = single[br[stream2].peekByteFast()>>shift].entry
+ buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+ br[stream2].advance(uint8(v2))
+ }
+
+ off += 4
+
+ if off == bufoff {
+ if bufoff > dstEvery {
+ return nil, errors.New("corruption detected: stream overrun 1")
+ }
+ copy(out, buf[:bufoff])
+ copy(out[dstEvery:], buf[bufoff:bufoff*2])
+ copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
+ copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
+ off = 0
+ out = out[bufoff:]
+ decoded += 256
+ // There must at least be 3 buffers left.
+ if len(out) < dstEvery*3 {
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[:off])
+ copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+ copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+ copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ for i := range br {
+ offset := dstEvery * i
+ br := &br[i]
+ bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+ for bitsLeft > 0 {
+ if br.finished() {
+ return nil, io.ErrUnexpectedEOF
+ }
+ if br.bitsRead >= 56 {
+ if br.off >= 4 {
+ v := br.in[br.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ br.value |= uint64(low) << (br.bitsRead - 32)
+ br.bitsRead -= 32
+ br.off -= 4
+ } else {
+ for br.off > 0 {
+ br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ }
+ // end inline...
+ if offset >= len(out) {
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ v := single[br.peekByteFast()>>shift].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= int(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
}
// matches will compare a decoding table to a coding table.
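
Because the Decoder introduced above holds no mutable state, a single table can drive several goroutines. A usage sketch (assumes all blocks were encoded with the table being read; buffer sizing and error collection are simplified):

```go
package huff0demo

import (
	"sync"

	"github.com/klauspost/compress/huff0"
)

// decodeAll decompresses independent 1X blocks that share one table.
func decodeAll(table []byte, blocks [][]byte) error {
	s, _, err := huff0.ReadTable(table, nil)
	if err != nil {
		return err
	}
	dec := s.Decoder() // stateless: safe to share across goroutines

	var wg sync.WaitGroup
	errs := make(chan error, len(blocks))
	for _, b := range blocks {
		wg.Add(1)
		go func(b []byte) {
			defer wg.Done()
			// The capacity of dst bounds the decoded size per block.
			dst := make([]byte, 0, 1<<16)
			if _, err := dec.Decompress1X(dst, b); err != nil {
				errs <- err
			}
		}(b)
	}
	wg.Wait()
	close(errs)
	return <-errs // nil if the channel is empty
}
```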
@@ -341,7 +1108,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
broken++
if enc.nBits == 0 {
for _, dec := range dt {
- if dec.byte == byte(sym) {
+ if uint8(dec.entry>>8) == byte(sym) {
fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
errs++
break
@@ -357,12 +1124,12 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
top := enc.val << ub
// decoder looks at top bits.
dec := dt[top]
- if dec.nBits != enc.nBits {
- fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, dec.nBits)
+ if uint8(dec.entry) != enc.nBits {
+ fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
errs++
}
- if dec.byte != uint8(sym) {
- fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, dec.byte)
+ if uint8(dec.entry>>8) != uint8(sym) {
+ fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
errs++
}
if errs > 0 {
@@ -373,12 +1140,12 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
for i := uint16(0); i < (1 << ub); i++ {
vval := top | i
dec := dt[vval]
- if dec.nBits != enc.nBits {
- fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, dec.nBits)
+ if uint8(dec.entry) != enc.nBits {
+ fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
errs++
}
- if dec.byte != uint8(sym) {
- fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, dec.byte)
+ if uint8(dec.entry>>8) != uint8(sym) {
+ fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
errs++
}
if errs > 20 {
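
The 4X format splits the input into four independent bitstreams; the first six bytes hold the little-endian lengths of streams 1-3 (the fourth runs to the end of the block), which is what the init loops above parse. A sketch of that header walk on a contrived block:

```go
package main

import (
	"errors"
	"fmt"
)

// splitStreams mirrors the header parsing in Decompress4X:
// three uint16 lengths, then four payloads back to back.
func splitStreams(src []byte) ([4][]byte, error) {
	var streams [4][]byte
	if len(src) < 6+4 {
		return streams, errors.New("input too small")
	}
	start := 6
	for i := 0; i < 3; i++ {
		length := int(src[i*2]) | int(src[i*2+1])<<8
		if start+length >= len(src) {
			return streams, errors.New("truncated input (or invalid offset)")
		}
		streams[i] = src[start : start+length]
		start += length
	}
	streams[3] = src[start:]
	return streams, nil
}

func main() {
	// Hypothetical block: three 2-byte streams plus the remainder.
	src := []byte{2, 0, 2, 0, 2, 0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'}
	s, err := splitStreams(src)
	fmt.Println(s, err)
}
```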
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 50d02e4405e..7ec2022b650 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -35,6 +35,9 @@ var (
// ErrTooBig is return if input is too large for a single block.
ErrTooBig = errors.New("input too big")
+
+ // ErrMaxDecodedSizeExceeded is returned if the decoded output exceeds MaxDecodedSize.
+ ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
)
type ReusePolicy uint8
@@ -52,6 +55,9 @@ const (
// ReusePolicyNone will disable re-use of tables.
// This is slightly faster than ReusePolicyAllow but may produce larger output.
ReusePolicyNone
+
+ // ReusePolicyMust requires that the previous table is reused and produces smaller output; otherwise the block is reported as incompressible.
+ ReusePolicyMust
)
type Scratch struct {
@@ -76,21 +82,34 @@ type Scratch struct {
// Slice of the returned data.
OutData []byte
+ // MaxDecodedSize will set the maximum allowed output size.
+ // This value will automatically be set to BlockSizeMax if not set.
+ // Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+ MaxDecodedSize int
+
+ br byteReader
+
// MaxSymbolValue will override the maximum symbol value of the next block.
MaxSymbolValue uint8
// TableLog will attempt to override the tablelog for the next block.
- // Must be <= 11.
+ // Must be <= 11 and >= 5.
TableLog uint8
// Reuse will specify the reuse policy
Reuse ReusePolicy
- br byteReader
+ // WantLogLess allows specifying a log2 reduction that should at least be achieved,
+ // otherwise the block will be returned as incompressible.
+ // The reduction should then at least be (input size >> WantLogLess)
+ // If WantLogLess == 0 any improvement will do.
+ WantLogLess uint8
+
symbolLen uint16 // Length of active part of the symbol table.
maxCount int // count of the most probable symbol
clearCount bool // clear count
actualTableLog uint8 // Selected tablelog.
+ prevTableLog uint8 // Tablelog for previous table
prevTable cTable // Table used for previous compression.
cTable cTable // compression table
dt dTable // decompression table
@@ -100,6 +119,16 @@ type Scratch struct {
huffWeight [maxSymbolValue + 1]byte
}
+// TransferCTable will transfer the previously used compression table.
+func (s *Scratch) TransferCTable(src *Scratch) {
+ if cap(s.prevTable) < len(src.prevTable) {
+ s.prevTable = make(cTable, 0, maxSymbolValue+1)
+ }
+ s.prevTable = s.prevTable[:len(src.prevTable)]
+ copy(s.prevTable, src.prevTable)
+ s.prevTableLog = src.prevTableLog
+}
+
func (s *Scratch) prepare(in []byte) (*Scratch, error) {
if len(in) > BlockSizeMax {
return nil, ErrTooBig
@@ -113,8 +142,11 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) {
if s.TableLog == 0 {
s.TableLog = tableLogDefault
}
- if s.TableLog > tableLogMax {
- return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, tableLogMax)
+ if s.TableLog > tableLogMax || s.TableLog < minTablelog {
+ return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
+ }
+ if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
+ s.MaxDecodedSize = BlockSizeMax
}
if s.clearCount && s.maxCount == 0 {
for i := range s.count {
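
TransferCTable plus the new ReusePolicyMust make the reuse contract explicit: seed one scratch from another, then require that the seeded table be used. A sketch (the inputs are contrived so both blocks share the same symbol set; ErrIncompressible is still possible and just panics here):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	a := bytes.Repeat([]byte("aaabbbcccddd"), 50)
	b := bytes.Repeat([]byte("dddcccbbbaaa"), 50)

	var first huff0.Scratch
	if _, _, err := huff0.Compress1X(a, &first); err != nil {
		panic(err)
	}

	// Carry the first block's table into a fresh scratch and demand
	// that it is reused for the second block.
	var second huff0.Scratch
	second.TransferCTable(&first)
	second.Reuse = huff0.ReusePolicyMust

	_, reused, err := huff0.Compress1X(b, &second)
	fmt.Println(reused, err) // true <nil>, since both blocks share symbols
}
```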
diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s
index e6179f65e35..1c66e37234d 100644
--- a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s
+++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s
@@ -184,9 +184,7 @@ tagLit60Plus:
// checks. In the asm version, we code it once instead of once per switch case.
ADDQ CX, SI
SUBQ $58, SI
- MOVQ SI, BX
- SUBQ R11, BX
- CMPQ BX, R12
+ CMPQ SI, R13
JA errCorrupt
// case x == 60:
@@ -232,9 +230,7 @@ tagCopy4:
ADDQ $5, SI
// if uint(s) > uint(len(src)) { etc }
- MOVQ SI, BX
- SUBQ R11, BX
- CMPQ BX, R12
+ CMPQ SI, R13
JA errCorrupt
// length = 1 + int(src[s-5])>>2
@@ -251,9 +247,7 @@ tagCopy2:
ADDQ $3, SI
// if uint(s) > uint(len(src)) { etc }
- MOVQ SI, BX
- SUBQ R11, BX
- CMPQ BX, R12
+ CMPQ SI, R13
JA errCorrupt
// length = 1 + int(src[s-3])>>2
@@ -277,9 +271,7 @@ tagCopy:
ADDQ $2, SI
// if uint(s) > uint(len(src)) { etc }
- MOVQ SI, BX
- SUBQ R11, BX
- CMPQ BX, R12
+ CMPQ SI, R13
JA errCorrupt
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/klauspost/compress/snappy/decode_other.go
index 8c9f2049bc7..94a96c5d7b8 100644
--- a/vendor/github.com/klauspost/compress/snappy/decode_other.go
+++ b/vendor/github.com/klauspost/compress/snappy/decode_other.go
@@ -85,14 +85,28 @@ func decode(dst, src []byte) int {
if offset <= 0 || d < offset || length > len(dst)-d {
return decodeErrCodeCorrupt
}
- // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
- // the built-in copy function, this byte-by-byte copy always runs
+ // Copy from an earlier sub-slice of dst to a later sub-slice.
+ // If no overlap, use the built-in copy:
+ if offset > length {
+ copy(dst[d:d+length], dst[d-offset:])
+ d += length
+ continue
+ }
+
+ // Unlike the built-in copy function, this byte-by-byte copy always runs
// forwards, even if the slices overlap. Conceptually, this is:
//
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
- for end := d + length; d != end; d++ {
- dst[d] = dst[d-offset]
+ //
+ // We align the slices into a and b and show the compiler they are the same size.
+ // This allows the loop to run without bounds checks.
+ a := dst[d : d+length]
+ b := dst[d-offset:]
+ b = b[:len(a)]
+ for i := range a {
+ a[i] = b[i]
}
+ d += length
}
if d != len(dst) {
return decodeErrCodeCorrupt
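
A standalone illustration (not from the patch) of why the two paths must differ: when `offset <= length` the copy has to run forwards so bytes just written feed later positions, which is how snappy encodes runs; the built-in `copy` would read only the old contents:

```Go
package main

import "fmt"

func main() {
	// "abc" already decoded; a copy of length 6 at offset 1 extends the run:
	// every byte written becomes the source of the next one.
	dst := []byte("abc\x00\x00\x00\x00\x00\x00")
	d, offset, length := 3, 1, 6

	a := dst[d : d+length]
	b := dst[d-offset:]
	b = b[:len(a)] // equal lengths let the compiler elide bounds checks
	for i := range a {
		a[i] = b[i]
	}
	fmt.Println(string(dst)) // "abccccccc"
}
```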
diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go
index 74a36689e87..ea58ced8824 100644
--- a/vendor/github.com/klauspost/compress/snappy/snappy.go
+++ b/vendor/github.com/klauspost/compress/snappy/snappy.go
@@ -94,5 +94,5 @@ var crcTable = crc32.MakeTable(crc32.Castagnoli)
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
- return uint32(c>>15|c<<17) + 0xa282ead8
+ return c>>15 | c<<17 + 0xa282ead8
}
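
The dropped conversion above is a no-op: in Go, `|` and `+` share precedence and associate left, so `c>>15 | c<<17 + 0xa282ead8` still parses as `((c>>15)|(c<<17)) + 0xa282ead8`, the masked CRC defined by the snappy framing format. A quick standalone check (the sample input is arbitrary):

```Go
package main

import (
	"fmt"
	"hash/crc32"
)

var crcTable = crc32.MakeTable(crc32.Castagnoli)

func main() {
	c := crc32.Update(0, crcTable, []byte("example"))
	masked := c>>15 | c<<17 + 0xa282ead8             // as written in the patch
	explicit := ((c >> 15) | (c << 17)) + 0xa282ead8 // fully parenthesized
	fmt.Println(masked == explicit)                  // true
}
```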
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index 670f98af44d..7680bfe1dd6 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -5,11 +5,8 @@ It offers a very wide range of compression / speed trade-off, while being backed
A high performance compression algorithm is implemented. For now focused on speed.
This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
-Note that custom dictionaries are not supported yet, so if your code relies on that,
-you cannot use the package as-is.
This package is pure Go and without use of "unsafe".
-If a significant speedup can be achieved using "unsafe", it may be added as an option later.
The `zstd` package is provided as open source software using a Go standard license.
@@ -26,17 +23,21 @@ Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd
### Status:
-BETA - there may still be subtle bugs, but a wide variety of content has been tested.
-There may still be implementation specific stuff in regards to error handling that could lead to edge cases.
+STABLE - there may always be subtle bugs, but a wide variety of content has been tested and the library is actively
+used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates.
-For now, a high speed (fastest) and medium-fast (default) compressor has been implemented.
+There may still be specific combinations of data types/size/settings that could lead to edge cases,
+so as always, testing is recommended.
-The "Fastest" compression ratio is roughly equivalent to zstd level 1.
-The "Default" compression ration is roughly equivalent to zstd level 3 (default).
+For now, a high speed (fastest) and medium-fast (default) compressor has been implemented.
-In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. The compression ratio compared to stdlib is around level 3, but usually 3x as fast.
+* The "Fastest" compression ratio is roughly equivalent to zstd level 1.
+* The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
+* The "Better" compression ratio is roughly equivalent to zstd level 7.
+* The "Best" compression ratio is roughly equivalent to zstd level 11.
-Compared to cgo zstd, the speed is around level 3 (default), but compression slightly worse, between level 1&2.
+In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode.
+The compression ratio compared to stdlib is around level 3, but usually 3x as fast.
### Usage
@@ -52,11 +53,11 @@ To create a writer with default options, do like this:
```Go
// Compress input to output.
func Compress(in io.Reader, out io.Writer) error {
- w, err := NewWriter(output)
+ enc, err := zstd.NewWriter(out)
if err != nil {
return err
}
- _, err := io.Copy(w, input)
+ _, err = io.Copy(enc, in)
if err != nil {
enc.Close()
return err
@@ -137,120 +138,118 @@ Using the Encoder for both a stream and individual blocks concurrently is safe.
I have collected some speed examples to compare speed and compression against other compressors.
* `file` is the input file.
-* `out` is the compressor used. `zskp` is this package. `gzstd` is gzip standard library. `zstd` is the Datadog cgo library.
-* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default".
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip from the standard library and from this package, respectively.
+* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default", 3 is "better", 4 is "best".
* `insize`/`outsize` is the input/output size.
* `millis` is the number of milliseconds used for compression.
* `mb/s` is megabytes (2^20 bytes) per second.
```
-The test data for the Large Text Compression Benchmark is the first
-10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
-http://mattmahoney.net/dc/textdata.html
-
-file out level insize outsize millis mb/s
-enwik9 zskp 1 1000000000 343833033 5840 163.30
-enwik9 zskp 2 1000000000 317822183 8449 112.87
-enwik9 gzstd 1 1000000000 382578136 13627 69.98
-enwik9 gzstd 3 1000000000 349139651 22344 42.68
-enwik9 zstd 1 1000000000 357416379 4838 197.12
-enwik9 zstd 3 1000000000 313734522 7556 126.21
-
-GOB stream of binary data. Highly compressible.
-https://files.klauspost.com/compress/gob-stream.7z
-
-file out level insize outsize millis mb/s
-gob-stream zskp 1 1911399616 234981983 5100 357.42
-gob-stream zskp 2 1911399616 208674003 6698 272.15
-gob-stream gzstd 1 1911399616 357382641 14727 123.78
-gob-stream gzstd 3 1911399616 327835097 17005 107.19
-gob-stream zstd 1 1911399616 250787165 4075 447.22
-gob-stream zstd 3 1911399616 208191888 5511 330.77
-
-Highly compressible JSON file. Similar to logs in a lot of ways.
-https://files.klauspost.com/compress/adresser.001.gz
-
-file out level insize outsize millis mb/s
-adresser.001 zskp 1 1073741824 18510122 1477 692.83
-adresser.001 zskp 2 1073741824 19831697 1705 600.59
-adresser.001 gzstd 1 1073741824 47755503 3079 332.47
-adresser.001 gzstd 3 1073741824 40052381 3051 335.63
-adresser.001 zstd 1 1073741824 16135896 994 1030.18
-adresser.001 zstd 3 1073741824 17794465 905 1131.49
-
-VM Image, Linux mint with a few installed applications:
-https://files.klauspost.com/compress/rawstudio-mint14.7z
-
-file out level insize outsize millis mb/s
-rawstudio-mint14.tar zskp 1 8558382592 3648168838 33398 244.38
-rawstudio-mint14.tar zskp 2 8558382592 3376721436 50962 160.16
-rawstudio-mint14.tar gzstd 1 8558382592 3926257486 84712 96.35
-rawstudio-mint14.tar gzstd 3 8558382592 3740711978 176344 46.28
-rawstudio-mint14.tar zstd 1 8558382592 3607859742 27903 292.51
-rawstudio-mint14.tar zstd 3 8558382592 3341710879 46700 174.77
-
-
-The test data is designed to test archivers in realistic backup scenarios.
-http://mattmahoney.net/dc/10gb.html
-
-file out level insize outsize millis mb/s
-10gb.tar zskp 1 10065157632 4883149814 45715 209.97
-10gb.tar zskp 2 10065157632 4638110010 60970 157.44
-10gb.tar gzstd 1 10065157632 5198296126 97769 98.18
-10gb.tar gzstd 3 10065157632 4932665487 313427 30.63
-10gb.tar zstd 1 10065157632 4940796535 40391 237.65
-10gb.tar zstd 3 10065157632 4638618579 52911 181.42
-
Silesia Corpus:
http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
-file out level insize outsize millis mb/s
-silesia.tar zskp 1 211947520 73025800 1108 182.26
-silesia.tar zskp 2 211947520 67674684 1599 126.41
-silesia.tar gzstd 1 211947520 80007735 2515 80.37
-silesia.tar gzstd 3 211947520 73133380 4259 47.45
-silesia.tar zstd 1 211947520 73513991 933 216.64
-silesia.tar zstd 3 211947520 66793301 1377 146.79
-```
-
-### Converters
+This package:
+file out level insize outsize millis mb/s
+silesia.tar zskp 1 211947520 73101992 643 313.87
+silesia.tar zskp 2 211947520 67504318 969 208.38
+silesia.tar zskp 3 211947520 65177448 1899 106.44
+silesia.tar zskp 4 211947520 61381950 8115 24.91
-As part of the development process a *Snappy* -> *Zstandard* converter was also built.
+cgo zstd:
+silesia.tar zstd 1 211947520 73605392 543 371.56
+silesia.tar zstd 3 211947520 66793289 864 233.68
+silesia.tar zstd 6 211947520 62916450 1913 105.66
+silesia.tar zstd 9 211947520 60212393 5063 39.92
-This can convert a *framed* [Snappy Stream](https://godoc.org/github.com/golang/snappy#Writer) to a zstd stream. Note that a single block is not framed.
+gzip, stdlib/this package:
+silesia.tar gzstd 1 211947520 80007735 1654 122.21
+silesia.tar gzkp 1 211947520 80369488 1168 173.06
-Conversion is done by converting the stream directly from Snappy without intermediate full decoding.
-Therefore the compression ratio is much less than what can be done by a full decompression
-and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without
-any errors being generated.
-No CRC value is being generated and not all CRC values of the Snappy stream are checked.
-However, it provides really fast re-compression of Snappy streams.
+GOB stream of binary data. Highly compressible.
+https://files.klauspost.com/compress/gob-stream.7z
+file out level insize outsize millis mb/s
+gob-stream zskp 1 1911399616 235022249 3088 590.30
+gob-stream zskp 2 1911399616 205669791 3786 481.34
+gob-stream zskp 3 1911399616 185792019 9324 195.48
+gob-stream zskp 4 1911399616 171537212 32113 56.76
+gob-stream zstd 1 1911399616 249810424 2637 691.26
+gob-stream zstd 3 1911399616 208192146 3490 522.31
+gob-stream zstd 6 1911399616 193632038 6687 272.56
+gob-stream zstd 9 1911399616 177620386 16175 112.70
+gob-stream gzstd 1 1911399616 357382641 10251 177.82
+gob-stream gzkp 1 1911399616 362156523 5695 320.08
-```
-BenchmarkSnappy_ConvertSilesia-8 1 1156001600 ns/op 183.35 MB/s
-Snappy len 103008711 -> zstd len 82687318
+The test data for the Large Text Compression Benchmark is the first
+10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
+http://mattmahoney.net/dc/textdata.html
-BenchmarkSnappy_Enwik9-8 1 6472998400 ns/op 154.49 MB/s
-Snappy len 508028601 -> zstd len 390921079
-```
+file out level insize outsize millis mb/s
+enwik9 zskp 1 1000000000 343848582 3609 264.18
+enwik9 zskp 2 1000000000 317276632 5746 165.97
+enwik9 zskp 3 1000000000 294540704 11725 81.34
+enwik9 zskp 4 1000000000 276609671 44029 21.66
+enwik9 zstd 1 1000000000 358072021 3110 306.65
+enwik9 zstd 3 1000000000 313734672 4784 199.35
+enwik9 zstd 6 1000000000 295138875 10290 92.68
+enwik9 zstd 9 1000000000 278348700 28549 33.40
+enwik9 gzstd 1 1000000000 382578136 9604 99.30
+enwik9 gzkp 1 1000000000 383825945 6544 145.73
+
+Highly compressible JSON file.
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst
+
+file out level insize outsize millis mb/s
+github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
+github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
+github-june-2days-2019.json zskp 3 6273951764 537511906 29252 204.54
+github-june-2days-2019.json zskp 4 6273951764 512796117 97791 61.18
+github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
+github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
+github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
+github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
+github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
+github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03
+VM Image, Linux mint with a few installed applications:
+https://files.klauspost.com/compress/rawstudio-mint14.7z
-```Go
- s := zstd.SnappyConverter{}
- n, err = s.Convert(input, output)
- if err != nil {
- fmt.Println("Re-compressed stream to", n, "bytes")
- }
+file out level insize outsize millis mb/s
+rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
+rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
+rawstudio-mint14.tar zskp 3 8558382592 3224594213 71751 113.75
+rawstudio-mint14.tar zskp 4 8558382592 3027332295 486243 16.79
+rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
+rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
+rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
+rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
+rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
+rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49
+
+CSV data:
+https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
+
+file out level insize outsize millis mb/s
+nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
+nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
+nyc-taxi-data-10M.csv zskp 3 3325605752 538490114 19880 159.53
+nyc-taxi-data-10M.csv zskp 4 3325605752 495986829 89368 35.49
+nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
+nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
+nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
+nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
+nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53
```
-The converter `s` can be reused to avoid allocations, even after errors.
-
-
## Decompressor
-STATUS: Release Candidate - there may still be subtle bugs, but a wide variety of content has been tested.
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+with fuzzing kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
+or run it past its limits with ANY input provided.
### Usage
@@ -263,14 +262,14 @@ For streaming use a simple setup could look like this:
import "github.com/klauspost/compress/zstd"
func Decompress(in io.Reader, out io.Writer) error {
- d, err := zstd.NewReader(input)
+ d, err := zstd.NewReader(in)
if err != nil {
return err
}
defer d.Close()
// Copy content...
- _, err := io.Copy(out, d)
+ _, err = io.Copy(out, d)
return err
}
```
@@ -299,6 +298,35 @@ The decoder can be used for *concurrent* decompression of multiple buffers.
It will only allow a certain number of concurrent operations to run.
To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
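+
+A quick sketch (the value 4 is just an example):
+
+```Go
+d, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(4))
+```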
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+Dictionaries will be used automatically for the data that specifies them.
+A re-used Decoder will still retain the registered dictionaries.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used
+and it will likely be used even if it doesn't improve compression.
+
+The same dictionary must then be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used, the output may be slightly larger than with no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time. Just be sure to test performance when implementing.
+
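+A minimal round-trip sketch (assumes `dict` holds the output of `zstd --train`):
+
+```Go
+// Use the same dictionary for both compression and decompression.
+func roundTrip(dict, compressed []byte) ([]byte, error) {
+	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dict))
+	if err != nil {
+		return nil, err
+	}
+	defer dec.Close()
+	plain, err := dec.DecodeAll(compressed, nil)
+	if err != nil {
+		return nil, err
+	}
+	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
+	if err != nil {
+		return nil, err
+	}
+	defer enc.Close()
+	return enc.EncodeAll(plain, nil), nil
+}
+```
+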
### Allocation-less operation
The decoder has been designed to operate without allocations after a warmup.
@@ -340,36 +368,42 @@ These are some examples of performance compared to [datadog cgo library](https:/
The first two are streaming decodes and the last are smaller inputs.
```
-BenchmarkDecoderSilesia-8 20 642550210 ns/op 329.85 MB/s 3101 B/op 8 allocs/op
-BenchmarkDecoderSilesiaCgo-8 100 384930000 ns/op 550.61 MB/s 451878 B/op 9713 allocs/op
-
-BenchmarkDecoderEnwik9-2 10 3146000080 ns/op 317.86 MB/s 2649 B/op 9 allocs/op
-BenchmarkDecoderEnwik9Cgo-2 20 1905900000 ns/op 524.69 MB/s 1125120 B/op 45785 allocs/op
-
-BenchmarkDecoder_DecodeAll/z000000.zst-8 200 7049994 ns/op 138.26 MB/s 40 B/op 2 allocs/op
-BenchmarkDecoder_DecodeAll/z000001.zst-8 100000 19560 ns/op 97.49 MB/s 40 B/op 2 allocs/op
-BenchmarkDecoder_DecodeAll/z000002.zst-8 5000 297599 ns/op 236.99 MB/s 40 B/op 2 allocs/op
-BenchmarkDecoder_DecodeAll/z000003.zst-8 2000 725502 ns/op 141.17 MB/s 40 B/op 2 allocs/op
-BenchmarkDecoder_DecodeAll/z000004.zst-8 200000 9314 ns/op 54.54 MB/s 40 B/op 2 allocs/op
-BenchmarkDecoder_DecodeAll/z000005.zst-8 10000 137500 ns/op 104.72 MB/s 40 B/op 2 allocs/op
-BenchmarkDecoder_DecodeAll/z000006.zst-8 500 2316009 ns/op 206.06 MB/s 40 B/op 2 allocs/op
-BenchmarkDecoder_DecodeAll/z000007.zst-8 20000 64499 ns/op 344.90 MB/s 40 B/op 2 allocs/op
-BenchmarkDecoder_DecodeAll/z000008.zst-8 50000 24900 ns/op 219.56 MB/s 40 B/op 2 allocs/op
-BenchmarkDecoder_DecodeAll/z000009.zst-8 1000 2348999 ns/op 154.01 MB/s 40 B/op 2 allocs/op
-
-BenchmarkDecoder_DecodeAllCgo/z000000.zst-8 500 4268005 ns/op 228.38 MB/s 1228849 B/op 3 allocs/op
-BenchmarkDecoder_DecodeAllCgo/z000001.zst-8 100000 15250 ns/op 125.05 MB/s 2096 B/op 3 allocs/op
-BenchmarkDecoder_DecodeAllCgo/z000002.zst-8 10000 147399 ns/op 478.49 MB/s 73776 B/op 3 allocs/op
-BenchmarkDecoder_DecodeAllCgo/z000003.zst-8 5000 320798 ns/op 319.27 MB/s 139312 B/op 3 allocs/op
-BenchmarkDecoder_DecodeAllCgo/z000004.zst-8 200000 10004 ns/op 50.77 MB/s 560 B/op 3 allocs/op
-BenchmarkDecoder_DecodeAllCgo/z000005.zst-8 20000 73599 ns/op 195.64 MB/s 19120 B/op 3 allocs/op
-BenchmarkDecoder_DecodeAllCgo/z000006.zst-8 1000 1119003 ns/op 426.48 MB/s 557104 B/op 3 allocs/op
-BenchmarkDecoder_DecodeAllCgo/z000007.zst-8 20000 103450 ns/op 215.04 MB/s 71296 B/op 9 allocs/op
-BenchmarkDecoder_DecodeAllCgo/z000008.zst-8 100000 20130 ns/op 271.58 MB/s 6192 B/op 3 allocs/op
-BenchmarkDecoder_DecodeAllCgo/z000009.zst-8 2000 1123500 ns/op 322.00 MB/s 368688 B/op 3 allocs/op
+BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op
+BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op
+
+BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op
+BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op
+
+Concurrent performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op
+
+BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op
```
-This reflects the performance around May 2019, but this may be out of date.
+This reflects the performance around May 2020, but this may be out of date.
# Contributions
@@ -380,4 +414,4 @@ For sending files for reproducing errors use a service like [goobox](https://goo
For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
-This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
\ No newline at end of file
+This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
index 15d79d439fa..85445853715 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -5,6 +5,7 @@
package zstd
import (
+ "encoding/binary"
"errors"
"io"
"math/bits"
@@ -34,8 +35,12 @@ func (b *bitReader) init(in []byte) error {
}
b.bitsRead = 64
b.value = 0
- b.fill()
- b.fill()
+ if len(in) >= 8 {
+ b.fillFastStart()
+ } else {
+ b.fill()
+ b.fill()
+ }
b.bitsRead += 8 - uint8(highBits(uint32(v)))
return nil
}
@@ -63,21 +68,31 @@ func (b *bitReader) fillFast() {
if b.bitsRead < 32 {
return
}
- // Do single re-slice to avoid bounds checks.
- v := b.in[b.off-4 : b.off]
+ // 2 bounds checks.
+ v := b.in[b.off-4:]
+ v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.bitsRead -= 32
b.off -= 4
}
+// fillFastStart() assumes the bitreader is empty and there are at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+ // Do single re-slice to avoid bounds checks.
+ b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.bitsRead = 0
+ b.off -= 8
+}
+
// fill() will make sure at least 32 bits are available.
func (b *bitReader) fill() {
if b.bitsRead < 32 {
return
}
if b.off >= 4 {
- v := b.in[b.off-4 : b.off]
+ v := b.in[b.off-4:]
+ v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.bitsRead -= 32
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index aca1cb85d10..b51d922bda6 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -11,6 +11,7 @@ import (
"sync"
"github.com/klauspost/compress/huff0"
+ "github.com/klauspost/compress/zstd/internal/xxhash"
)
type blockType uint8
@@ -63,7 +64,8 @@ var (
type blockDec struct {
// Raw source data of the block.
- data []byte
+ data []byte
+ dataStorage []byte
// Destination of the decoded data.
dst []byte
@@ -73,20 +75,29 @@ type blockDec struct {
// Window size of the block.
WindowSize uint64
- Type blockType
- RLESize uint32
- // Is this the last block of a frame?
- Last bool
-
- // Use less memory
- lowMem bool
history chan *history
input chan struct{}
result chan decodeOutput
sequenceBuf []seq
- tmp [4]byte
err error
+ decWG sync.WaitGroup
+
+ // Frame to use for single-threaded decoding.
+ // Should not be used by the decoder itself since parent may be another frame.
+ localFrame *frameDec
+
+ // Block is RLE, this is the size.
+ RLESize uint32
+ tmp [4]byte
+
+ Type blockType
+
+ // Is this the last block of a frame?
+ Last bool
+
+ // Use less memory
+ lowMem bool
}
func (b *blockDec) String() string {
@@ -103,6 +114,7 @@ func newBlockDec(lowMem bool) *blockDec {
input: make(chan struct{}, 1),
history: make(chan *history, 1),
}
+ b.decWG.Add(1)
go b.startDecoder()
return &b
}
@@ -123,43 +135,56 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
b.Type = blockType((bh >> 1) & 3)
// find size.
cSize := int(bh >> 3)
+ maxSize := maxBlockSize
switch b.Type {
case blockTypeReserved:
return ErrReservedBlockType
case blockTypeRLE:
b.RLESize = uint32(cSize)
+ if b.lowMem {
+ maxSize = cSize
+ }
cSize = 1
case blockTypeCompressed:
if debug {
println("Data size on stream:", cSize)
}
b.RLESize = 0
+ maxSize = maxCompressedBlockSize
+ if windowSize < maxCompressedBlockSize && b.lowMem {
+ maxSize = int(windowSize)
+ }
if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
if debug {
printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
}
return ErrCompressedSizeTooBig
}
- default:
+ case blockTypeRaw:
b.RLESize = 0
+ // We do not need a destination for raw blocks.
+ maxSize = -1
+ default:
+ panic("Invalid block type")
}
// Read block data.
- if cap(b.data) < cSize {
+ if cap(b.dataStorage) < cSize {
if b.lowMem {
- b.data = make([]byte, 0, cSize)
+ b.dataStorage = make([]byte, 0, cSize)
} else {
- b.data = make([]byte, 0, maxBlockSize)
+ b.dataStorage = make([]byte, 0, maxBlockSize)
}
}
- if cap(b.dst) <= maxBlockSize {
- b.dst = make([]byte, 0, maxBlockSize+1)
+ if cap(b.dst) <= maxSize {
+ b.dst = make([]byte, 0, maxSize+1)
}
var err error
- b.data, err = br.readBig(cSize, b.data[:0])
+ b.data, err = br.readBig(cSize, b.dataStorage)
if err != nil {
if debug {
- println("Reading block:", err)
+ println("Reading block:", err, "(", cSize, ")", len(b.data))
+ printf("%T", br)
}
return err
}
@@ -180,11 +205,13 @@ func (b *blockDec) Close() {
close(b.input)
close(b.history)
close(b.result)
+ b.decWG.Wait()
}
// decodeAsync will prepare decoding the block when it receives input.
// This will separate output and history.
func (b *blockDec) startDecoder() {
+ defer b.decWG.Done()
for range b.input {
//println("blockDec: Got block input")
switch b.Type {
@@ -274,7 +301,7 @@ func (b *blockDec) decodeBuf(hist *history) error {
hist.b = nil
err := b.decodeCompressed(hist)
if debug {
- println("Decompressed to total", len(b.dst), "bytes, error:", err)
+ println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
}
hist.b = b.dst
b.dst = saved
@@ -367,7 +394,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
}
if debug {
- println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize", litCompSize)
+ println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
}
var literals []byte
var huff *huff0.Scratch
@@ -425,7 +452,6 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
literals = in[:litCompSize]
in = in[litCompSize:]
-
huff = huffDecoderPool.Get().(*huff0.Scratch)
var err error
// Ensure we have space to store it.
@@ -439,25 +465,22 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if huff == nil {
huff = &huff0.Scratch{}
}
- huff.Out = b.literalBuf[:0]
huff, literals, err = huff0.ReadTable(literals, huff)
if err != nil {
println("reading huffman table:", err)
return err
}
// Use our out buffer.
- huff.Out = b.literalBuf[:0]
if fourStreams {
- literals, err = huff.Decompress4X(literals, litRegenSize)
+ literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
} else {
- literals, err = huff.Decompress1X(literals)
+ literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
}
if err != nil {
println("decoding compressed literals:", err)
return err
}
// Make sure we don't leak our literals buffer
- huff.Out = nil
if len(literals) != litRegenSize {
return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
}
@@ -590,7 +613,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
// Decode treeless literal block.
if litType == literalsBlockTreeless {
// TODO: We could send the history early WITHOUT the stream history.
- // This would allow decoding treeless literials before the byte history is available.
+ // This would allow decoding treeless literals before the byte history is available.
// Silesia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
// So not much obvious gain here.
@@ -608,14 +631,12 @@ func (b *blockDec) decodeCompressed(hist *history) error {
var err error
// Use our out buffer.
huff = hist.huffTree
- huff.Out = b.literalBuf[:0]
if fourStreams {
- literals, err = huff.Decompress4X(literals, litRegenSize)
+ literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
} else {
- literals, err = huff.Decompress1X(literals)
+ literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
}
// Make sure we don't leak our literals buffer
- huff.Out = nil
if err != nil {
println("decompressing literals:", err)
return err
@@ -625,16 +646,17 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
} else {
if hist.huffTree != nil && huff != nil {
- huffDecoderPool.Put(hist.huffTree)
+ if hist.dict == nil || hist.dict.litEnc != hist.huffTree {
+ huffDecoderPool.Put(hist.huffTree)
+ }
hist.huffTree = nil
}
}
if huff != nil {
- huff.Out = nil
hist.huffTree = huff
}
if debug {
- println("Final literals:", len(literals), "and", nSeqs, "sequences.")
+ println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
}
if nSeqs == 0 {
@@ -663,12 +685,21 @@ func (b *blockDec) decodeCompressed(hist *history) error {
// If only recent offsets were not transferred, this would be an obvious win.
// Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.
+ hbytes := hist.b
+ if len(hbytes) > hist.windowSize {
+ hbytes = hbytes[len(hbytes)-hist.windowSize:]
+ // We do not need history any more.
+ if hist.dict != nil {
+ hist.dict.content = nil
+ }
+ }
+
if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
println("initializing sequences:", err)
return err
}
- err = seqs.decode(nSeqs, br, hist.b)
+ err = seqs.decode(nSeqs, br, hbytes)
if err != nil {
return err
}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index cba24c76d14..9647c64e53a 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -14,35 +14,52 @@ import (
)
type blockEnc struct {
- size int
- literals []byte
- sequences []seq
- coders seqCoders
- litEnc *huff0.Scratch
- wr bitWriter
-
- extraLits int
- last bool
-
+ size int
+ literals []byte
+ sequences []seq
+ coders seqCoders
+ litEnc *huff0.Scratch
+ dictLitEnc *huff0.Scratch
+ wr bitWriter
+
+ extraLits int
output []byte
recentOffsets [3]uint32
prevRecentOffsets [3]uint32
+
+ last bool
+ lowMem bool
}
// init should be used once the block has been created.
// If called more than once, the effect is the same as calling reset.
func (b *blockEnc) init() {
- if cap(b.literals) < maxCompressedLiteralSize {
- b.literals = make([]byte, 0, maxCompressedLiteralSize)
- }
- const defSeqs = 200
- b.literals = b.literals[:0]
- if cap(b.sequences) < defSeqs {
- b.sequences = make([]seq, 0, defSeqs)
- }
- if cap(b.output) < maxCompressedBlockSize {
- b.output = make([]byte, 0, maxCompressedBlockSize)
+ if b.lowMem {
+ // 1K literals
+ if cap(b.literals) < 1<<10 {
+ b.literals = make([]byte, 0, 1<<10)
+ }
+ const defSeqs = 20
+ if cap(b.sequences) < defSeqs {
+ b.sequences = make([]seq, 0, defSeqs)
+ }
+ // 1K
+ if cap(b.output) < 1<<10 {
+ b.output = make([]byte, 0, 1<<10)
+ }
+ } else {
+ if cap(b.literals) < maxCompressedBlockSize {
+ b.literals = make([]byte, 0, maxCompressedBlockSize)
+ }
+ const defSeqs = 200
+ if cap(b.sequences) < defSeqs {
+ b.sequences = make([]seq, 0, defSeqs)
+ }
+ if cap(b.output) < maxCompressedBlockSize {
+ b.output = make([]byte, 0, maxCompressedBlockSize)
+ }
}
+
if b.coders.mlEnc == nil {
b.coders.mlEnc = &fseEncoder{}
b.coders.mlPrev = &fseEncoder{}
@@ -51,7 +68,7 @@ func (b *blockEnc) init() {
b.coders.llEnc = &fseEncoder{}
b.coders.llPrev = &fseEncoder{}
}
- b.litEnc = &huff0.Scratch{}
+ b.litEnc = &huff0.Scratch{WantLogLess: 4}
b.reset(nil)
}
@@ -75,6 +92,7 @@ func (b *blockEnc) reset(prev *blockEnc) {
if prev != nil {
b.recentOffsets = prev.prevRecentOffsets
}
+ b.dictLitEnc = nil
}
// reset will reset the block for a new encode, but in the same stream,
@@ -155,14 +173,17 @@ func (h *literalsHeader) setSize(regenLen int) {
}
// setSizes will set the size of a compressed literals section and the input length.
-func (h *literalsHeader) setSizes(compLen, inLen int) {
+func (h *literalsHeader) setSizes(compLen, inLen int, single bool) {
compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen))
// Only retain 2 bits
const mask = 3
lh := uint64(*h & mask)
switch {
case compBits <= 10 && inBits <= 10:
- lh |= (1 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)
+ if !single {
+ lh |= 1 << 2
+ }
+ lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)
if debug {
const mmask = (1 << 24) - 1
n := (lh >> 4) & mmask
@@ -175,8 +196,14 @@ func (h *literalsHeader) setSizes(compLen, inLen int) {
}
case compBits <= 14 && inBits <= 14:
lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60)
+ if single {
+ panic("single stream used with more than 10 bits length.")
+ }
case compBits <= 18 && inBits <= 18:
lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60)
+ if single {
+ panic("single stream used with more than 10 bits length.")
+ }
default:
panic("internal error: block too big")
}
@@ -286,49 +313,78 @@ func (b *blockEnc) encodeRaw(a []byte) {
b.output = bh.appendTo(b.output[:0])
b.output = append(b.output, a...)
if debug {
- println("Adding RAW block, length", len(a))
+ println("Adding RAW block, length", len(a), "last:", b.last)
+ }
+}
+
+// encodeRawTo can be used to append a raw representation of the supplied bytes to dst.
+func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
+ var bh blockHeader
+ bh.setLast(b.last)
+ bh.setSize(uint32(len(src)))
+ bh.setType(blockTypeRaw)
+ dst = bh.appendTo(dst)
+ dst = append(dst, src...)
+ if debug {
+ println("Adding RAW block, length", len(src), "last:", b.last)
}
+ return dst
}
// encodeLits can be used if the block is only litLen.
-func (b *blockEnc) encodeLits() error {
+func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
var bh blockHeader
bh.setLast(b.last)
- bh.setSize(uint32(len(b.literals)))
+ bh.setSize(uint32(len(lits)))
// Don't compress extremely small blocks
- if len(b.literals) < 32 {
+ if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw {
if debug {
- println("Adding RAW block, length", len(b.literals))
+ println("Adding RAW block, length", len(lits), "last:", b.last)
}
bh.setType(blockTypeRaw)
b.output = bh.appendTo(b.output)
- b.output = append(b.output, b.literals...)
+ b.output = append(b.output, lits...)
return nil
}
- // TODO: Switch to 1X when less than x bytes.
- out, reUsed, err := huff0.Compress4X(b.literals, b.litEnc)
- // Bail out of compression is too little.
- if len(out) > (len(b.literals) - len(b.literals)>>4) {
+ var (
+ out []byte
+ reUsed, single bool
+ err error
+ )
+ if b.dictLitEnc != nil {
+ b.litEnc.TransferCTable(b.dictLitEnc)
+ b.litEnc.Reuse = huff0.ReusePolicyAllow
+ b.dictLitEnc = nil
+ }
+ if len(lits) >= 1024 {
+ // Use 4 Streams.
+ out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
+ } else if len(lits) > 32 {
+ // Use 1 stream
+ single = true
+ out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
+ } else {
err = huff0.ErrIncompressible
}
+
switch err {
case huff0.ErrIncompressible:
if debug {
- println("Adding RAW block, length", len(b.literals))
+ println("Adding RAW block, length", len(lits), "last:", b.last)
}
bh.setType(blockTypeRaw)
b.output = bh.appendTo(b.output)
- b.output = append(b.output, b.literals...)
+ b.output = append(b.output, lits...)
return nil
case huff0.ErrUseRLE:
if debug {
- println("Adding RLE block, length", len(b.literals))
+ println("Adding RLE block, length", len(lits))
}
bh.setType(blockTypeRLE)
b.output = bh.appendTo(b.output)
- b.output = append(b.output, b.literals[0])
+ b.output = append(b.output, lits[0])
return nil
default:
return err
@@ -351,7 +407,7 @@ func (b *blockEnc) encodeLits() error {
lh.setType(literalsBlockCompressed)
}
// Set sizes
- lh.setSizes(len(out), len(b.literals))
+ lh.setSizes(len(out), len(lits), single)
bh.setSize(uint32(len(out) + lh.size() + 1))
// Write block headers.
@@ -364,36 +420,97 @@ func (b *blockEnc) encodeLits() error {
return nil
}
-// encode will encode the block and put the output in b.output.
-func (b *blockEnc) encode() error {
+// fuzzFseEncoder can be used to fuzz the FSE encoder.
+func fuzzFseEncoder(data []byte) int {
+ if len(data) > maxSequences || len(data) < 2 {
+ return 0
+ }
+ enc := fseEncoder{}
+ hist := enc.Histogram()[:256]
+ maxSym := uint8(0)
+ for i, v := range data {
+ v = v & 63
+ data[i] = v
+ hist[v]++
+ if v > maxSym {
+ maxSym = v
+ }
+ }
+ if maxSym == 0 {
+ // All 0
+ return 0
+ }
+ maxCount := func(a []uint32) int {
+ var max uint32
+ for _, v := range a {
+ if v > max {
+ max = v
+ }
+ }
+ return int(max)
+ }
+ cnt := maxCount(hist[:maxSym])
+ if cnt == len(data) {
+ // RLE
+ return 0
+ }
+ enc.HistogramFinished(maxSym, cnt)
+ err := enc.normalizeCount(len(data))
+ if err != nil {
+ return 0
+ }
+ _, err = enc.writeCount(nil)
+ if err != nil {
+ panic(err)
+ }
+ return 1
+}
+
+// encode will encode the block and append the output in b.output.
+// Previous offset codes must be pushed if more blocks are expected.
+func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
if len(b.sequences) == 0 {
- return b.encodeLits()
+ return b.encodeLits(b.literals, rawAllLits)
}
- // We want some difference
- if len(b.literals) > (b.size - (b.size >> 5)) {
- return errIncompressible
+ // We want some difference to at least account for the headers.
+ saved := b.size - len(b.literals) - (b.size >> 5)
+ if saved < 16 {
+ if org == nil {
+ return errIncompressible
+ }
+ b.popOffsets()
+ return b.encodeLits(org, rawAllLits)
}
var bh blockHeader
var lh literalsHeader
bh.setLast(b.last)
bh.setType(blockTypeCompressed)
+ // Store offset of the block header. Needed when we know the size.
+ bhOffset := len(b.output)
b.output = bh.appendTo(b.output)
var (
- out []byte
- reUsed bool
- err error
+ out []byte
+ reUsed, single bool
+ err error
)
- if len(b.literals) > 32 {
- // TODO: Switch to 1X on small blocks.
+ if b.dictLitEnc != nil {
+ b.litEnc.TransferCTable(b.dictLitEnc)
+ b.litEnc.Reuse = huff0.ReusePolicyAllow
+ b.dictLitEnc = nil
+ }
+ if len(b.literals) >= 1024 && !raw {
+ // Use 4 Streams.
out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
- if len(out) > len(b.literals)-len(b.literals)>>4 {
- err = huff0.ErrIncompressible
- }
+ } else if len(b.literals) > 32 && !raw {
+ // Use 1 stream
+ single = true
+ out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
} else {
err = huff0.ErrIncompressible
}
+
switch err {
case huff0.ErrIncompressible:
lh.setType(literalsBlockRaw)
@@ -435,7 +552,7 @@ func (b *blockEnc) encode() error {
}
}
}
- lh.setSizes(len(out), len(b.literals))
+ lh.setSizes(len(out), len(b.literals), single)
if debug {
printf("Compressed %d literals to %d bytes", len(b.literals), len(out))
println("Adding literal header:", lh)
@@ -661,23 +778,23 @@ func (b *blockEnc) encode() error {
}
b.output = wr.out
- if len(b.output)-3 >= b.size {
+ if len(b.output)-3-bhOffset >= b.size {
// Maybe even add a bigger margin.
b.litEnc.Reuse = huff0.ReusePolicyNone
return errIncompressible
}
// Size is output minus block header.
- bh.setSize(uint32(len(b.output)) - 3)
+ bh.setSize(uint32(len(b.output)-bhOffset) - 3)
if debug {
println("Rewriting block header", bh)
}
- _ = bh.appendTo(b.output[:0])
+ _ = bh.appendTo(b.output[bhOffset:bhOffset])
b.coders.setPrev(llEnc, mlEnc, ofEnc)
return nil
}
-var errIncompressible = errors.New("uncompressible")
+var errIncompressible = errors.New("incompressible")
func (b *blockEnc) genCodes() {
if len(b.sequences) == 0 {
@@ -723,7 +840,7 @@ func (b *blockEnc) genCodes() {
mlH[v]++
if v > mlMax {
mlMax = v
- if debug && mlMax > maxMatchLengthSymbol {
+ if debugAsserts && mlMax > maxMatchLengthSymbol {
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
}
}
@@ -738,13 +855,13 @@ func (b *blockEnc) genCodes() {
}
return int(max)
}
- if mlMax > maxMatchLengthSymbol {
+ if debugAsserts && mlMax > maxMatchLengthSymbol {
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
}
- if ofMax > maxOffsetBits {
+ if debugAsserts && ofMax > maxOffsetBits {
panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
}
- if llMax > maxLiteralLengthSymbol {
+ if debugAsserts && llMax > maxLiteralLengthSymbol {
panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
}
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index 4a846047684..658ef78380e 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -30,7 +30,7 @@ type byteBuffer interface {
type byteBuf []byte
func (b *byteBuf) readSmall(n int) []byte {
- if debug && n > 8 {
+ if debugAsserts && n > 8 {
panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
}
bb := *b
@@ -82,7 +82,7 @@ type readerWrapper struct {
}
func (r *readerWrapper) readSmall(n int) []byte {
- if debug && n > 8 {
+ if debugAsserts && n > 8 {
panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
}
n2, err := io.ReadFull(r.r, r.tmp[:n])
@@ -101,6 +101,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
dst = make([]byte, n)
}
n2, err := io.ReadFull(r.r, dst[:n])
+ if err == io.EOF && n > 0 {
+ err = io.ErrUnexpectedEOF
+ }
return dst[:n2], err
}
@@ -116,6 +119,9 @@ func (r *readerWrapper) readByte() (byte, error) {
}
func (r *readerWrapper) skipN(n int) error {
- _, err := io.CopyN(ioutil.Discard, r.r, int64(n))
+ n2, err := io.CopyN(ioutil.Discard, r.r, int64(n))
+ if n2 != int64(n) {
+ err = io.ErrUnexpectedEOF
+ }
return err
}
diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go
index dc4378b6401..2c4fca17fa1 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytereader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go
@@ -31,7 +31,8 @@ func (b *byteReader) overread() bool {
// Int32 returns a little endian int32 starting at current offset.
func (b byteReader) Int32() int32 {
- b2 := b.b[b.off : b.off+4 : b.off+4]
+ b2 := b.b[b.off:]
+ b2 = b2[:4]
v3 := int32(b2[3])
v2 := int32(b2[2])
v1 := int32(b2[1])
@@ -55,7 +56,20 @@ func (b byteReader) Uint32() uint32 {
}
return v
}
- b2 := b.b[b.off : b.off+4 : b.off+4]
+ b2 := b.b[b.off:]
+ b2 = b2[:4]
+ v3 := uint32(b2[3])
+ v2 := uint32(b2[2])
+ v1 := uint32(b2[1])
+ v0 := uint32(b2[0])
+ return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// Uint32NC returns a little endian uint32 starting at current offset.
+// The caller must ensure that there are at least 4 bytes left.
+func (b byteReader) Uint32NC() uint32 {
+ b2 := b.b[b.off:]
+ b2 = b2[:4]
v3 := uint32(b2[3])
v2 := uint32(b2[2])
v1 := uint32(b2[1])
diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
new file mode 100644
index 00000000000..87896c5eaa7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -0,0 +1,202 @@
+// Copyright 2020+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "bytes"
+ "errors"
+ "io"
+)
+
+// HeaderMaxSize is the maximum size of a Frame and Block Header.
+// If fewer bytes are sent to Header.Decode it *may* still contain enough information.
+const HeaderMaxSize = 14 + 3
+
+// Header contains information about the first frame and block within that.
+type Header struct {
+ // WindowSize is the window of data to keep while decoding.
+ // Will only be set if HasFCS is false.
+ WindowSize uint64
+
+ // Frame content size.
+ // Expected size of the entire frame.
+ FrameContentSize uint64
+
+ // Dictionary ID.
+ // If 0, no dictionary.
+ DictionaryID uint32
+
+ // First block information.
+ FirstBlock struct {
+ // OK will be set if first block could be decoded.
+ OK bool
+
+ // Is this the last block of a frame?
+ Last bool
+
+ // Is the data compressed?
+ // If true CompressedSize will be populated.
+ // Unfortunately DecompressedSize cannot be determined
+ // without decoding the blocks.
+ Compressed bool
+
+ // DecompressedSize is the expected decompressed size of the block.
+ // Will be 0 if it cannot be determined.
+ DecompressedSize int
+
+ // CompressedSize of the data in the block.
+ // Does not include the block header.
+ // Will be equal to DecompressedSize if not Compressed.
+ CompressedSize int
+ }
+
+ // Skippable will be true if the frame is meant to be skipped.
+ // No other information will be populated.
+ Skippable bool
+
+ // If set there is a checksum present for the block content.
+ HasCheckSum bool
+
+ // If this is true FrameContentSize will have a valid value
+ HasFCS bool
+
+ SingleSegment bool
+}
+
+// Decode the header from the beginning of the stream.
+// This will decode the frame header and the first block header if enough bytes are provided.
+// It is recommended to provide at least HeaderMaxSize bytes.
+// If the frame header cannot be read an error will be returned.
+// If there isn't enough input, io.ErrUnexpectedEOF is returned.
+// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
+func (h *Header) Decode(in []byte) error {
+ if len(in) < 4 {
+ return io.ErrUnexpectedEOF
+ }
+ b, in := in[:4], in[4:]
+ if !bytes.Equal(b, frameMagic) {
+ if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
+ return ErrMagicMismatch
+ }
+ *h = Header{Skippable: true}
+ return nil
+ }
+ if len(in) < 1 {
+ return io.ErrUnexpectedEOF
+ }
+
+ // Clear output
+ *h = Header{}
+ fhd, in := in[0], in[1:]
+ h.SingleSegment = fhd&(1<<5) != 0
+ h.HasCheckSum = fhd&(1<<2) != 0
+
+ if fhd&(1<<3) != 0 {
+ return errors.New("Reserved bit set on frame header")
+ }
+
+ // Read Window_Descriptor
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
+ if !h.SingleSegment {
+ if len(in) < 1 {
+ return io.ErrUnexpectedEOF
+ }
+ var wd byte
+ wd, in = in[0], in[1:]
+ windowLog := 10 + (wd >> 3)
+ windowBase := uint64(1) << windowLog
+ windowAdd := (windowBase / 8) * uint64(wd&0x7)
+ h.WindowSize = windowBase + windowAdd
+ }
+
+ // Read Dictionary_ID
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
+ if size := fhd & 3; size != 0 {
+ if size == 3 {
+ size = 4
+ }
+ if len(in) < int(size) {
+ return io.ErrUnexpectedEOF
+ }
+ b, in = in[:size], in[size:]
+ if b == nil {
+ return io.ErrUnexpectedEOF
+ }
+ switch size {
+ case 1:
+ h.DictionaryID = uint32(b[0])
+ case 2:
+ h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8)
+ case 4:
+ h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+ }
+ }
+
+ // Read Frame_Content_Size
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size
+ var fcsSize int
+ v := fhd >> 6
+ switch v {
+ case 0:
+ if h.SingleSegment {
+ fcsSize = 1
+ }
+ default:
+ fcsSize = 1 << v
+ }
+
+ if fcsSize > 0 {
+ h.HasFCS = true
+ if len(in) < fcsSize {
+ return io.ErrUnexpectedEOF
+ }
+ b, in = in[:fcsSize], in[fcsSize:]
+ if b == nil {
+ return io.ErrUnexpectedEOF
+ }
+ switch fcsSize {
+ case 1:
+ h.FrameContentSize = uint64(b[0])
+ case 2:
+ // When FCS_Field_Size is 2, the offset of 256 is added.
+ h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256
+ case 4:
+ h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
+ case 8:
+ d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+ d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
+ h.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
+ }
+ }
+
+ // Frame Header done, we will not fail from now on.
+ if len(in) < 3 {
+ return nil
+ }
+ tmp, in := in[:3], in[3:]
+ bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+ h.FirstBlock.Last = bh&1 != 0
+ blockType := blockType((bh >> 1) & 3)
+ // find size.
+ cSize := int(bh >> 3)
+ switch blockType {
+ case blockTypeReserved:
+ return nil
+ case blockTypeRLE:
+ h.FirstBlock.Compressed = true
+ h.FirstBlock.DecompressedSize = cSize
+ h.FirstBlock.CompressedSize = 1
+ case blockTypeCompressed:
+ h.FirstBlock.Compressed = true
+ h.FirstBlock.CompressedSize = cSize
+ case blockTypeRaw:
+ h.FirstBlock.DecompressedSize = cSize
+ h.FirstBlock.CompressedSize = cSize
+ default:
+ panic("Invalid block type")
+ }
+
+ h.FirstBlock.OK = true
+ return nil
+}
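
A self-contained usage sketch for the new header API (the sample payload is made up):

```Go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	frame := enc.EncodeAll([]byte("peek at my header"), nil)
	enc.Close()

	var h zstd.Header
	if err := h.Decode(frame); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("singleSegment=%v hasFCS=%v fcs=%d firstBlockOK=%v\n",
		h.SingleSegment, h.HasFCS, h.FrameContentSize, h.FirstBlock.OK)
}
```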
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index f06bff6f6f4..1d41c25d292 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -5,7 +5,6 @@
package zstd
import (
- "bytes"
"errors"
"io"
"sync"
@@ -23,17 +22,15 @@ type Decoder struct {
// Unreferenced decoders, ready for use.
decoders chan *blockDec
- // Unreferenced decoders, ready for use.
- frames chan *frameDec
-
// Streams ready to be decoded.
stream chan decodeStream
// Current read position used for Reader functionality.
current decoderState
- // Custom dictionaries
- dicts map[uint32]struct{}
+ // Custom dictionaries.
+ // Always uses copies.
+ dicts map[uint32]dict
// streamWg is the waitgroup for all streams
streamWg sync.WaitGroup
@@ -66,7 +63,7 @@ var (
// A Decoder can be used in two modes:
//
// 1) As a stream, or
-// 2) For stateless decoding using DecodeAll or DecodeBuffer.
+// 2) For stateless decoding using DecodeAll.
//
// Only a single stream can be decoded concurrently, but the same decoder
// can run multiple concurrent stateless decodes. It is even possible to
@@ -75,6 +72,7 @@ var (
// The Reset function can be used to initiate a new stream, which will considerably
// reduce the allocations normally caused by NewReader.
func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
+ initPredefined()
var d Decoder
d.o.setDefault()
for _, o := range opts {
@@ -86,12 +84,23 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
d.current.output = make(chan decodeOutput, d.o.concurrent)
d.current.flushed = true
+ if r == nil {
+ d.current.err = ErrDecoderNilInput
+ }
+
+ // Transfer option dicts.
+ d.dicts = make(map[uint32]dict, len(d.o.dicts))
+ for _, dc := range d.o.dicts {
+ d.dicts[dc.id] = dc
+ }
+ d.o.dicts = nil
+
// Create decoders
d.decoders = make(chan *blockDec, d.o.concurrent)
- d.frames = make(chan *frameDec, d.o.concurrent)
for i := 0; i < d.o.concurrent; i++ {
- d.frames <- newFrameDec(d.o)
- d.decoders <- newBlockDec(d.o.lowMem)
+ dec := newBlockDec(d.o.lowMem)
+ dec.localFrame = newFrameDec(d.o)
+ d.decoders <- dec
}
if r == nil {
@@ -105,7 +114,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
// When the stream is done, io.EOF will be returned.
func (d *Decoder) Read(p []byte) (int, error) {
if d.stream == nil {
- return 0, errors.New("no input has been initialized")
+ return 0, ErrDecoderNilInput
}
var n int
for {
@@ -123,10 +132,15 @@ func (d *Decoder) Read(p []byte) (int, error) {
if d.current.err != nil {
break
}
- d.nextBlock()
+ if !d.nextBlock(n == 0) {
+ return n, nil
+ }
}
}
if len(d.current.b) > 0 {
+ if debug {
+ println("returning", n, "still bytes left:", len(d.current.b))
+ }
// Only return error at end of block
return n, nil
}
@@ -141,12 +155,20 @@ func (d *Decoder) Read(p []byte) (int, error) {
// Reset will reset the decoder the supplied stream after the current has finished processing.
// Note that this functionality cannot be used after Close has been called.
+// Reset can be called with a nil reader to release references to the previous reader.
+// After being called with a nil reader, no operations other than Reset, DecodeAll, or Close
+// should be used.
func (d *Decoder) Reset(r io.Reader) error {
if d.current.err == ErrDecoderClosed {
return d.current.err
}
+
+ d.drainOutput()
+
if r == nil {
- return errors.New("nil Reader sent as input")
+ d.current.err = ErrDecoderNilInput
+ d.current.flushed = true
+ return nil
}
if d.stream == nil {
@@ -155,18 +177,29 @@ func (d *Decoder) Reset(r io.Reader) error {
go d.startStreamDecoder(d.stream)
}
- d.drainOutput()
-
// If bytes buffer and < 1MB, do sync decoding anyway.
- if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 {
- b := bb.Bytes()
- dst, err := d.DecodeAll(b, nil)
+ if bb, ok := r.(byter); ok && bb.Len() < 1<<20 {
+ var bb2 byter
+ bb2 = bb
+ if debug {
+ println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
+ }
+ b := bb2.Bytes()
+ var dst []byte
+ if cap(d.current.b) > 0 {
+ dst = d.current.b
+ }
+
+ dst, err := d.DecodeAll(b, dst[:0])
if err == nil {
err = io.EOF
}
d.current.b = dst
d.current.err = err
d.current.flushed = true
+ if debug {
+ println("sync decode to", len(dst), "bytes, err:", err)
+ }
return nil
}
@@ -193,7 +226,9 @@ func (d *Decoder) drainOutput() {
d.current.cancel = nil
}
if d.current.d != nil {
- println("re-adding current decoder", d.current.d, len(d.decoders))
+ if debug {
+ printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders))
+ }
d.decoders <- d.current.d
d.current.d = nil
d.current.b = nil
@@ -206,7 +241,9 @@ func (d *Decoder) drainOutput() {
select {
case v := <-d.current.output:
if v.d != nil {
- println("got decoder", v.d)
+ if debug {
+ printf("re-adding decoder %p", v.d)
+ }
d.decoders <- v.d
}
if v.err == errEndOfStream {
@@ -223,7 +260,7 @@ func (d *Decoder) drainOutput() {
// Any error encountered during the write is also returned.
func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
if d.stream == nil {
- return 0, errors.New("no input has been initialized")
+ return 0, ErrDecoderNilInput
}
var n int64
for {
@@ -238,7 +275,7 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
if d.current.err != nil {
break
}
- d.nextBlock()
+ d.nextBlock(true)
}
err := d.current.err
if err != nil {
@@ -259,23 +296,36 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
if d.current.err == ErrDecoderClosed {
return dst, ErrDecoderClosed
}
- //println(len(d.frames), len(d.decoders), d.current)
- block, frame := <-d.decoders, <-d.frames
+
+ // Grab a block decoder and frame decoder.
+ block := <-d.decoders
+ frame := block.localFrame
defer func() {
- d.decoders <- block
+ if debug {
+ printf("re-adding decoder: %p", block)
+ }
frame.rawInput = nil
- d.frames <- frame
+ frame.bBuf = nil
+ d.decoders <- block
}()
- if cap(dst) == 0 {
- // Allocate 1MB by default.
- dst = make([]byte, 0, 1<<20)
- }
- br := byteBuf(input)
+ frame.bBuf = input
+
for {
- err := frame.reset(&br)
+ frame.history.reset()
+ err := frame.reset(&frame.bBuf)
if err == io.EOF {
+ if debug {
+ println("frame reset return EOF")
+ }
return dst, nil
}
+ if frame.DictionaryID != nil {
+ dict, ok := d.dicts[*frame.DictionaryID]
+ if !ok {
+ return nil, ErrUnknownDictionary
+ }
+ frame.history.setDict(&dict)
+ }
if err != nil {
return dst, err
}
@@ -284,17 +334,34 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
}
if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
// Never preallocate more than 1 GB up front.
- if uint64(cap(dst)) < frame.FrameContentSize {
+ if cap(dst)-len(dst) < int(frame.FrameContentSize) {
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
copy(dst2, dst)
dst = dst2
}
}
+ if cap(dst) == 0 {
+ // Allocate len(input) * 2 by default if nothing is provided
+ // and we didn't get frame content size.
+ size := len(input) * 2
+ // Cap to 1 MB.
+ if size > 1<<20 {
+ size = 1 << 20
+ }
+ if uint64(size) > d.o.maxDecodedSize {
+ size = int(d.o.maxDecodedSize)
+ }
+ dst = make([]byte, 0, size)
+ }
+
dst, err = frame.runDecoder(dst, block)
if err != nil {
return dst, err
}
- if len(br) == 0 {
+ if len(frame.bBuf) == 0 {
+ if debug {
+ println("frame dbuf empty")
+ }
break
}
}
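
Since DecodeAll above now reuses whatever capacity the caller passes in, handing back the previous output re-sliced to zero length avoids a fresh allocation per call; a sketch, assuming the exported Encoder/Decoder API:

	package main

	import (
		"fmt"

		"github.com/klauspost/compress/zstd"
	)

	func main() {
		enc, _ := zstd.NewWriter(nil)
		frame := enc.EncodeAll([]byte("payload"), nil)
		enc.Close()

		dec, _ := zstd.NewReader(nil)
		defer dec.Close()

		var buf []byte
		for i := 0; i < 3; i++ {
			// buf[:0] keeps the capacity from the previous iteration.
			out, err := dec.DecodeAll(frame, buf[:0])
			if err != nil {
				panic(err)
			}
			buf = out
		}
		fmt.Println(string(buf))
	}
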
@@ -303,19 +370,35 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
// nextBlock returns the next block.
// If an error occurs d.err will be set.
-func (d *Decoder) nextBlock() {
+// Optionally the function can block for new output.
+// If non-blocking mode is used the returned boolean will be false
+// if no data was available without blocking.
+func (d *Decoder) nextBlock(blocking bool) (ok bool) {
if d.current.d != nil {
+ if debug {
+ printf("re-adding current decoder %p", d.current.d)
+ }
d.decoders <- d.current.d
d.current.d = nil
}
if d.current.err != nil {
// Keep error state.
- return
+ return blocking
+ }
+
+ if blocking {
+ d.current.decodeOutput = <-d.current.output
+ } else {
+ select {
+ case d.current.decodeOutput = <-d.current.output:
+ default:
+ return false
+ }
}
- d.current.decodeOutput = <-d.current.output
if debug {
println("got", len(d.current.b), "bytes, error:", d.current.err)
}
+ return true
}
// Close will release all resources.
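
The non-blocking mode added to nextBlock is the standard select-with-default idiom; a standalone sketch (names illustrative):

	package main

	import "fmt"

	func main() {
		out := make(chan int, 1)
		out <- 42

		recv := func(blocking bool) (int, bool) {
			if blocking {
				return <-out, true
			}
			select {
			case v := <-out:
				return v, true
			default: // nothing ready; report that instead of blocking
				return 0, false
			}
		}

		if v, ok := recv(false); ok {
			fmt.Println("got", v)
		}
		if _, ok := recv(false); !ok {
			fmt.Println("no data without blocking")
		}
	}
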
@@ -344,6 +427,35 @@ func (d *Decoder) Close() {
d.current.err = ErrDecoderClosed
}
+// IOReadCloser returns the decoder as an io.ReadCloser for convenience.
+// Any changes to the decoder will be reflected, so the returned ReadCloser
+// can be reused along with the decoder.
+// io.WriterTo is also supported by the returned ReadCloser.
+func (d *Decoder) IOReadCloser() io.ReadCloser {
+ return closeWrapper{d: d}
+}
+
+// closeWrapper wraps the decoder so it can be used as an io.ReadCloser.
+type closeWrapper struct {
+ d *Decoder
+}
+
+// WriteTo forwards WriteTo calls to the decoder.
+func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) {
+ return c.d.WriteTo(w)
+}
+
+// Read forwards read calls to the decoder.
+func (c closeWrapper) Read(p []byte) (n int, err error) {
+ return c.d.Read(p)
+}
+
+// Close closes the decoder.
+func (c closeWrapper) Close() error {
+ c.d.Close()
+ return nil
+}
+
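
A short usage sketch for the wrapper above; io.Copy will pick up the forwarded WriteTo automatically:

	package main

	import (
		"bytes"
		"io"
		"os"

		"github.com/klauspost/compress/zstd"
	)

	func main() {
		enc, _ := zstd.NewWriter(nil)
		frame := enc.EncodeAll([]byte("stream me"), nil)
		enc.Close()

		dec, _ := zstd.NewReader(bytes.NewReader(frame))
		rc := dec.IOReadCloser() // io.ReadCloser view of the decoder
		defer rc.Close()         // forwarded to Decoder.Close
		io.Copy(os.Stdout, rc)   // uses the forwarded WriteTo
	}
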
type decodeOutput struct {
d *blockDec
b []byte
@@ -377,13 +489,25 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
defer d.streamWg.Done()
frame := newFrameDec(d.o)
for stream := range inStream {
+ if debug {
+ println("got new stream")
+ }
br := readerWrapper{r: stream.r}
decodeStream:
for {
+ frame.history.reset()
err := frame.reset(&br)
if debug && err != nil {
println("Frame decoder returned", err)
}
+ if err == nil && frame.DictionaryID != nil {
+ dict, ok := d.dicts[*frame.DictionaryID]
+ if !ok {
+ err = ErrUnknownDictionary
+ } else {
+ frame.history.setDict(&dict)
+ }
+ }
if err != nil {
stream.output <- decodeOutput{
err: err,
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index 52c1eb06625..284d384492b 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -18,6 +18,7 @@ type decoderOptions struct {
lowMem bool
concurrent int
maxDecodedSize uint64
+ dicts []dict
}
func (o *decoderOptions) setDefault() {
@@ -50,17 +51,34 @@ func WithDecoderConcurrency(n int) DOption {
}
// WithDecoderMaxMemory allows setting a maximum decoded size for in-memory
-// (non-streaming) operations.
-// Maxmimum and default is 1 << 63 bytes.
+// non-streaming operations or maximum window size for streaming operations.
+// This can be used to control memory usage of potentially hostile content.
+// For streaming operations, the maximum window size is capped at 1<<30 bytes.
+// Maximum and default is 1 << 63 bytes.
func WithDecoderMaxMemory(n uint64) DOption {
return func(o *decoderOptions) error {
if n == 0 {
- return errors.New("WithDecoderMaxmemory must be at least 1")
+ return errors.New("WithDecoderMaxMemory must be at least 1")
}
if n > 1<<63 {
- return fmt.Errorf("WithDecoderMaxmemorymust be less than 1 << 63")
+ return fmt.Errorf("WithDecoderMaxmemory must be less than 1 << 63")
}
o.maxDecodedSize = n
return nil
}
}
+
+// WithDecoderDicts allows registering one or more dictionaries for the decoder.
+// If several dictionaries with the same ID are provided, the last one will be used.
+func WithDecoderDicts(dicts ...[]byte) DOption {
+ return func(o *decoderOptions) error {
+ for _, b := range dicts {
+ d, err := loadDict(b)
+ if err != nil {
+ return err
+ }
+ o.dicts = append(o.dicts, *d)
+ }
+ return nil
+ }
+}
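
A hedged sketch of registering a dictionary with the option above; dictData is a placeholder for the contents of a dictionary file (e.g. produced by zstd --train):

	package main

	import (
		"fmt"

		"github.com/klauspost/compress/zstd"
	)

	func main() {
		// dictData must be a dictionary in the standard zstd format
		// (magic 0x37a430ec); an invalid blob makes NewReader fail here.
		var dictData []byte
		dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dictData))
		if err != nil {
			fmt.Println("dictionary rejected:", err)
			return
		}
		defer dec.Close()
		// Frames referencing an unregistered dictionary ID fail at decode
		// time with ErrUnknownDictionary instead.
	}
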
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
new file mode 100644
index 00000000000..fa25a18d864
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -0,0 +1,122 @@
+package zstd
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/klauspost/compress/huff0"
+)
+
+type dict struct {
+ id uint32
+
+ litEnc *huff0.Scratch
+ llDec, ofDec, mlDec sequenceDec
+ //llEnc, ofEnc, mlEnc []*fseEncoder
+ offsets [3]int
+ content []byte
+}
+
+var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
+
+// ID returns the dictionary id or 0 if d is nil.
+func (d *dict) ID() uint32 {
+ if d == nil {
+ return 0
+ }
+ return d.id
+}
+
+// DictContentSize returns the dictionary content size or 0 if d is nil.
+func (d *dict) DictContentSize() int {
+ if d == nil {
+ return 0
+ }
+ return len(d.content)
+}
+
+// loadDict loads a dictionary as described in
+// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+func loadDict(b []byte) (*dict, error) {
+ // Check static field size.
+ if len(b) <= 8+(3*4) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ d := dict{
+ llDec: sequenceDec{fse: &fseDecoder{}},
+ ofDec: sequenceDec{fse: &fseDecoder{}},
+ mlDec: sequenceDec{fse: &fseDecoder{}},
+ }
+ if !bytes.Equal(b[:4], dictMagic[:]) {
+ return nil, ErrMagicMismatch
+ }
+ d.id = binary.LittleEndian.Uint32(b[4:8])
+ if d.id == 0 {
+ return nil, errors.New("dictionaries cannot have ID 0")
+ }
+
+ // Read literal table
+ var err error
+ d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
+ if err != nil {
+ return nil, err
+ }
+ d.litEnc.Reuse = huff0.ReusePolicyMust
+
+ br := byteReader{
+ b: b,
+ off: 0,
+ }
+ readDec := func(i tableIndex, dec *fseDecoder) error {
+ if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil {
+ return err
+ }
+ if br.overread() {
+ return io.ErrUnexpectedEOF
+ }
+ err = dec.transform(symbolTableX[i])
+ if err != nil {
+ println("Transform table error:", err)
+ return err
+ }
+ if debug {
+ println("Read table ok", "symbolLen:", dec.symbolLen)
+ }
+ // Set decoders as predefined so they aren't reused.
+ dec.preDefined = true
+ return nil
+ }
+
+ if err := readDec(tableOffsets, d.ofDec.fse); err != nil {
+ return nil, err
+ }
+ if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil {
+ return nil, err
+ }
+ if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil {
+ return nil, err
+ }
+ if br.remain() < 12 {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ d.offsets[0] = int(br.Uint32())
+ br.advance(4)
+ d.offsets[1] = int(br.Uint32())
+ br.advance(4)
+ d.offsets[2] = int(br.Uint32())
+ br.advance(4)
+ if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 {
+ return nil, errors.New("invalid offset in dictionary")
+ }
+ d.content = make([]byte, br.remain())
+ copy(d.content, br.unread())
+ if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) {
+ return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets)
+ }
+
+ return &d, nil
+}
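
The fixed-size header validated above is four magic bytes followed by a little-endian uint32 ID. A self-contained sketch of just that prefix check (the helper name is hypothetical):

	package main

	import (
		"bytes"
		"encoding/binary"
		"errors"
		"fmt"
	)

	var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}

	func dictID(b []byte) (uint32, error) {
		if len(b) < 8 {
			return 0, errors.New("dictionary too short")
		}
		if !bytes.Equal(b[:4], dictMagic[:]) {
			return 0, errors.New("magic mismatch")
		}
		id := binary.LittleEndian.Uint32(b[4:8])
		if id == 0 {
			return 0, errors.New("dictionaries cannot have ID 0")
		}
		return id, nil
	}

	func main() {
		raw := []byte{0x37, 0xa4, 0x30, 0xec, 0x01, 0x00, 0x00, 0x00}
		id, err := dictID(raw)
		fmt.Println(id, err) // 1 <nil>
	}
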
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
new file mode 100644
index 00000000000..2d4d893eb91
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -0,0 +1,177 @@
+package zstd
+
+import (
+ "fmt"
+ "math/bits"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+const (
+ dictShardBits = 6
+)
+
+type fastBase struct {
+ // cur is the offset at the start of hist
+ cur int32
+ // maximum offset. Should be at least 2x block size.
+ maxMatchOff int32
+ hist []byte
+ crc *xxhash.Digest
+ tmp [8]byte
+ blk *blockEnc
+ lastDictID uint32
+ lowMem bool
+}
+
+// CRC returns the underlying CRC writer.
+func (e *fastBase) CRC() *xxhash.Digest {
+ return e.crc
+}
+
+// AppendCRC will append the CRC to the destination slice and return it.
+func (e *fastBase) AppendCRC(dst []byte) []byte {
+ crc := e.crc.Sum(e.tmp[:0])
+ dst = append(dst, crc[7], crc[6], crc[5], crc[4])
+ return dst
+}
+
+// WindowSize returns the window size of the encoder,
+// or, if size > 0, a smaller window still large enough to contain the input.
+func (e *fastBase) WindowSize(size int) int32 {
+ if size > 0 && size < int(e.maxMatchOff) {
+ b := int32(1) << uint(bits.Len(uint(size)))
+ // Keep minimum window.
+ if b < 1024 {
+ b = 1024
+ }
+ return b
+ }
+ return e.maxMatchOff
+}
+
+// Block returns the current block.
+func (e *fastBase) Block() *blockEnc {
+ return e.blk
+}
+
+func (e *fastBase) addBlock(src []byte) int32 {
+ if debugAsserts && e.cur > bufferReset {
+ panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
+ }
+ // check if we have space already
+ if len(e.hist)+len(src) > cap(e.hist) {
+ if cap(e.hist) == 0 {
+ e.ensureHist(len(src))
+ } else {
+ if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) {
+ panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff))
+ }
+ // Move down
+ offset := int32(len(e.hist)) - e.maxMatchOff
+ copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
+ e.cur += offset
+ e.hist = e.hist[:e.maxMatchOff]
+ }
+ }
+ s := int32(len(e.hist))
+ e.hist = append(e.hist, src...)
+ return s
+}
+
+// ensureHist will ensure that history can keep at least this many bytes.
+func (e *fastBase) ensureHist(n int) {
+ if cap(e.hist) >= n {
+ return
+ }
+ l := e.maxMatchOff
+ if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize {
+ l += maxCompressedBlockSize
+ } else {
+ l += e.maxMatchOff
+ }
+ // Make it at least 1MB.
+ if l < 1<<20 && !e.lowMem {
+ l = 1 << 20
+ }
+ // Make it at least the requested size.
+ if l < int32(n) {
+ l = int32(n)
+ }
+ e.hist = make([]byte, 0, l)
+}
+
+// UseBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastBase) UseBlock(enc *blockEnc) {
+ enc.reset(e.blk)
+ e.blk = enc
+}
+
+func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 {
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:], src[t:]))
+}
+
+func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
+ if debugAsserts {
+ if s < 0 {
+ err := fmt.Sprintf("s (%d) < 0", s)
+ panic(err)
+ }
+ if t < 0 {
+ err := fmt.Sprintf("s (%d) < 0", s)
+ panic(err)
+ }
+ if s-t > e.maxMatchOff {
+ err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
+ panic(err)
+ }
+ if len(src)-int(s) > maxCompressedBlockSize {
+ panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
+ }
+ }
+
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastBase) resetBase(d *dict, singleBlock bool) {
+ if e.blk == nil {
+ e.blk = &blockEnc{lowMem: e.lowMem}
+ e.blk.init()
+ } else {
+ e.blk.reset(nil)
+ }
+ e.blk.initNewEncode()
+ if e.crc == nil {
+ e.crc = xxhash.New()
+ } else {
+ e.crc.Reset()
+ }
+ if (!singleBlock || d.DictContentSize() > 0) && cap(e.hist) < int(e.maxMatchOff*2)+d.DictContentSize() {
+ l := e.maxMatchOff*2 + int32(d.DictContentSize())
+ // Make it at least 1MB.
+ if l < 1<<20 {
+ l = 1 << 20
+ }
+ e.hist = make([]byte, 0, l)
+ }
+ // We offset current position so everything will be out of reach.
+ // If above reset line, history will be purged.
+ if e.cur < bufferReset {
+ e.cur += e.maxMatchOff + int32(len(e.hist))
+ }
+ e.hist = e.hist[:0]
+ if d != nil {
+ // Set offsets (currently not used)
+ for i, off := range d.offsets {
+ e.blk.recentOffsets[i] = uint32(off)
+ e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
+ }
+ // Transfer litenc.
+ e.blk.dictLitEnc = d.litEnc
+ e.hist = append(e.hist, d.content...)
+ }
+}
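
WindowSize above rounds a known input size up to a power of two with a 1 KB floor, so small inputs don't reserve the full window. The arithmetic in isolation, with an assumed maxMatchOff:

	package main

	import (
		"fmt"
		"math/bits"
	)

	// windowSize mirrors fastBase.WindowSize for a hypothetical maxMatchOff.
	func windowSize(size int, maxMatchOff int32) int32 {
		if size > 0 && size < int(maxMatchOff) {
			b := int32(1) << uint(bits.Len(uint(size))) // power of two that can contain size
			if b < 1024 {
				b = 1024 // keep a minimum window
			}
			return b
		}
		return maxMatchOff
	}

	func main() {
		fmt.Println(windowSize(100, 1<<21))  // 1024 (floor)
		fmt.Println(windowSize(5000, 1<<21)) // 8192
		fmt.Println(windowSize(0, 1<<21))    // 2097152 (unknown size -> full window)
	}
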
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
new file mode 100644
index 00000000000..fe3625c5f5e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -0,0 +1,487 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "fmt"
+ "math/bits"
+)
+
+const (
+ bestLongTableBits = 20 // Bits used in the long match table
+ bestLongTableSize = 1 << bestLongTableBits // Size of the table
+
+ // Note: Increasing the short table bits or making the hash shorter
+ // can actually lead to compression degradation since it will 'steal' more from the
+ // long match table and match offsets are quite big.
+ // This greatly depends on the type of input.
+ bestShortTableBits = 16 // Bits used in the short match table
+ bestShortTableSize = 1 << bestShortTableBits // Size of the table
+)
+
+// bestFastEncoder uses 2 tables, one for short matches (4 bytes) and one for long matches.
+// The long match table contains the previous entry with the same hash,
+// effectively making it a "chain" of length 2.
+// When we find a long match we choose between the two values and select the longest.
+// When we find a short match, after checking the long, we check if we can find a long at n+1
+// and that it is longer (lazy matching).
+type bestFastEncoder struct {
+ fastBase
+ table [bestShortTableSize]prevEntry
+ longTable [bestLongTableSize]prevEntry
+ dictTable []prevEntry
+ dictLongTable []prevEntry
+}
+
+// Encode encodes src into blk using the best (slowest, highest-ratio) match strategy.
+func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (4)
+ inputMargin = 8 + 4
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = prevEntry{}
+ }
+ for i := range e.longTable[:] {
+ e.longTable[i] = prevEntry{}
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ v2 := e.table[i].prev
+ if v < minOff {
+ v = 0
+ v2 = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ if v2 < minOff {
+ v2 = 0
+ } else {
+ v2 = v2 - e.cur + e.maxMatchOff
+ }
+ }
+ e.table[i] = prevEntry{
+ offset: v,
+ prev: v2,
+ }
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ v2 := e.longTable[i].prev
+ if v < minOff {
+ v = 0
+ v2 = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ if v2 < minOff {
+ v2 = 0
+ } else {
+ v2 = v2 - e.cur + e.maxMatchOff
+ }
+ }
+ e.longTable[i] = prevEntry{
+ offset: v,
+ prev: v2,
+ }
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ const kSearchStrength = 10
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+ offset3 := int32(blk.recentOffsets[2])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ _ = addLiterals
+
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // We allow the encoder to optionally turn off repeat offsets across blocks
+ canRepeat := len(blk.sequences) > 2
+
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ type match struct {
+ offset int32
+ s int32
+ length int32
+ rep int32
+ }
+ matchAt := func(offset int32, s int32, first uint32, rep int32) match {
+ if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
+ return match{offset: offset, s: s}
+ }
+ return match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
+ }
+
+ bestOf := func(a, b match) match {
+ aScore := b.s - a.s + a.length
+ bScore := a.s - b.s + b.length
+ if a.rep < 0 {
+ aScore = aScore - int32(bits.Len32(uint32(a.offset)))/8
+ }
+ if b.rep < 0 {
+ bScore = bScore - int32(bits.Len32(uint32(b.offset)))/8
+ }
+ if aScore >= bScore {
+ return a
+ }
+ return b
+ }
+ const goodEnough = 100
+
+ nextHashL := hash8(cv, bestLongTableBits)
+ nextHashS := hash4x64(cv, bestShortTableBits)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
+ best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
+ best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
+ if canRepeat && best.length < goodEnough {
+ best = bestOf(best, matchAt(s-offset1+1, s+1, uint32(cv>>8), 1))
+ best = bestOf(best, matchAt(s-offset2+1, s+1, uint32(cv>>8), 2))
+ best = bestOf(best, matchAt(s-offset3+1, s+1, uint32(cv>>8), 3))
+ if best.length > 0 {
+ best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1))
+ best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2))
+ best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3))
+ }
+ }
+ // Load next and check...
+ e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
+ e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
+
+ // Look far ahead, unless we have a really long match already...
+ if best.length < goodEnough {
+ // No match found, move forward on input, no need to check forward...
+ if best.length < 4 {
+ s += 1 + (s-nextEmit)>>(kSearchStrength-1)
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+
+ s++
+ candidateS = e.table[hash4x64(cv>>8, bestShortTableBits)]
+ cv = load6432(src, s)
+ cv2 := load6432(src, s+1)
+ candidateL = e.longTable[hash8(cv, bestLongTableBits)]
+ candidateL2 := e.longTable[hash8(cv2, bestLongTableBits)]
+
+ best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
+ best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
+ best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
+ best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
+ best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
+ }
+
+ // We have a match, we can store the forward value
+ if best.rep > 0 {
+ s = best.s
+ var seq seq
+ seq.matchLen = uint32(best.length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := best.s
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ repIndex := best.offset
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = uint32(best.rep)
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Index match start+1 (long) -> s - 1
+ index0 := s
+ s = best.s + best.length
+
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, best.length)
+
+ }
+ break encodeLoop
+ }
+ // Index skipped...
+ off := index0 + e.cur
+ for index0 < s-1 {
+ cv0 := load6432(src, index0)
+ h0 := hash8(cv0, bestLongTableBits)
+ h1 := hash4x64(cv0, bestShortTableBits)
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
+ off++
+ index0++
+ }
+ switch best.rep {
+ case 2:
+ offset1, offset2 = offset2, offset1
+ case 3:
+ offset1, offset2, offset3 = offset3, offset1, offset2
+ }
+ cv = load6432(src, s)
+ continue
+ }
+
+ // A 4-byte match has been found. Update recent offsets.
+ // We'll later see if more than 4 bytes.
+ s = best.s
+ t := best.offset
+ offset1, offset2, offset3 = s-t, offset1, offset2
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ if debugAsserts && canRepeat && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Extend the n-byte match as long as possible.
+ l := best.length
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start+1 (long) -> s - 1
+ index0 := s - l + 1
+ // Index every entry in the skipped range.
+ for index0 < s-1 {
+ cv0 := load6432(src, index0)
+ h0 := hash8(cv0, bestLongTableBits)
+ h1 := hash4x64(cv0, bestShortTableBits)
+ off := index0 + e.cur
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
+ index0++
+ }
+
+ cv = load6432(src, s)
+ if !canRepeat {
+ continue
+ }
+
+ // Check offset 2
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+ nextHashS := hash4x64(cv, bestShortTableBits)
+ nextHashL := hash8(cv, bestLongTableBits)
+
+ // We have at least 4 byte match.
+ // No need to check backwards. We come straight from a match
+ l := 4 + e.matchlen(s+4, o2+4, src)
+
+ e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
+ e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ // Finished
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ blk.recentOffsets[2] = uint32(offset3)
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// The most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ e.ensureHist(len(src))
+ e.Encode(blk, src)
+}
+
+// Reset will reset the encoder and set a dictionary if not nil.
+func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d == nil {
+ return
+ }
+ // Init or copy dict table
+ if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+ if len(e.dictTable) != len(e.table) {
+ e.dictTable = make([]prevEntry, len(e.table))
+ }
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ for i := e.maxMatchOff; i < end; i += 4 {
+ const hashLog = bestShortTableBits
+
+ cv := load6432(d.content, i-e.maxMatchOff)
+ nextHash := hash4x64(cv, hashLog) // 0 -> 4
+ nextHash1 := hash4x64(cv>>8, hashLog) // 1 -> 5
+ nextHash2 := hash4x64(cv>>16, hashLog) // 2 -> 6
+ nextHash3 := hash4x64(cv>>24, hashLog) // 3 -> 7
+ e.dictTable[nextHash] = prevEntry{
+ prev: e.dictTable[nextHash].offset,
+ offset: i,
+ }
+ e.dictTable[nextHash1] = prevEntry{
+ prev: e.dictTable[nextHash1].offset,
+ offset: i + 1,
+ }
+ e.dictTable[nextHash2] = prevEntry{
+ prev: e.dictTable[nextHash2].offset,
+ offset: i + 2,
+ }
+ e.dictTable[nextHash3] = prevEntry{
+ prev: e.dictTable[nextHash3].offset,
+ offset: i + 3,
+ }
+ }
+ e.lastDictID = d.id
+ }
+
+ // Init or copy dict table
+ if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+ if len(e.dictLongTable) != len(e.longTable) {
+ e.dictLongTable = make([]prevEntry, len(e.longTable))
+ }
+ if len(d.content) >= 8 {
+ cv := load6432(d.content, 0)
+ h := hash8(cv, bestLongTableBits)
+ e.dictLongTable[h] = prevEntry{
+ offset: e.maxMatchOff,
+ prev: e.dictLongTable[h].offset,
+ }
+
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ off := 8 // First to read
+ for i := e.maxMatchOff + 1; i < end; i++ {
+ cv = cv>>8 | (uint64(d.content[off]) << 56)
+ h := hash8(cv, bestLongTableBits)
+ e.dictLongTable[h] = prevEntry{
+ offset: i,
+ prev: e.dictLongTable[h].offset,
+ }
+ off++
+ }
+ }
+ e.lastDictID = d.id
+ }
+ // Reset table to initial state
+ copy(e.longTable[:], e.dictLongTable)
+
+ e.cur = e.maxMatchOff
+ // Reset table to initial state
+ copy(e.table[:], e.dictTable)
+}
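
The bestOf helper above scores candidates by coverage from a common start, docking non-repeat matches roughly one point per byte needed to encode the offset. A standalone illustration with made-up values:

	package main

	import (
		"fmt"
		"math/bits"
	)

	type match struct {
		offset, s, length, rep int32
	}

	func bestOf(a, b match) match {
		// Score = match length adjusted for starting position; non-repeat
		// matches (rep < 0) pay ~1 point per 8 bits of offset to encode.
		aScore := b.s - a.s + a.length
		bScore := a.s - b.s + b.length
		if a.rep < 0 {
			aScore -= int32(bits.Len32(uint32(a.offset))) / 8
		}
		if b.rep < 0 {
			bScore -= int32(bits.Len32(uint32(b.offset))) / 8
		}
		if aScore >= bScore {
			return a
		}
		return b
	}

	func main() {
		far := match{offset: 1 << 20, s: 100, length: 8, rep: -1} // 21-bit offset costs 2
		rep := match{offset: 64, s: 100, length: 7, rep: 1}       // repeat offset, no penalty
		fmt.Println(bestOf(far, rep))                             // repeat wins: 7 vs 8-2=6
	}
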
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
new file mode 100644
index 00000000000..c2ce4a2bac5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -0,0 +1,1170 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+const (
+ betterLongTableBits = 19 // Bits used in the long match table
+ betterLongTableSize = 1 << betterLongTableBits // Size of the table
+
+ // Note: Increasing the short table bits or making the hash shorter
+ // can actually lead to compression degradation since it will 'steal' more from the
+ // long match table and match offsets are quite big.
+ // This greatly depends on the type of input.
+ betterShortTableBits = 13 // Bits used in the short match table
+ betterShortTableSize = 1 << betterShortTableBits // Size of the table
+
+ betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table
+ betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard
+
+ betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table
+ betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard
+)
+
+type prevEntry struct {
+ offset int32
+ prev int32
+}
+
+// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches.
+// The long match table contains the previous entry with the same hash,
+// effectively making it a "chain" of length 2.
+// When we find a long match we choose between the two values and select the longest.
+// When we find a short match, after checking the long, we check if we can find a long at n+1
+// and that it is longer (lazy matching).
+type betterFastEncoder struct {
+ fastBase
+ table [betterShortTableSize]tableEntry
+ longTable [betterLongTableSize]prevEntry
+}
+
+type betterFastEncoderDict struct {
+ betterFastEncoder
+ dictTable []tableEntry
+ dictLongTable []prevEntry
+ shortTableShardDirty [betterShortTableShardCnt]bool
+ longTableShardDirty [betterLongTableShardCnt]bool
+ allDirty bool
+}
+
+// Encode encodes src into blk using the "better" (lazy, two-table) match strategy.
+func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.longTable[:] {
+ e.longTable[i] = prevEntry{}
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ v2 := e.longTable[i].prev
+ if v < minOff {
+ v = 0
+ v2 = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ if v2 < minOff {
+ v2 = 0
+ } else {
+ v2 = v2 - e.cur + e.maxMatchOff
+ }
+ }
+ e.longTable[i] = prevEntry{
+ offset: v,
+ prev: v2,
+ }
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
+ const stepSize = 1
+
+ const kSearchStrength = 9
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ var t int32
+ // We allow the encoder to optionally turn off repeat offsets across blocks
+ canRepeat := len(blk.sequences) > 2
+ var matched int32
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHashS := hash5(cv, betterShortTableBits)
+ nextHashL := hash8(cv, betterLongTableBits)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ const repOff = 1
+ repIndex := s - offset1 + repOff
+ off := s + e.cur
+ e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
+ e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+
+ if canRepeat {
+ if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Index match start+1 (long) -> s - 1
+ index0 := s + repOff
+ s += length + repOff
+
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, lenght)
+
+ }
+ break encodeLoop
+ }
+ // Index skipped...
+ for index0 < s-1 {
+ cv0 := load6432(src, index0)
+ cv1 := cv0 >> 8
+ h0 := hash8(cv0, betterLongTableBits)
+ off := index0 + e.cur
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ index0 += 2
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ const repOff2 = 1
+
+ // We deviate from the reference encoder and also check offset 2.
+ // Still slower and not much better, so disabled.
+ // repIndex = s - offset2 + repOff2
+ if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
+ // Consider history as well.
+ var seq seq
+ length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 2
+ seq.offset = 2
+ if debugSequences {
+ println("repeat sequence 2", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ index0 := s + repOff2
+ s += length + repOff2
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, lenght)
+
+ }
+ break encodeLoop
+ }
+
+ // Index skipped...
+ for index0 < s-1 {
+ cv0 := load6432(src, index0)
+ cv1 := cv0 >> 8
+ h0 := hash8(cv0, betterLongTableBits)
+ off := index0 + e.cur
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ index0 += 2
+ }
+ cv = load6432(src, s)
+ // Swap offsets
+ offset1, offset2 = offset2, offset1
+ continue
+ }
+ }
+ // Find the offsets of our two matches.
+ coffsetL := candidateL.offset - e.cur
+ coffsetLP := candidateL.prev - e.cur
+
+ // Check if we have a long match.
+ if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+ // Found a long match, at least 8 bytes.
+ matched = e.matchlen(s+8, coffsetL+8, src) + 8
+ t = coffsetL
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+
+ if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+ // Found a long match, at least 8 bytes.
+ prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
+ if prevMatch > matched {
+ matched = prevMatch
+ t = coffsetLP
+ }
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+ }
+ break
+ }
+
+ // Check if we have a long match on prev.
+ if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+ // Found a long match, at least 8 bytes.
+ matched = e.matchlen(s+8, coffsetLP+8, src) + 8
+ t = coffsetLP
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+ break
+ }
+
+ coffsetS := candidateS.offset - e.cur
+
+ // Check if we have a short match.
+ if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+ // found a regular match
+ matched = e.matchlen(s+4, coffsetS+4, src) + 4
+
+ // See if we can find a long match at s+1
+ const checkAt = 1
+ cv := load6432(src, s+checkAt)
+ nextHashL = hash8(cv, betterLongTableBits)
+ candidateL = e.longTable[nextHashL]
+ coffsetL = candidateL.offset - e.cur
+
+ // We can store it, since we have at least a 4 byte match.
+ e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
+ if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+ // Found a long match, at least 8 bytes.
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+ if matchedNext > matched {
+ t = coffsetL
+ s += checkAt
+ matched = matchedNext
+ if debugMatches {
+ println("long match (after short)")
+ }
+ break
+ }
+ }
+
+ // Check prev long...
+ coffsetL = candidateL.prev - e.cur
+ if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+ // Found a long match, at least 8 bytes.
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+ if matchedNext > matched {
+ t = coffsetL
+ s += checkAt
+ matched = matchedNext
+ if debugMatches {
+ println("prev long match (after short)")
+ }
+ break
+ }
+ }
+ t = coffsetS
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ if debugMatches {
+ println("short match")
+ }
+ break
+ }
+
+ // No match found, move forward in input.
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+
+ // A 4-byte match has been found. Update recent offsets.
+ // We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ if debugAsserts && canRepeat && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Extend the n-byte match as long as possible.
+ l := matched
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start+1 (long) -> s - 1
+ index0 := s - l + 1
+ for index0 < s-1 {
+ cv0 := load6432(src, index0)
+ cv1 := cv0 >> 8
+ h0 := hash8(cv0, betterLongTableBits)
+ off := index0 + e.cur
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ index0 += 2
+ }
+
+ cv = load6432(src, s)
+ if !canRepeat {
+ continue
+ }
+
+ // Check offset 2
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+ nextHashS := hash5(cv, betterShortTableBits)
+ nextHashL := hash8(cv, betterLongTableBits)
+
+ // We have at least 4 byte match.
+ // No need to check backwards. We come straight from a match
+ l := 4 + e.matchlen(s+4, o2+4, src)
+
+ e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
+ e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ // Finished
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// The most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ e.ensureHist(len(src))
+ e.Encode(blk, src)
+}
+
+// Encode encodes src into blk like betterFastEncoder, additionally tracking dirty dictionary shards.
+func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.longTable[:] {
+ e.longTable[i] = prevEntry{}
+ }
+ e.cur = e.maxMatchOff
+ e.allDirty = true
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ v2 := e.longTable[i].prev
+ if v < minOff {
+ v = 0
+ v2 = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ if v2 < minOff {
+ v2 = 0
+ } else {
+ v2 = v2 - e.cur + e.maxMatchOff
+ }
+ }
+ e.longTable[i] = prevEntry{
+ offset: v,
+ prev: v2,
+ }
+ }
+ e.allDirty = true
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
+ const stepSize = 1
+
+ const kSearchStrength = 9
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ var t int32
+ // We allow the encoder to optionally turn off repeat offsets across blocks
+ canRepeat := len(blk.sequences) > 2
+ var matched int32
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHashS := hash5(cv, betterShortTableBits)
+ nextHashL := hash8(cv, betterLongTableBits)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ const repOff = 1
+ repIndex := s - offset1 + repOff
+ off := s + e.cur
+ e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
+ e.markLongShardDirty(nextHashL)
+ e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+ e.markShortShardDirty(nextHashS)
+
+ if canRepeat {
+ if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Index match start+1 (long) -> s - 1
+ index0 := s + repOff
+ s += length + repOff
+
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, lenght)
+
+ }
+ break encodeLoop
+ }
+ // Index skipped...
+ for index0 < s-1 {
+ cv0 := load6432(src, index0)
+ cv1 := cv0 >> 8
+ h0 := hash8(cv0, betterLongTableBits)
+ off := index0 + e.cur
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.markLongShardDirty(h0)
+ h1 := hash5(cv1, betterShortTableBits)
+ e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ e.markShortShardDirty(h1)
+ index0 += 2
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ const repOff2 = 1
+
+ // We deviate from the reference encoder and also check offset 2.
+ // Still slower and not much better, so disabled.
+ // repIndex = s - offset2 + repOff2
+ if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
+ // Consider history as well.
+ var seq seq
+ length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 2
+ seq.offset = 2
+ if debugSequences {
+ println("repeat sequence 2", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ index0 := s + repOff2
+ s += length + repOff2
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, lenght)
+
+ }
+ break encodeLoop
+ }
+
+ // Index skipped...
+ for index0 < s-1 {
+ cv0 := load6432(src, index0)
+ cv1 := cv0 >> 8
+ h0 := hash8(cv0, betterLongTableBits)
+ off := index0 + e.cur
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.markLongShardDirty(h0)
+ h1 := hash5(cv1, betterShortTableBits)
+ e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ e.markShortShardDirty(h1)
+ index0 += 2
+ }
+ cv = load6432(src, s)
+ // Swap offsets
+ offset1, offset2 = offset2, offset1
+ continue
+ }
+ }
+ // Find the offsets of our two matches.
+ coffsetL := candidateL.offset - e.cur
+ coffsetLP := candidateL.prev - e.cur
+
+ // Check if we have a long match.
+ if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+ // Found a long match, at least 8 bytes.
+ matched = e.matchlen(s+8, coffsetL+8, src) + 8
+ t = coffsetL
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+
+ if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+ // Found a long match, at least 8 bytes.
+ prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
+ if prevMatch > matched {
+ matched = prevMatch
+ t = coffsetLP
+ }
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+ }
+ break
+ }
+
+ // Check if we have a long match on prev.
+ if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+ // Found a long match, at least 8 bytes.
+ matched = e.matchlen(s+8, coffsetLP+8, src) + 8
+ t = coffsetLP
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+ break
+ }
+
+ coffsetS := candidateS.offset - e.cur
+
+ // Check if we have a short match.
+ if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+ // found a regular match
+ matched = e.matchlen(s+4, coffsetS+4, src) + 4
+
+ // See if we can find a long match at s+1
+ const checkAt = 1
+ cv := load6432(src, s+checkAt)
+ nextHashL = hash8(cv, betterLongTableBits)
+ candidateL = e.longTable[nextHashL]
+ coffsetL = candidateL.offset - e.cur
+
+ // We can store it, since we have at least a 4 byte match.
+ e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
+ e.markLongShardDirty(nextHashL)
+ if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+ // Found a long match, at least 8 bytes.
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+ if matchedNext > matched {
+ t = coffsetL
+ s += checkAt
+ matched = matchedNext
+ if debugMatches {
+ println("long match (after short)")
+ }
+ break
+ }
+ }
+
+ // Check prev long...
+ coffsetL = candidateL.prev - e.cur
+ if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+ // Found a long match, at least 8 bytes.
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+ if matchedNext > matched {
+ t = coffsetL
+ s += checkAt
+ matched = matchedNext
+ if debugMatches {
+ println("prev long match (after short)")
+ }
+ break
+ }
+ }
+ t = coffsetS
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ if debugMatches {
+ println("short match")
+ }
+ break
+ }
+
+ // No match found, move forward in input.
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+
+ // A 4-byte match has been found. Update recent offsets.
+ // We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ if debugAsserts && canRepeat && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Extend the n-byte match as long as possible.
+ l := matched
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start+1 (long) -> s - 1
+ index0 := s - l + 1
+ for index0 < s-1 {
+ cv0 := load6432(src, index0)
+ cv1 := cv0 >> 8
+ h0 := hash8(cv0, betterLongTableBits)
+ off := index0 + e.cur
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.markLongShardDirty(h0)
+ h1 := hash5(cv1, betterShortTableBits)
+ e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ e.markShortShardDirty(h1)
+ index0 += 2
+ }
+
+ cv = load6432(src, s)
+ if !canRepeat {
+ continue
+ }
+
+ // Check offset 2
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+ nextHashS := hash5(cv, betterShortTableBits)
+ nextHashL := hash8(cv, betterLongTableBits)
+
+ // We have at least 4 byte match.
+ // No need to check backwards. We come straight from a match
+ l := 4 + e.matchlen(s+4, o2+4, src)
+
+ e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
+ e.markLongShardDirty(nextHashL)
+ e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.markShortShardDirty(nextHashS)
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ // Finished
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// Reset will reset the encoder and set a dictionary if not nil.
+func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d != nil {
+ panic("betterFastEncoder: Reset with dict")
+ }
+}
+
+// Reset will reset the encoder and set a dictionary if not nil.
+func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d == nil {
+ return
+ }
+ // Init or copy dict table
+ if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+ if len(e.dictTable) != len(e.table) {
+ e.dictTable = make([]tableEntry, len(e.table))
+ }
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ for i := e.maxMatchOff; i < end; i += 4 {
+ const hashLog = betterShortTableBits
+
+ cv := load6432(d.content, i-e.maxMatchOff)
+ nextHash := hash5(cv, hashLog) // 0 -> 4
+ nextHash1 := hash5(cv>>8, hashLog) // 1 -> 5
+ nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6
+ nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7
+ e.dictTable[nextHash] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ e.dictTable[nextHash1] = tableEntry{
+ val: uint32(cv >> 8),
+ offset: i + 1,
+ }
+ e.dictTable[nextHash2] = tableEntry{
+ val: uint32(cv >> 16),
+ offset: i + 2,
+ }
+ e.dictTable[nextHash3] = tableEntry{
+ val: uint32(cv >> 24),
+ offset: i + 3,
+ }
+ }
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+
+ // Init or copy dict table
+ if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+ if len(e.dictLongTable) != len(e.longTable) {
+ e.dictLongTable = make([]prevEntry, len(e.longTable))
+ }
+ if len(d.content) >= 8 {
+ cv := load6432(d.content, 0)
+ h := hash8(cv, betterLongTableBits)
+ e.dictLongTable[h] = prevEntry{
+ offset: e.maxMatchOff,
+ prev: e.dictLongTable[h].offset,
+ }
+
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ off := 8 // First to read
+ for i := e.maxMatchOff + 1; i < end; i++ {
+ cv = cv>>8 | (uint64(d.content[off]) << 56)
+ h := hash8(cv, betterLongTableBits)
+ e.dictLongTable[h] = prevEntry{
+ offset: i,
+ prev: e.dictLongTable[h].offset,
+ }
+ off++
+ }
+ }
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+
+ // Reset table to initial state
+ {
+ dirtyShardCnt := 0
+ if !e.allDirty {
+ for i := range e.shortTableShardDirty {
+ if e.shortTableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+ const shardCnt = betterShortTableShardCnt
+ const shardSize = betterShortTableShardSize
+ if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+ copy(e.table[:], e.dictTable)
+ for i := range e.shortTableShardDirty {
+ e.shortTableShardDirty[i] = false
+ }
+ } else {
+ for i := range e.shortTableShardDirty {
+ if !e.shortTableShardDirty[i] {
+ continue
+ }
+
+ copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ e.shortTableShardDirty[i] = false
+ }
+ }
+ }
+ {
+ dirtyShardCnt := 0
+ if !e.allDirty {
+			for i := range e.longTableShardDirty {
+				if e.longTableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+ const shardCnt = betterLongTableShardCnt
+ const shardSize = betterLongTableShardSize
+ if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+ copy(e.longTable[:], e.dictLongTable)
+ for i := range e.longTableShardDirty {
+ e.longTableShardDirty[i] = false
+ }
+ } else {
+ for i := range e.longTableShardDirty {
+ if !e.longTableShardDirty[i] {
+ continue
+ }
+
+ copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize])
+ e.longTableShardDirty[i] = false
+ }
+ }
+ }
+ e.cur = e.maxMatchOff
+ e.allDirty = false
+}
+
+func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) {
+ e.longTableShardDirty[entryNum/betterLongTableShardSize] = true
+}
+
+func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) {
+ e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true
+}
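+
+// copyDirtyShards is an illustrative sketch (hypothetical helper, not part of
+// this package's API) of the restore strategy used in Reset above: only the
+// table shards that Encode dirtied are copied back from the dictionary
+// snapshot, falling back to one bulk copy when most shards are dirty anyway.
+func copyDirtyShards(dst, snapshot []tableEntry, dirty []bool, shardSize int, allDirty bool) {
+	dirtyCnt := 0
+	for _, d := range dirty {
+		if d {
+			dirtyCnt++
+		}
+	}
+	if allDirty || dirtyCnt > len(dirty)*4/6 {
+		copy(dst, snapshot) // one large copy beats many small ones
+		for i := range dirty {
+			dirty[i] = false
+		}
+		return
+	}
+	for i := range dirty {
+		if !dirty[i] {
+			continue
+		}
+		copy(dst[i*shardSize:(i+1)*shardSize], snapshot[i*shardSize:(i+1)*shardSize])
+		dirty[i] = false
+	}
+}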
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index 02c79814fc2..8629d43d868 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -4,11 +4,16 @@
package zstd
+import "fmt"
+
const (
dFastLongTableBits = 17 // Bits used in the long match table
dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
+	dLongTableShardSize = dFastLongTableSize / dLongTableShardCnt   // Size of an individual shard
+
dFastShortTableBits = tableBits // Bits used in the short match table
dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
@@ -19,6 +24,13 @@ type doubleFastEncoder struct {
longTable [dFastLongTableSize]tableEntry
}
+type doubleFastEncoderDict struct {
+ fastEncoderDict
+ longTable [dFastLongTableSize]tableEntry
+ dictLongTable []tableEntry
+ longTableShardDirty [dLongTableShardCnt]bool
+}
+
// Encode mimics functionality in zstd_dfast.c
func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
const (
@@ -29,7 +41,7 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
- for e.cur > (1<<30)+e.maxMatchOff {
+ for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
@@ -61,6 +73,7 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
e.longTable[i].offset = v
}
e.cur = e.maxMatchOff
+ break
}
s := e.addBlock(src)
@@ -77,21 +90,13 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
sLimit := int32(len(src)) - inputMargin
// stepSize is the number of bytes to skip on every main loop iteration.
// It should be >= 1.
- stepSize := int32(e.o.targetLength)
- if stepSize == 0 {
- stepSize++
- }
-
- // TEMPLATE
+ const stepSize = 1
const kSearchStrength = 8
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
cv := load6432(src, s)
- // nextHash is the hash at s
- nextHashS := hash5(cv, dFastShortTableBits)
- nextHashL := hash8(cv, dFastLongTableBits)
// Relative offsets
offset1 := int32(blk.recentOffsets[0])
@@ -115,12 +120,12 @@ encodeLoop:
canRepeat := len(blk.sequences) > 2
for {
- if debug && canRepeat && offset1 == 0 {
+ if debugAsserts && canRepeat && offset1 == 0 {
panic("offset0 was 0")
}
- nextHashS = nextHashS & dFastShortTableMask
- nextHashL = nextHashL & dFastLongTableMask
+ nextHashS := hash5(cv, dFastShortTableBits)
+ nextHashL := hash8(cv, dFastLongTableBits)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -172,24 +177,288 @@ encodeLoop:
break encodeLoop
}
cv = load6432(src, s)
- nextHashS = hash5(cv, dFastShortTableBits)
- nextHashL = hash8(cv, dFastLongTableBits)
continue
}
- const repOff2 = 1
- // We deviate from the reference encoder and also check offset 2.
- // Slower and not consistently better, so disabled.
- // repIndex = s - offset2 + repOff2
- if false && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff2*8)) {
+ }
+ // Find the offsets of our two matches.
+ coffsetL := s - (candidateL.offset - e.cur)
+ coffsetS := s - (candidateS.offset - e.cur)
+
+ // Check if we have a long match.
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+ break
+ }
+
+ // Check if we have a short match.
+ if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+ // found a regular match
+ // See if we can find a long match at s+1
+ const checkAt = 1
+ cv := load6432(src, s+checkAt)
+ nextHashL = hash8(cv, dFastLongTableBits)
+ candidateL = e.longTable[nextHashL]
+ coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+ // We can store it, since we have at least a 4 byte match.
+ e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur
+ s += checkAt
+ if debugMatches {
+ println("long match (after short)")
+ }
+ break
+ }
+
+ t = candidateS.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ if debugMatches {
+ println("short match")
+ }
+ break
+ }
+
+ // No match found, move forward in input.
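+			// The skip grows with the distance since the last emitted byte,
+			// so incompressible regions are scanned progressively faster.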
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+
+ // A 4-byte match has been found. Update recent offsets.
+ // We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ if debugAsserts && canRepeat && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlen(s+4, t+4, src) + 4
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start+1 (long) and start+2 (short)
+ index0 := s - l + 1
+ // Index match end-2 (long) and end-1 (short)
+ index1 := s - 2
+
+ cv0 := load6432(src, index0)
+ cv1 := load6432(src, index1)
+ te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+ te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+ e.longTable[hash8(cv0, dFastLongTableBits)] = te0
+ e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+ cv0 >>= 8
+ cv1 >>= 8
+ te0.offset++
+ te1.offset++
+ te0.val = uint32(cv0)
+ te1.val = uint32(cv1)
+ e.table[hash5(cv0, dFastShortTableBits)] = te0
+ e.table[hash5(cv1, dFastShortTableBits)] = te1
+
+ cv = load6432(src, s)
+
+ if !canRepeat {
+ continue
+ }
+
+ // Check offset 2
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+ nextHashS := hash5(cv, dFastShortTableBits)
+ nextHashL := hash8(cv, dFastLongTableBits)
+
+			// We have at least a 4 byte match.
+			// No need to check backwards. We come straight from a match.
+ l := 4 + e.matchlen(s+4, o2+4, src)
+
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.table[nextHashS] = entry
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ // Finished
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ if e.cur >= bufferReset {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.longTable[:] {
+ e.longTable[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ }
+
+ s := int32(0)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
+ const stepSize = 1
+
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ var t int32
+ for {
+
+ nextHashS := hash5(cv, dFastShortTableBits)
+ nextHashL := hash8(cv, dFastLongTableBits)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ const repOff = 1
+ repIndex := s - offset1 + repOff
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.table[nextHashS] = entry
+
+ if len(blk.sequences) > 2 {
+ if load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff2, repIndex+4, src)
+ //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:]))
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
- start := s + repOff2
+ start := s + repOff
// We end the search early, so we don't risk 0 literals
// and have to do special offset treatment.
startLimit := nextEmit + 1
@@ -198,33 +467,29 @@ encodeLoop:
if tMin < 0 {
tMin = 0
}
- for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
repIndex--
start--
seq.matchLen++
}
addLiterals(&seq, start)
- // rep 2
- seq.offset = 2
+ // rep 0
+ seq.offset = 1
if debugSequences {
- println("repeat sequence 2", seq, "next s:", s)
+ println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff2
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debug {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
}
cv = load6432(src, s)
- nextHashS = hash5(cv, dFastShortTableBits)
- nextHashL = hash8(cv, dFastLongTableBits)
- // Swap offsets
- offset1, offset2 = offset2, offset1
continue
}
}
@@ -238,13 +503,13 @@ encodeLoop:
// Reference encoder checks all 8 bytes, we only check 4,
// but the likelihood of both the first 4 bytes and the hash matching should be enough.
t = candidateL.offset - e.cur
- if debug && s <= t {
- panic("s <= t")
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur))
}
- if debug && s-t > e.maxMatchOff {
+ if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
- if debug {
+ if debugMatches {
println("long match")
}
break
@@ -268,23 +533,23 @@ encodeLoop:
// but the likelihood of both the first 4 bytes and the hash matching should be enough.
t = candidateL.offset - e.cur
s += checkAt
- if debug {
+ if debugMatches {
println("long match (after short)")
}
break
}
t = candidateS.offset - e.cur
- if debug && s <= t {
- panic("s <= t")
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
- if debug && s-t > e.maxMatchOff {
+ if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
- if debug && t < 0 {
+ if debugAsserts && t < 0 {
panic("t<0")
}
- if debug {
+ if debugMatches {
println("short match")
}
break
@@ -296,8 +561,6 @@ encodeLoop:
break encodeLoop
}
cv = load6432(src, s)
- nextHashS = hash5(cv, dFastShortTableBits)
- nextHashL = hash8(cv, dFastLongTableBits)
}
// A 4-byte match has been found. Update recent offsets.
@@ -305,23 +568,20 @@ encodeLoop:
offset2 = offset1
offset1 = s - t
- if debug && s <= t {
- panic("s <= t")
- }
-
- if debug && canRepeat && int(offset1) > len(src) {
- panic("invalid offset")
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
// Extend the 4-byte match as long as possible.
- l := e.matchlen(s+4, t+4, src) + 4
+ //l := e.matchlen(s+4, t+4, src) + 4
+ l := int32(matchLen(src[s+4:], src[t+4:])) + 4
// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
- for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
@@ -345,38 +605,55 @@ encodeLoop:
break encodeLoop
}
- // Index match start + 2 and end - 2
- index0 := s - l + 2
+ // Index match start+1 (long) and start+2 (short)
+ index0 := s - l + 1
+ // Index match end-2 (long) and end-1 (short)
index1 := s - 2
- if l == 4 {
- // if l is 4, we would check the same place twice, so index s-1 instead.
- index1++
- }
cv0 := load6432(src, index0)
cv1 := load6432(src, index1)
- entry0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
- entry1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
- e.table[hash5(cv0, dFastShortTableBits)&dFastShortTableMask] = entry0
- e.longTable[hash8(cv0, dFastLongTableBits)&dFastLongTableMask] = entry0
- e.table[hash5(cv1, dFastShortTableBits)&dFastShortTableMask] = entry1
- e.longTable[hash8(cv1, dFastLongTableBits)&dFastLongTableMask] = entry1
+ te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+ te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+ e.longTable[hash8(cv0, dFastLongTableBits)] = te0
+ e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+ cv0 >>= 8
+ cv1 >>= 8
+ te0.offset++
+ te1.offset++
+ te0.val = uint32(cv0)
+ te1.val = uint32(cv1)
+ e.table[hash5(cv0, dFastShortTableBits)] = te0
+ e.table[hash5(cv1, dFastShortTableBits)] = te1
cv = load6432(src, s)
- nextHashS = hash5(cv, dFastShortTableBits)
- nextHashL = hash8(cv, dFastLongTableBits)
+
+ if len(blk.sequences) <= 2 {
+ continue
+ }
// Check offset 2
- if o2 := s - offset2; canRepeat && o2 > 0 && load3232(src, o2) == uint32(cv) {
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+			nextHashS := hash5(cv, dFastShortTableBits)
+ nextHashL := hash8(cv, dFastLongTableBits)
+
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
- l := 4 + e.matchlen(s+4, o2+4, src)
- // Store this, since we have it.
+ //l := 4 + e.matchlen(s+4, o2+4, src)
+ l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+
entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
- e.longTable[nextHashL&dFastLongTableMask] = entry
- e.table[nextHashS&dFastShortTableMask] = entry
+ e.longTable[nextHashL] = entry
+ e.table[nextHashS] = entry
seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0
+
// Since litlen is always 0, this is offset 1.
seq.offset = 1
s += l
@@ -389,12 +666,10 @@ encodeLoop:
// Swap offset 1 and 2.
offset1, offset2 = offset2, offset1
if s >= sLimit {
+ // Finished
break encodeLoop
}
- // Prepare next loop.
cv = load6432(src, s)
- nextHashS = hash5(cv, dFastShortTableBits)
- nextHashL = hash8(cv, dFastLongTableBits)
}
}
@@ -402,9 +677,445 @@ encodeLoop:
blk.literals = append(blk.literals, src[nextEmit:]...)
blk.extraLits = len(src) - int(nextEmit)
}
- blk.recentOffsets[0] = uint32(offset1)
- blk.recentOffsets[1] = uint32(offset2)
if debug {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
+
+	// We do not store history, so we must offset e.cur to avoid false matches for the next user.
+ if e.cur < bufferReset {
+ e.cur += int32(len(src))
+ }
+}
+
+// Encode will encode the content, with a dictionary if initialized for it.
+func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.longTable[:] {
+ e.longTable[i] = tableEntry{}
+ }
+ e.markAllShardsDirty()
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.longTable[i].offset = v
+ }
+ e.markAllShardsDirty()
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
+ const stepSize = 1
+
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ var t int32
+ // We allow the encoder to optionally turn off repeat offsets across blocks
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHashS := hash5(cv, dFastShortTableBits)
+ nextHashL := hash8(cv, dFastLongTableBits)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ const repOff = 1
+ repIndex := s - offset1 + repOff
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.markLongShardDirty(nextHashL)
+ e.table[nextHashS] = entry
+ e.markShardDirty(nextHashS)
+
+ if canRepeat {
+ if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+ // Consider history as well.
+ var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+					s += length + repOff
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+							println("repeat ended", s, length)
+						}
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ }
+ // Find the offsets of our two matches.
+ coffsetL := s - (candidateL.offset - e.cur)
+ coffsetS := s - (candidateS.offset - e.cur)
+
+ // Check if we have a long match.
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+ break
+ }
+
+ // Check if we have a short match.
+ if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+ // found a regular match
+ // See if we can find a long match at s+1
+ const checkAt = 1
+ cv := load6432(src, s+checkAt)
+ nextHashL = hash8(cv, dFastLongTableBits)
+ candidateL = e.longTable[nextHashL]
+ coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+ // We can store it, since we have at least a 4 byte match.
+ e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+ e.markLongShardDirty(nextHashL)
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur
+ s += checkAt
+ if debugMatches {
+ println("long match (after short)")
+ }
+ break
+ }
+
+ t = candidateS.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ if debugMatches {
+ println("short match")
+ }
+ break
+ }
+
+ // No match found, move forward in input.
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+
+ // A 4-byte match has been found. Update recent offsets.
+ // We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ if debugAsserts && canRepeat && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlen(s+4, t+4, src) + 4
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start+1 (long) and start+2 (short)
+ index0 := s - l + 1
+ // Index match end-2 (long) and end-1 (short)
+ index1 := s - 2
+
+ cv0 := load6432(src, index0)
+ cv1 := load6432(src, index1)
+ te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+ te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+ longHash1 := hash8(cv0, dFastLongTableBits)
+		longHash2 := hash8(cv1, dFastLongTableBits)
+ e.longTable[longHash1] = te0
+ e.longTable[longHash2] = te1
+ e.markLongShardDirty(longHash1)
+ e.markLongShardDirty(longHash2)
+ cv0 >>= 8
+ cv1 >>= 8
+ te0.offset++
+ te1.offset++
+ te0.val = uint32(cv0)
+ te1.val = uint32(cv1)
+ hashVal1 := hash5(cv0, dFastShortTableBits)
+ hashVal2 := hash5(cv1, dFastShortTableBits)
+ e.table[hashVal1] = te0
+ e.markShardDirty(hashVal1)
+ e.table[hashVal2] = te1
+ e.markShardDirty(hashVal2)
+
+ cv = load6432(src, s)
+
+ if !canRepeat {
+ continue
+ }
+
+ // Check offset 2
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+ nextHashS := hash5(cv, dFastShortTableBits)
+ nextHashL := hash8(cv, dFastLongTableBits)
+
+			// We have at least a 4 byte match.
+			// No need to check backwards. We come straight from a match.
+ l := 4 + e.matchlen(s+4, o2+4, src)
+
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.markLongShardDirty(nextHashL)
+ e.table[nextHashS] = entry
+ e.markShardDirty(nextHashS)
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ // Finished
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+ // If we encoded more than 64K mark all dirty.
+ if len(src) > 64<<10 {
+ e.markAllShardsDirty()
+ }
+}
+
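+// shiftTableOffsets is an illustrative sketch (hypothetical helper, not part
+// of this package) of the wraparound protection used above: once e.cur nears
+// bufferReset, every stored offset is either dropped as out of window or
+// rebased so it stays valid after e.cur is reset to maxMatchOff.
+func shiftTableOffsets(table []tableEntry, cur, histLen, maxMatchOff int32) {
+	minOff := cur + histLen - maxMatchOff
+	for i := range table {
+		v := table[i].offset
+		if v < minOff {
+			v = 0 // too far back to ever match again
+		} else {
+			v = v - cur + maxMatchOff // rebase onto the reset e.cur
+		}
+		table[i].offset = v
+	}
+}
+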
+// Reset will reset and set a dictionary if not nil
+func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) {
+ e.fastEncoder.Reset(d, singleBlock)
+ if d != nil {
+ panic("doubleFastEncoder: Reset with dict not supported")
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
+ allDirty := e.allDirty
+ e.fastEncoderDict.Reset(d, singleBlock)
+ if d == nil {
+ return
+ }
+
+	// Init or copy the dict long table
+ if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+ if len(e.dictLongTable) != len(e.longTable) {
+ e.dictLongTable = make([]tableEntry, len(e.longTable))
+ }
+ if len(d.content) >= 8 {
+ cv := load6432(d.content, 0)
+ e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+ val: uint32(cv),
+ offset: e.maxMatchOff,
+ }
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ for i := e.maxMatchOff + 1; i < end; i++ {
+ cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
+ e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ }
+ }
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+ // Reset table to initial state
+ e.cur = e.maxMatchOff
+
+ dirtyShardCnt := 0
+ if !allDirty {
+ for i := range e.longTableShardDirty {
+ if e.longTableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+
+ if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
+ copy(e.longTable[:], e.dictLongTable)
+ for i := range e.longTableShardDirty {
+ e.longTableShardDirty[i] = false
+ }
+ return
+ }
+ for i := range e.longTableShardDirty {
+ if !e.longTableShardDirty[i] {
+ continue
+ }
+
+ copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ e.longTableShardDirty[i] = false
+ }
+}
+
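+// hashSketch is an illustrative stand-in (hypothetical; the real hash5/hash8
+// helpers live elsewhere in this package) for the multiplicative hashing used
+// above: keep only the bytes being hashed, multiply by a large odd constant,
+// and take the top hashLog bits as the table index. The prime below is an
+// assumed placeholder, not the package's actual constant.
+func hashSketch(u uint64, nBytes, hashLog uint8) uint32 {
+	const primeSketch = 0x9e3779b185ebca87
+	u <<= (64 - 8*nBytes) % 64 // drop the bytes that are not part of the hash
+	return uint32((u * primeSketch) >> (64 - hashLog))
+}
+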
+func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) {
+ e.longTableShardDirty[entryNum/dLongTableShardSize] = true
}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index a8edaa88859..ba4a17e1064 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -5,15 +5,17 @@
package zstd
import (
+ "fmt"
+ "math"
"math/bits"
-
- "github.com/klauspost/compress/zstd/internal/xxhash"
)
const (
- tableBits = 15 // Bits used in the table
- tableSize = 1 << tableBits // Size of the table
- tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ tableBits = 15 // Bits used in the table
+ tableSize = 1 << tableBits // Size of the table
+ tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table
+ tableShardSize = tableSize / tableShardCnt // Size of an individual shard
+ tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
maxMatchLength = 131074
)
@@ -23,47 +25,15 @@ type tableEntry struct {
}
type fastEncoder struct {
- o encParams
- // cur is the offset at the start of hist
- cur int32
- // maximum offset. Should be at least 2x block size.
- maxMatchOff int32
- hist []byte
- crc *xxhash.Digest
- table [tableSize]tableEntry
- tmp [8]byte
- blk *blockEnc
-}
-
-// CRC returns the underlying CRC writer.
-func (e *fastEncoder) CRC() *xxhash.Digest {
- return e.crc
-}
-
-// AppendCRC will append the CRC to the destination slice and return it.
-func (e *fastEncoder) AppendCRC(dst []byte) []byte {
- crc := e.crc.Sum(e.tmp[:0])
- dst = append(dst, crc[7], crc[6], crc[5], crc[4])
- return dst
-}
-
-// WindowSize returns the window size of the encoder,
-// or a window size small enough to contain the input size, if > 0.
-func (e *fastEncoder) WindowSize(size int) int32 {
- if size > 0 && size < int(e.maxMatchOff) {
- b := int32(1) << uint(bits.Len(uint(size)))
- // Keep minimum window.
- if b < 1024 {
- b = 1024
- }
- return b
- }
- return e.maxMatchOff
+ fastBase
+ table [tableSize]tableEntry
}
-// Block returns the current block.
-func (e *fastEncoder) Block() *blockEnc {
- return e.blk
+type fastEncoderDict struct {
+ fastEncoder
+ dictTable []tableEntry
+ tableShardDirty [tableShardCnt]bool
+ allDirty bool
}
// Encode mimics functionality in zstd_fast.c
@@ -74,7 +44,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
- for e.cur > (1<<30)+e.maxMatchOff {
+ for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
@@ -94,6 +64,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
e.table[i].offset = v
}
e.cur = e.maxMatchOff
+ break
}
s := e.addBlock(src)
@@ -110,22 +81,16 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
sLimit := int32(len(src)) - inputMargin
// stepSize is the number of bytes to skip on every main loop iteration.
// It should be >= 2.
- stepSize := int32(e.o.targetLength)
- if stepSize == 0 {
- stepSize++
- }
- stepSize++
+ const stepSize = 2
// TEMPLATE
const hashLog = tableBits
// seems global, but would be nice to tweak.
- const kSearchStrength = 8
+ const kSearchStrength = 7
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
cv := load6432(src, s)
- // nextHash is the hash at s
- nextHash := hash6(cv, hashLog)
// Relative offsets
offset1 := int32(blk.recentOffsets[0])
@@ -153,12 +118,12 @@ encodeLoop:
canRepeat := len(blk.sequences) > 2
for {
- if debug && canRepeat && offset1 == 0 {
+ if debugAsserts && canRepeat && offset1 == 0 {
panic("offset0 was 0")
}
- nextHash2 := hash6(cv>>8, hashLog) & tableMask
- nextHash = nextHash & tableMask
+ nextHash := hash6(cv, hashLog)
+ nextHash2 := hash6(cv>>8, hashLog)
candidate := e.table[nextHash]
candidate2 := e.table[nextHash2]
repIndex := s - offset1 + 2
@@ -169,9 +134,22 @@ encodeLoop:
if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+6, repIndex+4, src)
+ var length int32
+ // length = 4 + e.matchlen(s+6, repIndex+4, src)
+ {
+ a := src[s+6:]
+ b := src[repIndex+4:]
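+				// endI rounds len(a) down to a multiple of 8 (and clamps it
+				// to int32), so the 8-byte loads below stay in bounds.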
+ endI := len(a) & (math.MaxInt32 - 7)
+ length = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -197,18 +175,16 @@ encodeLoop:
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + 2
+ s += length + 2
nextEmit = s
if s >= sLimit {
if debug {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
}
cv = load6432(src, s)
- //nextHash = hashLen(cv, hashLog, mls)
- nextHash = hash6(cv, hashLog)
continue
}
coffset0 := s - (candidate.offset - e.cur)
@@ -216,10 +192,10 @@ encodeLoop:
if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
// found a regular match
t = candidate.offset - e.cur
- if debug && s <= t {
- panic("s <= t")
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
- if debug && s-t > e.maxMatchOff {
+ if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
break
@@ -229,13 +205,13 @@ encodeLoop:
// found a regular match
t = candidate2.offset - e.cur
s++
- if debug && s <= t {
- panic("s <= t")
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
- if debug && s-t > e.maxMatchOff {
+ if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
- if debug && t < 0 {
+ if debugAsserts && t < 0 {
panic("t<0")
}
break
@@ -245,22 +221,34 @@ encodeLoop:
break encodeLoop
}
cv = load6432(src, s)
- nextHash = hash6(cv, hashLog)
}
// A 4-byte match has been found. We'll later see if more than 4 bytes.
offset2 = offset1
offset1 = s - t
- if debug && s <= t {
- panic("s <= t")
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
- if debug && canRepeat && int(offset1) > len(src) {
+ if debugAsserts && canRepeat && int(offset1) > len(src) {
panic("invalid offset")
}
// Extend the 4-byte match as long as possible.
- l := e.matchlen(s+4, t+4, src) + 4
+ //l := e.matchlen(s+4, t+4, src) + 4
+ var l int32
+ {
+ a := src[s+4:]
+ b := src[t+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ l = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
// Extend backwards
tMin := s - e.maxMatchOff
@@ -292,15 +280,29 @@ encodeLoop:
break encodeLoop
}
cv = load6432(src, s)
- nextHash = hash6(cv, hashLog)
// Check offset 2
- if o2 := s - offset2; canRepeat && o2 > 0 && load3232(src, o2) == uint32(cv) {
+ if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
// We have at least a 4 byte match.
// No need to check backwards. We come straight from a match.
- l := 4 + e.matchlen(s+4, o2+4, src)
+ //l := 4 + e.matchlen(s+4, o2+4, src)
+ var l int32
+ {
+ a := src[s+4:]
+ b := src[o2+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ l = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
+
// Store this, since we have it.
- e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ nextHash := hash6(cv, hashLog)
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0
// Since litlen is always 0, this is offset 1.
@@ -319,7 +321,6 @@ encodeLoop:
}
// Prepare next loop.
cv = load6432(src, s)
- nextHash = hash6(cv, hashLog)
}
}
@@ -334,83 +335,684 @@ encodeLoop:
}
}
-func (e *fastEncoder) addBlock(src []byte) int32 {
- // check if we have space already
- if len(e.hist)+len(src) > cap(e.hist) {
- if cap(e.hist) == 0 {
- l := e.maxMatchOff * 2
- // Make it at least 1MB.
- if l < 1<<20 {
- l = 1 << 20
- }
- e.hist = make([]byte, 0, l)
- } else {
- if cap(e.hist) < int(e.maxMatchOff*2) {
- panic("unexpected buffer size")
- }
- // Move down
- offset := int32(len(e.hist)) - e.maxMatchOff
- copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
- e.cur += offset
- e.hist = e.hist[:e.maxMatchOff]
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if debug {
+ if len(src) > maxBlockSize {
+ panic("src too big")
}
}
- s := int32(len(e.hist))
- e.hist = append(e.hist, src...)
- return s
-}
-// useBlock will replace the block with the provided one,
-// but transfer recent offsets from the previous.
-func (e *fastEncoder) UseBlock(enc *blockEnc) {
- enc.reset(e.blk)
- e.blk = enc
+ // Protect against e.cur wraparound.
+ if e.cur >= bufferReset {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ }
+
+ s := int32(0)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+		// We will not use repeat offsets across blocks.
+		// This is enforced by not using them for the first 3 matches.
+
+ for {
+ nextHash := hash6(cv, hashLog)
+ nextHash2 := hash6(cv>>8, hashLog)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ // length := 4 + e.matchlen(s+6, repIndex+4, src)
+ // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
+ var length int32
+ {
+ a := src[s+6:]
+ b := src[repIndex+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ length = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ sMin := s - e.maxMatchOff
+ if sMin < 0 {
+ sMin = 0
+ }
+ for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + 2
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ coffset0 := s - (candidate.offset - e.cur)
+ coffset1 := s - (candidate2.offset - e.cur) + 1
+ if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+ // found a regular match
+ t = candidate.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff))
+ }
+ break
+ }
+
+ if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+ // found a regular match
+ t = candidate2.offset - e.cur
+ s++
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ break
+ }
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ // A 4-byte match has been found. We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ if debugAsserts && t < 0 {
+ panic(fmt.Sprintf("t (%d) < 0 ", t))
+ }
+ // Extend the 4-byte match as long as possible.
+ //l := e.matchlenNoHist(s+4, t+4, src) + 4
+ // l := int32(matchLen(src[s+4:], src[t+4:])) + 4
+ var l int32
+ {
+ a := src[s+4:]
+ b := src[t+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ l = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence.
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ // Don't use repeat offsets
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+
+ // Check offset 2
+ if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
+			// We have at least a 4 byte match.
+			// No need to check backwards. We come straight from a match.
+ //l := 4 + e.matchlenNoHist(s+4, o2+4, src)
+ // l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+ var l int32
+ {
+ a := src[s+4:]
+ b := src[o2+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ l = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
+
+ // Store this, since we have it.
+ nextHash := hash6(cv, hashLog)
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ break encodeLoop
+ }
+ // Prepare next loop.
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+	// We do not store history, so we must offset e.cur to avoid false matches for the next user.
+ if e.cur < bufferReset {
+ e.cur += int32(len(src))
+ }
}
-func (e *fastEncoder) matchlen(s, t int32, src []byte) int32 {
+// Encode will encode the content, with a dictionary if initialized for it.
+func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if e.allDirty || len(src) > 32<<10 {
+ e.fastEncoder.Encode(blk, src)
+ e.allDirty = true
+ return
+ }
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 7
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
if debug {
- if s < 0 {
- panic("s<0")
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+		// We will not use repeat offsets across blocks.
+		// This is enforced by not using them for the first 3 matches.
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHash := hash6(cv, hashLog)
+ nextHash2 := hash6(cv>>8, hashLog)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.markShardDirty(nextHash)
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+ e.markShardDirty(nextHash2)
+
+ if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ var length int32
+ // length = 4 + e.matchlen(s+6, repIndex+4, src)
+ {
+ a := src[s+6:]
+ b := src[repIndex+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ length = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ sMin := s - e.maxMatchOff
+ if sMin < 0 {
+ sMin = 0
+ }
+ for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + 2
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ coffset0 := s - (candidate.offset - e.cur)
+ coffset1 := s - (candidate2.offset - e.cur) + 1
+ if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+ // found a regular match
+ t = candidate.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ break
+ }
+
+ if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+ // found a regular match
+ t = candidate2.offset - e.cur
+ s++
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ break
+ }
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ // A 4-byte match has been found. We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
- if t < 0 {
- panic("t<0")
+
+ if debugAsserts && canRepeat && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Extend the 4-byte match as long as possible.
+ //l := e.matchlen(s+4, t+4, src) + 4
+ var l int32
+ {
+ a := src[s+4:]
+ b := src[t+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ l = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence.
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ // Don't use repeat offsets
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
}
- if s-t > e.maxMatchOff {
- panic(s - t)
+ cv = load6432(src, s)
+
+ // Check offset 2
+ if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
+			// We have at least a 4 byte match.
+			// No need to check backwards. We come straight from a match.
+ //l := 4 + e.matchlen(s+4, o2+4, src)
+ var l int32
+ {
+ a := src[s+4:]
+ b := src[o2+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ l = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
+
+ // Store this, since we have it.
+ nextHash := hash6(cv, hashLog)
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.markShardDirty(nextHash)
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ break encodeLoop
+ }
+ // Prepare next loop.
+ cv = load6432(src, s)
}
}
- s1 := int(s) + maxMatchLength - 4
- if s1 > len(src) {
- s1 = len(src)
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
+}
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:s1], src[t:]))
+// Reset will reset and set a dictionary if not nil
+func (e *fastEncoder) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d != nil {
+ panic("fastEncoder: Reset with dict")
+ }
}
-// Reset the encoding table.
-func (e *fastEncoder) Reset() {
- if e.blk == nil {
- e.blk = &blockEnc{}
- e.blk.init()
- } else {
- e.blk.reset(nil)
+// Reset will reset and set a dictionary if not nil
+func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d == nil {
+ return
+ }
+
+ // Init or copy dict table
+ if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+ if len(e.dictTable) != len(e.table) {
+ e.dictTable = make([]tableEntry, len(e.table))
+ }
+		end := e.maxMatchOff + int32(len(d.content)) - 8
+		for i := e.maxMatchOff; i < end; i += 3 {
+			const hashLog = tableBits
+
+			cv := load6432(d.content, i-e.maxMatchOff)
+			nextHash := hash6(cv, hashLog)      // 0 -> 5
+			nextHash1 := hash6(cv>>8, hashLog)  // 1 -> 6
+			nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7
+			e.dictTable[nextHash] = tableEntry{
+				val:    uint32(cv),
+				offset: i,
+			}
+			e.dictTable[nextHash1] = tableEntry{
+				val:    uint32(cv >> 8),
+				offset: i + 1,
+			}
+			e.dictTable[nextHash2] = tableEntry{
+				val:    uint32(cv >> 16),
+				offset: i + 2,
+			}
+		}
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+
+ e.cur = e.maxMatchOff
+ dirtyShardCnt := 0
+ if !e.allDirty {
+ for i := range e.tableShardDirty {
+ if e.tableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
}
- e.blk.initNewEncode()
- if e.crc == nil {
- e.crc = xxhash.New()
- } else {
- e.crc.Reset()
+
+ const shardCnt = tableShardCnt
+ const shardSize = tableShardSize
+ if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+ copy(e.table[:], e.dictTable)
+ for i := range e.tableShardDirty {
+ e.tableShardDirty[i] = false
+ }
+ e.allDirty = false
+ return
}
- if cap(e.hist) < int(e.maxMatchOff*2) {
- l := e.maxMatchOff * 2
- // Make it at least 1MB.
- if l < 1<<20 {
- l = 1 << 20
+ for i := range e.tableShardDirty {
+ if !e.tableShardDirty[i] {
+ continue
}
- e.hist = make([]byte, 0, l)
+
+ copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ e.tableShardDirty[i] = false
}
- // We offset current position so everything will be out of reach
- e.cur += e.maxMatchOff + int32(len(e.hist))
- e.hist = e.hist[:0]
+ e.allDirty = false
+}
+
+func (e *fastEncoderDict) markAllShardsDirty() {
+ e.allDirty = true
+}
+
+func (e *fastEncoderDict) markShardDirty(entryNum uint32) {
+ e.tableShardDirty[entryNum/tableShardSize] = true
}
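+
+// matchLenSketch is an illustrative stand-alone version (hypothetical helper,
+// not part of this package) of the match-length loops inlined above: XOR the
+// slices eight bytes at a time and use the trailing zero count of the first
+// nonzero difference to locate the exact mismatching byte. It assumes b has
+// at least as many readable bytes as a, as the call sites above guarantee,
+// and unlike the inlined versions it also compares the 0-7 tail bytes.
+func matchLenSketch(a, b []byte) int32 {
+	endI := len(a) & (math.MaxInt32 - 7) // round down to a multiple of 8
+	for i := 0; i < endI; i += 8 {
+		if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+			return int32(i) + int32(bits.TrailingZeros64(diff)>>3)
+		}
+	}
+	// Tail: compare the remaining 0-7 bytes one at a time.
+	n := int32(endI)
+	for int(n) < len(a) && a[n] == b[n] {
+		n++
+	}
+	return n
+}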
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_params.go b/vendor/github.com/klauspost/compress/zstd/enc_params.go
deleted file mode 100644
index b6779ecb6d7..00000000000
--- a/vendor/github.com/klauspost/compress/zstd/enc_params.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2019+ Klaus Post. All rights reserved.
-// License information can be found in the LICENSE file.
-// Based on work by Yann Collet, released under BSD License.
-
-package zstd
-
-type encParams struct {
- // largest match distance : larger == more compression, more memory needed during decompression
- windowLog uint8
-
- // fully searched segment : larger == more compression, slower, more memory (useless for fast)
- chainLog uint8
-
- // dispatch table : larger == faster, more memory
- hashLog uint8
-
- // < nb of searches : larger == more compression, slower
- searchLog uint8
-
- // < match length searched : larger == faster decompression, sometimes less compression
- minMatch uint8
-
- // acceptable match size for optimal parser (only) : larger == more compression, slower
- targetLength uint32
-
- // see ZSTD_strategy definition above
- strategy strategy
-}
-
-// strategy defines the algorithm to use when generating sequences.
-type strategy uint8
-
-const (
- // Compression strategies, listed from fastest to strongest
- strategyFast strategy = iota + 1
- strategyDfast
- strategyGreedy
- strategyLazy
- strategyLazy2
- strategyBtlazy2
- strategyBtopt
- strategyBtultra
- strategyBtultra2
- // note : new strategies _might_ be added in the future.
- // Only the order (from fast to strong) is guaranteed
-
-)
-
-var defEncParams = [4][]encParams{
- { // "default" - for any srcSize > 256 KB
- // W, C, H, S, L, TL, strat
- {19, 12, 13, 1, 6, 1, strategyFast}, // base for negative levels
- {19, 13, 14, 1, 7, 0, strategyFast}, // level 1
- {20, 15, 16, 1, 6, 0, strategyFast}, // level 2
- {21, 16, 17, 1, 5, 1, strategyDfast}, // level 3
- {21, 18, 18, 1, 5, 1, strategyDfast}, // level 4
- {21, 18, 19, 2, 5, 2, strategyGreedy}, // level 5
- {21, 19, 19, 3, 5, 4, strategyGreedy}, // level 6
- {21, 19, 19, 3, 5, 8, strategyLazy}, // level 7
- {21, 19, 19, 3, 5, 16, strategyLazy2}, // level 8
- {21, 19, 20, 4, 5, 16, strategyLazy2}, // level 9
- {22, 20, 21, 4, 5, 16, strategyLazy2}, // level 10
- {22, 21, 22, 4, 5, 16, strategyLazy2}, // level 11
- {22, 21, 22, 5, 5, 16, strategyLazy2}, // level 12
- {22, 21, 22, 5, 5, 32, strategyBtlazy2}, // level 13
- {22, 22, 23, 5, 5, 32, strategyBtlazy2}, // level 14
- {22, 23, 23, 6, 5, 32, strategyBtlazy2}, // level 15
- {22, 22, 22, 5, 5, 48, strategyBtopt}, // level 16
- {23, 23, 22, 5, 4, 64, strategyBtopt}, // level 17
- {23, 23, 22, 6, 3, 64, strategyBtultra}, // level 18
- {23, 24, 22, 7, 3, 256, strategyBtultra2}, // level 19
- {25, 25, 23, 7, 3, 256, strategyBtultra2}, // level 20
- {26, 26, 24, 7, 3, 512, strategyBtultra2}, // level 21
- {27, 27, 25, 9, 3, 999, strategyBtultra2}, // level 22
- },
- { // for srcSize <= 256 KB
- // W, C, H, S, L, T, strat
- {18, 12, 13, 1, 5, 1, strategyFast}, // base for negative levels
- {18, 13, 14, 1, 6, 0, strategyFast}, // level 1
- {18, 14, 14, 1, 5, 1, strategyDfast}, // level 2
- {18, 16, 16, 1, 4, 1, strategyDfast}, // level 3
- {18, 16, 17, 2, 5, 2, strategyGreedy}, // level 4.
- {18, 18, 18, 3, 5, 2, strategyGreedy}, // level 5.
- {18, 18, 19, 3, 5, 4, strategyLazy}, // level 6.
- {18, 18, 19, 4, 4, 4, strategyLazy}, // level 7
- {18, 18, 19, 4, 4, 8, strategyLazy2}, // level 8
- {18, 18, 19, 5, 4, 8, strategyLazy2}, // level 9
- {18, 18, 19, 6, 4, 8, strategyLazy2}, // level 10
- {18, 18, 19, 5, 4, 12, strategyBtlazy2}, // level 11.
- {18, 19, 19, 7, 4, 12, strategyBtlazy2}, // level 12.
- {18, 18, 19, 4, 4, 16, strategyBtopt}, // level 13
- {18, 18, 19, 4, 3, 32, strategyBtopt}, // level 14.
- {18, 18, 19, 6, 3, 128, strategyBtopt}, // level 15.
- {18, 19, 19, 6, 3, 128, strategyBtultra}, // level 16.
- {18, 19, 19, 8, 3, 256, strategyBtultra}, // level 17.
- {18, 19, 19, 6, 3, 128, strategyBtultra2}, // level 18.
- {18, 19, 19, 8, 3, 256, strategyBtultra2}, // level 19.
- {18, 19, 19, 10, 3, 512, strategyBtultra2}, // level 20.
- {18, 19, 19, 12, 3, 512, strategyBtultra2}, // level 21.
- {18, 19, 19, 13, 3, 999, strategyBtultra2}, // level 22.
- },
- { // for srcSize <= 128 KB
- // W, C, H, S, L, T, strat
- {17, 12, 12, 1, 5, 1, strategyFast}, // base for negative levels
- {17, 12, 13, 1, 6, 0, strategyFast}, // level 1
- {17, 13, 15, 1, 5, 0, strategyFast}, // level 2
- {17, 15, 16, 2, 5, 1, strategyDfast}, // level 3
- {17, 17, 17, 2, 4, 1, strategyDfast}, // level 4
- {17, 16, 17, 3, 4, 2, strategyGreedy}, // level 5
- {17, 17, 17, 3, 4, 4, strategyLazy}, // level 6
- {17, 17, 17, 3, 4, 8, strategyLazy2}, // level 7
- {17, 17, 17, 4, 4, 8, strategyLazy2}, // level 8
- {17, 17, 17, 5, 4, 8, strategyLazy2}, // level 9
- {17, 17, 17, 6, 4, 8, strategyLazy2}, // level 10
- {17, 17, 17, 5, 4, 8, strategyBtlazy2}, // level 11
- {17, 18, 17, 7, 4, 12, strategyBtlazy2}, // level 12
- {17, 18, 17, 3, 4, 12, strategyBtopt}, // level 13.
- {17, 18, 17, 4, 3, 32, strategyBtopt}, // level 14.
- {17, 18, 17, 6, 3, 256, strategyBtopt}, // level 15.
- {17, 18, 17, 6, 3, 128, strategyBtultra}, // level 16.
- {17, 18, 17, 8, 3, 256, strategyBtultra}, // level 17.
- {17, 18, 17, 10, 3, 512, strategyBtultra}, // level 18.
- {17, 18, 17, 5, 3, 256, strategyBtultra2}, // level 19.
- {17, 18, 17, 7, 3, 512, strategyBtultra2}, // level 20.
- {17, 18, 17, 9, 3, 512, strategyBtultra2}, // level 21.
- {17, 18, 17, 11, 3, 999, strategyBtultra2}, // level 22.
- },
- { // for srcSize <= 16 KB
- // W, C, H, S, L, T, strat
- {14, 12, 13, 1, 5, 1, strategyFast}, // base for negative levels
- {14, 14, 15, 1, 5, 0, strategyFast}, // level 1
- {14, 14, 15, 1, 4, 0, strategyFast}, // level 2
- {14, 14, 15, 2, 4, 1, strategyDfast}, // level 3
- {14, 14, 14, 4, 4, 2, strategyGreedy}, // level 4
- {14, 14, 14, 3, 4, 4, strategyLazy}, // level 5.
- {14, 14, 14, 4, 4, 8, strategyLazy2}, // level 6
- {14, 14, 14, 6, 4, 8, strategyLazy2}, // level 7
- {14, 14, 14, 8, 4, 8, strategyLazy2}, // level 8.
- {14, 15, 14, 5, 4, 8, strategyBtlazy2}, // level 9.
- {14, 15, 14, 9, 4, 8, strategyBtlazy2}, // level 10.
- {14, 15, 14, 3, 4, 12, strategyBtopt}, // level 11.
- {14, 15, 14, 4, 3, 24, strategyBtopt}, // level 12.
- {14, 15, 14, 5, 3, 32, strategyBtultra}, // level 13.
- {14, 15, 15, 6, 3, 64, strategyBtultra}, // level 14.
- {14, 15, 15, 7, 3, 256, strategyBtultra}, // level 15.
- {14, 15, 15, 5, 3, 48, strategyBtultra2}, // level 16.
- {14, 15, 15, 6, 3, 128, strategyBtultra2}, // level 17.
- {14, 15, 15, 7, 3, 256, strategyBtultra2}, // level 18.
- {14, 15, 15, 8, 3, 256, strategyBtultra2}, // level 19.
- {14, 15, 15, 8, 3, 512, strategyBtultra2}, // level 20.
- {14, 15, 15, 9, 3, 512, strategyBtultra2}, // level 21.
- {14, 15, 15, 10, 3, 999, strategyBtultra2}, // level 22.
- },
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index ed028f5a793..6f0265099ec 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -29,26 +29,28 @@ type Encoder struct {
type encoder interface {
Encode(blk *blockEnc, src []byte)
+ EncodeNoHist(blk *blockEnc, src []byte)
Block() *blockEnc
CRC() *xxhash.Digest
AppendCRC([]byte) []byte
WindowSize(size int) int32
UseBlock(*blockEnc)
- Reset()
+ Reset(d *dict, singleBlock bool)
}
type encoderState struct {
- w io.Writer
- filling []byte
- current []byte
- previous []byte
- encoder encoder
- writing *blockEnc
- err error
- writeErr error
- nWritten int64
- headerWritten bool
- eofWritten bool
+ w io.Writer
+ filling []byte
+ current []byte
+ previous []byte
+ encoder encoder
+ writing *blockEnc
+ err error
+ writeErr error
+ nWritten int64
+ headerWritten bool
+ eofWritten bool
+ fullFrameWritten bool
// This waitgroup indicates an encode is running.
wg sync.WaitGroup
@@ -59,6 +61,7 @@ type encoderState struct {
// NewWriter will create a new Zstandard encoder.
// If the encoder will be used for encoding blocks a nil writer can be used.
func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) {
+ initPredefined()
var e Encoder
e.o.setDefault()
for _, o := range opts {
@@ -69,27 +72,24 @@ func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) {
}
if w != nil {
e.Reset(w)
- } else {
- e.init.Do(func() {
- e.initialize()
- })
}
return &e, nil
}
func (e *Encoder) initialize() {
+ if e.o.concurrent == 0 {
+ e.o.setDefault()
+ }
e.encoders = make(chan encoder, e.o.concurrent)
for i := 0; i < e.o.concurrent; i++ {
- e.encoders <- e.o.encoder()
+ enc := e.o.encoder()
+ e.encoders <- enc
}
}
// Reset will re-initialize the writer and new writes will encode to the supplied writer
// as a new, independent stream.
func (e *Encoder) Reset(w io.Writer) {
- e.init.Do(func() {
- e.initialize()
- })
s := &e.state
s.wg.Wait()
s.wWg.Wait()
@@ -106,16 +106,17 @@ func (e *Encoder) Reset(w io.Writer) {
s.encoder = e.o.encoder()
}
if s.writing == nil {
- s.writing = &blockEnc{}
+ s.writing = &blockEnc{lowMem: e.o.lowMem}
s.writing.init()
}
s.writing.initNewEncode()
s.filling = s.filling[:0]
s.current = s.current[:0]
s.previous = s.previous[:0]
- s.encoder.Reset()
+ s.encoder.Reset(e.o.dict, false)
s.headerWritten = false
s.eofWritten = false
+ s.fullFrameWritten = false
s.w = w
s.err = nil
s.nWritten = 0
@@ -154,7 +155,7 @@ func (e *Encoder) Write(p []byte) (n int, err error) {
if err != nil {
return n, err
}
- if debug && len(s.filling) > 0 {
+ if debugAsserts && len(s.filling) > 0 {
panic(len(s.filling))
}
}
@@ -174,14 +175,38 @@ func (e *Encoder) nextBlock(final bool) error {
return fmt.Errorf("block > maxStoreBlockSize")
}
if !s.headerWritten {
+ // If we have a single block encode, do a sync compression.
+ if final && len(s.filling) == 0 && !e.o.fullZero {
+ s.headerWritten = true
+ s.fullFrameWritten = true
+ s.eofWritten = true
+ return nil
+ }
+ if final && len(s.filling) > 0 {
+ s.current = e.EncodeAll(s.filling, s.current[:0])
+ var n2 int
+ n2, s.err = s.w.Write(s.current)
+ if s.err != nil {
+ return s.err
+ }
+ s.nWritten += int64(n2)
+ s.current = s.current[:0]
+ s.filling = s.filling[:0]
+ s.headerWritten = true
+ s.fullFrameWritten = true
+ s.eofWritten = true
+ return nil
+ }
+
var tmp [maxHeaderSize]byte
fh := frameHeader{
ContentSize: 0,
WindowSize: uint32(s.encoder.WindowSize(0)),
SingleSegment: false,
Checksum: e.o.crc,
- DictID: 0,
+ DictID: e.o.dict.ID(),
}
+
dst, err := fh.appendTo(tmp[:0])
if err != nil {
return err
@@ -211,6 +236,7 @@ func (e *Encoder) nextBlock(final bool) error {
s.wWg.Wait()
_, s.err = s.w.Write(blk.output)
s.nWritten += int64(len(blk.output))
+ s.eofWritten = true
}
return s.err
}
@@ -256,7 +282,12 @@ func (e *Encoder) nextBlock(final bool) error {
}
s.wWg.Done()
}()
- err := blk.encode()
+ err := errIncompressible
+ // If we got the exact same number of literals as input,
+ // assume the literals cannot be compressed.
+ if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
+ err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ }
switch err {
case errIncompressible:
if debug {
@@ -285,12 +316,20 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
if debug {
println("Using ReadFrom")
}
- // Maybe handle stuff queued?
+
+ // Flush any current writes.
+ if len(e.state.filling) > 0 {
+ if err := e.nextBlock(false); err != nil {
+ return 0, err
+ }
+ }
e.state.filling = e.state.filling[:e.o.blockSize]
src := e.state.filling
for {
n2, err := r.Read(src)
- _, _ = e.state.encoder.CRC().Write(src[:n2])
+ if e.o.crc {
+ _, _ = e.state.encoder.CRC().Write(src[:n2])
+ }
// src is now the unfilled part...
src = src[n2:]
n += int64(n2)
@@ -300,7 +339,7 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
if debug {
println("ReadFrom: got EOF final block:", len(e.state.filling))
}
- return n, e.nextBlock(true)
+ return n, nil
default:
if debug {
println("ReadFrom: got error:", err)
@@ -355,6 +394,9 @@ func (e *Encoder) Close() error {
if err != nil {
return err
}
+ if e.state.fullFrameWritten {
+ return s.err
+ }
s.wg.Wait()
s.wWg.Wait()
@@ -387,27 +429,42 @@ func (e *Encoder) Close() error {
// EncodeAll will encode all input in src and append it to dst.
// This function can be called concurrently, but each call will only run on a single goroutine.
-// If empty input is given, nothing is returned.
+// If empty input is given, nothing is returned, unless WithZeroFrames is specified.
// Encoded blocks can be concatenated and the result will be the combined input stream.
// Data compressed with EncodeAll can be decoded with the Decoder,
// using either a stream or DecodeAll.
func (e *Encoder) EncodeAll(src, dst []byte) []byte {
if len(src) == 0 {
+ if e.o.fullZero {
+ // Add frame header.
+ fh := frameHeader{
+ ContentSize: 0,
+ WindowSize: MinWindowSize,
+ SingleSegment: true,
+ // Adding a checksum would be a waste of space.
+ Checksum: false,
+ DictID: 0,
+ }
+ dst, _ = fh.appendTo(dst)
+
+ // Write raw block as last one only.
+ var blk blockHeader
+ blk.setSize(0)
+ blk.setType(blockTypeRaw)
+ blk.setLast(true)
+ dst = blk.appendTo(dst)
+ }
return dst
}
- e.init.Do(func() {
- e.o.setDefault()
- e.initialize()
- })
+ e.init.Do(e.initialize)
enc := <-e.encoders
defer func() {
// Release encoder reference to last block.
- enc.Reset()
+ // If a non-single block is needed the encoder will reset again.
e.encoders <- enc
}()
- enc.Reset()
- blk := enc.Block()
- single := len(src) > 1<<20
+ // Use a single segment when the input is above the minimum window size and below 1MB.
+ single := len(src) < 1<<20 && len(src) > MinWindowSize
if e.o.single != nil {
single = *e.o.single
}
@@ -416,11 +473,11 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
WindowSize: uint32(enc.WindowSize(len(src))),
SingleSegment: single,
Checksum: e.o.crc,
- DictID: 0,
+ DictID: e.o.dict.ID(),
}
// If less than 1MB, allocate a buffer up front.
- if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 {
+ if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
dst = make([]byte, 0, len(src))
}
dst, err := fh.appendTo(dst)
@@ -428,34 +485,81 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
panic(err)
}
- for len(src) > 0 {
- todo := src
- if len(todo) > e.o.blockSize {
- todo = todo[:e.o.blockSize]
- }
- src = src[len(todo):]
+ // If we can do everything in one block, prefer that.
+ if len(src) <= maxCompressedBlockSize {
+ enc.Reset(e.o.dict, true)
+ // Slightly faster with no history and everything in one block.
if e.o.crc {
- _, _ = enc.CRC().Write(todo)
+ _, _ = enc.CRC().Write(src)
}
- blk.reset(nil)
- blk.pushOffsets()
- enc.Encode(blk, todo)
- if len(src) == 0 {
- blk.last = true
+ blk := enc.Block()
+ blk.last = true
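+ // A dictionary requires history, so the faster no-history path is only used without one.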
+ if e.o.dict == nil {
+ enc.EncodeNoHist(blk, src)
+ } else {
+ enc.Encode(blk, src)
}
- err := blk.encode()
+
+ // If we got the exact same number of literals as input,
+ // assume the literals cannot be compressed.
+ err := errIncompressible
+ oldout := blk.output
+ if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
+ // Output directly to dst
+ blk.output = dst
+ err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ }
+
switch err {
case errIncompressible:
if debug {
println("Storing incompressible block as raw")
}
- blk.encodeRaw(todo)
- blk.popOffsets()
+ dst = blk.encodeRawTo(dst, src)
case nil:
+ dst = blk.output
default:
panic(err)
}
- dst = append(dst, blk.output...)
+ blk.output = oldout
+ } else {
+ enc.Reset(e.o.dict, false)
+ blk := enc.Block()
+ for len(src) > 0 {
+ todo := src
+ if len(todo) > e.o.blockSize {
+ todo = todo[:e.o.blockSize]
+ }
+ src = src[len(todo):]
+ if e.o.crc {
+ _, _ = enc.CRC().Write(todo)
+ }
+ blk.pushOffsets()
+ enc.Encode(blk, todo)
+ if len(src) == 0 {
+ blk.last = true
+ }
+ err := errIncompressible
+ // If we got the exact same number of literals as input,
+ // assume the literals cannot be compressed.
+ if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
+ err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
+ }
+
+ switch err {
+ case errIncompressible:
+ if debug {
+ println("Storing incompressible block as raw")
+ }
+ dst = blk.encodeRawTo(dst, todo)
+ blk.popOffsets()
+ case nil:
+ dst = append(dst, blk.output...)
+ default:
+ panic(err)
+ }
+ blk.reset(nil)
+ }
}
if e.o.crc {
dst = enc.AppendCRC(dst)
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index 6e210c4a055..18a47eb03e8 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -1,44 +1,67 @@
package zstd
import (
+ "errors"
"fmt"
"runtime"
"strings"
)
-// DOption is an option for creating a encoder.
+// EOption is an option for creating an encoder.
type EOption func(*encoderOptions) error
// options retains accumulated state of multiple options.
type encoderOptions struct {
- concurrent int
- crc bool
- single *bool
- pad int
- blockSize int
- windowSize int
- level EncoderLevel
+ concurrent int
+ level EncoderLevel
+ single *bool
+ pad int
+ blockSize int
+ windowSize int
+ crc bool
+ fullZero bool
+ noEntropy bool
+ allLitEntropy bool
+ customWindow bool
+ customALEntropy bool
+ lowMem bool
+ dict *dict
}
func (o *encoderOptions) setDefault() {
*o = encoderOptions{
- // use less ram: true for now, but may change.
- concurrent: runtime.GOMAXPROCS(0),
- crc: true,
- single: nil,
- blockSize: 1 << 16,
- windowSize: 1 << 22,
- level: SpeedDefault,
+ concurrent: runtime.GOMAXPROCS(0),
+ crc: true,
+ single: nil,
+ blockSize: 1 << 16,
+ windowSize: 8 << 20,
+ level: SpeedDefault,
+ allLitEntropy: true,
+ lowMem: false,
}
}
// encoder returns an encoder with the selected options.
func (o encoderOptions) encoder() encoder {
switch o.level {
- case SpeedDefault:
- return &doubleFastEncoder{fastEncoder: fastEncoder{maxMatchOff: int32(o.windowSize)}}
case SpeedFastest:
- return &fastEncoder{maxMatchOff: int32(o.windowSize)}
+ if o.dict != nil {
+ return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+ }
+ return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+
+ case SpeedDefault:
+ if o.dict != nil {
+ return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
+ }
+ return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+ case SpeedBetterCompression:
+ if o.dict != nil {
+ return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+ }
+ return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+ case SpeedBestCompression:
+ return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
}
panic("unknown compression level")
}
@@ -63,6 +86,31 @@ func WithEncoderConcurrency(n int) EOption {
}
}
+// WithWindowSize will set the maximum allowed back-reference distance.
+// The value must be a power of two between MinWindowSize and MaxWindowSize.
+// A larger value will enable better compression but allocate more memory and,
+// for above-default values, take considerably longer.
+// The default value is determined by the compression level.
+func WithWindowSize(n int) EOption {
+ return func(o *encoderOptions) error {
+ switch {
+ case n < MinWindowSize:
+ return fmt.Errorf("window size must be at least %d", MinWindowSize)
+ case n > MaxWindowSize:
+ return fmt.Errorf("window size must be at most %d", MaxWindowSize)
+ case (n & (n - 1)) != 0:
+ return errors.New("window size must be a power of 2")
+ }
+
+ o.windowSize = n
+ o.customWindow = true
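+ // Blocks may not be larger than the window, so clamp the block size.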
+ if o.blockSize > o.windowSize {
+ o.blockSize = o.windowSize
+ }
+ return nil
+ }
+}
+
// WithEncoderPadding will add padding to all output so the size will be a multiple of n.
// This can be used to obfuscate the exact output size or make blocks of a certain size.
// The contents will be a skippable frame, so it will be invisible by the decoder.
@@ -103,25 +151,25 @@ const (
// This is roughly equivalent to the default Zstandard mode (level 3).
SpeedDefault
+ // SpeedBetterCompression will yield better compression than the default.
+ // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage.
+ // Note that the CPU usage of this mode may increase in future versions.
+ SpeedBetterCompression
+
+ // SpeedBestCompression will choose the best available compression option.
+ // This will offer the best compression no matter the CPU cost.
+ SpeedBestCompression
+
// speedLast should be kept as the last actual compression option.
// This is not for external usage, but is used to keep track of the valid options.
speedLast
-
- // SpeedBetterCompression will (in the future) yield better compression than the default,
- // but at approximately 4x the CPU usage of the default.
- // For now this is not implemented.
- SpeedBetterCompression = SpeedDefault
-
- // SpeedBestCompression will choose the best available compression option.
- // For now this is not implemented.
- SpeedBestCompression = SpeedDefault
)
// EncoderLevelFromString will convert a string representation of an encoding level back
// to a compression level. The compare is not case sensitive.
// If the string wasn't recognized, (false, SpeedDefault) will be returned.
func EncoderLevelFromString(s string) (bool, EncoderLevel) {
- for l := EncoderLevel(speedNotSet + 1); l < speedLast; l++ {
+ for l := speedNotSet + 1; l < speedLast; l++ {
if strings.EqualFold(s, l.String()) {
return true, l
}
@@ -136,8 +184,12 @@ func EncoderLevelFromZstd(level int) EncoderLevel {
switch {
case level < 3:
return SpeedFastest
- case level >= 3:
+ case level >= 3 && level < 6:
return SpeedDefault
+ case level >= 6 && level < 10:
+ return SpeedBetterCompression
+ case level >= 10:
+ return SpeedBestCompression
}
return SpeedDefault
}
@@ -149,6 +201,10 @@ func (e EncoderLevel) String() string {
return "fastest"
case SpeedDefault:
return "default"
+ case SpeedBetterCompression:
+ return "better"
+ case SpeedBestCompression:
+ return "best"
default:
return "invalid"
}
@@ -162,6 +218,54 @@ func WithEncoderLevel(l EncoderLevel) EOption {
return fmt.Errorf("unknown encoder level")
}
o.level = l
+ if !o.customWindow {
+ switch o.level {
+ case SpeedFastest:
+ o.windowSize = 4 << 20
+ case SpeedDefault:
+ o.windowSize = 8 << 20
+ case SpeedBetterCompression:
+ o.windowSize = 16 << 20
+ case SpeedBestCompression:
+ o.windowSize = 32 << 20
+ }
+ }
+ if !o.customALEntropy {
+ o.allLitEntropy = l > SpeedFastest
+ }
+
+ return nil
+ }
+}
+
+// WithZeroFrames will encode zero-length input as full frames.
+// This can be needed for compatibility with other Zstandard implementations,
+// but is not needed for this package.
+func WithZeroFrames(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.fullZero = b
+ return nil
+ }
+}
+
+// WithAllLitEntropyCompression will apply entropy compression if no matches are found.
+// Disabling this will skip incompressible data faster, but when there are no matches
+// and the character distribution is skewed, compression is lost.
+// Default value depends on the compression level selected.
+func WithAllLitEntropyCompression(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.customALEntropy = true
+ o.allLitEntropy = b
+ return nil
+ }
+}
+
+// WithNoEntropyCompression will always skip entropy compression of literals.
+// This can be useful if content has matches, but unlikely to benefit from entropy
+// compression. Usually the slight speed improvement is not worth enabling this.
+func WithNoEntropyCompression(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.noEntropy = b
return nil
}
}
@@ -182,3 +286,27 @@ func WithSingleSegment(b bool) EOption {
return nil
}
}
+
+// WithLowerEncoderMem will, in some cases, reduce memory usage at the cost of
+// slower encoding speed.
+// This will not change the window size which is the primary function for reducing
+// memory usage. See WithWindowSize.
+func WithLowerEncoderMem(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.lowMem = b
+ return nil
+ }
+}
+
+// WithEncoderDict allows registering a dictionary that will be used for the encode.
+// The encoder *may* choose to use no dictionary instead for certain payloads.
+func WithEncoderDict(dict []byte) EOption {
+ return func(o *encoderOptions) error {
+ d, err := loadDict(dict)
+ if err != nil {
+ return err
+ }
+ o.dict = d
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 8fa264fc24d..fc4a566d39a 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -16,16 +16,11 @@ import (
)
type frameDec struct {
- o decoderOptions
- crc hash.Hash64
- frameDone sync.WaitGroup
- offset int64
+ o decoderOptions
+ crc hash.Hash64
+ offset int64
- WindowSize uint64
- DictionaryID uint32
- FrameContentSize uint64
- HasCheckSum bool
- SingleSegment bool
+ WindowSize uint64
// maxWindowSize is the maximum windows size to support.
// should never be bigger than max-int.
@@ -39,14 +34,25 @@ type frameDec struct {
rawInput byteBuffer
+ // Byte buffer that can be reused for small input blocks.
+ bBuf byteBuf
+
+ FrameContentSize uint64
+ frameDone sync.WaitGroup
+
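+ // DictionaryID is nil when the frame header carries no dictionary ID (an explicit ID of 0 also means none).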
+ DictionaryID *uint32
+ HasCheckSum bool
+ SingleSegment bool
+
// asyncRunning indicates whether the async routine processes input on 'decoding'.
- asyncRunning bool
asyncRunningMu sync.Mutex
+ asyncRunning bool
}
const (
// The minimum Window_Size is 1 KB.
- minWindowSize = 1 << 10
+ MinWindowSize = 1 << 10
+ MaxWindowSize = 1 << 29
)
var (
@@ -57,7 +63,10 @@ var (
func newFrameDec(o decoderOptions) *frameDec {
d := frameDec{
o: o,
- maxWindowSize: 1 << 30,
+ maxWindowSize: MaxWindowSize,
+ }
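+ // Never allow the window to exceed the configured maximum decoded size.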
+ if d.maxWindowSize > o.maxDecodedSize {
+ d.maxWindowSize = o.maxDecodedSize
}
return &d
}
@@ -133,7 +142,7 @@ func (d *frameDec) reset(br byteBuffer) error {
// Read Dictionary_ID
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
- d.DictionaryID = 0
+ d.DictionaryID = nil
if size := fhd & 3; size != 0 {
if size == 3 {
size = 4
@@ -145,19 +154,22 @@ func (d *frameDec) reset(br byteBuffer) error {
}
return io.ErrUnexpectedEOF
}
+ var id uint32
switch size {
case 1:
- d.DictionaryID = uint32(b[0])
+ id = uint32(b[0])
case 2:
- d.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8)
+ id = uint32(b[0]) | (uint32(b[1]) << 8)
case 4:
- d.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+ id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
}
if debug {
- println("Dict size", size, "ID:", d.DictionaryID)
+ println("Dict size", size, "ID:", id)
}
- if d.DictionaryID != 0 {
- return ErrUnknownDictionary
+ if id > 0 {
+ // ID 0 means "sorry, no dictionary anyway".
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
+ d.DictionaryID = &id
}
}
@@ -187,14 +199,14 @@ func (d *frameDec) reset(br byteBuffer) error {
// When FCS_Field_Size is 2, the offset of 256 is added.
d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256
case 4:
- d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3] << 24))
+ d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
case 8:
d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
}
if debug {
- println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]))
+ println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
}
}
// Move this to shared.
@@ -209,8 +221,8 @@ func (d *frameDec) reset(br byteBuffer) error {
if d.WindowSize == 0 && d.SingleSegment {
// We may not need window in this case.
d.WindowSize = d.FrameContentSize
- if d.WindowSize < minWindowSize {
- d.WindowSize = minWindowSize
+ if d.WindowSize < MinWindowSize {
+ d.WindowSize = MinWindowSize
}
}
@@ -219,12 +231,16 @@ func (d *frameDec) reset(br byteBuffer) error {
return ErrWindowSizeExceeded
}
// The minimum Window_Size is 1 KB.
- if d.WindowSize < minWindowSize {
+ if d.WindowSize < MinWindowSize {
println("got window size: ", d.WindowSize)
return ErrWindowSizeTooSmall
}
d.history.windowSize = int(d.WindowSize)
- d.history.maxSize = d.history.windowSize + maxBlockSize
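+ // In low-memory mode with a small window, cap the history buffer at twice the window.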
+ if d.o.lowMem && d.history.windowSize < maxBlockSize {
+ d.history.maxSize = d.history.windowSize * 2
+ } else {
+ d.history.maxSize = d.history.windowSize + maxBlockSize
+ }
// history contains input - maybe we do something
d.rawInput = br
return nil
@@ -232,7 +248,9 @@ func (d *frameDec) reset(br byteBuffer) error {
// next will start decoding the next block from stream.
func (d *frameDec) next(block *blockDec) error {
- println("decoding new block")
+ if debug {
+ printf("decoding new block %p:%p", block, block.data)
+ }
err := block.reset(d.rawInput, d.WindowSize)
if err != nil {
println("block error:", err)
@@ -280,13 +298,13 @@ func (d *frameDec) checkCRC() error {
if !d.HasCheckSum {
return nil
}
- var tmp [8]byte
- gotB := d.crc.Sum(tmp[:0])
+ var tmp [4]byte
+ got := d.crc.Sum64()
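+ // The frame checksum is the lowest 4 bytes of the xxhash64 digest, stored little-endian.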
// Flip to match file order.
- gotB[0] = gotB[7]
- gotB[1] = gotB[6]
- gotB[2] = gotB[5]
- gotB[3] = gotB[4]
+ tmp[0] = byte(got >> 0)
+ tmp[1] = byte(got >> 8)
+ tmp[2] = byte(got >> 16)
+ tmp[3] = byte(got >> 24)
// We can overwrite upper tmp now
want := d.rawInput.readSmall(4)
@@ -295,18 +313,22 @@ func (d *frameDec) checkCRC() error {
return io.ErrUnexpectedEOF
}
- if !bytes.Equal(gotB[:4], want) {
- println("CRC Check Failed:", gotB[:4], "!=", want)
+ if !bytes.Equal(tmp[:], want) {
+ if debug {
+ println("CRC Check Failed:", tmp[:], "!=", want)
+ }
return ErrCRCMismatch
}
- println("CRC ok")
+ if debug {
+ println("CRC ok", tmp[:])
+ }
return nil
}
func (d *frameDec) initAsync() {
if !d.o.lowMem && !d.SingleSegment {
- // set max extra size history to 20MB.
- d.history.maxSize = d.history.windowSize + maxBlockSize*10
+ // set max extra size history to 10MB.
+ d.history.maxSize = d.history.windowSize + maxBlockSize*5
}
// re-alloc if more than one extra block size.
if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
@@ -332,8 +354,6 @@ func (d *frameDec) initAsync() {
// When the frame has finished decoding the *bufio.Reader
// containing the remaining input will be sent on frameDec.frameDone.
func (d *frameDec) startDecoder(output chan decodeOutput) {
- // TODO: Init to dictionary
- d.history.reset()
written := int64(0)
defer func() {
@@ -401,6 +421,7 @@ func (d *frameDec) startDecoder(output chan decodeOutput) {
}
written += int64(len(r.b))
if d.SingleSegment && uint64(written) > d.FrameContentSize {
+ println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
r.err = ErrFrameSizeExceeded
output <- r
return
@@ -423,10 +444,8 @@ func (d *frameDec) startDecoder(output chan decodeOutput) {
}
}
-// runDecoder will create a sync decoder that will decodeAsync a block of data.
+// runDecoder will create a sync decoder that will decode a block of data.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
- // TODO: Init to dictionary
- d.history.reset()
saved := d.history.b
// We use the history for output to avoid copying it.
@@ -451,6 +470,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
break
}
if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
+ println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
err = ErrFrameSizeExceeded
break
}
@@ -463,9 +483,10 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if err == nil {
if n != len(dst)-crcStart {
err = io.ErrShortWrite
+ } else {
+ err = d.checkCRC()
}
}
- err = d.checkCRC()
}
}
d.history.b = saved
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
index acac325275a..4ef7f5a3e3d 100644
--- a/vendor/github.com/klauspost/compress/zstd/frameenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -5,7 +5,7 @@
package zstd
import (
- "errors"
+ "encoding/binary"
"fmt"
"io"
"math"
@@ -17,7 +17,7 @@ type frameHeader struct {
WindowSize uint32
SingleSegment bool
Checksum bool
- DictID uint32 // Not stored.
+ DictID uint32
}
const maxHeaderSize = 14
@@ -31,6 +31,24 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
if f.SingleSegment {
fhd |= 1 << 5
}
+
+ var dictIDContent []byte
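+ // The two lowest descriptor bits encode the size of the Dictionary_ID field: 1, 2 or 4 bytes.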
+ if f.DictID > 0 {
+ var tmp [4]byte
+ if f.DictID < 256 {
+ fhd |= 1
+ tmp[0] = uint8(f.DictID)
+ dictIDContent = tmp[:1]
+ } else if f.DictID < 1<<16 {
+ fhd |= 2
+ binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID))
+ dictIDContent = tmp[:2]
+ } else {
+ fhd |= 3
+ binary.LittleEndian.PutUint32(tmp[:4], f.DictID)
+ dictIDContent = tmp[:4]
+ }
+ }
var fcs uint8
if f.ContentSize >= 256 {
fcs++
@@ -41,6 +59,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
if f.ContentSize >= 0xffffffff {
fcs++
}
+
fhd |= fcs << 6
dst = append(dst, fhd)
@@ -49,8 +68,8 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
dst = append(dst, uint8(windowLog))
}
- if f.SingleSegment && f.ContentSize == 0 {
- return nil, errors.New("single segment, but no size set")
+ if f.DictID > 0 {
+ dst = append(dst, dictIDContent...)
}
switch fcs {
case 0:
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
index a86d00bc359..e6d3d49b39c 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -19,7 +19,7 @@ const (
* Increasing memory usage improves compression ratio
* Reduced memory usage can improve speed, due to cache effect
* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
- maxMemoryUsage = 11
+ maxMemoryUsage = tablelogAbsoluteMax + 2
maxTableLog = maxMemoryUsage - 2
maxTablesize = 1 << maxTableLog
@@ -55,7 +55,7 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
if b.remain() < 4 {
return errors.New("input too small")
}
- bitStream := b.Uint32()
+ bitStream := b.Uint32NC()
nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
if nbBits > tablelogAbsoluteMax {
println("Invalid tablelog:", nbBits)
@@ -79,7 +79,8 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
n0 += 24
if r := b.remain(); r > 5 {
b.advance(2)
- bitStream = b.Uint32() >> bitCount
+ // The check above should make sure we can read 32 bits
+ bitStream = b.Uint32NC() >> bitCount
} else {
// end of bit stream
bitStream >>= 16
@@ -104,10 +105,11 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
charnum++
}
- if r := b.remain(); r >= 7 || r+int(bitCount>>3) >= 4 {
+ if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
b.advance(bitCount >> 3)
bitCount &= 7
- bitStream = b.Uint32() >> bitCount
+ // The check above should make sure we can read 32 bits
+ bitStream = b.Uint32NC() >> bitCount
} else {
bitStream >>= 2
}
@@ -118,7 +120,7 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
if int32(bitStream)&(threshold-1) < max {
count = int32(bitStream) & (threshold - 1)
- if debug && nbBits < 1 {
+ if debugAsserts && nbBits < 1 {
panic("nbBits underflow")
}
bitCount += nbBits - 1
@@ -148,17 +150,16 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
threshold >>= 1
}
- //println("b.off:", b.off, "len:", len(b.b), "bc:", bitCount, "remain:", b.remain())
- if r := b.remain(); r >= 7 || r+int(bitCount>>3) >= 4 {
+ if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
b.advance(bitCount >> 3)
bitCount &= 7
+ // The check above should make sure we can read 32 bits
+ bitStream = b.Uint32NC() >> (bitCount & 31)
} else {
bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
b.off = len(b.b) - 4
- //println("b.off:", b.off, "len:", len(b.b), "bc:", bitCount, "iend", iend)
+ bitStream = b.Uint32() >> (bitCount & 31)
}
- bitStream = b.Uint32() >> (bitCount & 31)
- //printf("bitstream is now: 0b%b", bitStream)
}
s.symbolLen = charnum
if s.symbolLen <= 1 {
@@ -184,29 +185,75 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
// decSymbol contains information about a state entry,
// Including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.
-type decSymbol struct {
- newState uint16
- addBits uint8 // Used for symbols until transformed.
- nbBits uint8
- baseline uint32
+// Using a composite uint64 is faster than a struct with separate members.
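+// Layout: bits 0-7 nbBits, bits 8-15 addBits, bits 16-31 newState, bits 32-63 baseline.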
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+ return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+ return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+ return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+ return uint16(d >> 16)
+}
+
+func (d decSymbol) baseline() uint32 {
+ return uint32(d >> 32)
+}
+
+func (d decSymbol) baselineInt() int {
+ return int(d >> 32)
+}
+
+func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
+ *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+ const mask = 0xffffffffffffff00
+ *d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+ const mask = 0xffffffffffff00ff
+ *d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+ const mask = 0xffffffff0000ffff
+ *d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setBaseline(baseline uint32) {
+ const mask = 0xffffffff
+ *d = (*d & mask) | decSymbol(baseline)<<32
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+ const mask = 0xffff00ff
+ *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
}
// decSymbolValue returns the transformed decSymbol for the given symbol.
func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
if int(symb) >= len(t) {
- return decSymbol{}, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+ return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
}
lu := t[symb]
- return decSymbol{
- addBits: lu.addBits,
- baseline: lu.baseLine,
- }, nil
+ return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
}
// setRLE will set the decoder til RLE mode.
func (s *fseDecoder) setRLE(symbol decSymbol) {
s.actualTableLog = 0
- s.maxBits = symbol.addBits
+ s.maxBits = symbol.addBits()
s.dt[0] = symbol
}
@@ -220,7 +267,7 @@ func (s *fseDecoder) buildDtable() error {
{
for i, v := range s.norm[:s.symbolLen] {
if v == -1 {
- s.dt[highThreshold].addBits = uint8(i)
+ s.dt[highThreshold].setAddBits(uint8(i))
highThreshold--
symbolNext[i] = 1
} else {
@@ -235,7 +282,7 @@ func (s *fseDecoder) buildDtable() error {
position := uint32(0)
for ss, v := range s.norm[:s.symbolLen] {
for i := 0; i < int(v); i++ {
- s.dt[position].addBits = uint8(ss)
+ s.dt[position].setAddBits(uint8(ss))
position = (position + step) & tableMask
for position > highThreshold {
// lowprob area
@@ -253,11 +300,11 @@ func (s *fseDecoder) buildDtable() error {
{
tableSize := uint16(1 << s.actualTableLog)
for u, v := range s.dt[:tableSize] {
- symbol := v.addBits
+ symbol := v.addBits()
nextState := symbolNext[symbol]
symbolNext[symbol] = nextState + 1
nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
- s.dt[u&maxTableMask].nbBits = nBits
+ s.dt[u&maxTableMask].setNBits(nBits)
newState := (nextState << nBits) - tableSize
if newState > tableSize {
return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
@@ -266,7 +313,7 @@ func (s *fseDecoder) buildDtable() error {
// Seems weird that this is possible with nbits > 0.
return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
}
- s.dt[u&maxTableMask].newState = newState
+ s.dt[u&maxTableMask].setNewState(newState)
}
}
return nil
@@ -279,25 +326,21 @@ func (s *fseDecoder) transform(t []baseOffset) error {
tableSize := uint16(1 << s.actualTableLog)
s.maxBits = 0
for i, v := range s.dt[:tableSize] {
- if int(v.addBits) >= len(t) {
- return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits, len(t))
+ add := v.addBits()
+ if int(add) >= len(t) {
+ return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t))
}
- lu := t[v.addBits]
+ lu := t[add]
if lu.addBits > s.maxBits {
s.maxBits = lu.addBits
}
- s.dt[i&maxTableMask] = decSymbol{
- newState: v.newState,
- nbBits: v.nbBits,
- addBits: lu.addBits,
- baseline: lu.baseLine,
- }
+ v.setExt(lu.addBits, lu.baseLine)
+ s.dt[i] = v
}
return nil
}
type fseState struct {
- // TODO: Check if *[1 << maxTablelog]decSymbol is faster.
dt []decSymbol
state decSymbol
}
@@ -312,26 +355,31 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
// next sets the next state by reading the required bits from the bit reader.
// At least tablelog bits must be available in the bit reader.
func (s *fseState) next(br *bitReader) {
- lowBits := uint16(br.getBits(s.state.nbBits))
- s.state = s.dt[s.state.newState+lowBits]
+ lowBits := uint16(br.getBits(s.state.nbBits()))
+ s.state = s.dt[s.state.newState()+lowBits]
}
// finished returns true if all bits have been read from the bitstream
// and the next state would require reading bits from the input.
func (s *fseState) finished(br *bitReader) bool {
- return br.finished() && s.state.nbBits > 0
+ return br.finished() && s.state.nbBits() > 0
}
// final returns the current state symbol without decoding the next.
func (s *fseState) final() (int, uint8) {
- return int(s.state.baseline), s.state.addBits
+ return s.state.baselineInt(), s.state.addBits()
+}
+
+// final returns the current state symbol without decoding the next.
+func (s decSymbol) final() (int, uint8) {
+ return s.baselineInt(), s.addBits()
}
// nextFast returns the next symbol and sets the next state.
// This can only be used if no symbols are 0 bits.
// At least tablelog bits must be available in the bit reader.
func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
- lowBits := uint16(br.getBitsFast(s.state.nbBits))
- s.state = s.dt[s.state.newState+lowBits]
- return s.state.baseline, s.state.addBits
+ lowBits := uint16(br.getBitsFast(s.state.nbBits()))
+ s.state = s.dt[s.state.newState()+lowBits]
+ return s.state.baseline(), s.state.addBits()
}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
index dfa6cf7cead..b80709d5ea8 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -97,7 +97,7 @@ func (s *fseEncoder) prepare() (*fseEncoder, error) {
func (s *fseEncoder) allocCtable() {
tableSize := 1 << s.actualTableLog
// get tableSymbol that is big enough.
- if cap(s.ct.tableSymbol) < int(tableSize) {
+ if cap(s.ct.tableSymbol) < tableSize {
s.ct.tableSymbol = make([]byte, tableSize)
}
s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
@@ -202,13 +202,13 @@ func (s *fseEncoder) buildCTable() error {
case 0:
case -1, 1:
symbolTT[i].deltaNbBits = tl
- symbolTT[i].deltaFindState = int16(total - 1)
+ symbolTT[i].deltaFindState = total - 1
total++
default:
maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
minStatePlus := uint32(v) << maxBitsOut
symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
- symbolTT[i].deltaFindState = int16(total - v)
+ symbolTT[i].deltaFindState = total - v
total += v
}
}
@@ -327,7 +327,7 @@ func (s *fseEncoder) normalizeCount(length int) error {
if err != nil {
return err
}
- if debug {
+ if debugAsserts {
err = s.validateNorm()
if err != nil {
return err
@@ -336,7 +336,7 @@ func (s *fseEncoder) normalizeCount(length int) error {
return s.buildCTable()
}
s.norm[largest] += stillToDistribute
- if debug {
+ if debugAsserts {
err := s.validateNorm()
if err != nil {
return err
@@ -353,8 +353,8 @@ func (s *fseEncoder) normalizeCount2(length int) error {
distributed uint32
total = uint32(length)
tableLog = s.actualTableLog
- lowThreshold = uint32(total >> tableLog)
- lowOne = uint32((total * 3) >> (tableLog + 1))
+ lowThreshold = total >> tableLog
+ lowOne = (total * 3) >> (tableLog + 1)
)
for i, cnt := range s.count[:s.symbolLen] {
if cnt == 0 {
@@ -379,7 +379,7 @@ func (s *fseEncoder) normalizeCount2(length int) error {
if (total / toDistribute) > lowOne {
// risk of rounding to zero
- lowOne = uint32((total * 3) / (toDistribute * 2))
+ lowOne = (total * 3) / (toDistribute * 2)
for i, cnt := range s.count[:s.symbolLen] {
if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
s.norm[i] = 1
@@ -502,13 +502,22 @@ func (s *fseEncoder) validateNorm() (err error) {
// writeCount will write the normalized histogram count to header.
// This is read back by readNCount.
func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+ if s.useRLE {
+ return append(out, s.rleVal), nil
+ }
+ if s.preDefined || s.reUsed {
+ // Never write predefined.
+ return out, nil
+ }
+
var (
tableLog = s.actualTableLog
tableSize = 1 << tableLog
previous0 bool
charnum uint16
- maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+ // maximum header size plus 2 extra bytes for final output if bitCount == 0.
+ maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
// Write Table Size
bitStream = uint32(tableLog - minEncTablelog)
@@ -516,15 +525,12 @@ func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
remaining = int16(tableSize + 1) /* +1 for extra accuracy */
threshold = int16(tableSize)
nbBits = uint(tableLog + 1)
+ outP = len(out)
)
- if s.useRLE {
- return append(out, s.rleVal), nil
+ if cap(out) < outP+maxHeaderSize {
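+ // Grow capacity with headroom via append, then slice back so only the capacity changes.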
+ out = append(out, make([]byte, maxHeaderSize*3)...)
+ out = out[:len(out)-maxHeaderSize*3]
}
- if s.preDefined || s.reUsed {
- // Never write predefined.
- return out, nil
- }
- outP := len(out)
out = out[:outP+maxHeaderSize]
// stops at 1
@@ -594,11 +600,14 @@ func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
}
}
+ if outP+2 > len(out) {
+ return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen])
+ }
out[outP] = byte(bitStream)
out[outP+1] = byte(bitStream >> 8)
outP += int((bitCount + 7) / 8)
- if uint16(charnum) > s.symbolLen {
+ if charnum > s.symbolLen {
return nil, errors.New("internal error: charnum > s.symbolLen")
}
return out[:outP], nil
@@ -610,7 +619,7 @@ func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
threshold := (minNbBits + 1) << 16
- if debug {
+ if debugAsserts {
if !(s.actualTableLog < 16) {
panic("!s.actualTableLog < 16")
}
@@ -624,7 +633,7 @@ func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
// linear interpolation (very approximate)
normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
bitMultiplier := uint32(1) << accuracyLog
- if debug {
+ if debugAsserts {
if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
index 5186de8027d..6c17dc17f4f 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
@@ -7,6 +7,7 @@ package zstd
import (
"fmt"
"math"
+ "sync"
)
var (
@@ -69,85 +70,89 @@ func fillBase(dst []baseOffset, base uint32, bits ...uint8) {
}
}
-func init() {
- // Literals length codes
- tmp := make([]baseOffset, 36)
- for i := range tmp[:16] {
- tmp[i] = baseOffset{
- baseLine: uint32(i),
- addBits: 0,
+var predef sync.Once
+
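+// initPredefined builds the predefined FSE decoder and encoder tables exactly once; it is safe for concurrent use.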
+func initPredefined() {
+ predef.Do(func() {
+ // Literals length codes
+ tmp := make([]baseOffset, 36)
+ for i := range tmp[:16] {
+ tmp[i] = baseOffset{
+ baseLine: uint32(i),
+ addBits: 0,
+ }
}
- }
- fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
- symbolTableX[tableLiteralLengths] = tmp
-
- // Match length codes
- tmp = make([]baseOffset, 53)
- for i := range tmp[:32] {
- tmp[i] = baseOffset{
- // The transformation adds the 3 length.
- baseLine: uint32(i) + 3,
- addBits: 0,
- }
- }
- fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
- symbolTableX[tableMatchLengths] = tmp
-
- // Offset codes
- tmp = make([]baseOffset, maxOffsetBits+1)
- tmp[1] = baseOffset{
- baseLine: 1,
- addBits: 1,
- }
- fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)
- symbolTableX[tableOffsets] = tmp
-
- // Fill predefined tables and transform them.
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
- for i := range fsePredef[:] {
- f := &fsePredef[i]
- switch tableIndex(i) {
- case tableLiteralLengths:
- // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243
- f.actualTableLog = 6
- copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
- -1, -1, -1, -1})
- f.symbolLen = 36
- case tableOffsets:
- // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281
- f.actualTableLog = 5
- copy(f.norm[:], []int16{
- 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1})
- f.symbolLen = 29
- case tableMatchLengths:
- //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304
- f.actualTableLog = 6
- copy(f.norm[:], []int16{
- 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
- -1, -1, -1, -1, -1})
- f.symbolLen = 53
+ fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+ symbolTableX[tableLiteralLengths] = tmp
+
+ // Match length codes
+ tmp = make([]baseOffset, 53)
+ for i := range tmp[:32] {
+ tmp[i] = baseOffset{
+ // The transformation adds the 3 length.
+ baseLine: uint32(i) + 3,
+ addBits: 0,
+ }
}
- if err := f.buildDtable(); err != nil {
- panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+ fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+ symbolTableX[tableMatchLengths] = tmp
+
+ // Offset codes
+ tmp = make([]baseOffset, maxOffsetBits+1)
+ tmp[1] = baseOffset{
+ baseLine: 1,
+ addBits: 1,
}
- if err := f.transform(symbolTableX[i]); err != nil {
- panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+ fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)
+ symbolTableX[tableOffsets] = tmp
+
+ // Fill predefined tables and transform them.
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+ for i := range fsePredef[:] {
+ f := &fsePredef[i]
+ switch tableIndex(i) {
+ case tableLiteralLengths:
+ // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243
+ f.actualTableLog = 6
+ copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+ -1, -1, -1, -1})
+ f.symbolLen = 36
+ case tableOffsets:
+ // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281
+ f.actualTableLog = 5
+ copy(f.norm[:], []int16{
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1})
+ f.symbolLen = 29
+ case tableMatchLengths:
+ //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304
+ f.actualTableLog = 6
+ copy(f.norm[:], []int16{
+ 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
+ -1, -1, -1, -1, -1})
+ f.symbolLen = 53
+ }
+ if err := f.buildDtable(); err != nil {
+ panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+ }
+ if err := f.transform(symbolTableX[i]); err != nil {
+ panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+ }
+ f.preDefined = true
+
+ // Create encoder as well
+ enc := &fsePredefEnc[i]
+ copy(enc.norm[:], f.norm[:])
+ enc.symbolLen = f.symbolLen
+ enc.actualTableLog = f.actualTableLog
+ if err := enc.buildCTable(); err != nil {
+ panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err))
+ }
+ enc.setBits(bitTables[i])
+ enc.preDefined = true
}
- f.preDefined = true
-
- // Create encoder as well
- enc := &fsePredefEnc[i]
- copy(enc.norm[:], f.norm[:])
- enc.symbolLen = f.symbolLen
- enc.actualTableLog = f.actualTableLog
- if err := enc.buildCTable(); err != nil {
- panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err))
- }
- enc.setBits(bitTables[i])
- enc.preDefined = true
- }
+ })
}
diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go
index 819d87f881c..4a752067fc9 100644
--- a/vendor/github.com/klauspost/compress/zstd/hash.go
+++ b/vendor/github.com/klauspost/compress/zstd/hash.go
@@ -64,7 +64,7 @@ func hash6(u uint64, h uint8) uint32 {
return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
}
-// hash6 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
index e8c419bd533..f783e32d251 100644
--- a/vendor/github.com/klauspost/compress/zstd/history.go
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -17,6 +17,7 @@ type history struct {
windowSize int
maxSize int
error bool
+ dict *dict
}
// reset will reset the history to initial state of a frame.
@@ -36,12 +37,27 @@ func (h *history) reset() {
}
h.decoders = sequenceDecs{}
if h.huffTree != nil {
- huffDecoderPool.Put(h.huffTree)
+ if h.dict == nil || h.dict.litEnc != h.huffTree {
+ huffDecoderPool.Put(h.huffTree)
+ }
}
h.huffTree = nil
+ h.dict = nil
//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
}
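+// setDict installs the dictionary's prebuilt sequence decoders, recent offsets and literal table into the history.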
+func (h *history) setDict(dict *dict) {
+ if dict == nil {
+ return
+ }
+ h.dict = dict
+ h.decoders.litLengths = dict.llDec
+ h.decoders.offsets = dict.ofDec
+ h.decoders.matchLengths = dict.mlDec
+ h.recentOffsets = dict.offsets
+ h.huffTree = dict.litEnc
+}
+
// append bytes to history.
// This function will make sure there is space for it,
// if the buffer has been allocated with enough extra space.
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
index d580e32aed4..2c9c5357a14 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -179,13 +179,13 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
MOVQ ·prime2v(SB), R14
// Load slice.
- MOVQ b_base+8(FP), CX
- MOVQ b_len+16(FP), DX
+ MOVQ arg1_base+8(FP), CX
+ MOVQ arg1_len+16(FP), DX
LEAQ (CX)(DX*1), BX
SUBQ $32, BX
// Load vN from d.
- MOVQ d+0(FP), AX
+ MOVQ arg+0(FP), AX
MOVQ 0(AX), R8 // v1
MOVQ 8(AX), R9 // v2
MOVQ 16(AX), R10 // v3
@@ -209,7 +209,7 @@ blockLoop:
MOVQ R11, 24(AX)
// The number of bytes written is CX minus the old base pointer.
- SUBQ b_base+8(FP), CX
+ SUBQ arg1_base+8(FP), CX
MOVQ CX, ret+32(FP)
RET
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index cef69e35b52..1dd39e63b7e 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -62,8 +62,10 @@ type sequenceDecs struct {
matchLengths sequenceDec
prevOffset [3]int
hist []byte
+ dict []byte
literals []byte
out []byte
+ windowSize int
maxBits uint8
}
@@ -82,88 +84,180 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
s.hist = hist.b
s.prevOffset = hist.recentOffsets
s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
+ s.windowSize = hist.windowSize
s.out = out
+ s.dict = nil
+ if hist.dict != nil {
+ s.dict = hist.dict.content
+ }
return nil
}
// decode sequences from the stream with the provided history.
func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
startSize := len(s.out)
+ // Grab full-size tables, to avoid bounds checks.
+ llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+ llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+
for i := seqs - 1; i >= 0; i-- {
if br.overread() {
printf("reading sequence %d, exceeded available data\n", seqs-i)
return io.ErrUnexpectedEOF
}
- var litLen, matchOff, matchLen int
+ var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
- litLen, matchOff, matchLen = s.nextFast(br)
+ // inlined function:
+ // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+ // Final will not read from stream.
+ var llB, mlB, moB uint8
+ ll, llB = llState.final()
+ ml, mlB = mlState.final()
+ mo, moB = ofState.final()
+
+ // extra bits are stored in reverse order.
+ br.fillFast()
+ mo += br.getBits(moB)
+ if s.maxBits > 32 {
+ br.fillFast()
+ }
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
+
+ if moB > 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = mo
+ } else {
+ // mo = s.adjustOffset(mo, ll, moB)
+ // Inlined for rather big speedup
+ if ll == 0 {
+ // There is an exception though, when current sequence's literals_length = 0.
+ // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+ // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+ mo++
+ }
+
+ if mo == 0 {
+ mo = s.prevOffset[0]
+ } else {
+ var temp int
+ if mo == 3 {
+ temp = s.prevOffset[0] - 1
+ } else {
+ temp = s.prevOffset[mo]
+ }
+
+ if temp == 0 {
+ // 0 is not valid; input is corrupted; force offset to 1
+ println("temp was 0")
+ temp = 1
+ }
+
+ if mo != 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ }
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = temp
+ mo = temp
+ }
+ }
br.fillFast()
} else {
- litLen, matchOff, matchLen = s.next(br)
+ ll, mo, ml = s.next(br, llState, mlState, ofState)
br.fill()
}
if debugSequences {
- println("Seq", seqs-i-1, "Litlen:", litLen, "matchOff:", matchOff, "(abs) matchLen:", matchLen)
+ println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
}
- if litLen > len(s.literals) {
- return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", litLen, len(s.literals))
+ if ll > len(s.literals) {
+ return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
}
- size := litLen + matchLen + len(s.out)
+ size := ll + ml + len(s.out)
if size-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size", size)
}
if size > cap(s.out) {
- // Not enough size, will be extremely rarely triggered,
+ // Not enough size, which can happen under high volume block streaming conditions
// but could be if destination slice is too small for sync operations.
- // We add maxBlockSize to the capacity.
- s.out = append(s.out, make([]byte, maxBlockSize)...)
- s.out = s.out[:len(s.out)-maxBlockSize]
- }
- if matchLen > maxMatchLen {
- return fmt.Errorf("match len (%d) bigger than max allowed length", matchLen)
- }
- if matchOff > len(s.out)+len(hist)+litLen {
- return fmt.Errorf("match offset (%d) bigger than current history (%d)", matchOff, len(s.out)+len(hist)+litLen)
+ // over-allocating here can create a large amount of GC pressure so we try to keep
+ // it as contained as possible
+ used := len(s.out) - startSize
+ addBytes := 256 + ll + ml + used>>2
+ // Clamp to max block size.
+ if used+addBytes > maxBlockSize {
+ addBytes = maxBlockSize - used
+ }
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
}
- if matchOff == 0 && matchLen > 0 {
- return fmt.Errorf("zero matchoff and matchlen > 0")
+ if ml > maxMatchLen {
+ return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
}
- s.out = append(s.out, s.literals[:litLen]...)
- s.literals = s.literals[litLen:]
+ // Add literals
+ s.out = append(s.out, s.literals[:ll]...)
+ s.literals = s.literals[ll:]
out := s.out
+ if mo == 0 && ml > 0 {
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+ }
+
+ if mo > len(s.out)+len(hist) || mo > s.windowSize {
+ if len(s.dict) == 0 {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+ }
+
+ // we may be in dictionary.
+ dictO := len(s.dict) - (mo - (len(s.out) + len(hist)))
+ if dictO < 0 || dictO >= len(s.dict) {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+ }
+ end := dictO + ml
+ if end > len(s.dict) {
+ out = append(out, s.dict[dictO:]...)
+ mo -= len(s.dict) - dictO
+ ml -= len(s.dict) - dictO
+ } else {
+ out = append(out, s.dict[dictO:end]...)
+ mo = 0
+ ml = 0
+ }
+ }
+
// Copy from history.
// TODO: Blocks without history could be made to ignore this completely.
- if v := matchOff - len(s.out); v > 0 {
+ if v := mo - len(s.out); v > 0 {
// v is the start position in history from end.
start := len(s.hist) - v
- if matchLen > v {
+ if ml > v {
// Some goes into current block.
// Copy remainder of history
out = append(out, s.hist[start:]...)
- matchOff -= v
- matchLen -= v
+ mo -= v
+ ml -= v
} else {
- out = append(out, s.hist[start:start+matchLen]...)
- matchLen = 0
+ out = append(out, s.hist[start:start+ml]...)
+ ml = 0
}
}
// We must be in current buffer now
- if matchLen > 0 {
- start := len(s.out) - matchOff
- if matchLen <= len(s.out)-start {
+ if ml > 0 {
+ start := len(s.out) - mo
+ if ml <= len(s.out)-start {
// No overlap
- out = append(out, s.out[start:start+matchLen]...)
+ out = append(out, s.out[start:start+ml]...)
} else {
// Overlapping copy
// Extend destination slice and copy one byte at the time.
- out = out[:len(out)+matchLen]
- src := out[start : start+matchLen]
+ out = out[:len(out)+ml]
+ src := out[start : start+ml]
// Destination is the space we just added.
- dst := out[len(out)-matchLen:]
+ dst := out[len(out)-ml:]
dst = dst[:len(src)]
for i := range src {
dst[i] = src[i]
@@ -175,30 +269,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
// This is the last sequence, so we shouldn't update state.
break
}
- if true {
- // Manually inlined, ~ 5-20% faster
- // Update all 3 states at once. Approx 20% faster.
- a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
-
- nBits := a.nbBits + b.nbBits + c.nbBits
- if nBits == 0 {
- s.litLengths.state.state = s.litLengths.state.dt[a.newState]
- s.matchLengths.state.state = s.matchLengths.state.dt[b.newState]
- s.offsets.state.state = s.offsets.state.dt[c.newState]
- } else {
- bits := br.getBitsFast(nBits)
- lowBits := uint16(bits >> ((c.nbBits + b.nbBits) & 31))
- s.litLengths.state.state = s.litLengths.state.dt[a.newState+lowBits]
- lowBits = uint16(bits >> (c.nbBits & 31))
- lowBits &= bitMask[b.nbBits&15]
- s.matchLengths.state.state = s.matchLengths.state.dt[b.newState+lowBits]
-
- lowBits = uint16(bits) & bitMask[c.nbBits&15]
- s.offsets.state.state = s.offsets.state.dt[c.newState+lowBits]
- }
+ // Manually inlined, ~ 5-20% faster
+ // Update all 3 states at once. Approx 20% faster.
+ nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+ if nBits == 0 {
+ llState = llTable[llState.newState()&maxTableMask]
+ mlState = mlTable[mlState.newState()&maxTableMask]
+ ofState = ofTable[ofState.newState()&maxTableMask]
} else {
- s.updateAlt(br)
+ bits := br.getBitsFast(nBits)
+ lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+ llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits >> (ofState.nbBits() & 31))
+ lowBits &= bitMask[mlState.nbBits()&15]
+ mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+ ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
}
}
@@ -230,55 +319,49 @@ func (s *sequenceDecs) updateAlt(br *bitReader) {
// Update all 3 states at once. Approx 20% faster.
a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
- nBits := a.nbBits + b.nbBits + c.nbBits
+ nBits := a.nbBits() + b.nbBits() + c.nbBits()
if nBits == 0 {
- s.litLengths.state.state = s.litLengths.state.dt[a.newState]
- s.matchLengths.state.state = s.matchLengths.state.dt[b.newState]
- s.offsets.state.state = s.offsets.state.dt[c.newState]
+ s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
+ s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
+ s.offsets.state.state = s.offsets.state.dt[c.newState()]
return
}
bits := br.getBitsFast(nBits)
- lowBits := uint16(bits >> ((c.nbBits + b.nbBits) & 31))
- s.litLengths.state.state = s.litLengths.state.dt[a.newState+lowBits]
+ lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
+ s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
- lowBits = uint16(bits >> (c.nbBits & 31))
- lowBits &= bitMask[b.nbBits&15]
- s.matchLengths.state.state = s.matchLengths.state.dt[b.newState+lowBits]
+ lowBits = uint16(bits >> (c.nbBits() & 31))
+ lowBits &= bitMask[b.nbBits()&15]
+ s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
- lowBits = uint16(bits) & bitMask[c.nbBits&15]
- s.offsets.state.state = s.offsets.state.dt[c.newState+lowBits]
+ lowBits = uint16(bits) & bitMask[c.nbBits()&15]
+ s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
}
// nextFast will return new states when there are at least 4 unused bytes left on the stream when done.
-func (s *sequenceDecs) nextFast(br *bitReader) (ll, mo, ml int) {
+func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
// Final will not read from stream.
- ll, llB := s.litLengths.state.final()
- ml, mlB := s.matchLengths.state.final()
- mo, moB := s.offsets.state.final()
+ ll, llB := llState.final()
+ ml, mlB := mlState.final()
+ mo, moB := ofState.final()
// extra bits are stored in reverse order.
br.fillFast()
- if s.maxBits <= 32 {
- mo += br.getBits(moB)
- ml += br.getBits(mlB)
- ll += br.getBits(llB)
- } else {
- mo += br.getBits(moB)
+ mo += br.getBits(moB)
+ if s.maxBits > 32 {
br.fillFast()
- // matchlength+literal length, max 32 bits
- ml += br.getBits(mlB)
- ll += br.getBits(llB)
}
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
- // mo = s.adjustOffset(mo, ll, moB)
- // Inlined for rather big speedup
if moB > 1 {
s.prevOffset[2] = s.prevOffset[1]
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = mo
return
}
-
+ // mo = s.adjustOffset(mo, ll, moB)
+ // Inlined for rather big speedup
if ll == 0 {
// There is an exception though, when current sequence's literals_length = 0.
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
@@ -312,11 +395,11 @@ func (s *sequenceDecs) nextFast(br *bitReader) (ll, mo, ml int) {
return
}
-func (s *sequenceDecs) next(br *bitReader) (ll, mo, ml int) {
+func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
// Final will not read from stream.
- ll, llB := s.litLengths.state.final()
- ml, mlB := s.matchLengths.state.final()
- mo, moB := s.offsets.state.final()
+ ll, llB := llState.final()
+ ml, mlB := mlState.final()
+ mo, moB := ofState.final()
// extra bits are stored in reverse order.
br.fill()
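
The large hunk above manually inlines `nextFast` and `adjustOffset` into the hot decode loop. The repeat-offset rules being inlined are easier to follow as a standalone function; the sketch below restates them with simplified types (it is not the vendored implementation):

```go
package main

import "fmt"

// adjustOffset applies the zstd repeat-offset rules: when the offset was
// read with more than one bit it is a new absolute offset; otherwise it
// selects from the recent-offset history, shifted by one when the
// sequence carries no literals.
func adjustOffset(prev *[3]int, mo, ll int, moBits uint8) int {
	if moBits > 1 {
		// A real offset: push it onto the history.
		prev[2], prev[1], prev[0] = prev[1], prev[0], mo
		return mo
	}
	if ll == 0 {
		// literals_length == 0 shifts the repeat codes by one.
		mo++
	}
	if mo == 0 {
		return prev[0] // most recent offset, history unchanged
	}
	var temp int
	if mo == 3 {
		temp = prev[0] - 1 // most recent offset minus one byte
	} else {
		temp = prev[mo]
	}
	if temp == 0 {
		temp = 1 // corrupt input; force a valid offset
	}
	if mo != 1 {
		prev[2] = prev[1]
	}
	prev[1] = prev[0]
	prev[0] = temp
	return temp
}

func main() {
	prev := [3]int{1, 4, 8}
	mo := adjustOffset(&prev, 2, 5, 1)
	fmt.Println(mo, prev) // 8 [8 1 4]: prev[2] is promoted to the front
}
```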
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
index e9e518570e4..c95fe5111c1 100644
--- a/vendor/github.com/klauspost/compress/zstd/snappy.go
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -80,6 +80,7 @@ type SnappyConverter struct {
// If any error is detected on the Snappy stream it is returned.
// The number of bytes written is returned.
func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
+ initPredefined()
r.err = nil
r.r = in
if r.block == nil {
@@ -110,7 +111,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
// Add empty last block
r.block.reset(nil)
r.block.last = true
- err := r.block.encodeLits()
+ err := r.block.encodeLits(r.block.literals, false)
if err != nil {
return written, err
}
@@ -177,7 +178,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
r.err = ErrSnappyCorrupt
return written, r.err
}
- err = r.block.encode()
+ err = r.block.encode(nil, false, false)
switch err {
case errIncompressible:
r.block.popOffsets()
@@ -187,7 +188,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
println("snappy.Decode:", err)
return written, err
}
- err = r.block.encodeLits()
+ err = r.block.encodeLits(r.block.literals, false)
if err != nil {
return written, err
}
@@ -234,7 +235,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
r.err = ErrSnappyCorrupt
return written, r.err
}
- err := r.block.encodeLits()
+ err := r.block.encodeLits(r.block.literals, false)
if err != nil {
return written, err
}
@@ -416,7 +417,7 @@ var crcTable = crc32.MakeTable(crc32.Castagnoli)
// https://github.com/google/snappy/blob/master/framing_format.txt
func snappyCRC(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
- return uint32(c>>15|c<<17) + 0xa282ead8
+ return c>>15 | c<<17 + 0xa282ead8
}
// snappyDecodedLen returns the length of the decoded block and the number of bytes
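
Dropping the parentheses in `snappyCRC` is behavior-preserving: in Go, `|` and `+` sit at the same (additive) precedence level and associate left to right, so `c>>15 | c<<17 + 0xa282ead8` still parses as `(c>>15 | c<<17) + 0xa282ead8`. A quick check:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

var crcTable = crc32.MakeTable(crc32.Castagnoli)

// Old form, with explicit grouping.
func crcOld(b []byte) uint32 {
	c := crc32.Update(0, crcTable, b)
	return uint32(c>>15|c<<17) + 0xa282ead8
}

// New form: | and + share precedence and associate left to right,
// so no parentheses are needed.
func crcNew(b []byte) uint32 {
	c := crc32.Update(0, crcTable, b)
	return c>>15 | c<<17 + 0xa282ead8
}

func main() {
	data := []byte("snappy framing")
	fmt.Println(crcOld(data) == crcNew(data)) // true
}
```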
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index b975954c1cd..9056beef271 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -4,20 +4,34 @@
package zstd
import (
+ "bytes"
"errors"
"log"
+ "math"
"math/bits"
)
+// enable debug printing
const debug = false
+
+// Enable extra assertions.
+const debugAsserts = debug || false
+
+// print sequence details
const debugSequences = false
+// print detailed matching information
+const debugMatches = false
+
// force encoder to use predefined tables.
const forcePreDef = false
// zstdMinMatch is the minimum zstd match length.
const zstdMinMatch = 3
+// Reset the buffer offset when reaching this.
+const bufferReset = math.MaxInt32 - MaxWindowSize
+
var (
// ErrReservedBlockType is returned when a reserved block type is found.
// Typically this indicates wrong or corrupted input.
@@ -60,6 +74,10 @@ var (
// ErrDecoderClosed will be returned if the Decoder was used after
// Close has been called.
ErrDecoderClosed = errors.New("decoder used after Close")
+
+ // ErrDecoderNilInput is returned when a nil Reader was provided
+ // and an operation other than Reset/DecodeAll/Close was attempted.
+ ErrDecoderNilInput = errors.New("nil input provided as reader")
)
func println(a ...interface{}) {
@@ -74,6 +92,17 @@ func printf(format string, a ...interface{}) {
}
}
+// matchLenFast does matching, but will not match the last (up to 7) bytes.
+func matchLenFast(a, b []byte) int {
+ endI := len(a) & (math.MaxInt32 - 7)
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ return i + bits.TrailingZeros64(diff)>>3
+ }
+ }
+ return endI
+}
+
// matchLen returns the maximum length.
// a must be the shortest of the two.
// The function also returns whether all bytes matched.
@@ -84,33 +113,18 @@ func matchLen(a, b []byte) int {
return i + (bits.TrailingZeros64(diff) >> 3)
}
}
+
checked := (len(a) >> 3) << 3
a = a[checked:]
b = b[checked:]
- // TODO: We could do a 4 check.
for i := range a {
if a[i] != b[i] {
- return int(i) + checked
+ return i + checked
}
}
return len(a) + checked
}
-// matchLen returns a match length in src between index s and t
-func matchLenIn(src []byte, s, t int32) int32 {
- s1 := len(src)
- b := src[t:]
- a := src[s:s1]
- b = b[:len(a)]
- // Extend the match to be as long as possible.
- for i := range a {
- if a[i] != b[i] {
- return int32(i)
- }
- }
- return int32(len(a))
-}
-
func load3232(b []byte, i int32) uint32 {
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
b = b[i:]
@@ -133,3 +147,10 @@ func load64(b []byte, i int) uint64 {
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
+
+type byter interface {
+ Bytes() []byte
+ Len() int
+}
+
+var _ byter = &bytes.Buffer{}
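
`matchLenFast` and the cleaned-up `matchLen` both rely on the same trick: XOR two 8-byte words, and the index of the first mismatching byte falls out of the trailing-zero count. A runnable restatement (assuming, as the vendored comment requires, that `a` is the shorter input):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func load64(b []byte, i int) uint64 {
	return binary.LittleEndian.Uint64(b[i:])
}

// matchLen compares a against b eight bytes at a time; the XOR of two
// equal words is zero, and the first differing byte is found from the
// trailing-zero count of the XOR. a must be the shorter slice.
func matchLen(a, b []byte) int {
	var i int
	for ; i+8 <= len(a); i += 8 {
		if diff := load64(a, i) ^ load64(b, i); diff != 0 {
			return i + bits.TrailingZeros64(diff)>>3
		}
	}
	for ; i < len(a); i++ { // scalar tail for the last <8 bytes
		if a[i] != b[i] {
			return i
		}
	}
	return len(a)
}

func main() {
	fmt.Println(matchLen([]byte("hello, worlds"), []byte("hello, word!!"))) // 10
}
```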
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
deleted file mode 100644
index 14127cd831e..00000000000
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
+++ /dev/null
@@ -1,9 +0,0 @@
-(The MIT License)
-
-Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
deleted file mode 100644
index 09a4a35c9bb..00000000000
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Windows Terminal Sequences
-
-This library allow for enabling Windows terminal color support for Go.
-
-See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.
-
-## Usage
-
-```go
-import (
- "syscall"
-
- sequences "github.com/konsorten/go-windows-terminal-sequences"
-)
-
-func main() {
- sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
-}
-
-```
-
-## Authors
-
-The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
-
-We thank all the authors who provided code to this library:
-
-* Felix Kollmann
-* Nicolas Perraut
-* @dirty49374
-
-## License
-
-(The MIT License)
-
-Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
deleted file mode 100644
index 716c6131256..00000000000
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/konsorten/go-windows-terminal-sequences
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
deleted file mode 100644
index 57f530ae83f..00000000000
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build windows
-
-package sequences
-
-import (
- "syscall"
-)
-
-var (
- kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll")
- setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
-)
-
-func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
- const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
-
- var mode uint32
- err := syscall.GetConsoleMode(syscall.Stdout, &mode)
- if err != nil {
- return err
- }
-
- if enable {
- mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
- } else {
- mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
- }
-
- ret, _, err := setConsoleMode.Call(uintptr(stream), uintptr(mode))
- if ret == 0 {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
deleted file mode 100644
index df61a6f2f6f..00000000000
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build linux darwin
-
-package sequences
-
-import (
- "fmt"
-)
-
-func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error {
- return fmt.Errorf("windows only package")
-}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
index a42e9d65ad7..b5f5e261329 100644
--- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go
+++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
@@ -401,6 +401,7 @@ func (r *Lexer) scanToken() {
// consume resets the current token to allow scanning the next one.
func (r *Lexer) consume() {
r.token.kind = tokenUndef
+ r.token.byteValueCloned = false
r.token.delimValue = 0
}
@@ -528,6 +529,7 @@ func (r *Lexer) Skip() {
func (r *Lexer) SkipRecursive() {
r.scanToken()
var start, end byte
+ startPos := r.start
switch r.token.delimValue {
case '{':
@@ -553,6 +555,14 @@ func (r *Lexer) SkipRecursive() {
level--
if level == 0 {
r.pos += i + 1
+ if !json.Valid(r.Data[startPos:r.pos]) {
+ r.pos = len(r.Data)
+ r.fatalError = &LexerError{
+ Reason: "skipped array/object json value is invalid",
+ Offset: r.pos,
+ Data: string(r.Data[r.pos:]),
+ }
+ }
return
}
case c == '\\' && inQuotes:
@@ -702,6 +712,10 @@ func (r *Lexer) Bytes() []byte {
r.errInvalidToken("string")
return nil
}
+ if err := r.unescapeStringToken(); err != nil {
+ r.errInvalidToken("string")
+ return nil
+ }
ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue)))
n, err := base64.StdEncoding.Decode(ret, r.token.byteValue)
if err != nil {
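
The `SkipRecursive` hardening records where the skipped value started and, once the matching delimiter is found, re-validates the whole span with `encoding/json`'s `json.Valid`, so delimiter-balanced but structurally invalid input now fails instead of being silently skipped. A reduced illustration of the guard:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// skipValue emulates the hardened SkipRecursive: after locating the
// matching close delimiter, the raw span is re-checked with json.Valid
// so structurally corrupt input is rejected instead of silently skipped.
func skipValue(raw []byte) error {
	if !json.Valid(raw) {
		return fmt.Errorf("skipped array/object json value is invalid")
	}
	return nil
}

func main() {
	fmt.Println(skipValue([]byte(`{"a":[1,2,3]}`))) // <nil>
	fmt.Println(skipValue([]byte(`{"a":[1,2,3}`)))  // error: mismatched delimiters
}
```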
diff --git a/vendor/github.com/oxtoacart/bpool/bufferpool.go b/vendor/github.com/oxtoacart/bpool/bufferpool.go
index 8c8ac64db27..249a078ed3f 100644
--- a/vendor/github.com/oxtoacart/bpool/bufferpool.go
+++ b/vendor/github.com/oxtoacart/bpool/bufferpool.go
@@ -38,3 +38,8 @@ func (bp *BufferPool) Put(b *bytes.Buffer) {
default: // Discard the buffer if the pool is full.
}
}
+
+// NumPooled returns the number of items currently pooled.
+func (bp *BufferPool) NumPooled() int {
+ return len(bp.c)
+}
diff --git a/vendor/github.com/oxtoacart/bpool/bytepool.go b/vendor/github.com/oxtoacart/bpool/bytepool.go
index ef3898afd60..a5ebe0515c6 100644
--- a/vendor/github.com/oxtoacart/bpool/bytepool.go
+++ b/vendor/github.com/oxtoacart/bpool/bytepool.go
@@ -5,6 +5,7 @@ package bpool
type BytePool struct {
c chan []byte
w int
+ h int
}
// NewBytePool creates a new BytePool bounded to the given maxSize, with new
@@ -31,14 +32,24 @@ func (bp *BytePool) Get() (b []byte) {
// Put returns the given Buffer to the BytePool.
func (bp *BytePool) Put(b []byte) {
+ if cap(b) < bp.w {
+ // someone tried to put back a too small buffer, discard it
+ return
+ }
+
select {
- case bp.c <- b:
+ case bp.c <- b[:bp.w]:
// buffer went back into pool
default:
// buffer didn't go back into pool, just discard
}
}
+// NumPooled returns the number of items currently pooled.
+func (bp *BytePool) NumPooled() int {
+ return len(bp.c)
+}
+
// Width returns the width of the byte arrays in this pool.
func (bp *BytePool) Width() (n int) {
return bp.w
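
`BytePool.Put` now defends the pool's invariant that every pooled slice has exactly width `w`: undersized buffers are discarded, and larger ones are resliced back down so `Get` always hands out slices of a uniform length. A compact sketch of the guard:

```go
package main

import "fmt"

type BytePool struct {
	c chan []byte
	w int
}

// Put only pools buffers that still have the configured width: a
// too-small buffer is dropped, and larger ones are resliced back to w.
func (bp *BytePool) Put(b []byte) {
	if cap(b) < bp.w {
		return // discard undersized buffers
	}
	select {
	case bp.c <- b[:bp.w]:
	default: // pool full, discard
	}
}

func main() {
	bp := &BytePool{c: make(chan []byte, 4), w: 8}
	bp.Put(make([]byte, 3))  // dropped: cap < width
	bp.Put(make([]byte, 16)) // kept, resliced to 8 bytes
	fmt.Println(len(bp.c))   // 1
}
```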
diff --git a/vendor/github.com/oxtoacart/bpool/byteslice.go b/vendor/github.com/oxtoacart/bpool/byteslice.go
new file mode 100644
index 00000000000..7b94b390183
--- /dev/null
+++ b/vendor/github.com/oxtoacart/bpool/byteslice.go
@@ -0,0 +1,81 @@
+package bpool
+
+// WrapByteSlice wraps a []byte as a ByteSlice
+func WrapByteSlice(full []byte, headerLength int) ByteSlice {
+ return ByteSlice{
+ full: full,
+ current: full[headerLength:],
+ head: headerLength,
+ end: len(full),
+ }
+}
+
+// ByteSlice provides a wrapper around []byte with some added convenience
+type ByteSlice struct {
+ full []byte
+ current []byte
+ head int
+ end int
+}
+
+// ResliceTo reslices the end of the current slice.
+func (b ByteSlice) ResliceTo(end int) ByteSlice {
+ return ByteSlice{
+ full: b.full,
+ current: b.current[:end],
+ head: b.head,
+ end: b.head + end,
+ }
+}
+
+// Bytes returns the current slice
+func (b ByteSlice) Bytes() []byte {
+ return b.current
+}
+
+// BytesWithHeader returns the current slice preceded by the header
+func (b ByteSlice) BytesWithHeader() []byte {
+ return b.full[:b.end]
+}
+
+// Full returns the full original buffer underlying the ByteSlice
+func (b ByteSlice) Full() []byte {
+ return b.full
+}
+
+// ByteSlicePool is a pool of byte slices
+type ByteSlicePool interface {
+ // Get gets a byte slice from the pool
+ GetSlice() ByteSlice
+ // Put returns a byte slice to the pool
+ PutSlice(ByteSlice)
+ // NumPooled returns the number of currently pooled items
+ NumPooled() int
+}
+
+// NewByteSlicePool creates a new ByteSlicePool bounded to the
+// given maxSize, with new byte arrays sized based on width
+func NewByteSlicePool(maxSize int, width int) ByteSlicePool {
+ return NewHeaderPreservingByteSlicePool(maxSize, width, 0)
+}
+
+// NewHeaderPreservingByteSlicePool creates a new ByteSlicePool bounded to the
+// given maxSize, with new byte arrays sized based on width and headerLength
+// preserved at the beginning of the slice.
+func NewHeaderPreservingByteSlicePool(maxSize int, width int, headerLength int) ByteSlicePool {
+ return &BytePool{
+ c: make(chan []byte, maxSize),
+ w: width + headerLength,
+ h: headerLength,
+ }
+}
+
+// GetSlice implements the method from interface ByteSlicePool
+func (bp *BytePool) GetSlice() ByteSlice {
+ return WrapByteSlice(bp.Get(), bp.h)
+}
+
+// PutSlice implements the method from interface ByteSlicePool
+func (bp *BytePool) PutSlice(b ByteSlice) {
+ bp.Put(b.Full())
+}
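
The new `ByteSlice` type carries a buffer with a reserved header region, which avoids copying when a payload must later be prefixed with framing bytes. A usage sketch against the API added above (assuming the package resolves as `github.com/oxtoacart/bpool`):

```go
package main

import (
	"fmt"

	"github.com/oxtoacart/bpool"
)

func main() {
	// Pool of 64-byte payload slices, each with 4 bytes reserved up
	// front for a protocol header.
	pool := bpool.NewHeaderPreservingByteSlicePool(16, 64, 4)

	s := pool.GetSlice()
	payload := s.Bytes() // 64 bytes, starting after the header
	n := copy(payload, []byte("ping"))

	msg := s.ResliceTo(n)          // trim to what was written
	frame := msg.BytesWithHeader() // header + payload, ready to send
	fmt.Println(len(frame), len(msg.Bytes())) // 8 4

	pool.PutSlice(s) // return the full backing array to the pool
}
```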
diff --git a/vendor/github.com/oxtoacart/bpool/go.mod b/vendor/github.com/oxtoacart/bpool/go.mod
new file mode 100644
index 00000000000..9bb31472344
--- /dev/null
+++ b/vendor/github.com/oxtoacart/bpool/go.mod
@@ -0,0 +1,5 @@
+module github.com/oxtoacart/bpool
+
+go 1.12
+
+require github.com/stretchr/testify v1.3.0
diff --git a/vendor/github.com/oxtoacart/bpool/go.sum b/vendor/github.com/oxtoacart/bpool/go.sum
new file mode 100644
index 00000000000..4347755afe8
--- /dev/null
+++ b/vendor/github.com/oxtoacart/bpool/go.sum
@@ -0,0 +1,7 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
index 6b7d7d1e8b9..1fb13abebe7 100644
--- a/vendor/github.com/sirupsen/logrus/.gitignore
+++ b/vendor/github.com/sirupsen/logrus/.gitignore
@@ -1,2 +1,4 @@
logrus
vendor
+
+.idea/
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
index 584026d67ca..7567f612898 100644
--- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -1,3 +1,39 @@
+# 1.8.1
+Code quality:
+ * move magefile into its own subdir/submodule to remove the magefile dependency on logrus consumers
+ * improve timestamp format documentation
+
+Fixes:
+ * fix race condition on logger hooks
+
+
+# 1.8.0
+
+Correct versioning number replacing v1.7.1.
+
+# 1.7.1
+
+Beware: this release has introduced a new public API and its semver is therefore incorrect.
+
+Code quality:
+ * use go 1.15 in travis
+ * use magefile as task runner
+
+Fixes:
+ * small fixes for the new go 1.13 error formatting system
+ * fix for a long-standing race condition with mutating data hooks
+
+Features:
+ * build support for zos
+
+# 1.7.0
+Fixes:
+ * the dependency on a Windows terminal library has been removed
+
+Features:
+ * a new buffer pool management API has been added
+ * a set of `Fn()` functions have been added
+
# 1.6.0
Fixes:
* end of line cleanup
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
index 5796706dbfa..5152b6aa406 100644
--- a/vendor/github.com/sirupsen/logrus/README.md
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -402,7 +402,7 @@ func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
// source of the official loggers.
serialized, err := json.Marshal(entry.Data)
if err != nil {
- return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err)
}
return append(serialized, '\n'), nil
}
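
The `%v` to `%w` switch in the README (and in `json_formatter.go` below) is more than cosmetic: `%w` wraps the error so callers can still match the original with `errors.Is`/`errors.As`. For example:

```go
package main

import (
	"errors"
	"fmt"
)

var errMarshal = errors.New("boom")

func format() error {
	// %w (unlike %v) keeps the original error in the chain.
	return fmt.Errorf("Failed to marshal fields to JSON, %w", errMarshal)
}

func main() {
	err := format()
	fmt.Println(errors.Is(err, errMarshal)) // true; with %v this is false
}
```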
diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go
new file mode 100644
index 00000000000..4545dec07d8
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/buffer_pool.go
@@ -0,0 +1,52 @@
+package logrus
+
+import (
+ "bytes"
+ "sync"
+)
+
+var (
+ bufferPool BufferPool
+)
+
+type BufferPool interface {
+ Put(*bytes.Buffer)
+ Get() *bytes.Buffer
+}
+
+type defaultPool struct {
+ pool *sync.Pool
+}
+
+func (p *defaultPool) Put(buf *bytes.Buffer) {
+ p.pool.Put(buf)
+}
+
+func (p *defaultPool) Get() *bytes.Buffer {
+ return p.pool.Get().(*bytes.Buffer)
+}
+
+func getBuffer() *bytes.Buffer {
+ return bufferPool.Get()
+}
+
+func putBuffer(buf *bytes.Buffer) {
+ buf.Reset()
+ bufferPool.Put(buf)
+}
+
+// SetBufferPool allows replacing the default logrus buffer pool
+// to better meet the specific needs of an application.
+func SetBufferPool(bp BufferPool) {
+ bufferPool = bp
+}
+
+func init() {
+ SetBufferPool(&defaultPool{
+ pool: &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ },
+ })
+}
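
`buffer_pool.go` moves the formatting buffers behind a `BufferPool` interface and exposes `SetBufferPool`, so applications can substitute their own pooling policy. A sketch of a custom pool that refuses to retain oversized buffers (a hypothetical policy, not part of logrus):

```go
package main

import (
	"bytes"
	"sync"

	"github.com/sirupsen/logrus"
)

// boundedPool is a drop-in BufferPool that refuses to retain buffers
// that have grown past a size cap, keeping memory usage predictable.
type boundedPool struct {
	pool sync.Pool
	max  int
}

func (p *boundedPool) Get() *bytes.Buffer {
	return p.pool.Get().(*bytes.Buffer)
}

func (p *boundedPool) Put(buf *bytes.Buffer) {
	if buf.Cap() > p.max {
		return // let oversized buffers be collected
	}
	p.pool.Put(buf)
}

func main() {
	logrus.SetBufferPool(&boundedPool{
		pool: sync.Pool{New: func() interface{} { return new(bytes.Buffer) }},
		max:  1 << 16,
	})
	logrus.Info("formatted through the custom pool")
}
```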
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
index f6e062a3466..07a1e5fa724 100644
--- a/vendor/github.com/sirupsen/logrus/entry.go
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -13,7 +13,6 @@ import (
)
var (
- bufferPool *sync.Pool
// qualified package name, cached at first use
logrusPackage string
@@ -31,12 +30,6 @@ const (
)
func init() {
- bufferPool = &sync.Pool{
- New: func() interface{} {
- return new(bytes.Buffer)
- },
- }
-
// start at the bottom of the stack before the package-name cache is primed
minimumCallerDepth = 1
}
@@ -85,6 +78,14 @@ func NewEntry(logger *Logger) *Entry {
}
}
+func (entry *Entry) Dup() *Entry {
+ data := make(Fields, len(entry.Data))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err}
+}
+
// Returns the bytes representation of this entry from the formatter.
func (entry *Entry) Bytes() ([]byte, error) {
return entry.Logger.Formatter.Format(entry)
@@ -130,11 +131,9 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
for k, v := range fields {
isErrField := false
if t := reflect.TypeOf(v); t != nil {
- switch t.Kind() {
- case reflect.Func:
+ switch {
+ case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func:
isErrField = true
- case reflect.Ptr:
- isErrField = t.Elem().Kind() == reflect.Func
}
}
if isErrField {
@@ -219,65 +218,72 @@ func (entry Entry) HasCaller() (has bool) {
entry.Caller != nil
}
-// This function is not declared with a pointer value because otherwise
-// race conditions will occur when using multiple goroutines
-func (entry Entry) log(level Level, msg string) {
+func (entry *Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
- // Default to now, but allow users to override if they want.
- //
- // We don't have to worry about polluting future calls to Entry#log()
- // with this assignment because this function is declared with a
- // non-pointer receiver.
- if entry.Time.IsZero() {
- entry.Time = time.Now()
+ newEntry := entry.Dup()
+
+ if newEntry.Time.IsZero() {
+ newEntry.Time = time.Now()
}
- entry.Level = level
- entry.Message = msg
- entry.Logger.mu.Lock()
- if entry.Logger.ReportCaller {
- entry.Caller = getCaller()
+ newEntry.Level = level
+ newEntry.Message = msg
+
+ newEntry.Logger.mu.Lock()
+ reportCaller := newEntry.Logger.ReportCaller
+ newEntry.Logger.mu.Unlock()
+
+ if reportCaller {
+ newEntry.Caller = getCaller()
}
- entry.Logger.mu.Unlock()
- entry.fireHooks()
+ newEntry.fireHooks()
- buffer = bufferPool.Get().(*bytes.Buffer)
+ buffer = getBuffer()
+ defer func() {
+ newEntry.Buffer = nil
+ putBuffer(buffer)
+ }()
buffer.Reset()
- defer bufferPool.Put(buffer)
- entry.Buffer = buffer
+ newEntry.Buffer = buffer
- entry.write()
+ newEntry.write()
- entry.Buffer = nil
+ newEntry.Buffer = nil
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
- panic(&entry)
+ panic(newEntry)
}
}
func (entry *Entry) fireHooks() {
+ var tmpHooks LevelHooks
entry.Logger.mu.Lock()
- defer entry.Logger.mu.Unlock()
- err := entry.Logger.Hooks.Fire(entry.Level, entry)
+ tmpHooks = make(LevelHooks, len(entry.Logger.Hooks))
+ for k, v := range entry.Logger.Hooks {
+ tmpHooks[k] = v
+ }
+ entry.Logger.mu.Unlock()
+
+ err := tmpHooks.Fire(entry.Level, entry)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
}
}
func (entry *Entry) write() {
- entry.Logger.mu.Lock()
- defer entry.Logger.mu.Unlock()
serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
return
}
- if _, err = entry.Logger.Out.Write(serialized); err != nil {
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+ if _, err := entry.Logger.Out.Write(serialized); err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
}
@@ -323,7 +329,6 @@ func (entry *Entry) Fatal(args ...interface{}) {
func (entry *Entry) Panic(args ...interface{}) {
entry.Log(PanicLevel, args...)
- panic(fmt.Sprint(args...))
}
// Entry Printf family functions
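
The `entry.go` rewrite fixes the long-standing hook race: `log` now works on a `Dup`'d entry, and `fireHooks` copies the hook map under the mutex and fires the copy unlocked, so a hook can itself log (and hooks can be added concurrently) without deadlock or data race. The pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
)

type hook func(msg string)

type logger struct {
	mu    sync.Mutex
	hooks []hook
}

// fire snapshots the hook list under the lock, then invokes the copy
// unlocked: a hook that logs (or mutates hooks) can no longer deadlock
// or race with concurrent registration.
func (l *logger) fire(msg string) {
	l.mu.Lock()
	hooks := make([]hook, len(l.hooks))
	copy(hooks, l.hooks)
	l.mu.Unlock()

	for _, h := range hooks {
		h(msg)
	}
}

func main() {
	l := &logger{hooks: []hook{func(m string) { fmt.Println("hook:", m) }}}
	l.fire("hello")
}
```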
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
index 42b04f6c809..017c30ce678 100644
--- a/vendor/github.com/sirupsen/logrus/exported.go
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -134,6 +134,51 @@ func Fatal(args ...interface{}) {
std.Fatal(args...)
}
+// TraceFn logs a message from a func at level Trace on the standard logger.
+func TraceFn(fn LogFunction) {
+ std.TraceFn(fn)
+}
+
+// DebugFn logs a message from a func at level Debug on the standard logger.
+func DebugFn(fn LogFunction) {
+ std.DebugFn(fn)
+}
+
+// PrintFn logs a message from a func at level Info on the standard logger.
+func PrintFn(fn LogFunction) {
+ std.PrintFn(fn)
+}
+
+// InfoFn logs a message from a func at level Info on the standard logger.
+func InfoFn(fn LogFunction) {
+ std.InfoFn(fn)
+}
+
+// WarnFn logs a message from a func at level Warn on the standard logger.
+func WarnFn(fn LogFunction) {
+ std.WarnFn(fn)
+}
+
+// WarningFn logs a message from a func at level Warn on the standard logger.
+func WarningFn(fn LogFunction) {
+ std.WarningFn(fn)
+}
+
+// ErrorFn logs a message from a func at level Error on the standard logger.
+func ErrorFn(fn LogFunction) {
+ std.ErrorFn(fn)
+}
+
+// PanicFn logs a message from a func at level Panic on the standard logger.
+func PanicFn(fn LogFunction) {
+ std.PanicFn(fn)
+}
+
+// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1.
+func FatalFn(fn LogFunction) {
+ std.FatalFn(fn)
+}
+
// Tracef logs a message at level Trace on the standard logger.
func Tracef(format string, args ...interface{}) {
std.Tracef(format, args...)
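
The new `*Fn` entry points take a `LogFunction`, so argument construction is deferred until the level check has passed. Usage (the expensive closure below never runs at Info level):

```go
package main

import (
	"time"

	"github.com/sirupsen/logrus"
)

func expensiveDump() string {
	time.Sleep(10 * time.Millisecond) // stand-in for costly serialization
	return "giant state dump"
}

func main() {
	logrus.SetLevel(logrus.InfoLevel)

	// The closure only runs when Debug is enabled, so the expensive
	// argument construction is skipped entirely at Info level.
	logrus.DebugFn(func() []interface{} {
		return []interface{}{"state:", expensiveDump()}
	})
}
```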
diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod
index d41329679f8..b3919d5eabf 100644
--- a/vendor/github.com/sirupsen/logrus/go.mod
+++ b/vendor/github.com/sirupsen/logrus/go.mod
@@ -2,10 +2,9 @@ module github.com/sirupsen/logrus
require (
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/konsorten/go-windows-terminal-sequences v1.0.3
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/testify v1.2.2
- golang.org/x/sys v0.0.0-20190422165155-953cdadca894
+ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037
)
go 1.13
diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum
index 49c690f2383..694c18b8454 100644
--- a/vendor/github.com/sirupsen/logrus/go.sum
+++ b/vendor/github.com/sirupsen/logrus/go.sum
@@ -1,12 +1,8 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
index ba7f237112b..c96dc5636bf 100644
--- a/vendor/github.com/sirupsen/logrus/json_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -23,6 +23,9 @@ func (f FieldMap) resolve(key fieldKey) string {
// JSONFormatter formats logs into parsable json
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
+ // The format to use is the same as for time.Format or time.Parse from the standard
+ // library.
+ // The standard library already provides a set of predefined formats.
TimestampFormat string
// DisableTimestamp allows disabling automatic timestamps in output
@@ -118,7 +121,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
encoder.SetIndent("", " ")
}
if err := encoder.Encode(data); err != nil {
- return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err)
+ return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err)
}
return b.Bytes(), nil
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
index 6fdda748e4d..337704457a2 100644
--- a/vendor/github.com/sirupsen/logrus/logger.go
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -9,6 +9,11 @@ import (
"time"
)
+// LogFunction is used for big messages: it can be more efficient to pass a function
+// and only call it if the log level is actually enabled, rather than
+// generating the log message and then checking if the level is enabled
+type LogFunction func() []interface{}
+
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stderr`. You can also set this to
@@ -70,7 +75,7 @@ func (mw *MutexWrap) Disable() {
//
// var log = &logrus.Logger{
// Out: os.Stderr,
-// Formatter: new(logrus.JSONFormatter),
+// Formatter: new(logrus.TextFormatter),
// Hooks: make(logrus.LevelHooks),
// Level: logrus.DebugLevel,
// }
@@ -195,6 +200,14 @@ func (logger *Logger) Log(level Level, args ...interface{}) {
}
}
+func (logger *Logger) LogFn(level Level, fn LogFunction) {
+ if logger.IsLevelEnabled(level) {
+ entry := logger.newEntry()
+ entry.Log(level, fn()...)
+ logger.releaseEntry(entry)
+ }
+}
+
func (logger *Logger) Trace(args ...interface{}) {
logger.Log(TraceLevel, args...)
}
@@ -234,6 +247,45 @@ func (logger *Logger) Panic(args ...interface{}) {
logger.Log(PanicLevel, args...)
}
+func (logger *Logger) TraceFn(fn LogFunction) {
+ logger.LogFn(TraceLevel, fn)
+}
+
+func (logger *Logger) DebugFn(fn LogFunction) {
+ logger.LogFn(DebugLevel, fn)
+}
+
+func (logger *Logger) InfoFn(fn LogFunction) {
+ logger.LogFn(InfoLevel, fn)
+}
+
+func (logger *Logger) PrintFn(fn LogFunction) {
+ entry := logger.newEntry()
+ entry.Print(fn()...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) WarnFn(fn LogFunction) {
+ logger.LogFn(WarnLevel, fn)
+}
+
+func (logger *Logger) WarningFn(fn LogFunction) {
+ logger.WarnFn(fn)
+}
+
+func (logger *Logger) ErrorFn(fn LogFunction) {
+ logger.LogFn(ErrorLevel, fn)
+}
+
+func (logger *Logger) FatalFn(fn LogFunction) {
+ logger.LogFn(FatalLevel, fn)
+ logger.Exit(1)
+}
+
+func (logger *Logger) PanicFn(fn LogFunction) {
+ logger.LogFn(PanicLevel, fn)
+}
+
func (logger *Logger) Logln(level Level, args ...interface{}) {
if logger.IsLevelEnabled(level) {
entry := logger.newEntry()
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
index cc4fe6e3177..04748b8515f 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -1,4 +1,4 @@
-// +build linux aix
+// +build linux aix zos
// +build !js
package logrus
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
index 572889db216..2879eb50ea6 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
@@ -5,30 +5,23 @@ package logrus
import (
"io"
"os"
- "syscall"
- sequences "github.com/konsorten/go-windows-terminal-sequences"
+ "golang.org/x/sys/windows"
)
-func initTerminal(w io.Writer) {
- switch v := w.(type) {
- case *os.File:
- sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
- }
-}
-
func checkIfTerminal(w io.Writer) bool {
- var ret bool
switch v := w.(type) {
case *os.File:
+ handle := windows.Handle(v.Fd())
var mode uint32
- err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
- ret = (err == nil)
- default:
- ret = false
- }
- if ret {
- initTerminal(w)
+ if err := windows.GetConsoleMode(handle, &mode); err != nil {
+ return false
+ }
+ mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ if err := windows.SetConsoleMode(handle, mode); err != nil {
+ return false
+ }
+ return true
}
- return ret
+ return false
}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
index 3c28b54caba..be2c6efe5ed 100644
--- a/vendor/github.com/sirupsen/logrus/text_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -53,7 +53,10 @@ type TextFormatter struct {
// the time passed since beginning of execution.
FullTimestamp bool
- // TimestampFormat to use for display when a full timestamp is printed
+ // TimestampFormat to use for display when a full timestamp is printed.
+ // The format to use is the same as for time.Format or time.Parse from the standard
+ // library.
+ // The standard library already provides a set of predefined formats.
TimestampFormat string
// The fields are sorted by default for a consistent output. For applications
@@ -235,6 +238,8 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
levelColor = red
+ case InfoLevel:
+ levelColor = blue
default:
levelColor = blue
}
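
As the amended doc comment says, `TimestampFormat` takes a standard reference-time layout, exactly as `time.Format` does. For example:

```go
package main

import (
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	// time.RFC3339 is one of the standard library's predefined layouts.
	logrus.SetFormatter(&logrus.TextFormatter{
		FullTimestamp:   true,
		TimestampFormat: time.RFC3339,
	})
	logrus.Info("timestamped with RFC 3339")
}
```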
diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE
index f38ec5956b6..4b0421cf9ee 100644
--- a/vendor/github.com/stretchr/testify/LICENSE
+++ b/vendor/github.com/stretchr/testify/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell
+Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
new file mode 100644
index 00000000000..41649d26792
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -0,0 +1,394 @@
+package assert
+
+import (
+ "fmt"
+ "reflect"
+)
+
+type CompareType int
+
+const (
+ compareLess CompareType = iota - 1
+ compareEqual
+ compareGreater
+)
+
+var (
+ intType = reflect.TypeOf(int(1))
+ int8Type = reflect.TypeOf(int8(1))
+ int16Type = reflect.TypeOf(int16(1))
+ int32Type = reflect.TypeOf(int32(1))
+ int64Type = reflect.TypeOf(int64(1))
+
+ uintType = reflect.TypeOf(uint(1))
+ uint8Type = reflect.TypeOf(uint8(1))
+ uint16Type = reflect.TypeOf(uint16(1))
+ uint32Type = reflect.TypeOf(uint32(1))
+ uint64Type = reflect.TypeOf(uint64(1))
+
+ float32Type = reflect.TypeOf(float32(1))
+ float64Type = reflect.TypeOf(float64(1))
+
+ stringType = reflect.TypeOf("")
+)
+
+func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
+ obj1Value := reflect.ValueOf(obj1)
+ obj2Value := reflect.ValueOf(obj2)
+
+ // throughout this switch we try and avoid calling .Convert() if possible,
+ // as this has a pretty big performance impact
+ switch kind {
+ case reflect.Int:
+ {
+ intobj1, ok := obj1.(int)
+ if !ok {
+ intobj1 = obj1Value.Convert(intType).Interface().(int)
+ }
+ intobj2, ok := obj2.(int)
+ if !ok {
+ intobj2 = obj2Value.Convert(intType).Interface().(int)
+ }
+ if intobj1 > intobj2 {
+ return compareGreater, true
+ }
+ if intobj1 == intobj2 {
+ return compareEqual, true
+ }
+ if intobj1 < intobj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int8:
+ {
+ int8obj1, ok := obj1.(int8)
+ if !ok {
+ int8obj1 = obj1Value.Convert(int8Type).Interface().(int8)
+ }
+ int8obj2, ok := obj2.(int8)
+ if !ok {
+ int8obj2 = obj2Value.Convert(int8Type).Interface().(int8)
+ }
+ if int8obj1 > int8obj2 {
+ return compareGreater, true
+ }
+ if int8obj1 == int8obj2 {
+ return compareEqual, true
+ }
+ if int8obj1 < int8obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int16:
+ {
+ int16obj1, ok := obj1.(int16)
+ if !ok {
+ int16obj1 = obj1Value.Convert(int16Type).Interface().(int16)
+ }
+ int16obj2, ok := obj2.(int16)
+ if !ok {
+ int16obj2 = obj2Value.Convert(int16Type).Interface().(int16)
+ }
+ if int16obj1 > int16obj2 {
+ return compareGreater, true
+ }
+ if int16obj1 == int16obj2 {
+ return compareEqual, true
+ }
+ if int16obj1 < int16obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int32:
+ {
+ int32obj1, ok := obj1.(int32)
+ if !ok {
+ int32obj1 = obj1Value.Convert(int32Type).Interface().(int32)
+ }
+ int32obj2, ok := obj2.(int32)
+ if !ok {
+ int32obj2 = obj2Value.Convert(int32Type).Interface().(int32)
+ }
+ if int32obj1 > int32obj2 {
+ return compareGreater, true
+ }
+ if int32obj1 == int32obj2 {
+ return compareEqual, true
+ }
+ if int32obj1 < int32obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int64:
+ {
+ int64obj1, ok := obj1.(int64)
+ if !ok {
+ int64obj1 = obj1Value.Convert(int64Type).Interface().(int64)
+ }
+ int64obj2, ok := obj2.(int64)
+ if !ok {
+ int64obj2 = obj2Value.Convert(int64Type).Interface().(int64)
+ }
+ if int64obj1 > int64obj2 {
+ return compareGreater, true
+ }
+ if int64obj1 == int64obj2 {
+ return compareEqual, true
+ }
+ if int64obj1 < int64obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint:
+ {
+ uintobj1, ok := obj1.(uint)
+ if !ok {
+ uintobj1 = obj1Value.Convert(uintType).Interface().(uint)
+ }
+ uintobj2, ok := obj2.(uint)
+ if !ok {
+ uintobj2 = obj2Value.Convert(uintType).Interface().(uint)
+ }
+ if uintobj1 > uintobj2 {
+ return compareGreater, true
+ }
+ if uintobj1 == uintobj2 {
+ return compareEqual, true
+ }
+ if uintobj1 < uintobj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint8:
+ {
+ uint8obj1, ok := obj1.(uint8)
+ if !ok {
+ uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8)
+ }
+ uint8obj2, ok := obj2.(uint8)
+ if !ok {
+ uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8)
+ }
+ if uint8obj1 > uint8obj2 {
+ return compareGreater, true
+ }
+ if uint8obj1 == uint8obj2 {
+ return compareEqual, true
+ }
+ if uint8obj1 < uint8obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint16:
+ {
+ uint16obj1, ok := obj1.(uint16)
+ if !ok {
+ uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16)
+ }
+ uint16obj2, ok := obj2.(uint16)
+ if !ok {
+ uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16)
+ }
+ if uint16obj1 > uint16obj2 {
+ return compareGreater, true
+ }
+ if uint16obj1 == uint16obj2 {
+ return compareEqual, true
+ }
+ if uint16obj1 < uint16obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint32:
+ {
+ uint32obj1, ok := obj1.(uint32)
+ if !ok {
+ uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32)
+ }
+ uint32obj2, ok := obj2.(uint32)
+ if !ok {
+ uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32)
+ }
+ if uint32obj1 > uint32obj2 {
+ return compareGreater, true
+ }
+ if uint32obj1 == uint32obj2 {
+ return compareEqual, true
+ }
+ if uint32obj1 < uint32obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint64:
+ {
+ uint64obj1, ok := obj1.(uint64)
+ if !ok {
+ uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64)
+ }
+ uint64obj2, ok := obj2.(uint64)
+ if !ok {
+ uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64)
+ }
+ if uint64obj1 > uint64obj2 {
+ return compareGreater, true
+ }
+ if uint64obj1 == uint64obj2 {
+ return compareEqual, true
+ }
+ if uint64obj1 < uint64obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Float32:
+ {
+ float32obj1, ok := obj1.(float32)
+ if !ok {
+ float32obj1 = obj1Value.Convert(float32Type).Interface().(float32)
+ }
+ float32obj2, ok := obj2.(float32)
+ if !ok {
+ float32obj2 = obj2Value.Convert(float32Type).Interface().(float32)
+ }
+ if float32obj1 > float32obj2 {
+ return compareGreater, true
+ }
+ if float32obj1 == float32obj2 {
+ return compareEqual, true
+ }
+ if float32obj1 < float32obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Float64:
+ {
+ float64obj1, ok := obj1.(float64)
+ if !ok {
+ float64obj1 = obj1Value.Convert(float64Type).Interface().(float64)
+ }
+ float64obj2, ok := obj2.(float64)
+ if !ok {
+ float64obj2 = obj2Value.Convert(float64Type).Interface().(float64)
+ }
+ if float64obj1 > float64obj2 {
+ return compareGreater, true
+ }
+ if float64obj1 == float64obj2 {
+ return compareEqual, true
+ }
+ if float64obj1 < float64obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.String:
+ {
+ stringobj1, ok := obj1.(string)
+ if !ok {
+ stringobj1 = obj1Value.Convert(stringType).Interface().(string)
+ }
+ stringobj2, ok := obj2.(string)
+ if !ok {
+ stringobj2 = obj2Value.Convert(stringType).Interface().(string)
+ }
+ if stringobj1 > stringobj2 {
+ return compareGreater, true
+ }
+ if stringobj1 == stringobj2 {
+ return compareEqual, true
+ }
+ if stringobj1 < stringobj2 {
+ return compareLess, true
+ }
+ }
+ }
+
+ return compareEqual, false
+}
+
+// Greater asserts that the first element is greater than the second
+//
+// assert.Greater(t, 2, 1)
+// assert.Greater(t, float64(2), float64(1))
+// assert.Greater(t, "b", "a")
+func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+}
+
+// GreaterOrEqual asserts that the first element is greater than or equal to the second
+//
+// assert.GreaterOrEqual(t, 2, 1)
+// assert.GreaterOrEqual(t, 2, 2)
+// assert.GreaterOrEqual(t, "b", "a")
+// assert.GreaterOrEqual(t, "b", "b")
+func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+}
+
+// Less asserts that the first element is less than the second
+//
+// assert.Less(t, 1, 2)
+// assert.Less(t, float64(1), float64(2))
+// assert.Less(t, "a", "b")
+func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+}
+
+// LessOrEqual asserts that the first element is less than or equal to the second
+//
+// assert.LessOrEqual(t, 1, 2)
+// assert.LessOrEqual(t, 2, 2)
+// assert.LessOrEqual(t, "a", "b")
+// assert.LessOrEqual(t, "b", "b")
+func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+}
+
+// Positive asserts that the specified element is positive
+//
+// assert.Positive(t, 1)
+// assert.Positive(t, 1.23)
+func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+ zero := reflect.Zero(reflect.TypeOf(e))
+ return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs)
+}
+
+// Negative asserts that the specified element is negative
+//
+// assert.Negative(t, -1)
+// assert.Negative(t, -1.23)
+func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+ zero := reflect.Zero(reflect.TypeOf(e))
+ return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs)
+}
+
+func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ e1Kind := reflect.ValueOf(e1).Kind()
+ e2Kind := reflect.ValueOf(e2).Kind()
+ if e1Kind != e2Kind {
+ return Fail(t, "Elements should be the same type", msgAndArgs...)
+ }
+
+ compareResult, isComparable := compare(e1, e2, e1Kind)
+ if !isComparable {
+ return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+ }
+
+ if !containsValue(allowedComparesResults, compareResult) {
+ return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
+ }
+
+ return true
+}
+
+func containsValue(values []CompareType, value CompareType) bool {
+ for _, v := range values {
+ if v == value {
+ return true
+ }
+ }
+
+ return false
+}
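
For reference, the additions above close out the new assertion_compare.go: compare normalizes both operands, compareTwoValues requires both arguments to share the same reflect.Kind before comparing, and containsValue checks the comparison result against the allowed outcomes. A minimal usage sketch for the new ordering assertions, assuming the testify v1.7.0 API vendored by this diff (test and package names are illustrative):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrderingAssertions(t *testing.T) {
	// Operands must share the same reflect.Kind, otherwise the
	// assertion fails with "Elements should be the same type".
	assert.Greater(t, int64(2), int64(1))
	assert.LessOrEqual(t, "a", "a")

	// Positive and Negative compare the value against the zero
	// value of its own type, built via reflect.Zero.
	assert.Positive(t, 1.23)
	assert.Negative(t, -1)
}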
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index bf89ecd21f7..4dfd1229a86 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -93,7 +93,7 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
-// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123))
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -114,6 +114,24 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
return Error(t, err, append([]interface{}{msg}, args...)...)
}
+// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
+// ErrorIsf asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
// Eventuallyf asserts that the given condition will be met in waitFor time,
// periodically checking the target function each tick.
//
@@ -127,7 +145,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick
// Exactlyf asserts that two objects are equal in value and type.
//
-// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123))
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -173,7 +191,7 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool
// Greaterf asserts that the first element is greater than the second
//
// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
-// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1))
+// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -225,7 +243,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u
//
// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -237,7 +255,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string,
//
// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -245,6 +263,18 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri
return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
}
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...)
+}
+
// HTTPSuccessf asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
@@ -259,7 +289,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin
// Implementsf asserts that an object is implemented by the specified interface.
//
-// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject))
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -309,6 +339,54 @@ func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsil
return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
}
+// IsDecreasingf asserts that the collection is decreasing
+//
+// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
+// assert.IsDecreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsDecreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsIncreasingf asserts that the collection is increasing
+//
+// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
+// assert.IsIncreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsIncreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsNonDecreasingf asserts that the collection is not decreasing
+//
+// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
+// assert.IsNonDecreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsNonIncreasingf asserts that the collection is not increasing
+//
+// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
+// assert.IsNonIncreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
// IsTypef asserts that the specified objects are of the same type.
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -341,7 +419,7 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf
// Lessf asserts that the first element is less than the second
//
// assert.Lessf(t, 1, 2, "error message %s", "formatted")
-// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2))
+// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
// assert.Lessf(t, "a", "b", "error message %s", "formatted")
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -363,6 +441,17 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args .
return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
}
+// Negativef asserts that the specified element is negative
+//
+// assert.Negativef(t, -1, "error message %s", "formatted")
+// assert.Negativef(t, -1.23, "error message %s", "formatted")
+func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Negative(t, e, append([]interface{}{msg}, args...)...)
+}
+
// Neverf asserts that the given condition isn't satisfied within waitFor time,
// periodically checking the target function each tick.
//
@@ -454,6 +543,25 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string,
return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
}
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
+func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
// NotNilf asserts that the specified object is not nil.
//
// assert.NotNilf(t, err, "error message %s", "formatted")
@@ -476,7 +584,7 @@ func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bo
// NotRegexpf asserts that a specified regexp does not match a string.
//
-// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting")
+// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -550,9 +658,20 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str
return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
}
+// Positivef asserts that the specified element is positive
+//
+// assert.Positivef(t, 1, "error message %s", "formatted")
+// assert.Positivef(t, 1.23, "error message %s", "formatted")
+func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Positive(t, e, append([]interface{}{msg}, args...)...)
+}
+
// Regexpf asserts that a specified regexp matches a string.
//
-// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting")
+// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
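
The corrections in assertion_format.go all fix the same generated-doc bug: format arguments had been spliced into the middle of an operand expression. A hedged sketch of how the f-variants are actually called (error value and message strings are illustrative):

package example_test

import (
	"fmt"
	"io"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFormattedVariants(t *testing.T) {
	err := fmt.Errorf("read failed: %w", io.EOF)

	// The msg/args pair always trails the operands; it is never
	// part of the operand expressions themselves.
	assert.Greaterf(t, 2, 1, "error message %s", "formatted")
	assert.ErrorIsf(t, err, io.EOF, "unexpected error for %s", "input.txt")
}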
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 75ecdcaa2f3..25337a6f07e 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -169,7 +169,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
-// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123))
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -204,6 +204,42 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
return Error(a.t, err, msgAndArgs...)
}
+// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorAs(a.t, err, target, msgAndArgs...)
+}
+
+// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorAsf(a.t, err, target, msg, args...)
+}
+
+// ErrorIs asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorIs(a.t, err, target, msgAndArgs...)
+}
+
+// ErrorIsf asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorIsf(a.t, err, target, msg, args...)
+}
+
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
@@ -251,7 +287,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg
// Exactlyf asserts that two objects are equal in value and type.
//
-// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123))
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -370,7 +406,7 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string,
// Greaterf asserts that the first element is greater than the second
//
// a.Greaterf(2, 1, "error message %s", "formatted")
-// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1))
+// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
// a.Greaterf("b", "a", "error message %s", "formatted")
func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -447,7 +483,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri
//
// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -471,7 +507,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s
//
// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -479,6 +515,30 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url
return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
}
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...)
+}
+
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...)
+}
+
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
@@ -515,7 +575,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{},
// Implementsf asserts that an object is implemented by the specified interface.
//
-// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject))
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -607,6 +667,102 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo
return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
}
+// IsDecreasing asserts that the collection is decreasing
+//
+// a.IsDecreasing([]int{2, 1, 0})
+// a.IsDecreasing([]float64{2, 1})
+// a.IsDecreasing([]string{"b", "a"})
+func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsDecreasing(a.t, object, msgAndArgs...)
+}
+
+// IsDecreasingf asserts that the collection is decreasing
+//
+// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
+// a.IsDecreasingf([]float64{2, 1}, "error message %s", "formatted")
+// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
+func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsDecreasingf(a.t, object, msg, args...)
+}
+
+// IsIncreasing asserts that the collection is increasing
+//
+// a.IsIncreasing([]int{1, 2, 3})
+// a.IsIncreasing([]float64{1, 2})
+// a.IsIncreasing([]string{"a", "b"})
+func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsIncreasing(a.t, object, msgAndArgs...)
+}
+
+// IsIncreasingf asserts that the collection is increasing
+//
+// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
+// a.IsIncreasingf([]float64{1, 2}, "error message %s", "formatted")
+// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
+func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsIncreasingf(a.t, object, msg, args...)
+}
+
+// IsNonDecreasing asserts that the collection is not decreasing
+//
+// a.IsNonDecreasing([]int{1, 1, 2})
+// a.IsNonDecreasing([]float64{1, 2})
+// a.IsNonDecreasing([]string{"a", "b"})
+func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonDecreasing(a.t, object, msgAndArgs...)
+}
+
+// IsNonDecreasingf asserts that the collection is not decreasing
+//
+// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
+// a.IsNonDecreasingf([]float64{1, 2}, "error message %s", "formatted")
+// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
+func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonDecreasingf(a.t, object, msg, args...)
+}
+
+// IsNonIncreasing asserts that the collection is not increasing
+//
+// a.IsNonIncreasing([]int{2, 1, 1})
+// a.IsNonIncreasing([]float64{2, 1})
+// a.IsNonIncreasing([]string{"b", "a"})
+func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonIncreasing(a.t, object, msgAndArgs...)
+}
+
+// IsNonIncreasingf asserts that the collection is not increasing
+//
+// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
+// a.IsNonIncreasingf([]float64{2, 1}, "error message %s", "formatted")
+// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
+func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonIncreasingf(a.t, object, msg, args...)
+}
+
// IsType asserts that the specified objects are of the same type.
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -706,7 +862,7 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar
// Lessf asserts that the first element is less than the second
//
// a.Lessf(1, 2, "error message %s", "formatted")
-// a.Lessf(float64(1, "error message %s", "formatted"), float64(2))
+// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
// a.Lessf("a", "b", "error message %s", "formatted")
func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -715,6 +871,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i
return Lessf(a.t, e1, e2, msg, args...)
}
+// Negative asserts that the specified element is negative
+//
+// a.Negative(-1)
+// a.Negative(-1.23)
+func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Negative(a.t, e, msgAndArgs...)
+}
+
+// Negativef asserts that the specified element is negative
+//
+// a.Negativef(-1, "error message %s", "formatted")
+// a.Negativef(-1.23, "error message %s", "formatted")
+func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Negativef(a.t, e, msg, args...)
+}
+
// Never asserts that the given condition isn't satisfied within waitFor time,
// periodically checking the target function each tick.
//
@@ -884,6 +1062,26 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr
return NotEqual(a.t, expected, actual, msgAndArgs...)
}
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValues(obj1, obj2)
+func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
+func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValuesf(a.t, expected, actual, msg, args...)
+}
+
// NotEqualf asserts that the specified values are NOT equal.
//
// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
@@ -897,6 +1095,24 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str
return NotEqualf(a.t, expected, actual, msg, args...)
}
+// NotErrorIs asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorIs(a.t, err, target, msgAndArgs...)
+}
+
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorIsf(a.t, err, target, msg, args...)
+}
+
// NotNil asserts that the specified object is not nil.
//
// a.NotNil(err)
@@ -950,7 +1166,7 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in
// NotRegexpf asserts that a specified regexp does not match a string.
//
-// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting")
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -1089,6 +1305,28 @@ func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) b
return Panicsf(a.t, f, msg, args...)
}
+// Positive asserts that the specified element is positive
+//
+// a.Positive(1)
+// a.Positive(1.23)
+func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Positive(a.t, e, msgAndArgs...)
+}
+
+// Positivef asserts that the specified element is positive
+//
+// a.Positivef(1, "error message %s", "formatted")
+// a.Positivef(1.23, "error message %s", "formatted")
+func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Positivef(a.t, e, msg, args...)
+}
+
// Regexp asserts that a specified regexp matches a string.
//
// a.Regexp(regexp.MustCompile("start"), "it's starting")
@@ -1102,7 +1340,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter
// Regexpf asserts that a specified regexp matches a string.
//
-// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting")
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
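
assertion_forward.go mirrors every package-level assertion as a method on *Assertions, so the new helpers are also reachable through assert.New. A short sketch under that assumption (values are illustrative):

package example_test

import (
	"fmt"
	"io"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestForwardedAssertions(t *testing.T) {
	a := assert.New(t)

	// Each method forwards to the package-level function, so t is
	// captured once instead of being passed to every call.
	a.Positive(1.5)
	a.IsIncreasing([]int{1, 2, 3})
	a.NotErrorIs(fmt.Errorf("boom"), io.EOF)
}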
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
index 15a486ca6e2..1c3b47182a7 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -5,305 +5,77 @@ import (
"reflect"
)
-func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
- switch kind {
- case reflect.Int:
- {
- intobj1 := obj1.(int)
- intobj2 := obj2.(int)
- if intobj1 > intobj2 {
- return -1, true
- }
- if intobj1 == intobj2 {
- return 0, true
- }
- if intobj1 < intobj2 {
- return 1, true
- }
- }
- case reflect.Int8:
- {
- int8obj1 := obj1.(int8)
- int8obj2 := obj2.(int8)
- if int8obj1 > int8obj2 {
- return -1, true
- }
- if int8obj1 == int8obj2 {
- return 0, true
- }
- if int8obj1 < int8obj2 {
- return 1, true
- }
- }
- case reflect.Int16:
- {
- int16obj1 := obj1.(int16)
- int16obj2 := obj2.(int16)
- if int16obj1 > int16obj2 {
- return -1, true
- }
- if int16obj1 == int16obj2 {
- return 0, true
- }
- if int16obj1 < int16obj2 {
- return 1, true
- }
- }
- case reflect.Int32:
- {
- int32obj1 := obj1.(int32)
- int32obj2 := obj2.(int32)
- if int32obj1 > int32obj2 {
- return -1, true
- }
- if int32obj1 == int32obj2 {
- return 0, true
- }
- if int32obj1 < int32obj2 {
- return 1, true
- }
- }
- case reflect.Int64:
- {
- int64obj1 := obj1.(int64)
- int64obj2 := obj2.(int64)
- if int64obj1 > int64obj2 {
- return -1, true
- }
- if int64obj1 == int64obj2 {
- return 0, true
- }
- if int64obj1 < int64obj2 {
- return 1, true
- }
- }
- case reflect.Uint:
- {
- uintobj1 := obj1.(uint)
- uintobj2 := obj2.(uint)
- if uintobj1 > uintobj2 {
- return -1, true
- }
- if uintobj1 == uintobj2 {
- return 0, true
- }
- if uintobj1 < uintobj2 {
- return 1, true
- }
- }
- case reflect.Uint8:
- {
- uint8obj1 := obj1.(uint8)
- uint8obj2 := obj2.(uint8)
- if uint8obj1 > uint8obj2 {
- return -1, true
- }
- if uint8obj1 == uint8obj2 {
- return 0, true
- }
- if uint8obj1 < uint8obj2 {
- return 1, true
- }
- }
- case reflect.Uint16:
- {
- uint16obj1 := obj1.(uint16)
- uint16obj2 := obj2.(uint16)
- if uint16obj1 > uint16obj2 {
- return -1, true
- }
- if uint16obj1 == uint16obj2 {
- return 0, true
- }
- if uint16obj1 < uint16obj2 {
- return 1, true
- }
- }
- case reflect.Uint32:
- {
- uint32obj1 := obj1.(uint32)
- uint32obj2 := obj2.(uint32)
- if uint32obj1 > uint32obj2 {
- return -1, true
- }
- if uint32obj1 == uint32obj2 {
- return 0, true
- }
- if uint32obj1 < uint32obj2 {
- return 1, true
- }
- }
- case reflect.Uint64:
- {
- uint64obj1 := obj1.(uint64)
- uint64obj2 := obj2.(uint64)
- if uint64obj1 > uint64obj2 {
- return -1, true
- }
- if uint64obj1 == uint64obj2 {
- return 0, true
- }
- if uint64obj1 < uint64obj2 {
- return 1, true
- }
- }
- case reflect.Float32:
- {
- float32obj1 := obj1.(float32)
- float32obj2 := obj2.(float32)
- if float32obj1 > float32obj2 {
- return -1, true
- }
- if float32obj1 == float32obj2 {
- return 0, true
- }
- if float32obj1 < float32obj2 {
- return 1, true
- }
- }
- case reflect.Float64:
- {
- float64obj1 := obj1.(float64)
- float64obj2 := obj2.(float64)
- if float64obj1 > float64obj2 {
- return -1, true
- }
- if float64obj1 == float64obj2 {
- return 0, true
- }
- if float64obj1 < float64obj2 {
- return 1, true
- }
- }
- case reflect.String:
- {
- stringobj1 := obj1.(string)
- stringobj2 := obj2.(string)
- if stringobj1 > stringobj2 {
- return -1, true
- }
- if stringobj1 == stringobj2 {
- return 0, true
- }
- if stringobj1 < stringobj2 {
- return 1, true
- }
- }
- }
-
- return 0, false
-}
-
-// Greater asserts that the first element is greater than the second
-//
-// assert.Greater(t, 2, 1)
-// assert.Greater(t, float64(2), float64(1))
-// assert.Greater(t, "b", "a")
-func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
+// isOrdered checks that the elements of the collection are ordered according to allowedComparesResults.
+func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+ objKind := reflect.TypeOf(object).Kind()
+ if objKind != reflect.Slice && objKind != reflect.Array {
+ return false
}
- e1Kind := reflect.ValueOf(e1).Kind()
- e2Kind := reflect.ValueOf(e2).Kind()
- if e1Kind != e2Kind {
- return Fail(t, "Elements should be the same type", msgAndArgs...)
- }
+ objValue := reflect.ValueOf(object)
+ objLen := objValue.Len()
- res, isComparable := compare(e1, e2, e1Kind)
- if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+ if objLen <= 1 {
+ return true
}
- if res != -1 {
- return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...)
- }
+ value := objValue.Index(0)
+ valueInterface := value.Interface()
+ firstValueKind := value.Kind()
- return true
-}
+ for i := 1; i < objLen; i++ {
+ prevValue := value
+ prevValueInterface := valueInterface
-// GreaterOrEqual asserts that the first element is greater than or equal to the second
-//
-// assert.GreaterOrEqual(t, 2, 1)
-// assert.GreaterOrEqual(t, 2, 2)
-// assert.GreaterOrEqual(t, "b", "a")
-// assert.GreaterOrEqual(t, "b", "b")
-func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
+ value = objValue.Index(i)
+ valueInterface = value.Interface()
- e1Kind := reflect.ValueOf(e1).Kind()
- e2Kind := reflect.ValueOf(e2).Kind()
- if e1Kind != e2Kind {
- return Fail(t, "Elements should be the same type", msgAndArgs...)
- }
+ compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
- res, isComparable := compare(e1, e2, e1Kind)
- if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
- }
+ if !isComparable {
+ return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
+ }
- if res != -1 && res != 0 {
- return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...)
+ if !containsValue(allowedComparesResults, compareResult) {
+ return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...)
+ }
}
return true
}
-// Less asserts that the first element is less than the second
+// IsIncreasing asserts that the collection is increasing
//
-// assert.Less(t, 1, 2)
-// assert.Less(t, float64(1), float64(2))
-// assert.Less(t, "a", "b")
-func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- e1Kind := reflect.ValueOf(e1).Kind()
- e2Kind := reflect.ValueOf(e2).Kind()
- if e1Kind != e2Kind {
- return Fail(t, "Elements should be the same type", msgAndArgs...)
- }
-
- res, isComparable := compare(e1, e2, e1Kind)
- if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
- }
-
- if res != 1 {
- return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...)
- }
-
- return true
+// assert.IsIncreasing(t, []int{1, 2, 3})
+// assert.IsIncreasing(t, []float64{1, 2})
+// assert.IsIncreasing(t, []string{"a", "b"})
+func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
}
-// LessOrEqual asserts that the first element is less than or equal to the second
+// IsNonIncreasing asserts that the collection is not increasing
//
-// assert.LessOrEqual(t, 1, 2)
-// assert.LessOrEqual(t, 2, 2)
-// assert.LessOrEqual(t, "a", "b")
-// assert.LessOrEqual(t, "b", "b")
-func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- e1Kind := reflect.ValueOf(e1).Kind()
- e2Kind := reflect.ValueOf(e2).Kind()
- if e1Kind != e2Kind {
- return Fail(t, "Elements should be the same type", msgAndArgs...)
- }
-
- res, isComparable := compare(e1, e2, e1Kind)
- if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
- }
+// assert.IsNonIncreasing(t, []int{2, 1, 1})
+// assert.IsNonIncreasing(t, []float64{2, 1})
+// assert.IsNonIncreasing(t, []string{"b", "a"})
+func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+}
- if res != 1 && res != 0 {
- return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...)
- }
+// IsDecreasing asserts that the collection is decreasing
+//
+// assert.IsDecreasing(t, []int{2, 1, 0})
+// assert.IsDecreasing(t, []float64{2, 1})
+// assert.IsDecreasing(t, []string{"b", "a"})
+func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+}
- return true
+// IsNonDecreasing asserts that the collection is not decreasing
+//
+// assert.IsNonDecreasing(t, []int{1, 1, 2})
+// assert.IsNonDecreasing(t, []float64{1, 2})
+// assert.IsNonDecreasing(t, []string{"a", "b"})
+func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
}
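
The rewrite above collapses four near-identical ordering assertions into a single isOrdered walk over adjacent pairs: one out-of-order pair fails the whole assertion, and collections of length zero or one pass trivially. A hedged sketch of the pairwise semantics (slices are illustrative):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrderedCollections(t *testing.T) {
	// Strict ordering: every adjacent pair must compare as less.
	assert.IsIncreasing(t, []int{1, 2, 3})

	// Non-strict ordering: equal neighbours are allowed.
	assert.IsNonDecreasing(t, []int{1, 1, 2})

	// Length <= 1 is trivially ordered under every predicate.
	assert.IsDecreasing(t, []string{"only"})
}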
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index bdd81389a97..bcac4401f57 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -19,7 +19,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/pmezard/go-difflib/difflib"
- yaml "gopkg.in/yaml.v2"
+ yaml "gopkg.in/yaml.v3"
)
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
@@ -45,7 +45,7 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
// for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
-// Comparison a custom function that returns true on success and false on failure
+// Comparison is a custom function that returns true on success and false on failure
type Comparison func() (success bool)
/*
@@ -104,11 +104,11 @@ the problem actually occurred in calling code.*/
// failed.
func CallerInfo() []string {
- pc := uintptr(0)
- file := ""
- line := 0
- ok := false
- name := ""
+ var pc uintptr
+ var ok bool
+ var file string
+ var line int
+ var name string
callers := []string{}
for i := 0; ; i++ {
@@ -172,8 +172,8 @@ func isTest(name, prefix string) bool {
if len(name) == len(prefix) { // "Test" is ok
return true
}
- rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
- return !unicode.IsLower(rune)
+ r, _ := utf8.DecodeRuneInString(name[len(prefix):])
+ return !unicode.IsLower(r)
}
func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
@@ -429,14 +429,27 @@ func samePointers(first, second interface{}) bool {
// to a type conversion in the Go grammar.
func formatUnequalValues(expected, actual interface{}) (e string, a string) {
if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
- return fmt.Sprintf("%T(%#v)", expected, expected),
- fmt.Sprintf("%T(%#v)", actual, actual)
+ return fmt.Sprintf("%T(%s)", expected, truncatingFormat(expected)),
+ fmt.Sprintf("%T(%s)", actual, truncatingFormat(actual))
}
switch expected.(type) {
case time.Duration:
return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual)
}
- return fmt.Sprintf("%#v", expected), fmt.Sprintf("%#v", actual)
+ return truncatingFormat(expected), truncatingFormat(actual)
+}
+
+// truncatingFormat formats the data and truncates it if it's too long.
+//
+// This helps keep formatted error message lines from exceeding the
+// bufio.MaxScanTokenSize max line length that the go testing framework imposes.
+func truncatingFormat(data interface{}) string {
+ value := fmt.Sprintf("%#v", data)
+ max := bufio.MaxScanTokenSize - 100 // Leave some room for the type info too, if needed.
+ if len(value) > max {
+ value = value[0:max] + "<... truncated>"
+ }
+ return value
}
// EqualValues asserts that two objects are equal or convertible to the same types
@@ -483,12 +496,12 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
//
// assert.NotNil(t, err)
func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
if !isNil(object) {
return true
}
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, "Expected value not to be nil.", msgAndArgs...)
}
@@ -529,12 +542,12 @@ func isNil(object interface{}) bool {
//
// assert.Nil(t, err)
func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
if isNil(object) {
return true
}
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
}
@@ -571,12 +584,11 @@ func isEmpty(object interface{}) bool {
//
// assert.Empty(t, obj)
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
pass := isEmpty(object)
if !pass {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
}
@@ -591,12 +603,11 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
// assert.Equal(t, "two", obj[1])
// }
func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
pass := !isEmpty(object)
if !pass {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
}
@@ -639,16 +650,10 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
//
// assert.True(t, myBool)
func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if h, ok := t.(interface {
- Helper()
- }); ok {
- h.Helper()
- }
-
- if value != true {
+ if !value {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, "Should be true", msgAndArgs...)
}
@@ -660,11 +665,10 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
//
// assert.False(t, myBool)
func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- if value != false {
+ if value {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, "Should be false", msgAndArgs...)
}
@@ -695,6 +699,21 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{
}
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValues(t, obj1, obj2)
+func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if ObjectsAreEqualValues(expected, actual) {
+ return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+ }
+
+ return true
+}
+
// containsElement tries to loop over the list, checking if the list includes the element.
// return (false, false) if impossible.
// return (true, false) if element was not found.
@@ -747,10 +766,10 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
ok, found := includeElement(s, contains)
if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
}
if !found {
- return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", s, contains), msgAndArgs...)
}
return true
@@ -881,27 +900,39 @@ func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface
return true
}
- aKind := reflect.TypeOf(listA).Kind()
- bKind := reflect.TypeOf(listB).Kind()
+ if !isList(t, listA, msgAndArgs...) || !isList(t, listB, msgAndArgs...) {
+ return false
+ }
+
+ extraA, extraB := diffLists(listA, listB)
- if aKind != reflect.Array && aKind != reflect.Slice {
- return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...)
+ if len(extraA) == 0 && len(extraB) == 0 {
+ return true
}
- if bKind != reflect.Array && bKind != reflect.Slice {
- return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...)
+ return Fail(t, formatListDiff(listA, listB, extraA, extraB), msgAndArgs...)
+}
+
+// isList checks that the provided value is array or slice.
+func isList(t TestingT, list interface{}, msgAndArgs ...interface{}) (ok bool) {
+ kind := reflect.TypeOf(list).Kind()
+ if kind != reflect.Array && kind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s, expecting array or slice", list, kind),
+ msgAndArgs...)
}
+ return true
+}
+// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B.
+// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and
+// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored.
+func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) {
aValue := reflect.ValueOf(listA)
bValue := reflect.ValueOf(listB)
aLen := aValue.Len()
bLen := bValue.Len()
- if aLen != bLen {
- return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...)
- }
-
// Mark indexes in bValue that we already used
visited := make([]bool, bLen)
for i := 0; i < aLen; i++ {
@@ -918,11 +949,38 @@ func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface
}
}
if !found {
- return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...)
+ extraA = append(extraA, element)
}
}
- return true
+ for j := 0; j < bLen; j++ {
+ if visited[j] {
+ continue
+ }
+ extraB = append(extraB, bValue.Index(j).Interface())
+ }
+
+ return
+}
+
+func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) string {
+ var msg bytes.Buffer
+
+ msg.WriteString("elements differ")
+ if len(extraA) > 0 {
+ msg.WriteString("\n\nextra elements in list A:\n")
+ msg.WriteString(spewConfig.Sdump(extraA))
+ }
+ if len(extraB) > 0 {
+ msg.WriteString("\n\nextra elements in list B:\n")
+ msg.WriteString(spewConfig.Sdump(extraB))
+ }
+ msg.WriteString("\n\nlistA:\n")
+ msg.WriteString(spewConfig.Sdump(listA))
+ msg.WriteString("\n\nlistB:\n")
+ msg.WriteString(spewConfig.Sdump(listB))
+
+ return msg.String()
}
// Condition uses a Comparison to assert a complex condition.
@@ -1058,6 +1116,8 @@ func toFloat(x interface{}) (float64, bool) {
xok := true
switch xn := x.(type) {
+ case uint:
+ xf = float64(xn)
case uint8:
xf = float64(xn)
case uint16:
@@ -1079,7 +1139,7 @@ func toFloat(x interface{}) (float64, bool) {
case float32:
xf = float64(xn)
case float64:
- xf = float64(xn)
+ xf = xn
case time.Duration:
xf = float64(xn)
default:
@@ -1193,6 +1253,9 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
if !aok {
return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
}
+ if math.IsNaN(af) {
+ return 0, errors.New("expected value must not be NaN")
+ }
if af == 0 {
return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
}
@@ -1200,6 +1263,9 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
if !bok {
return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
}
+ if math.IsNaN(bf) {
+ return 0, errors.New("actual value must not be NaN")
+ }
return math.Abs(af-bf) / math.Abs(af), nil
}
@@ -1209,6 +1275,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
if h, ok := t.(tHelper); ok {
h.Helper()
}
+ if math.IsNaN(epsilon) {
+ return Fail(t, "epsilon must not be NaN")
+ }
actualEpsilon, err := calcRelativeError(expected, actual)
if err != nil {
return Fail(t, err.Error(), msgAndArgs...)
@@ -1256,10 +1325,10 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
// assert.Equal(t, expectedObj, actualObj)
// }
func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
if err != nil {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
}
@@ -1273,11 +1342,10 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
// assert.Equal(t, expectedError, err)
// }
func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
if err == nil {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, "An error is expected but got nil.", msgAndArgs...)
}
@@ -1553,6 +1621,8 @@ var spewConfig = spew.ConfigState{
DisablePointerAddresses: true,
DisableCapacities: true,
SortKeys: true,
+ DisableMethods: true,
+ MaxDepth: 10,
}
type tHelper interface {
@@ -1624,3 +1694,81 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D
}
}
}
+
+// ErrorIs asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if errors.Is(err, target) {
+ return true
+ }
+
+ var expectedText string
+ if target != nil {
+ expectedText = target.Error()
+ }
+
+ chain := buildErrorChainString(err)
+
+ return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
+ "expected: %q\n"+
+ "in chain: %s", expectedText, chain,
+ ), msgAndArgs...)
+}
+
+// NotErrorIs asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !errors.Is(err, target) {
+ return true
+ }
+
+ var expectedText string
+ if target != nil {
+ expectedText = target.Error()
+ }
+
+ chain := buildErrorChainString(err)
+
+ return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
+ "found: %q\n"+
+ "in chain: %s", expectedText, chain,
+ ), msgAndArgs...)
+}
+
+// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if errors.As(err, target) {
+ return true
+ }
+
+ chain := buildErrorChainString(err)
+
+ return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
+ "expected: %q\n"+
+ "in chain: %s", target, chain,
+ ), msgAndArgs...)
+}
+
+func buildErrorChainString(err error) string {
+ if err == nil {
+ return ""
+ }
+
+ e := errors.Unwrap(err)
+ chain := fmt.Sprintf("%q", err.Error())
+ for e != nil {
+ chain += fmt.Sprintf("\n\t%q", e.Error())
+ e = errors.Unwrap(e)
+ }
+ return chain
+}
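
Alongside the error-chain helpers, assertions.go also gains NotEqualValues and output truncation via truncatingFormat. A sketch of the errors.Is/errors.As wrappers using a hypothetical error type (pathError and the path value are illustrative, not part of the diff):

package example_test

import (
	"fmt"
	"io"
	"testing"

	"github.com/stretchr/testify/assert"
)

// pathError is a made-up error type for this sketch.
type pathError struct{ path string }

func (e *pathError) Error() string { return "bad path: " + e.path }

func TestErrorChainAssertions(t *testing.T) {
	wrapped := fmt.Errorf("loading config: %w", &pathError{path: "/tmp/k6.json"})

	// ErrorAs unwraps the chain and, on a match, stores the match
	// in the target pointer, mirroring errors.As.
	var pe *pathError
	if assert.ErrorAs(t, wrapped, &pe) {
		assert.Equal(t, "/tmp/k6.json", pe.path)
	}

	// NotErrorIs passes because io.EOF is nowhere in the chain.
	assert.NotErrorIs(t, wrapped, io.EOF)
}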
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
index df46fa777ac..4ed341dd289 100644
--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -33,7 +33,6 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
- return false
}
isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
@@ -56,7 +55,6 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
- return false
}
isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
@@ -79,7 +77,6 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
- return false
}
isErrorCode := code >= http.StatusBadRequest
@@ -90,6 +87,28 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
return isErrorCode
}
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ }
+
+ successful := code == statuscode
+ if !successful {
+ Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
+ }
+
+ return successful
+}
+
// HTTPBody is a helper that returns the HTTP body of the response. It returns
// an empty string if building a new request fails.
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
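
The new HTTPStatusCode complements the range-based helpers (HTTPSuccess, HTTPRedirect, HTTPError) by asserting on one exact status code. A minimal sketch (handler and route are illustrative):

package example_test

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestHTTPStatusCode(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNotImplemented)
	}

	// Passes only when the handler answers with exactly 501.
	assert.HTTPStatusCode(t, handler, "GET", "/notImplemented", nil, http.StatusNotImplemented)
}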
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
index cf6c7b56645..51820df2e67 100644
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ b/vendor/github.com/stretchr/testify/require/require.go
@@ -212,7 +212,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
-// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123))
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -256,6 +256,54 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) {
t.FailNow()
}
+// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.ErrorAs(t, err, target, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.ErrorAsf(t, err, target, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
+// ErrorIs asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.ErrorIs(t, err, target, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// ErrorIsf asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.ErrorIsf(t, err, target, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
@@ -315,7 +363,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ..
// Exactlyf asserts that two objects are equal in value and type.
//
-// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123))
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -470,7 +518,7 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg
// Greaterf asserts that the first element is greater than the second
//
// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
-// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1))
+// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -565,7 +613,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string,
//
// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -595,7 +643,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin
//
// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -606,6 +654,36 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri
t.FailNow()
}
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.HTTPStatusCode(t, handler, method, url, values, statuscode, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.HTTPStatusCodef(t, handler, method, url, values, statuscode, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
@@ -651,7 +729,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg
// Implementsf asserts that an object is implemented by the specified interface.
//
-// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject))
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -776,6 +854,126 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl
t.FailNow()
}
+// IsDecreasing asserts that the collection is decreasing
+//
+// assert.IsDecreasing(t, []int{2, 1, 0})
+// assert.IsDecreasing(t, []float64{2, 1})
+// assert.IsDecreasing(t, []string{"b", "a"})
+func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.IsDecreasing(t, object, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// IsDecreasingf asserts that the collection is decreasing
+//
+// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
+// assert.IsDecreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.IsDecreasingf(t, object, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
+// IsIncreasing asserts that the collection is increasing
+//
+// assert.IsIncreasing(t, []int{1, 2, 3})
+// assert.IsIncreasing(t, []float64{1, 2})
+// assert.IsIncreasing(t, []string{"a", "b"})
+func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.IsIncreasing(t, object, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// IsIncreasingf asserts that the collection is increasing
+//
+// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
+// assert.IsIncreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.IsIncreasingf(t, object, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
+// IsNonDecreasing asserts that the collection is not decreasing
+//
+// assert.IsNonDecreasing(t, []int{1, 1, 2})
+// assert.IsNonDecreasing(t, []float64{1, 2})
+// assert.IsNonDecreasing(t, []string{"a", "b"})
+func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.IsNonDecreasing(t, object, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// IsNonDecreasingf asserts that the collection is not decreasing
+//
+// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
+// assert.IsNonDecreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.IsNonDecreasingf(t, object, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
+// IsNonIncreasing asserts that the collection is not increasing
+//
+// assert.IsNonIncreasing(t, []int{2, 1, 1})
+// assert.IsNonIncreasing(t, []float64{2, 1})
+// assert.IsNonIncreasing(t, []string{"b", "a"})
+func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.IsNonIncreasing(t, object, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// IsNonIncreasingf asserts that the collection is not increasing
+//
+// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
+// assert.IsNonIncreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.IsNonIncreasingf(t, object, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
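
A usage sketch for the new ordering assertions (sample values are illustrative); strict variants reject equal neighbours while the Non* variants allow them:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestOrdering(t *testing.T) {
	require.IsIncreasing(t, []int{1, 2, 3})      // strictly increasing
	require.IsNonIncreasing(t, []int{3, 3, 1})   // equal neighbours allowed
	require.IsDecreasing(t, []float64{2.5, 1.0}) // floats and strings work too
}
```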
+
// IsType asserts that the specified objects are of the same type.
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -902,7 +1100,7 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args .
// Lessf asserts that the first element is less than the second
//
// assert.Lessf(t, 1, 2, "error message %s", "formatted")
-// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2))
+// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
// assert.Lessf(t, "a", "b", "error message %s", "formatted")
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -914,6 +1112,34 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter
t.FailNow()
}
+// Negative asserts that the specified element is negative
+//
+// assert.Negative(t, -1)
+// assert.Negative(t, -1.23)
+func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.Negative(t, e, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// Negativef asserts that the specified element is negative
+//
+// assert.Negativef(t, -1, "error message %s", "formatted")
+// assert.Negativef(t, -1.23, "error message %s", "formatted")
+func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.Negativef(t, e, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// Never asserts that the given condition is never satisfied within waitFor time,
// periodically checking the target function each tick.
//
@@ -1128,6 +1354,32 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs .
t.FailNow()
}
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValues(t, obj1, obj2)
+func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotEqualValues(t, expected, actual, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
+func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotEqualValuesf(t, expected, actual, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// NotEqualf asserts that the specified values are NOT equal.
//
// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
@@ -1144,6 +1396,30 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string,
t.FailNow()
}
+// NotErrorIs asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotErrorIs(t, err, target, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotErrorIsf(t, err, target, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// NotNil asserts that the specified object is not nil.
//
// assert.NotNil(t, err)
@@ -1212,7 +1488,7 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf
// NotRegexpf asserts that a specified regexp does not match a string.
//
-// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting")
+// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -1390,6 +1666,34 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}
t.FailNow()
}
+// Positive asserts that the specified element is positive
+//
+// assert.Positive(t, 1)
+// assert.Positive(t, 1.23)
+func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.Positive(t, e, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// Positivef asserts that the specified element is positive
+//
+// assert.Positivef(t, 1, "error message %s", "formatted")
+// assert.Positivef(t, 1.23, "error message %s", "formatted")
+func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.Positivef(t, e, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
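
And the sign assertions, for completeness (a hypothetical test; the variables are placeholders):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestSign(t *testing.T) {
	profit, loss := 42.0, -7.5
	require.Positive(t, profit)
	require.Negative(t, loss)
}
```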
+
// Regexp asserts that a specified regexp matches a string.
//
// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
@@ -1406,7 +1710,7 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface
// Regexpf asserts that a specified regexp matches a string.
//
-// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting")
+// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
index 5aac226df83..ed54a9d83f3 100644
--- a/vendor/github.com/stretchr/testify/require/require_forward.go
+++ b/vendor/github.com/stretchr/testify/require/require_forward.go
@@ -170,7 +170,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
-// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123))
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -205,6 +205,42 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
Error(a.t, err, msgAndArgs...)
}
+// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ ErrorAs(a.t, err, target, msgAndArgs...)
+}
+
+// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ ErrorAsf(a.t, err, target, msg, args...)
+}
+
+// ErrorIs asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ ErrorIs(a.t, err, target, msgAndArgs...)
+}
+
+// ErrorIsf asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ ErrorIsf(a.t, err, target, msg, args...)
+}
+
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
@@ -252,7 +288,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg
// Exactlyf asserts that two objects are equal in value and type.
//
-// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123))
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -371,7 +407,7 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string,
// Greaterf asserts that the first element is greater than the second
//
// a.Greaterf(2, 1, "error message %s", "formatted")
-// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1))
+// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
// a.Greaterf("b", "a", "error message %s", "formatted")
func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
@@ -448,7 +484,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri
//
// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -472,7 +508,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s
//
// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -480,6 +516,30 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url
HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
}
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...)
+}
+
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...)
+}
+
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
@@ -516,7 +576,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{},
// Implementsf asserts that an object is implemented by the specified interface.
//
-// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject))
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -608,6 +668,102 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo
InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
}
+// IsDecreasing asserts that the collection is decreasing
+//
+// a.IsDecreasing([]int{2, 1, 0})
+// a.IsDecreasing([]float64{2, 1})
+// a.IsDecreasing([]string{"b", "a"})
+func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ IsDecreasing(a.t, object, msgAndArgs...)
+}
+
+// IsDecreasingf asserts that the collection is decreasing
+//
+// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
+// a.IsDecreasingf([]float64{2, 1}, "error message %s", "formatted")
+// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
+func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ IsDecreasingf(a.t, object, msg, args...)
+}
+
+// IsIncreasing asserts that the collection is increasing
+//
+// a.IsIncreasing([]int{1, 2, 3})
+// a.IsIncreasing([]float64{1, 2})
+// a.IsIncreasing([]string{"a", "b"})
+func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ IsIncreasing(a.t, object, msgAndArgs...)
+}
+
+// IsIncreasingf asserts that the collection is increasing
+//
+// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
+// a.IsIncreasingf([]float64{1, 2}, "error message %s", "formatted")
+// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
+func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ IsIncreasingf(a.t, object, msg, args...)
+}
+
+// IsNonDecreasing asserts that the collection is not decreasing
+//
+// a.IsNonDecreasing([]int{1, 1, 2})
+// a.IsNonDecreasing([]float64{1, 2})
+// a.IsNonDecreasing([]string{"a", "b"})
+func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ IsNonDecreasing(a.t, object, msgAndArgs...)
+}
+
+// IsNonDecreasingf asserts that the collection is not decreasing
+//
+// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
+// a.IsNonDecreasingf([]float64{1, 2}, "error message %s", "formatted")
+// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
+func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ IsNonDecreasingf(a.t, object, msg, args...)
+}
+
+// IsNonIncreasing asserts that the collection is not increasing
+//
+// a.IsNonIncreasing([]int{2, 1, 1})
+// a.IsNonIncreasing([]float64{2, 1})
+// a.IsNonIncreasing([]string{"b", "a"})
+func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ IsNonIncreasing(a.t, object, msgAndArgs...)
+}
+
+// IsNonIncreasingf asserts that the collection is not increasing
+//
+// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
+// a.IsNonIncreasingf([]float64{2, 1}, "error message %s", "formatted")
+// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
+func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ IsNonIncreasingf(a.t, object, msg, args...)
+}
+
// IsType asserts that the specified objects are of the same type.
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
@@ -707,7 +863,7 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar
// Lessf asserts that the first element is less than the second
//
// a.Lessf(1, 2, "error message %s", "formatted")
-// a.Lessf(float64(1, "error message %s", "formatted"), float64(2))
+// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
// a.Lessf("a", "b", "error message %s", "formatted")
func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
@@ -716,6 +872,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i
Lessf(a.t, e1, e2, msg, args...)
}
+// Negative asserts that the specified element is negative
+//
+// a.Negative(-1)
+// a.Negative(-1.23)
+func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ Negative(a.t, e, msgAndArgs...)
+}
+
+// Negativef asserts that the specified element is negative
+//
+// a.Negativef(-1, "error message %s", "formatted")
+// a.Negativef(-1.23, "error message %s", "formatted")
+func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ Negativef(a.t, e, msg, args...)
+}
+
// Never asserts that the given condition is never satisfied within waitFor time,
// periodically checking the target function each tick.
//
@@ -885,6 +1063,26 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr
NotEqual(a.t, expected, actual, msgAndArgs...)
}
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValues(obj1, obj2)
+func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotEqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
+func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotEqualValuesf(a.t, expected, actual, msg, args...)
+}
+
// NotEqualf asserts that the specified values are NOT equal.
//
// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
@@ -898,6 +1096,24 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str
NotEqualf(a.t, expected, actual, msg, args...)
}
+// NotErrorIs asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotErrorIs(a.t, err, target, msgAndArgs...)
+}
+
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotErrorIsf(a.t, err, target, msg, args...)
+}
+
// NotNil asserts that the specified object is not nil.
//
// a.NotNil(err)
@@ -951,7 +1167,7 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in
// NotRegexpf asserts that a specified regexp does not match a string.
//
-// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting")
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
@@ -1090,6 +1306,28 @@ func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interfa
Panicsf(a.t, f, msg, args...)
}
+// Positive asserts that the specified element is positive
+//
+// a.Positive(1)
+// a.Positive(1.23)
+func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ Positive(a.t, e, msgAndArgs...)
+}
+
+// Positivef asserts that the specified element is positive
+//
+// a.Positivef(1, "error message %s", "formatted")
+// a.Positivef(1.23, "error message %s", "formatted")
+func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ Positivef(a.t, e, msg, args...)
+}
+
// Regexp asserts that a specified regexp matches a string.
//
// a.Regexp(regexp.MustCompile("start"), "it's starting")
@@ -1103,7 +1341,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter
// Regexpf asserts that a specified regexp matches a string.
//
-// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting")
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
diff --git a/vendor/github.com/tidwall/gjson/README.md b/vendor/github.com/tidwall/gjson/README.md
index 4108deb3789..55620142236 100644
--- a/vendor/github.com/tidwall/gjson/README.md
+++ b/vendor/github.com/tidwall/gjson/README.md
@@ -3,13 +3,10 @@
src="logo.png"
width="240" height="78" border="0" alt="GJSON">
-
-
-
get json values quickly
GJSON is a Go package that provides a [fast](#performance) and [simple](#get-a-value) way to get values from a json document.
@@ -17,6 +14,8 @@ It has features such as [one line retrieval](#get-a-value), [dot notation paths]
Also check out [SJSON](https://github.com/tidwall/sjson) for modifying json, and the [JJ](https://github.com/tidwall/jj) command line tool.
+For the Rust version go to [gjson.rs](https://github.com/tidwall/gjson.rs).
+
Getting Started
===============
@@ -476,7 +475,7 @@ JSON document used:
}
```
-Each operation was rotated though one of the following search paths:
+Each operation was rotated through one of the following search paths:
```
widget.window.name
@@ -484,12 +483,4 @@ widget.image.hOffset
widget.text.onMouseUp
```
-*These benchmarks were run on a MacBook Pro 15" 2.8 GHz Intel Core i7 using Go 1.8 and can be be found [here](https://github.com/tidwall/gjson-benchmarks).*
-
-
-## Contact
-Josh Baker [@tidwall](http://twitter.com/tidwall)
-
-## License
-
-GJSON source code is available under the MIT [License](/LICENSE).
+*These benchmarks were run on a MacBook Pro 15" 2.8 GHz Intel Core i7 using Go 1.8 and can be found [here](https://github.com/tidwall/gjson-benchmarks).*
diff --git a/vendor/github.com/tidwall/gjson/SYNTAX.md b/vendor/github.com/tidwall/gjson/SYNTAX.md
index 5ea0407f5ae..86235b9bfb1 100644
--- a/vendor/github.com/tidwall/gjson/SYNTAX.md
+++ b/vendor/github.com/tidwall/gjson/SYNTAX.md
@@ -77,14 +77,21 @@ Special purpose characters, such as `.`, `*`, and `?` can be escaped with `\`.
fav\.movie "Deer Hunter"
```
-You'll also need to make sure that the `\` character is correctly escaped when hardcoding a path in source code.
+You'll also need to make sure that the `\` character is correctly escaped when hardcoding a path in your source code.
```go
-res := gjson.Get(json, "fav\\.movie") // must escape the slash
-res := gjson.Get(json, `fav\.movie`) // no need to escape the slash
+// Go
+val := gjson.Get(json, "fav\\.movie") // must escape the slash
+val := gjson.Get(json, `fav\.movie`) // no need to escape the slash
+```
+```rust
+// Rust
+let val = gjson::get(json, "fav\\.movie") // must escape the slash
+let val = gjson::get(json, r#"fav\.movie"#) // no need to escape the slash
```
+
### Arrays
The `#` character allows for digging into JSON Arrays.
@@ -248,6 +255,8 @@ gjson.AddModifier("case", func(json, arg string) string {
"children.@case:lower.@reverse" ["jack","alex","sara"]
```
+*Note: Custom modifiers are not yet available in the Rust version*
+
### Multipaths
Starting with v1.3.0, GJSON added the ability to join multiple paths together
diff --git a/vendor/github.com/tidwall/gjson/gjson.go b/vendor/github.com/tidwall/gjson/gjson.go
index 0b6dcb0981e..7bb82e95ab7 100644
--- a/vendor/github.com/tidwall/gjson/gjson.go
+++ b/vendor/github.com/tidwall/gjson/gjson.go
@@ -3,7 +3,6 @@ package gjson
import (
"encoding/json"
- "reflect"
"strconv"
"strings"
"time"
@@ -106,7 +105,8 @@ func (t Result) Bool() bool {
case True:
return true
case String:
- return t.Str != "" && t.Str != "0" && t.Str != "false"
+ b, _ := strconv.ParseBool(strings.ToLower(t.Str))
+ return b
case Number:
return t.Num != 0
}
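
This tightens string-to-bool coercion to strconv.ParseBool semantics; a minimal sketch of the behavioural difference:

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	// ParseBool accepts 1/t/T/TRUE/true/True and their false counterparts.
	fmt.Println(gjson.Get(`{"a":"TRUE"}`, "a").Bool()) // true
	// "yes" was truthy under the old rule (non-empty, not "0"/"false");
	// it fails strconv.ParseBool and now reports false.
	fmt.Println(gjson.Get(`{"a":"yes"}`, "a").Bool()) // false
}
```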
@@ -124,16 +124,17 @@ func (t Result) Int() int64 {
return n
case Number:
// try to directly convert the float64 to int64
- n, ok := floatToInt(t.Num)
- if !ok {
- // now try to parse the raw string
- n, ok = parseInt(t.Raw)
- if !ok {
- // fallback to a standard conversion
- return int64(t.Num)
- }
+ i, ok := safeInt(t.Num)
+ if ok {
+ return i
}
- return n
+ // now try to parse the raw string
+ i, ok = parseInt(t.Raw)
+ if ok {
+ return i
+ }
+ // fallback to a standard conversion
+ return int64(t.Num)
}
}
@@ -149,16 +150,17 @@ func (t Result) Uint() uint64 {
return n
case Number:
// try to directly convert the float64 to uint64
- n, ok := floatToUint(t.Num)
- if !ok {
- // now try to parse the raw string
- n, ok = parseUint(t.Raw)
- if !ok {
- // fallback to a standard conversion
- return uint64(t.Num)
- }
+ i, ok := safeInt(t.Num)
+ if ok && i >= 0 {
+ return uint64(i)
}
- return n
+ // now try to parse the raw string
+ u, ok := parseUint(t.Raw)
+ if ok {
+ return u
+ }
+ // fallback to a standard conversion
+ return uint64(t.Num)
}
}
@@ -495,6 +497,9 @@ func squash(json string) string {
}
}
if depth == 0 {
+ if i >= len(json) {
+ return json
+ }
return json[:i+1]
}
case '{', '[', '(':
@@ -726,8 +731,13 @@ func parseArrayPath(path string) (r arrayPathResult) {
}
if path[i] == '.' {
r.part = path[:i]
- r.path = path[i+1:]
- r.more = true
+ if !r.arrch && i < len(path)-1 && isDotPiperChar(path[i+1]) {
+ r.pipe = path[i+1:]
+ r.piped = true
+ } else {
+ r.path = path[i+1:]
+ r.more = true
+ }
return
}
if path[i] == '#' {
@@ -973,6 +983,11 @@ right:
return s
}
+// isDotPiperChar reports whether c is a modifier pipe character: '@', '[' or '{'.
+func isDotPiperChar(c byte) bool {
+ return !DisableModifiers && (c == '@' || c == '[' || c == '{')
+}
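
If I read the parseArrayPath change correctly, a dot-pipe into a modifier can now follow an array element the same way parseObjectPath already allowed for object keys; a hedged sketch:

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	// Piping an object key into a modifier already worked...
	fmt.Println(gjson.Get(`{"a":[1,2,3]}`, "a.@reverse").Raw) // [3,2,1]
	// ...and parseArrayPath now appears to give array elements the same
	// treatment, so a modifier can follow an index.
	fmt.Println(gjson.Get(`[[1,2],[3,4]]`, "1.@reverse").Raw) // [4,3]
}
```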
+
type objectPathResult struct {
part string
path string
@@ -991,12 +1006,8 @@ func parseObjectPath(path string) (r objectPathResult) {
return
}
if path[i] == '.' {
- // peek at the next byte and see if it's a '@', '[', or '{'.
r.part = path[:i]
- if !DisableModifiers &&
- i < len(path)-1 &&
- (path[i+1] == '@' ||
- path[i+1] == '[' || path[i+1] == '{') {
+ if i < len(path)-1 && isDotPiperChar(path[i+1]) {
r.pipe = path[i+1:]
r.piped = true
} else {
@@ -1026,14 +1037,11 @@ func parseObjectPath(path string) (r objectPathResult) {
continue
} else if path[i] == '.' {
r.part = string(epart)
- // peek at the next byte and see if it's a '@' modifier
- if !DisableModifiers &&
- i < len(path)-1 && path[i+1] == '@' {
+ if i < len(path)-1 && isDotPiperChar(path[i+1]) {
r.pipe = path[i+1:]
r.piped = true
} else {
r.path = path[i+1:]
- r.more = true
}
r.more = true
return
@@ -1398,7 +1406,6 @@ func parseArray(c *parseContext, i int, path string) (int, bool) {
}
return false
}
-
for i < len(c.json)+1 {
if !rp.arrch {
pmatch = partidx == h
@@ -1600,10 +1607,17 @@ func parseArray(c *parseContext, i int, path string) (int, bool) {
c.calcd = true
return i + 1, true
}
- if len(multires) > 0 && !c.value.Exists() {
- c.value = Result{
- Raw: string(append(multires, ']')),
- Type: JSON,
+ if !c.value.Exists() {
+ if len(multires) > 0 {
+ c.value = Result{
+ Raw: string(append(multires, ']')),
+ Type: JSON,
+ }
+ } else if rp.query.all {
+ c.value = Result{
+ Raw: "[]",
+ Type: JSON,
+ }
}
}
return i + 1, false
@@ -1835,7 +1849,7 @@ type parseContext struct {
// A path is in dot syntax, such as "name.last" or "age".
// When the value is found it's returned immediately.
//
-// A path is a series of keys searated by a dot.
+// A path is a series of keys separated by a dot.
// A key may contain special wildcard characters '*' and '?'.
// To access an array value use the index as the key.
// To get the number of elements in an array or to access a child path, use
@@ -1944,7 +1958,6 @@ func Get(json, path string) Result {
}
}
}
-
var i int
var c = &parseContext{json: json}
if len(path) >= 2 && path[0] == '.' && path[1] == '.' {
@@ -2169,11 +2182,6 @@ func parseAny(json string, i int, hit bool) (int, Result, bool) {
return i, res, false
}
-var ( // used for testing
- testWatchForFallback bool
- testLastWasFallback bool
-)
-
// GetMany searches json for the multiple paths.
// The return value is a Result array where the number of items
// will be equal to the number of input paths.
@@ -2374,6 +2382,12 @@ func validnumber(data []byte, i int) (outi int, ok bool) {
// sign
if data[i] == '-' {
i++
+ if i == len(data) {
+ return i, false
+ }
+ if data[i] < '0' || data[i] > '9' {
+ return i, false
+ }
}
// int
if i == len(data) {
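
The added checks reject a minus sign with no digit behind it, which previously slipped through validation; for example, via the public gjson.Valid:

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	fmt.Println(gjson.Valid(`{"n":-}`))  // false: '-' with no digits is rejected
	fmt.Println(gjson.Valid(`{"n":-1}`)) // true
}
```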
@@ -2524,25 +2538,14 @@ func parseInt(s string) (n int64, ok bool) {
return n, true
}
-const minUint53 = 0
-const maxUint53 = 4503599627370495
-const minInt53 = -2251799813685248
-const maxInt53 = 2251799813685247
-
-func floatToUint(f float64) (n uint64, ok bool) {
- n = uint64(f)
- if float64(n) == f && n >= minUint53 && n <= maxUint53 {
- return n, true
- }
- return 0, false
-}
-
-func floatToInt(f float64) (n int64, ok bool) {
- n = int64(f)
- if float64(n) == f && n >= minInt53 && n <= maxInt53 {
- return n, true
+// safeInt validates a given JSON number and ensures it lies within
+// the minimum and maximum safe integers (as defined by ECMA-262)
+func safeInt(f float64) (n int64, ok bool) {
+ // https://tc39.es/ecma262/#sec-number.min_safe_integer || https://tc39.es/ecma262/#sec-number.max_safe_integer
+ if f < -9007199254740991 || f > 9007199254740991 {
+ return 0, false
}
- return 0, false
+ return int64(f), true
}
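
A sketch of why the fallback matters: integers above 2^53 lose precision as float64, so Int() now re-parses the raw token instead of truncating:

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	// 2^53+1 has no exact float64 representation, so safeInt refuses it
	// and Int() falls back to parsing the raw string.
	fmt.Println(gjson.Get(`{"n":9007199254740993}`, "n").Int())
	// Output: 9007199254740993
}
```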
// execModifier parses the path to find a matching modifier function.
@@ -2600,7 +2603,7 @@ func execModifier(json, path string) (pathOut, res string, ok bool) {
// unwrap removes the '[]' or '{}' characters around json
func unwrap(json string) string {
json = trim(json)
- if len(json) >= 2 && json[0] == '[' || json[0] == '{' {
+ if len(json) >= 2 && (json[0] == '[' || json[0] == '{') {
json = json[1 : len(json)-1]
}
return json
@@ -2632,6 +2635,26 @@ func ModifierExists(name string, fn func(json, arg string) string) bool {
return ok
}
+// cleanWS removes any non-whitespace characters from the string
+func cleanWS(s string) string {
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case ' ', '\t', '\n', '\r':
+ continue
+ default:
+ var s2 []byte
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case ' ', '\t', '\n', '\r':
+ s2 = append(s2, s[i])
+ }
+ }
+ return string(s2)
+ }
+ }
+ return s
+}
+
// @pretty modifier makes the json look nice.
func modPretty(json, arg string) string {
if len(arg) > 0 {
@@ -2641,9 +2664,9 @@ func modPretty(json, arg string) string {
case "sortKeys":
opts.SortKeys = value.Bool()
case "indent":
- opts.Indent = value.String()
+ opts.Indent = cleanWS(value.String())
case "prefix":
- opts.Prefix = value.String()
+ opts.Prefix = cleanWS(value.String())
case "width":
opts.Width = int(value.Int())
}
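
A sketch of the effect on the @pretty modifier: the indent and prefix options can now only inject whitespace into the output (the modifier-with-argument syntax is documented in the gjson README):

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	// cleanWS strips anything but whitespace from these options, so a
	// crafted "indent" can no longer splice tokens into the result.
	res := gjson.Get(`{"b":2,"a":1}`, `@pretty:{"sortKeys":true,"indent":"  "}`)
	fmt.Println(res.Raw)
}
```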
@@ -2729,19 +2752,24 @@ func modFlatten(json, arg string) string {
out = append(out, '[')
var idx int
res.ForEach(func(_, value Result) bool {
- if idx > 0 {
- out = append(out, ',')
- }
+ var raw string
if value.IsArray() {
if deep {
- out = append(out, unwrap(modFlatten(value.Raw, arg))...)
+ raw = unwrap(modFlatten(value.Raw, arg))
} else {
- out = append(out, unwrap(value.Raw)...)
+ raw = unwrap(value.Raw)
}
} else {
- out = append(out, value.Raw...)
+ raw = value.Raw
+ }
+ raw = strings.TrimSpace(raw)
+ if len(raw) > 0 {
+ if idx > 0 {
+ out = append(out, ',')
+ }
+ out = append(out, raw...)
+ idx++
}
- idx++
return true
})
out = append(out, ']')
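
The trim-and-skip keeps empty inner arrays from leaving dangling commas behind; for example:

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	// The empty inner array used to produce "[1,,2,3]"-style output.
	fmt.Println(gjson.Get(`{"a":[1,[],[2,3]]}`, "a.@flatten").Raw) // [1,2,3]
}
```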
@@ -2825,6 +2853,19 @@ func modValid(json, arg string) string {
return json
}
+// stringHeader instead of reflect.StringHeader
+type stringHeader struct {
+ data unsafe.Pointer
+ len int
+}
+
+// sliceHeader instead of reflect.SliceHeader
+type sliceHeader struct {
+ data unsafe.Pointer
+ len int
+ cap int
+}
+
// getBytes casts the input json bytes to a string and safely returns the
// results as uniquely allocated data. This operation is intended to minimize
// copies and allocations for the large json string->[]byte.
@@ -2834,14 +2875,14 @@ func getBytes(json []byte, path string) Result {
// unsafe cast to string
result = Get(*(*string)(unsafe.Pointer(&json)), path)
// safely get the string headers
- rawhi := *(*reflect.StringHeader)(unsafe.Pointer(&result.Raw))
- strhi := *(*reflect.StringHeader)(unsafe.Pointer(&result.Str))
+ rawhi := *(*stringHeader)(unsafe.Pointer(&result.Raw))
+ strhi := *(*stringHeader)(unsafe.Pointer(&result.Str))
// create byte slice headers
- rawh := reflect.SliceHeader{Data: rawhi.Data, Len: rawhi.Len}
- strh := reflect.SliceHeader{Data: strhi.Data, Len: strhi.Len}
- if strh.Data == 0 {
+ rawh := sliceHeader{data: rawhi.data, len: rawhi.len, cap: rawhi.len}
+ strh := sliceHeader{data: strhi.data, len: strhi.len, cap: rawhi.len}
+ if strh.data == nil {
// str is nil
- if rawh.Data == 0 {
+ if rawh.data == nil {
// raw is nil
result.Raw = ""
} else {
@@ -2849,19 +2890,20 @@ func getBytes(json []byte, path string) Result {
result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh)))
}
result.Str = ""
- } else if rawh.Data == 0 {
+ } else if rawh.data == nil {
// raw is nil
result.Raw = ""
// str has data, safely copy the slice header to a string
result.Str = string(*(*[]byte)(unsafe.Pointer(&strh)))
- } else if strh.Data >= rawh.Data &&
- int(strh.Data)+strh.Len <= int(rawh.Data)+rawh.Len {
+ } else if uintptr(strh.data) >= uintptr(rawh.data) &&
+ uintptr(strh.data)+uintptr(strh.len) <=
+ uintptr(rawh.data)+uintptr(rawh.len) {
// Str is a substring of Raw.
- start := int(strh.Data - rawh.Data)
+ start := uintptr(strh.data) - uintptr(rawh.data)
// safely copy the raw slice header
result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh)))
// substring the raw
- result.Str = result.Raw[start : start+strh.Len]
+ result.Str = result.Raw[start : start+uintptr(strh.len)]
} else {
// safely copy both the raw and str slice headers to strings
result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh)))
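
The local header types mirror reflect's but hold an unsafe.Pointer, which (unlike a bare uintptr) keeps the referenced data visible to the garbage collector. The observable behaviour of GetBytes is unchanged:

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	data := []byte(`{"name":{"first":"Janet","last":"Murphy"}}`)
	// GetBytes avoids copying the large input; the header juggling above
	// only affects how Raw/Str are safely re-materialized as strings.
	fmt.Println(gjson.GetBytes(data, "name.first").String()) // Janet
}
```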
@@ -2876,9 +2918,9 @@ func getBytes(json []byte, path string) Result {
// used instead.
func fillIndex(json string, c *parseContext) {
if len(c.value.Raw) > 0 && !c.calcd {
- jhdr := *(*reflect.StringHeader)(unsafe.Pointer(&json))
- rhdr := *(*reflect.StringHeader)(unsafe.Pointer(&(c.value.Raw)))
- c.value.Index = int(rhdr.Data - jhdr.Data)
+ jhdr := *(*stringHeader)(unsafe.Pointer(&json))
+ rhdr := *(*stringHeader)(unsafe.Pointer(&(c.value.Raw)))
+ c.value.Index = int(uintptr(rhdr.data) - uintptr(jhdr.data))
if c.value.Index < 0 || c.value.Index >= len(json) {
c.value.Index = 0
}
@@ -2886,10 +2928,10 @@ func fillIndex(json string, c *parseContext) {
}
func stringBytes(s string) []byte {
- return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
- Data: (*reflect.StringHeader)(unsafe.Pointer(&s)).Data,
- Len: len(s),
- Cap: len(s),
+ return *(*[]byte)(unsafe.Pointer(&sliceHeader{
+ data: (*stringHeader)(unsafe.Pointer(&s)).data,
+ len: len(s),
+ cap: len(s),
}))
}
diff --git a/vendor/github.com/tidwall/gjson/go.mod b/vendor/github.com/tidwall/gjson/go.mod
index c287095fc52..9853a309604 100644
--- a/vendor/github.com/tidwall/gjson/go.mod
+++ b/vendor/github.com/tidwall/gjson/go.mod
@@ -3,6 +3,6 @@ module github.com/tidwall/gjson
go 1.12
require (
- github.com/tidwall/match v1.0.1
- github.com/tidwall/pretty v1.0.2
+ github.com/tidwall/match v1.0.3
+ github.com/tidwall/pretty v1.1.0
)
diff --git a/vendor/github.com/tidwall/gjson/go.sum b/vendor/github.com/tidwall/gjson/go.sum
index 75e1dd7b723..739d13276f0 100644
--- a/vendor/github.com/tidwall/gjson/go.sum
+++ b/vendor/github.com/tidwall/gjson/go.sum
@@ -1,4 +1,4 @@
-github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
-github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
-github.com/tidwall/pretty v1.0.2 h1:Z7S3cePv9Jwm1KwS0513MRaoUe3S01WPbLNV40pwWZU=
-github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE=
+github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
+github.com/tidwall/pretty v1.1.0 h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8=
+github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
diff --git a/vendor/github.com/tidwall/match/README.md b/vendor/github.com/tidwall/match/README.md
index 2aa5bc38ba1..5fdd4cf63db 100644
--- a/vendor/github.com/tidwall/match/README.md
+++ b/vendor/github.com/tidwall/match/README.md
@@ -1,20 +1,17 @@
-Match
-=====
-
-
+# Match
+
+[![GoDoc](https://godoc.org/github.com/tidwall/match?status.svg)](https://godoc.org/github.com/tidwall/match)
Match is a very simple pattern matcher where '*' matches on any
number of characters and '?' matches on any one character.
-Installing
-----------
+## Installing
```
go get -u github.com/tidwall/match
```
-Example
--------
+## Example
```go
match.Match("hello", "*llo")
@@ -23,10 +20,10 @@ match.Match("hello", "h*o")
```
-Contact
--------
+## Contact
+
Josh Baker [@tidwall](http://twitter.com/tidwall)
-License
--------
+## License
+
Match source code is available under the MIT [License](/LICENSE).
diff --git a/vendor/github.com/tidwall/match/go.mod b/vendor/github.com/tidwall/match/go.mod
new file mode 100644
index 00000000000..df19b5f773b
--- /dev/null
+++ b/vendor/github.com/tidwall/match/go.mod
@@ -0,0 +1,3 @@
+module github.com/tidwall/match
+
+go 1.15
diff --git a/vendor/github.com/tidwall/match/match.go b/vendor/github.com/tidwall/match/match.go
index fcfe998b5b4..57aabf42402 100644
--- a/vendor/github.com/tidwall/match/match.go
+++ b/vendor/github.com/tidwall/match/match.go
@@ -1,4 +1,4 @@
-// Match provides a simple pattern matcher with unicode support.
+// Package match provides a simple pattern matcher with unicode support.
package match
import "unicode/utf8"
@@ -6,7 +6,7 @@ import "unicode/utf8"
// Match returns true if str matches pattern. This is a very
// simple wildcard match where '*' matches on any number of characters
// and '?' matches on any one character.
-
+//
// pattern:
// { term }
// term:
@@ -16,12 +16,16 @@ import "unicode/utf8"
// '\\' c matches character c
//
func Match(str, pattern string) bool {
- if pattern == "*" {
- return true
- }
return deepMatch(str, pattern)
}
+
func deepMatch(str, pattern string) bool {
+ if pattern == "*" {
+ return true
+ }
+ for len(pattern) > 1 && pattern[0] == '*' && pattern[1] == '*' {
+ pattern = pattern[1:]
+ }
for len(pattern) > 0 {
if pattern[0] > 0x7f {
return deepMatchRune(str, pattern)
@@ -52,6 +56,13 @@ func deepMatch(str, pattern string) bool {
}
func deepMatchRune(str, pattern string) bool {
+ if pattern == "*" {
+ return true
+ }
+ for len(pattern) > 1 && pattern[0] == '*' && pattern[1] == '*' {
+ pattern = pattern[1:]
+ }
+
var sr, pr rune
var srsz, prsz int
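
A usage sketch; the last pattern exercises the new collapsing of consecutive '*' wildcards, which avoids the recursive blow-up such patterns used to trigger:

```go
package main

import (
	"fmt"

	"github.com/tidwall/match"
)

func main() {
	fmt.Println(match.Match("hello", "*llo")) // true
	fmt.Println(match.Match("hello", "h?l*")) // true
	// Runs of '*' now collapse to a single star inside the matcher.
	fmt.Println(match.Match("hello", "****o")) // true
}
```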
diff --git a/vendor/github.com/tidwall/pretty/README.md b/vendor/github.com/tidwall/pretty/README.md
index 09884692e1c..7a614223687 100644
--- a/vendor/github.com/tidwall/pretty/README.md
+++ b/vendor/github.com/tidwall/pretty/README.md
@@ -1,8 +1,6 @@
# Pretty
-[![Build Status](https://img.shields.io/travis/tidwall/pretty.svg?style=flat-square)](https://travis-ci.org/tidwall/prettty)
-[![Coverage Status](https://img.shields.io/badge/coverage-100%25-brightgreen.svg?style=flat-square)](http://gocover.io/github.com/tidwall/pretty)
-[![GoDoc](https://img.shields.io/badge/api-reference-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/tidwall/pretty)
+[![GoDoc](https://img.shields.io/badge/api-reference-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/tidwall/pretty)
Pretty is a Go package that provides [fast](#performance) methods for formatting JSON for human readability, or to compact JSON for smaller payloads.
@@ -81,6 +79,45 @@ Will format the json to:
{"name":{"first":"Tom","last":"Anderson"},"age":37,"children":["Sara","Alex","Jack"],"fav.movie":"Deer Hunter","friends":[{"first":"Janet","last":"Murphy","age":44}]}```
```
+## Spec
+
+Spec cleans comments and trailing commas from input JSON, converting it to
+valid JSON per the official spec: https://tools.ietf.org/html/rfc8259
+
+The resulting JSON will always be the same length as the input and it will
+include all of the same line breaks at matching offsets. This is to ensure
+the result can later be processed by an external parser and that the
+parser will report messages or errors with the correct offsets.
+
+The following example takes a JSON document that has comments and trailing
+commas and converts it before unmarshalling it with the standard Go
+JSON library.
+
+```go
+
+data := `
+{
+ /* Dev Machine */
+ "dbInfo": {
+ "host": "localhost",
+ "port": 5432, // use full email address
+ "username": "josh",
+ "password": "pass123", // use a hashed password
+ }
+ /* Only SMTP Allowed */
+ "emailInfo": {
+ "email": "josh@example.com",
+ "password": "pass123",
+ "smtp": "smpt.example.com",
+ }
+}
+`
+
+err := json.Unmarshal(pretty.Spec([]byte(data)), &config)
+
+```
+
+
## Customized output
diff --git a/vendor/github.com/tidwall/pretty/pretty.go b/vendor/github.com/tidwall/pretty/pretty.go
index 2951c610eaa..7fa9d639750 100644
--- a/vendor/github.com/tidwall/pretty/pretty.go
+++ b/vendor/github.com/tidwall/pretty/pretty.go
@@ -118,21 +118,27 @@ type pair struct {
vstart, vend int
}
-type byKey struct {
+type byKeyVal struct {
sorted bool
json []byte
pairs []pair
}
-func (arr *byKey) Len() int {
+func (arr *byKeyVal) Len() int {
return len(arr.pairs)
}
-func (arr *byKey) Less(i, j int) bool {
+func (arr *byKeyVal) Less(i, j int) bool {
key1 := arr.json[arr.pairs[i].kstart+1 : arr.pairs[i].kend-1]
key2 := arr.json[arr.pairs[j].kstart+1 : arr.pairs[j].kend-1]
- return string(key1) < string(key2)
+ if string(key1) < string(key2) {
+ return true
+ }
+ if string(key1) > string(key2) {
+ return false
+ }
+ return arr.pairs[i].vstart < arr.pairs[j].vstart
}
-func (arr *byKey) Swap(i, j int) {
+func (arr *byKeyVal) Swap(i, j int) {
arr.pairs[i], arr.pairs[j] = arr.pairs[j], arr.pairs[i]
arr.sorted = true
}
@@ -174,7 +180,11 @@ func appendPrettyObject(buf, json []byte, i int, open, close byte, pretty bool,
}
if n > 0 {
nl = len(buf)
- buf = append(buf, '\n')
+ if buf[nl-1] == ' ' {
+ buf[nl-1] = '\n'
+ } else {
+ buf = append(buf, '\n')
+ }
}
if buf[len(buf)-1] != open {
buf = appendTabs(buf, prefix, indent, tabs)
@@ -193,7 +203,11 @@ func appendPrettyObject(buf, json []byte, i int, open, close byte, pretty bool,
var p pair
if pretty {
nl = len(buf)
- buf = append(buf, '\n')
+ if buf[nl-1] == ' ' {
+ buf[nl-1] = '\n'
+ } else {
+ buf = append(buf, '\n')
+ }
if open == '{' && sortkeys {
p.kstart = i
p.vstart = len(buf)
@@ -235,8 +249,8 @@ func sortPairs(json, buf []byte, pairs []pair) []byte {
}
vstart := pairs[0].vstart
vend := pairs[len(pairs)-1].vend
- arr := byKey{false, json, pairs}
- sort.Sort(&arr)
+ arr := byKeyVal{false, json, pairs}
+ sort.Stable(&arr)
if !arr.sorted {
return buf
}
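
With sort.Stable plus the value-position tiebreaker, duplicate keys now come out in a deterministic order; a sketch using the exported Options:

```go
package main

import (
	"fmt"

	"github.com/tidwall/pretty"
)

func main() {
	opts := &pretty.Options{Width: 80, Indent: "  ", SortKeys: true}
	// The duplicate "a" keys sort together but keep their original
	// relative order (1000 before 2000) thanks to the stable sort.
	out := pretty.PrettyOptions([]byte(`{"b":1,"a":1000,"a":2000}`), opts)
	fmt.Println(string(out))
}
```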
@@ -305,6 +319,7 @@ func appendTabs(buf []byte, prefix, indent string, tabs int) []byte {
type Style struct {
Key, String, Number [2]string
True, False, Null [2]string
+ Escape [2]string
Append func(dst []byte, c byte) []byte
}
@@ -328,6 +343,7 @@ func init() {
True: [2]string{"\x1B[96m", "\x1B[0m"},
False: [2]string{"\x1B[96m", "\x1B[0m"},
Null: [2]string{"\x1B[91m", "\x1B[0m"},
+ Escape: [2]string{"\x1B[35m", "\x1B[0m"},
Append: func(dst []byte, c byte) []byte {
if c < ' ' && (c != '\r' && c != '\n' && c != '\t' && c != '\v') {
dst = append(dst, "\\u00"...)
@@ -367,8 +383,39 @@ func Color(src []byte, style *Style) []byte {
dst = append(dst, style.String[0]...)
}
dst = apnd(dst, '"')
+ esc := false
+ uesc := 0
for i = i + 1; i < len(src); i++ {
- dst = apnd(dst, src[i])
+ if src[i] == '\\' {
+ if key {
+ dst = append(dst, style.Key[1]...)
+ } else {
+ dst = append(dst, style.String[1]...)
+ }
+ dst = append(dst, style.Escape[0]...)
+ dst = apnd(dst, src[i])
+ esc = true
+ if i+1 < len(src) && src[i+1] == 'u' {
+ uesc = 5
+ } else {
+ uesc = 1
+ }
+ } else if esc {
+ dst = apnd(dst, src[i])
+ if uesc == 1 {
+ esc = false
+ dst = append(dst, style.Escape[1]...)
+ if key {
+ dst = append(dst, style.Key[0]...)
+ } else {
+ dst = append(dst, style.String[0]...)
+ }
+ } else {
+ uesc--
+ }
+ } else {
+ dst = apnd(dst, src[i])
+ }
if src[i] == '"' {
j := i - 1
for ; ; j-- {
@@ -381,7 +428,9 @@ func Color(src []byte, style *Style) []byte {
}
}
}
- if key {
+ if esc {
+ dst = append(dst, style.Escape[1]...)
+ } else if key {
dst = append(dst, style.Key[1]...)
} else {
dst = append(dst, style.String[1]...)
@@ -434,3 +483,90 @@ func Color(src []byte, style *Style) []byte {
}
return dst
}
+
+// Spec strips out comments and trailing commas and converts the input to
+// valid JSON per the official spec: https://tools.ietf.org/html/rfc8259
+//
+// The resulting JSON will always be the same length as the input and it will
+// include all of the same line breaks at matching offsets. This is to ensure
+// the result can later be processed by an external parser and that the
+// parser will report messages or errors with the correct offsets.
+func Spec(src []byte) []byte {
+ return spec(src, nil)
+}
+
+// SpecInPlace is the same as Spec, but this method reuses the input json
+// buffer to avoid allocations. Do not use the original bytes slice upon return.
+func SpecInPlace(src []byte) []byte {
+ return spec(src, src)
+}
+
+func spec(src, dst []byte) []byte {
+ dst = dst[:0]
+ for i := 0; i < len(src); i++ {
+ if src[i] == '/' {
+ if i < len(src)-1 {
+ if src[i+1] == '/' {
+ dst = append(dst, ' ', ' ')
+ i += 2
+ for ; i < len(src); i++ {
+ if src[i] == '\n' {
+ dst = append(dst, '\n')
+ break
+ } else if src[i] == '\t' || src[i] == '\r' {
+ dst = append(dst, src[i])
+ } else {
+ dst = append(dst, ' ')
+ }
+ }
+ continue
+ }
+ if src[i+1] == '*' {
+ dst = append(dst, ' ', ' ')
+ i += 2
+ for ; i < len(src)-1; i++ {
+ if src[i] == '*' && src[i+1] == '/' {
+ dst = append(dst, ' ', ' ')
+ i++
+ break
+ } else if src[i] == '\n' || src[i] == '\t' ||
+ src[i] == '\r' {
+ dst = append(dst, src[i])
+ } else {
+ dst = append(dst, ' ')
+ }
+ }
+ continue
+ }
+ }
+ }
+ dst = append(dst, src[i])
+ if src[i] == '"' {
+ for i = i + 1; i < len(src); i++ {
+ dst = append(dst, src[i])
+ if src[i] == '"' {
+ j := i - 1
+ for ; ; j-- {
+ if src[j] != '\\' {
+ break
+ }
+ }
+ if (j-i)%2 != 0 {
+ break
+ }
+ }
+ }
+ } else if src[i] == '}' || src[i] == ']' {
+ for j := len(dst) - 2; j >= 0; j-- {
+ if dst[j] <= ' ' {
+ continue
+ }
+ if dst[j] == ',' {
+ dst[j] = ' '
+ }
+ break
+ }
+ }
+ }
+ return dst
+}
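
A self-contained version of the README example above; note that Spec takes a []byte and that the output keeps the input's length and line offsets:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/tidwall/pretty"
)

func main() {
	src := []byte(`{
	"port": 5432, // dev only
}`)
	var cfg struct{ Port int }
	// Spec blanks the comment and the trailing comma in place, so byte
	// offsets in any later parser errors still line up with the input.
	if err := json.Unmarshal(pretty.Spec(src), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Port) // 5432
}
```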
diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE
new file mode 100644
index 00000000000..2683e4bb1f2
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/LICENSE
@@ -0,0 +1,50 @@
+
+This project is covered by two different licenses: MIT and Apache.
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright starting in 2011 when the project was ported over:
+
+ apic.go emitterc.go parserc.go readerc.go scannerc.go
+ writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE
new file mode 100644
index 00000000000..866d74a7ad7
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md
new file mode 100644
index 00000000000..08eb1babddf
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/README.md
@@ -0,0 +1,150 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.2, but preserves some behavior
+from 1.1 for backwards compatibility.
+
+Specifically, as of v3 of the yaml package:
+
+ - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
+ decoded into a typed bool value. Otherwise they behave as a string. Booleans
+ in YAML 1.2 are _true/false_ only.
+ - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
+ as specified in YAML 1.2, because most parsers still use the old format.
+ Octals in the _0o777_ format are supported though, so new files work.
+ - Does not support base-60 floats. These are gone from YAML 1.2, and were
+   actually never supported by this package, as they were clearly a poor choice.
+
+The package otherwise aims to support most of YAML 1.2, including
+anchors, tags, and map merging. Multi-document unmarshalling is not yet
+implemented.
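+
+As a minimal sketch of the bool behavior described above (using the
+package's standard Unmarshal API):
+
+```Go
+var b bool
+var v interface{}
+yaml.Unmarshal([]byte("yes"), &b) // b == true: typed bool target, YAML 1.1 bool
+yaml.Unmarshal([]byte("yes"), &v) // v == "yes": untyped target, behaves as a string
+```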
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v3*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v3
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
+
+API stability
+-------------
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is covered by the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v3"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go
new file mode 100644
index 00000000000..65846e67497
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/apic.go
@@ -0,0 +1,746 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+// Create ALIAS.
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ anchor: anchor,
+ }
+ return true
+}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go
new file mode 100644
index 00000000000..be63169b719
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/decode.go
@@ -0,0 +1,931 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *Node
+ anchors map[string]*Node
+ doneInit bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.anchors = make(map[string]*Node)
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+		// Scanner errors don't advance the line counter before returning the error.
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+ if anchor != nil {
+ n.Anchor = string(anchor)
+ p.anchors[n.Anchor] = n
+ }
+}
+
+func (p *parser) parse() *Node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ case yaml_TAIL_COMMENT_EVENT:
+ panic("internal error: unexpected tail comment event (please report)")
+ default:
+ panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
+ var style Style
+ if tag != "" && tag != "!" {
+ tag = shortTag(tag)
+ style = TaggedStyle
+ } else if defaultTag != "" {
+ tag = defaultTag
+ } else if kind == ScalarNode {
+ tag, _ = resolve("", value)
+ }
+ return &Node{
+ Kind: kind,
+ Tag: tag,
+ Value: value,
+ Style: style,
+ Line: p.event.start_mark.line + 1,
+ Column: p.event.start_mark.column + 1,
+ HeadComment: string(p.event.head_comment),
+ LineComment: string(p.event.line_comment),
+ FootComment: string(p.event.foot_comment),
+ }
+}
+
+func (p *parser) parseChild(parent *Node) *Node {
+ child := p.parse()
+ parent.Content = append(parent.Content, child)
+ return child
+}
+
+func (p *parser) document() *Node {
+ n := p.node(DocumentNode, "", "", "")
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ p.parseChild(n)
+ if p.peek() == yaml_DOCUMENT_END_EVENT {
+ n.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *Node {
+ n := p.node(AliasNode, "", "", string(p.event.anchor))
+ n.Alias = p.anchors[n.Value]
+ if n.Alias == nil {
+ failf("unknown anchor '%s' referenced", n.Value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *Node {
+ var parsedStyle = p.event.scalar_style()
+ var nodeStyle Style
+ switch {
+ case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = DoubleQuotedStyle
+ case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = SingleQuotedStyle
+ case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
+ nodeStyle = LiteralStyle
+ case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
+ nodeStyle = FoldedStyle
+ }
+ var nodeValue = string(p.event.value)
+ var nodeTag = string(p.event.tag)
+ var defaultTag string
+ if nodeStyle == 0 {
+ if nodeValue == "<<" {
+ defaultTag = mergeTag
+ }
+ } else {
+ defaultTag = strTag
+ }
+ n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
+ n.Style |= nodeStyle
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *Node {
+ n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
+ if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ p.parseChild(n)
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *Node {
+ n := p.node(MappingNode, mapTag, string(p.event.tag), "")
+ block := true
+ if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
+ block = false
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ k := p.parseChild(n)
+ if block && k.FootComment != "" {
+ // Must be a foot comment for the prior value when being dedented.
+ if len(n.Content) > 2 {
+ n.Content[len(n.Content)-3].FootComment = k.FootComment
+ k.FootComment = ""
+ }
+ }
+ v := p.parseChild(n)
+ if k.FootComment == "" && v.FootComment != "" {
+ k.FootComment = v.FootComment
+ v.FootComment = ""
+ }
+ if p.peek() == yaml_TAIL_COMMENT_EVENT {
+ if k.FootComment == "" {
+ k.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_TAIL_COMMENT_EVENT)
+ }
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
+ n.Content[len(n.Content)-2].FootComment = n.FootComment
+ n.FootComment = ""
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *Node
+ aliases map[*Node]bool
+ terrors []string
+
+ stringMapType reflect.Type
+ generalMapType reflect.Type
+
+ knownFields bool
+ uniqueKeys bool
+ decodeCount int
+ aliasCount int
+ aliasDepth int
+}
+
+var (
+ nodeType = reflect.TypeOf(Node{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ stringMapType = reflect.TypeOf(map[string]interface{}{})
+ generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = generalMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder() *decoder {
+ d := &decoder{
+ stringMapType: stringMapType,
+ generalMapType: generalMapType,
+ uniqueKeys: true,
+ }
+ d.aliases = make(map[*Node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
+ if n.Tag != "" {
+ tag = n.Tag
+ }
+ value := n.Value
+ if tag != seqTag && tag != mapTag {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
+ err := u.UnmarshalYAML(n)
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.ShortTag() == nullTag {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ outi := out.Addr().Interface()
+ if u, ok := outi.(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ if u, ok := outi.(obsoleteUnmarshaler); ok {
+ good = d.callObsoleteUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
+ if n.ShortTag() == nullTag {
+ return reflect.Value{}
+ }
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or
+ // ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
+ // ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+}
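+
+// Editorial note: as a worked example, midway through the range at
+// decodeCount = 2,200,000 the allowed ratio is
+// 0.99 - 0.89*(1,800,000/3,600,000) = 0.545, i.e. at most ~54.5% of decode
+// operations may be alias-driven before decoding fails.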
+
+func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
+ if out.Type() == nodeType {
+ out.Set(reflect.ValueOf(n).Elem())
+ return true
+ }
+ switch n.Kind {
+ case DocumentNode:
+ return d.document(n, out)
+ case AliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.Kind {
+ case ScalarNode:
+ good = d.scalar(n, out)
+ case MappingNode:
+ good = d.mapping(n, out)
+ case SequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(int(n.Kind)))
+ }
+ return good
+}
+
+func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
+ if len(n.Content) == 1 {
+ d.doc = n
+ d.unmarshal(n.Content[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.Value)
+ }
+ d.aliases[n] = true
+ d.aliasDepth++
+ good = d.unmarshal(n.Alias, out)
+ d.aliasDepth--
+ delete(d.aliases, n)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) scalar(n *Node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.indicatedString() {
+ tag = strTag
+ resolved = n.Value
+ } else {
+ tag, resolved = resolve(n.Tag, n.Value)
+ if tag == binaryTag {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.CanAddr() {
+ switch out.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ out.Set(reflect.Zero(out.Type()))
+ return true
+ }
+ }
+ return false
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == binaryTag {
+ text = []byte(resolved.(string))
+ } else {
+ // We let any value be unmarshaled into TextUnmarshaler.
+ // That might be more lax than we'd like, but the
+				// TextUnmarshaler itself should weed out any dubious values.
+ text = []byte(n.Value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == binaryTag {
+ out.SetString(resolved.(string))
+ return true
+ }
+ out.SetString(n.Value)
+ return true
+ case reflect.Interface:
+ out.Set(reflect.ValueOf(resolved))
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		// Decoding plain integers into a time.Duration used to work in v2,
+		// but it was very unfriendly, so durations now require a string
+		// that time.ParseDuration accepts (handled in the string case below).
+ isDuration := out.Type() == durationType
+
+ switch resolved := resolved.(type) {
+ case int:
+ if !isDuration && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !isDuration && !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ case string:
+ // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
+ // It only works if explicitly attempting to unmarshal into a typed bool value.
+ switch resolved {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
+ out.SetBool(true)
+ return true
+ case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ out.SetBool(false)
+ return true
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ panic("yaml internal error: please report the issue")
+ }
+ d.terror(n, tag, out)
+ return false
+}
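+
+// For illustration (an editorial sketch, not part of the upstream file), the
+// duration branch above lets a string scalar populate a time.Duration field:
+//
+//	var cfg struct{ Timeout time.Duration }
+//	_ = Unmarshal([]byte("timeout: 1m30s"), &cfg)
+//	// cfg.Timeout == 90*time.Second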
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, seqTag, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.Content[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+ if d.uniqueKeys {
+ nerrs := len(d.terrors)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ for j := i + 2; j < l; j += 2 {
+ nj := n.Content[j]
+ if ni.Kind == nj.Kind && ni.Value == nj.Value {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
+ }
+ }
+ }
+ if len(d.terrors) > nerrs {
+ return false
+ }
+ }
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ iface := out
+ if isStringMap(n) {
+ out = reflect.MakeMap(d.stringMapType)
+ } else {
+ out = reflect.MakeMap(d.generalMapType)
+ }
+ iface.Set(out)
+ default:
+ d.terror(n, mapTag, out)
+ return false
+ }
+
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ stringMapType := d.stringMapType
+ generalMapType := d.generalMapType
+ if outt.Elem() == ifaceType {
+ if outt.Key().Kind() == reflect.String {
+ d.stringMapType = outt
+ } else if outt.Key() == ifaceType {
+ d.generalMapType = outt
+ }
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.Content[i]) {
+ d.merge(n.Content[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.Content[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.Content[i+1], e) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+ d.stringMapType = stringMapType
+ d.generalMapType = generalMapType
+ return true
+}
+
+func isStringMap(n *Node) bool {
+ if n.Kind != MappingNode {
+ return false
+ }
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ if n.Content[i].ShortTag() != strTag {
+ return false
+ }
+ }
+ return true
+}
+
+func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ for _, index := range sinfo.InlineUnmarshalers {
+ field := d.fieldByIndex(n, out, index)
+ d.prepare(n, field)
+ }
+
+ var doneFields []bool
+ if d.uniqueKeys {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ name := settableValueOf("")
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ if isMerge(ni) {
+ d.merge(n.Content[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ if d.uniqueKeys {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = d.fieldByIndex(n, out, info.Inline)
+ }
+ d.unmarshal(n.Content[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.Content[i+1], value)
+ inlineMap.SetMapIndex(name, value)
+ } else if d.knownFields {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *Node, out reflect.Value) {
+ switch n.Kind {
+ case MappingNode:
+ d.unmarshal(n, out)
+ case AliasNode:
+ if n.Alias != nil && n.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case SequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.Content) - 1; i >= 0; i-- {
+ ni := n.Content[i]
+ if ni.Kind == AliasNode {
+ if ni.Alias != nil && ni.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ } else if ni.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
+
+func isMerge(n *Node) bool {
+ return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
+}
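+
+// For illustration (an editorial sketch, not part of the upstream file), the
+// merge handling above implements YAML merge keys, with explicit keys taking
+// precedence over merged ones:
+//
+//	var out struct{ Derived map[string]int }
+//	data := "base: &base {a: 1, b: 2}\nderived:\n  <<: *base\n  b: 3\n"
+//	_ = Unmarshal([]byte(data), &out)
+//	// out.Derived == map[string]int{"a": 1, "b": 3}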
diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go
new file mode 100644
index 00000000000..ab2a066194c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/emitterc.go
@@ -0,0 +1,1992 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and below and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and above and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ break
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ break
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ break
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ // [Go] If inside a block sequence item, discount the space taken by the indicator.
+ if emitter.best_indent > 2 && emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
+ emitter.indent -= 2
+ }
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+ emitter.space_above = true
+ emitter.foot_indent = -1
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+			// [Go] Unconditionally break the line after "---"; libyaml only
+			// does this in canonical mode.
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+ }
+
+ if len(emitter.head_comment) > 0 {
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_emit_node(emitter, event, true, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ // [Go] Force document foot separation.
+ emitter.foot_indent = 0
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.foot_indent = -1
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ if emitter.canonical && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.column == 0 || emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ // [Go] The original logic here would not indent the sequence when inside a mapping.
+ // In Go we always indent it, but take the sequence indicator out of the indentation.
+ indentless := emitter.best_indent == 2 && emitter.mapping_context && (emitter.column == 0 || !emitter.indention)
+ original := emitter.indent
+ if !yaml_emitter_increase_indent(emitter, false, indentless) {
+ return false
+ }
+ if emitter.indent > original+2 {
+ emitter.indent -= 2
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
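+
+// The 128-byte cap mirrors libyaml's emitter limit. Longer keys are still
+// valid YAML (the specification allows simple keys of up to 1024 characters);
+// they are simply written in the explicit "? key" form instead.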
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
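+
+// Two consequences of the cascade above: a multiline scalar used as a simple
+// key is forced into double quotes, and an empty scalar inside a flow
+// collection or simple key is emitted as '' rather than as nothing at all.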
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+ return false
+ }
+ emitter.tail_comment = emitter.tail_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ }
+
+ if len(emitter.head_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+ return false
+ }
+ emitter.head_comment = emitter.head_comment[:0]
+ return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.line_comment) == 0 {
+ return true
+ }
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+ return false
+ }
+ emitter.line_comment = emitter.line_comment[:0]
+ return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.foot_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+ return false
+ }
+ emitter.foot_comment = emitter.foot_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+ tab_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if value[i] == '\t' {
+ tab_characters = true
+ } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+		// [Go]: Why the 'z' variant? This can't be the end of the string,
+		// as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || tab_characters || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
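+
+// For example, the value "a: b" trips the ':' followed-by-space check above,
+// which disallows both plain forms, so the scalar ends up single-quoted
+// as 'a: b'.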
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ if len(event.head_comment) > 0 {
+ emitter.head_comment = event.head_comment
+ }
+ if len(event.line_comment) > 0 {
+ emitter.line_comment = event.line_comment
+ }
+ if len(event.foot_comment) > 0 {
+ emitter.foot_comment = event.foot_comment
+ }
+ if len(event.tail_comment) > 0 {
+ emitter.tail_comment = event.tail_comment
+ }
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if emitter.foot_indent == indent {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ //emitter.indention = true
+ emitter.space_above = false
+ emitter.foot_indent = -1
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if len(value) > 0 && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ if len(value) > 0 {
+ emitter.whitespace = false
+ }
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
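+
+// For example, a literal scalar whose text starts with a space and lacks a
+// trailing newline is announced as "|2-" (with best_indent == 2): an explicit
+// indentation hint followed by a "strip" chomping hint.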
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ //emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ //emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
+
+func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool {
+ breaks := false
+ pound := false
+ for i := 0; i < len(comment); {
+ if is_break(comment, i) {
+ if !write_break(emitter, comment, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ pound = false
+ } else {
+ if breaks && !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !pound {
+ if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) {
+ return false
+ }
+ pound = true
+ }
+ if !write(emitter, comment, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ if !breaks && !put_break(emitter) {
+ return false
+ }
+
+ emitter.whitespace = true
+ //emitter.indention = true
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go
new file mode 100644
index 00000000000..1f37271ce45
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/encode.go
@@ -0,0 +1,561 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+ indent int
+ doneInit bool
+}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ if e.indent == 0 {
+ e.indent = 4
+ }
+ e.emitter.best_indent = e.indent
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
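+
+// Note that the default indent is 4 spaces; callers such as Encoder.SetIndent
+// presumably adjust e.indent before the first event is emitted when a
+// different step is wanted.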
+
+func (e *encoder) finish() {
+ e.emitter.open_ended = false
+ yaml_stream_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ var node *Node
+ if in.IsValid() {
+ node, _ = in.Interface().(*Node)
+ }
+ if node != nil && node.Kind == DocumentNode {
+ e.nodev(in)
+ } else {
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ tag = shortTag(tag)
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ switch value := iface.(type) {
+ case *Node:
+ e.nodev(in)
+ return
+ case time.Time:
+ e.timev(tag, in)
+ return
+ case *time.Time:
+ e.timev(tag, in.Elem())
+ return
+ case time.Duration:
+ e.stringv(tag, reflect.ValueOf(value.String()))
+ return
+ case Marshaler:
+ v, err := value.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ e.marshal(tag, reflect.ValueOf(v))
+ return
+ case encoding.TextMarshaler:
+ text, err := value.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ e.marshal(tag, in.Elem())
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ e.marshal(tag, in.Elem())
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice, reflect.Array:
+ e.slicev(tag, in)
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ e.intv(tag, in)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
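+
+// As an example of the dispatch above, marshalling
+//
+//	struct {
+//		A int `yaml:"a"`
+//	}{1}
+//
+// flows through structv, mappingv and intv, and emits "a: 1".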
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = e.fieldByIndex(in, info.Inline)
+ if !value.IsValid() {
+ continue
+ }
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but such strings should be marshalled quoted
+// for the time being, for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
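+
+// For example, "1:20:30" (1*3600 + 20*60 + 30 in sexagesimal) matches, so
+// such strings are emitted double-quoted and YAML 1.1 parsers will not read
+// them back as floats.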
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings, so that the marshalled output remains valid for
+// YAML 1.1 parsers.
+func isOldBool(s string) (result bool) {
+ switch s {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+ "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ return true
+ default:
+ return false
+ }
+}
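+
+// Thus the plain Go string "on" is emitted in double quotes, since a YAML 1.1
+// parser would otherwise decode the bare scalar on as the boolean true.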
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+ }
+	// Note: it's possible for user code to emit invalid YAML if it
+	// explicitly specifies a tag and a string containing text that's
+	// incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ if e.flow {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else {
+ style = yaml_LITERAL_SCALAR_STYLE
+ }
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
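+
+// For example, the Go string "123" resolves to !!int when plain, so it is
+// emitted double-quoted to preserve its string type, while "hello" resolves
+// to !!str and stays plain.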
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+	// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+ implicit := tag == ""
+ if !implicit {
+ tag = longTag(tag)
+ }
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.event.head_comment = head
+ e.event.line_comment = line
+ e.event.foot_comment = foot
+ e.event.tail_comment = tail
+ e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+ e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+ // If the tag was not explicitly requested, and dropping it won't change the
+ // implicit tag of the value, don't include it in the presentation.
+ var tag = node.Tag
+ var stag = shortTag(tag)
+ var rtag string
+ var forceQuoting bool
+ if tag != "" && node.Style&TaggedStyle == 0 {
+ if node.Kind == ScalarNode {
+ if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+ tag = ""
+ } else {
+ rtag, _ = resolve("", node.Value)
+ if rtag == stag {
+ tag = ""
+ } else if stag == strTag {
+ tag = ""
+ forceQuoting = true
+ }
+ }
+ } else {
+ switch node.Kind {
+ case MappingNode:
+ rtag = mapTag
+ case SequenceNode:
+ rtag = seqTag
+ }
+ if rtag == stag {
+ tag = ""
+ }
+ }
+ }
+
+ switch node.Kind {
+ case DocumentNode:
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ yaml_document_end_event_initialize(&e.event, true)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case SequenceNode:
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case MappingNode:
+ style := yaml_BLOCK_MAPPING_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style)
+ e.event.tail_comment = []byte(tail)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+
+ // The tail logic below moves the foot comment of prior keys to the following key,
+ // since the value for each key may be a nested structure and the foot needs to be
+ // processed only after the entirety of the value is streamed. The last tail is
+ // processed with the mapping end event.
+ var tail string
+ for i := 0; i+1 < len(node.Content); i += 2 {
+ k := node.Content[i]
+ foot := k.FootComment
+ if foot != "" {
+ kopy := *k
+ kopy.FootComment = ""
+ k = &kopy
+ }
+ e.node(k, tail)
+ tail = foot
+
+ v := node.Content[i+1]
+ e.node(v, "")
+ }
+
+ yaml_mapping_end_event_initialize(&e.event)
+ e.event.tail_comment = []byte(tail)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case AliasNode:
+ yaml_alias_event_initialize(&e.event, []byte(node.Value))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case ScalarNode:
+ value := node.Value
+ if !utf8.ValidString(value) {
+ if tag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ value = encodeBase64(value)
+ }
+
+ style := yaml_PLAIN_SCALAR_STYLE
+ switch {
+ case node.Style&DoubleQuotedStyle != 0:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ case node.Style&SingleQuotedStyle != 0:
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ case node.Style&LiteralStyle != 0:
+ style = yaml_LITERAL_SCALAR_STYLE
+ case node.Style&FoldedStyle != 0:
+ style = yaml_FOLDED_SCALAR_STYLE
+ case strings.Contains(value, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case forceQuoting:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
+ }
+}
diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod
new file mode 100644
index 00000000000..f407ea3213e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/go.mod
@@ -0,0 +1,5 @@
+module "gopkg.in/yaml.v3"
+
+require (
+ "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
+)
diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go
new file mode 100644
index 00000000000..aea9050b833
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/parserc.go
@@ -0,0 +1,1229 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ token := &parser.tokens[parser.tokens_head]
+ yaml_parser_unfold_comments(parser, token)
+ return token
+ }
+ return nil
+}
+
+// yaml_parser_unfold_comments walks through the comments queue and joins all
+// comments at or before the position of the provided token into the respective
+// top-level comment slices in the parser.
+func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) {
+ for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index {
+ comment := &parser.comments[parser.comments_head]
+ if len(comment.head) > 0 {
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ // No heads on ends, so keep comment.head for a follow up token.
+ break
+ }
+ if len(parser.head_comment) > 0 {
+ parser.head_comment = append(parser.head_comment, '\n')
+ }
+ parser.head_comment = append(parser.head_comment, comment.head...)
+ }
+ if len(comment.foot) > 0 {
+ if len(parser.foot_comment) > 0 {
+ parser.foot_comment = append(parser.foot_comment, '\n')
+ }
+ parser.foot_comment = append(parser.foot_comment, comment.foot...)
+ }
+ if len(comment.line) > 0 {
+ if len(parser.line_comment) > 0 {
+ parser.line_comment = append(parser.line_comment, '\n')
+ }
+ parser.line_comment = append(parser.line_comment, comment.line...)
+ }
+ *comment = yaml_comment_t{}
+ parser.comments_head++
+ }
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ var head_comment []byte
+ if len(parser.head_comment) > 0 {
+ // [Go] Scan the header comment backwards, and if an empty line is found, break
+ // the header so that the part before the last empty line goes into the
+ // document header, while the remainder is carried into a follow-up event.
+ for i := len(parser.head_comment) - 1; i > 0; i-- {
+ if parser.head_comment[i] == '\n' {
+ if i == len(parser.head_comment)-1 {
+ head_comment = parser.head_comment[:i]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ } else if parser.head_comment[i-1] == '\n' {
+ head_comment = parser.head_comment[:i-1]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ }
+ }
+ }
+ }
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+
+ head_comment: head_comment,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ if len(event.head_comment) > 0 && len(event.foot_comment) == 0 {
+ event.foot_comment = event.head_comment
+ event.head_comment = nil
+ }
+ return true
+}
+
+func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) {
+ event.head_comment = parser.head_comment
+ event.line_comment = parser.line_comment
+ event.foot_comment = parser.foot_comment
+ parser.head_comment = nil
+ parser.line_comment = nil
+ parser.foot_comment = nil
+ parser.tail_comment = nil
+ parser.stem_comment = nil
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ if parser.stem_comment != nil {
+ event.head_comment = parser.stem_comment
+ parser.stem_comment = nil
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ prior_head := len(parser.head_comment)
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if prior_head > 0 && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ // [Go] It's a sequence under a sequence entry, so the former head comment
+ // is for the list itself, not the first list item under it.
+ parser.stem_comment = parser.head_comment[:prior_head]
+ if len(parser.head_comment) == prior_head {
+ parser.head_comment = nil
+ } else {
+ // Copy suffix to prevent very strange bugs if someone ever appends
+ // further bytes to the prefix in the stem_comment slice above.
+ parser.head_comment = append([]byte(nil), parser.head_comment[prior_head+1:]...)
+ }
+
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // [Go] A tail comment was left from the prior mapping value processed. Emit an event
+ // as it needs to be processed with that value and not the following key.
+ if len(parser.tail_comment) > 0 {
+ *event = yaml_event_t{
+ typ: yaml_TAIL_COMMENT_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ foot_comment: parser.tail_comment,
+ }
+ parser.tail_comment = nil
+ return true
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
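+
+// For example (editorial note): after a "%TAG !e! tag:example.com,2000:app/"
+// directive is appended here, yaml_parser_parse_node resolves a node tagged
+// !e!foo to "tag:example.com,2000:app/foo" by concatenating prefix and suffix.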
diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go
new file mode 100644
index 00000000000..b7de0a89c46
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/readerc.go
@@ -0,0 +1,434 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return 0.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we had enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
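+
+// For example (editorial note): input starting with "\xef\xbb\xbfkey: value"
+// is detected as UTF-8 and the three BOM bytes are skipped before scanning.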
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // [Go] This function was changed to guarantee the requested length size at EOF.
+ // The fact we need to do this is pretty awful, but the description above implies
+ // that is the expected behavior, and there are tests that rely on it.
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ // [Go] ACTUALLY! Read the documentation of this function above.
+ // This is just broken. To return true, we need to have the
+ // given length in the buffer. Not doing that means every single
+ // check that calls this function to make sure the buffer has a
+ // given length ends up, in Go, panicking, or, in C, accessing
+ // invalid memory.
+ //return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ // [Go] Read the documentation of this function above. To return true,
+ // we need to have the given length in the buffer. Not doing that means
+ // every single check that calls this function to make sure the buffer
+ // has a given length would end up, in Go, panicking, or, in C, accessing
+ // invalid memory. The padding is needed here because the EOF case above
+ // breaks out of the loop early.
+ for buffer_len < length {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go
new file mode 100644
index 00000000000..64ae888057a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/resolve.go
@@ -0,0 +1,326 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, boolTag, []string{"true", "True", "TRUE"}},
+ {false, boolTag, []string{"false", "False", "FALSE"}},
+ {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", mergeTag, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
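+
+// [Editor note, not upstream] The table maps a leading byte to a coarse hint:
+// resolveTable['-'] == 'S' (sign), resolveTable['7'] == 'D' (digit),
+// resolveTable['y'] == 'M' (candidate for the map above), and 0 for bytes
+// that can only begin a plain string.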
+
+const (
+ nullTag = "!!null"
+ boolTag = "!!bool"
+ strTag = "!!str"
+ intTag = "!!int"
+ floatTag = "!!float"
+ timestampTag = "!!timestamp"
+ seqTag = "!!seq"
+ mapTag = "!!map"
+ binaryTag = "!!binary"
+ mergeTag = "!!merge"
+)
+
+var longTags = make(map[string]string)
+var shortTags = make(map[string]string)
+
+func init() {
+ for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
+ ltag := longTag(stag)
+ longTags[stag] = ltag
+ shortTags[ltag] = stag
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ if strings.HasPrefix(tag, longTagPrefix) {
+ if stag, ok := shortTags[tag]; ok {
+ return stag
+ }
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ if ltag, ok := longTags[tag]; ok {
+ return ltag
+ }
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
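+
+// [Editor note, not upstream] The two helpers above are inverses for the
+// well-known tags and degrade gracefully for custom ones:
+//
+//	longTag("!!str")                     // "tag:yaml.org,2002:str"
+//	shortTag("tag:yaml.org,2002:str")    // "!!str"
+//	shortTag("tag:yaml.org,2002:custom") // "!!custom"
+//	longTag("!local")                    // "!local" (returned unchanged)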
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ tag = shortTag(tag)
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, strTag, binaryTag:
+ return
+ case floatTag:
+ if rtag == intTag {
+ switch v := out.(type) {
+ case int64:
+ rtag = floatTag
+ out = float64(v)
+ return
+ case int:
+ rtag = floatTag
+ out = float64(v)
+ return
+ }
+ }
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != strTag && tag != binaryTag {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == timestampTag {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return timestampTag, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ // Octals as introduced in version 1.2 of the spec.
+ // Octals from the 1.1 spec, spelled as 0777, are still
+ // decoded by default in v3 as well for compatibility.
+ // May be dropped in v4 depending on how usage evolves.
+ if strings.HasPrefix(plain, "0o") {
+ intv, err := strconv.ParseInt(plain[2:], 8, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0o") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ default:
+ panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return strTag, in
+}
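+
+// exampleResolve is an editor-added sketch, not part of upstream yaml.v3,
+// showing how resolve maps untagged plain scalars onto tags via the hint
+// table and the parsers above.
+func exampleResolve() {
+	tag, _ := resolve("", "42")    // intTag: strconv.ParseInt succeeds
+	tag, _ = resolve("", "0x2A")   // intTag: base prefixes are honoured (base 0)
+	tag, _ = resolve("", "3.14")   // floatTag: matches yamlStyleFloat
+	tag, _ = resolve("", ".inf")   // floatTag: found in resolveMap
+	tag, _ = resolve("", "True")   // boolTag: found in resolveMap
+	tag, _ = resolve("", "banana") // strTag: the fallthrough case
+	_ = tag
+}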
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
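+
+// [Editor note, not upstream] For example, a 100-byte input base64-encodes to
+// 136 bytes, so it is wrapped at the 70-column line length:
+//
+//	s := encodeBase64(strings.Repeat("x", 100))
+//	// s has a 70-character line and a 66-character line, each ending in '\n'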
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
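+
+// [Editor note, not upstream] A sketch of the accepted and rejected shapes:
+//
+//	parseTimestamp("2001-12-14")                   // ok: date only
+//	parseTimestamp("2001-12-14t21:59:43.10-05:00") // ok: lower-case "t" form
+//	parseTimestamp("2001-12-14 21:59:43.10 -5")    // not ok: see the note above
+//	parseTimestamp("14-12-2001")                   // not ok: must start "YYYY-"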
diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go
new file mode 100644
index 00000000000..57e954ca53d
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/scannerc.go
@@ -0,0 +1,3025 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, by contrast,
+// is a straightforward implementation of a recursive descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// There are really only two parts of Scanning that might be called "clever";
+// the rest is quite straightforward. Those parts are "block collection start"
+// and "simple keys", and both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes an indentation decrease that ends a block
+// collection (cf. the DEDENT token in Python). However, YAML has some syntax
+// peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// STREAM-END
+//
+
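+// [Editor note] A minimal sketch of driving the scanner, illustrative only
+// and not part of upstream yaml.v3. It assumes yaml_parser_initialize and
+// yaml_parser_set_input_string, defined elsewhere in this package, and it
+// drains tokens until STREAM-END:
+//
+//	func example_scan_all(input []byte) bool {
+//		var parser yaml_parser_t
+//		if !yaml_parser_initialize(&parser) {
+//			return false
+//		}
+//		yaml_parser_set_input_string(&parser, input)
+//		for {
+//			var token yaml_token_t
+//			if !yaml_parser_scan(&parser, &token) {
+//				return false // scanner error; details are in parser.problem
+//			}
+//			if token.typ == yaml_STREAM_END_TOKEN {
+//				return true
+//			}
+//		}
+//	}
+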
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ parser.newlines++
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ parser.newlines++
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.newlines++
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // [Go] The comment parsing logic requires a lookahead of two tokens
+		// so that foot comments may be parsed in time to associate them
+		// with the tokens that are parsed before them, and also for line
+		// comments to be transformed into head comments in some edge cases.
+ if parser.tokens_head < len(parser.tokens)-2 {
+ // If a potential simple key is at the head position, we need to fetch
+ // the next token to disambiguate it.
+ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+ if !ok {
+ break
+ } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+ return false
+ } else if !valid {
+ break
+ }
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ scan_mark := parser.mark
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // [Go] While unrolling indents, transform the head comments of prior
+ // indentation levels observed after scan_start into foot comments at
+ // the respective indexes.
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ comment_mark := parser.mark
+ if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') {
+ // Associate any following comments with the prior token.
+ comment_mark = parser.tokens[len(parser.tokens)-1].start_mark
+ }
+ defer func() {
+ if !ok {
+ return
+ }
+ if !yaml_parser_scan_line_comment(parser, comment_mark) {
+ ok = false
+ return
+ }
+ }()
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] TODO Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+	// If we haven't determined the token type by now, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+ if !simple_key.possible {
+ return false, true
+ }
+
+ // The 1.2 specification says:
+ //
+ // "If the ? indicator is omitted, parsing needs to see past the
+ // implicit key to recognize it as such. To limit the amount of
+ // lookahead required, the “:” indicator must appear at most 1024
+ // Unicode characters beyond the start of the key. In addition, the key
+ // is restricted to a single line."
+ //
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return false, yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ return false, true
+ }
+ return true, true
+}
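+
+// [Editor note, not upstream] For instance, while scanning
+//
+//	first line
+//	second: value
+//
+// the potential simple key saved at "first line" is discarded as soon as the
+// scanner's mark advances to the second line, since a simple key and its ':'
+// must sit on a single line and within 1024 characters of each other.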
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ }
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+ }
+ return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+ possible: false,
+ required: false,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ })
+
+ // Increase the flow level.
+ parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
+ return true
+}
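+
+// [Editor note, not upstream] The cap above turns pathological nesting, such
+// as a document made of thousands of '[' characters, into a clean scanner
+// error instead of letting the flow level and simple key stack grow without
+// bound:
+//
+//	input := bytes.Repeat([]byte{'['}, 20001)
+//	// scanning input now fails with "exceeded max depth of 10000"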
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
+ }
+ return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ block_mark := scan_mark
+ block_mark.index--
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+
+ // [Go] Reposition the end token before potential following
+ // foot comments of parent blocks. For that, search
+ // backwards for recent comments that were at the same
+ // indent as the block that is ending now.
+ stop_index := block_mark.index
+ for i := len(parser.comments) - 1; i >= 0; i-- {
+ comment := &parser.comments[i]
+
+ if comment.end_mark.index < stop_index {
+ // Don't go back beyond the start of the comment/whitespace scan, unless column < 0.
+ // If requested indent column is < 0, then the document is over and everything else
+ // is a foot anyway.
+ break
+ }
+ if comment.start_mark.column == parser.indent+1 {
+ // This is a good match. But maybe there's a former comment
+ // at that same indent level, so keep searching.
+ block_mark = comment.start_mark
+ }
+
+ // While the end of the former comment matches with
+ // the start of the following one, we know there's
+ // nothing in between and scanning is still safe.
+ stop_index = comment.scan_mark.index
+ }
+
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: block_mark,
+ end_mark: block_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ parser.simple_keys_by_tok = make(map[int]int)
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+	// Create the VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it, because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+ return false
+
+ } else if valid {
+
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+ delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ scan_mark := parser.mark
+
+	// Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if we just had a line comment under a sequence entry that
+ // looks more like a header to the following content. Similar to this:
+ //
+ // - # The comment
+ // - Some data
+ //
+ // If so, transform the line comment to a head comment and reposition.
+ if len(parser.comments) > 0 && len(parser.tokens) > 1 {
+ tokenA := parser.tokens[len(parser.tokens)-2]
+ tokenB := parser.tokens[len(parser.tokens)-1]
+ comment := &parser.comments[len(parser.comments)-1]
+ if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
+ // If it was in the prior line, reposition so it becomes a
+ // header of the follow up token. Otherwise, keep it in place
+ // so it becomes a header of the former.
+ comment.head = comment.line
+ comment.line = nil
+ if comment.start_mark.line == parser.mark.line-1 {
+ comment.token_mark = parser.mark
+ }
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ if !yaml_parser_scan_comments(parser, scan_mark) {
+ return false
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ // [Go] Discard this inline comment for the time being.
+ //if !yaml_parser_scan_line_comment(parser, start_mark) {
+ // return false
+ //}
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
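+//
+// For example (illustrative), scanning "%YAML 1.2" calls this function twice
+// and yields major == 1 and minor == 2; a component longer than
+// max_number_length digits is rejected as too long.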
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
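+// Scan an ANCHOR token ("&name") or an ALIAS token ("*name"); typ selects
+// which of the two is produced. For example (illustrative), scanning "&id "
+// yields a token whose value is "id".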
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ /*
+ * Check if length of the anchor is greater than 0 and it is followed by
+ * a whitespace character or one of the indicators:
+ *
+ * '?', ':', ',', ']', '}', '%', '@', '`'.
+ */
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
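+// A tag takes one of three shapes (examples are illustrative):
+//
+//     !<tag:yaml.org,2002:str>    verbatim: scanned as a bare suffix
+//     !!str                       handle "!!" plus suffix "str"
+//     !local                      handle "!" plus suffix "local"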
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be part of the URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] TODO Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
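+// For example (illustrative), the sequence "%C3%A9" decodes to the two
+// octets 0xC3 0xA9, i.e. the UTF-8 encoding of U+00E9 ('é').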
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
+
+// Scan a block scalar.
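+//
+// The '|' (literal) or '>' (folded) indicator may be followed by a chomping
+// indicator ('+' keeps trailing breaks, '-' strips them) and an explicit
+// indentation indicator (a digit 1-9), in either order. For example
+// (illustrative), ">-2" selects folded style, strip chomping, and a content
+// indentation of two columns beyond the current block indent.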
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ // TODO Test this and then re-enable it.
+ //if !yaml_parser_scan_line_comment(parser, start_mark) {
+ // return false
+ //}
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
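+//
+// In single-quoted scalars the only escape is '' for a literal quote; in
+// double-quoted scalars C-style escapes (\n, \t, ...) and Unicode escapes
+// (\xXX, \uXXXX, \UXXXXXXXX) are decoded into UTF-8. For example
+// (illustrative), the escape \u00e9 becomes the bytes 0xC3 0xA9.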
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexadecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is the first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan a plain scalar.
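+//
+// A plain (unquoted) scalar ends at a ':' followed by a blank, at a '#'
+// starting a comment, or, in flow context, at one of ',', '?', '[', ']',
+// '{' and '}'. In block context, dedenting below the scalar's indentation
+// level also terminates it.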
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for tab characters that abuse indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is the first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
+
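+// Scan an inline comment that sits on the same line as the token starting at
+// token_mark, recording its text so it can later be attached to that token
+// as its line comment.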
+func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool {
+ if parser.newlines > 0 {
+ return true
+ }
+
+ var start_mark yaml_mark_t
+ var text []byte
+
+ for peek := 0; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ if parser.buffer[parser.buffer_pos+peek] == '#' {
+ seen := parser.mark.index+peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else {
+ if parser.mark.index >= seen {
+ if len(text) == 0 {
+ start_mark = parser.mark
+ }
+ text = append(text, parser.buffer[parser.buffer_pos])
+ }
+ skip(parser)
+ }
+ }
+ }
+ break
+ }
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ token_mark: token_mark,
+ start_mark: start_mark,
+ line: text,
+ })
+ }
+ return true
+}
+
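+// Scan a run of comment lines beginning at scan_mark and classify the text
+// as a foot comment of the preceding content or a head comment of the
+// upcoming content, using blank lines and indentation as the separator cues.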
+func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool {
+ token := parser.tokens[len(parser.tokens)-1]
+
+ if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 {
+ token = parser.tokens[len(parser.tokens)-2]
+ }
+
+ var token_mark = token.start_mark
+ var start_mark yaml_mark_t
+
+ var recent_empty = false
+ var first_empty = parser.newlines <= 1
+
+ var line = parser.mark.line
+ var column = parser.mark.column
+
+ var text []byte
+
+ // The foot line is the place where a comment must start to
+ // still be considered a foot of the prior content.
+ // If there's some content in the currently parsed line, then
+ // the foot is the line below it.
+ var foot_line = -1
+ if scan_mark.line > 0 {
+ foot_line = parser.mark.line-parser.newlines+1
+ if parser.newlines == 0 && parser.mark.column > 1 {
+ foot_line++
+ }
+ }
+
+ var peek = 0
+ for ; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ column++
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ c := parser.buffer[parser.buffer_pos+peek]
+ if is_breakz(parser.buffer, parser.buffer_pos+peek) || parser.flow_level > 0 && (c == ']' || c == '}') {
+ // Got line break or terminator.
+ if !recent_empty {
+ if first_empty && (start_mark.line == foot_line || start_mark.column-1 < parser.indent) {
+ // This is the first empty line and there were no empty lines before,
+ // so this initial part of the comment is a foot of the prior token
+ // instead of being a head for the following one. Split it up.
+ if len(text) > 0 {
+ if start_mark.column-1 < parser.indent {
+ // If dedented it's unrelated to the prior token.
+ token_mark = start_mark
+ }
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+ } else {
+ if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 {
+ text = append(text, '\n')
+ }
+ }
+ }
+ if !is_break(parser.buffer, parser.buffer_pos+peek) {
+ break
+ }
+ first_empty = false
+ recent_empty = true
+ column = 0
+ line++
+ continue
+ }
+
+ if len(text) > 0 && column < parser.indent+1 && column != start_mark.column {
+ // The comment at the different indentation is a foot of the
+ // preceding data rather than a head of the upcoming one.
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+
+ if parser.buffer[parser.buffer_pos+peek] != '#' {
+ break
+ }
+
+ if len(text) == 0 {
+ start_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ } else {
+ text = append(text, '\n')
+ }
+
+ recent_empty = false
+
+ // Consume until after the consumed comment line.
+ seen := parser.mark.index+peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else {
+ if parser.mark.index >= seen {
+ text = append(text, parser.buffer[parser.buffer_pos])
+ }
+ skip(parser)
+ }
+ }
+
+ peek = 0
+ column = 0
+ line = parser.mark.line
+ }
+
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: start_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column},
+ head: text,
+ })
+ }
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go
new file mode 100644
index 00000000000..9210ece7e97
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/sorter.go
@@ -0,0 +1,134 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
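+
+// Less orders map keys for encoding: numbers and bools sort before strings,
+// and runs of digits embedded in strings compare numerically, so that
+// (illustratively) "item2" sorts before "item10".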
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ digits := false
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ digits = unicode.IsDigit(ar[i])
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ if digits {
+ return al
+ } else {
+ return bl
+ }
+ }
+ var ai, bi int
+ var an, bn int64
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v and reports whether
+// v is a number or a bool.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go
new file mode 100644
index 00000000000..b8a116bf9a2
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/writerc.go
@@ -0,0 +1,48 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go
new file mode 100644
index 00000000000..b5d35a50ded
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yaml.go
@@ -0,0 +1,662 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+ UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ parser *parser
+ knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
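+//
+// For example (illustrative), with known fields enforced a stray key makes
+// the decode fail with a *TypeError:
+//
+//     dec := yaml.NewDecoder(strings.NewReader("name: k6\nbogus: 1\n"))
+//     dec.KnownFields(true)
+//     var out struct{ Name string }
+//     err := dec.Decode(&out) // reports that field bogus is not found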
+func (dec *Decoder) KnownFields(enable bool) {
+ dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
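+//
+// A minimal sketch of draining a multi-document stream (illustrative):
+//
+//     dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\na: 2\n"))
+//     for {
+//         var m map[string]int
+//         if err := dec.Decode(&m); err == io.EOF {
+//             break
+//         } else if err != nil {
+//             log.Fatal(err)
+//         }
+//     }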
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ d.knownFields = dec.knownFields
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
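+//
+// For example (illustrative), re-decoding a parsed node into a Go value:
+//
+//     var n yaml.Node
+//     _ = yaml.Unmarshal([]byte("123"), &n)
+//     var i int
+//     _ = n.Decode(&i) // i == 123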
+func (n *Node) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ defer handleErr(&err)
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(n, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be included if that method returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
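+//
+// A short sketch (illustrative):
+//
+//     var buf bytes.Buffer
+//     enc := yaml.NewEncoder(&buf)
+//     enc.Encode(map[string]int{"a": 1})
+//     enc.Encode(map[string]int{"b": 2})
+//     enc.Close()
+//     // buf now holds "a: 1\n---\nb: 2\n"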
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+ if spaces < 0 {
+ panic("yaml: cannot indent to a negative number of spaces")
+ }
+ e.encoder.indent = spaces
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+type Kind uint32
+
+const (
+ DocumentNode Kind = 1 << iota
+ SequenceNode
+ MappingNode
+ ScalarNode
+ AliasNode
+)
+
+type Style uint32
+
+const (
+ TaggedStyle Style = 1 << iota
+ DoubleQuotedStyle
+ SingleQuotedStyle
+ LiteralStyle
+ FoldedStyle
+ FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+//     var person struct {
+//         Name    string
+//         Address yaml.Node
+//     }
+//     err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+//     var person Node
+//     err := yaml.Unmarshal(data, &person)
+//
+type Node struct {
+ // Kind defines whether the node is a document, a mapping, a sequence,
+ // a scalar value, or an alias to another node. The specific data type of
+ // scalar nodes may be obtained via the ShortTag and LongTag methods.
+ Kind Kind
+
+ // Style allows customizing the appearance of the node in the tree.
+ Style Style
+
+ // Tag holds the YAML tag defining the data type for the value.
+ // When decoding, this field will always be set to the resolved tag,
+ // even when it wasn't explicitly provided in the YAML content.
+ // When encoding, if this field is unset the value type will be
+ // implied from the node properties, and if it is set, it will only
+ // be serialized into the representation if TaggedStyle is used or
+ // the implicit tag diverges from the provided one.
+ Tag string
+
+ // Value holds the unescaped and unquoted representation of the value.
+ Value string
+
+ // Anchor holds the anchor name for this node, which allows aliases to point to it.
+ Anchor string
+
+ // Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+ Alias *Node
+
+ // Content holds contained nodes for documents, mappings, and sequences.
+ Content []*Node
+
+ // HeadComment holds any comments in the lines preceding the node and
+ // not separated by an empty line.
+ HeadComment string
+
+ // LineComment holds any comments at the end of the line where the node is.
+ LineComment string
+
+ // FootComment holds any comments following the node and before empty lines.
+ FootComment string
+
+ // Line and Column hold the node position in the decoded YAML text.
+ // These fields are not respected when encoding the node.
+ Line int
+ Column int
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+ return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+ if n.indicatedString() {
+ return strTag
+ }
+ if n.Tag == "" || n.Tag == "!" {
+ switch n.Kind {
+ case MappingNode:
+ return mapTag
+ case SequenceNode:
+ return seqTag
+ case AliasNode:
+ if n.Alias != nil {
+ return n.Alias.ShortTag()
+ }
+ case ScalarNode:
+ tag, _ := resolve("", n.Value)
+ return tag
+ }
+ return ""
+ }
+ return shortTag(n.Tag)
+}
+
+func (n *Node) indicatedString() bool {
+ return n.Kind == ScalarNode &&
+ (shortTag(n.Tag) == strTag ||
+ (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
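+//
+// For example (illustrative), a value containing a newline is given the
+// literal style:
+//
+//     var n yaml.Node
+//     n.SetString("line1\nline2")
+//     // n.Kind == ScalarNode, n.Tag == "!!str", n.Style == LiteralStyle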
+func (n *Node) SetString(s string) {
+ n.Kind = ScalarNode
+ if utf8.ValidString(s) {
+ n.Value = s
+ n.Tag = strTag
+ } else {
+ n.Value = encodeBase64(s)
+ n.Tag = binaryTag
+ }
+ if strings.Contains(n.Value, "\n") {
+ n.Style = LiteralStyle
+ }
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+
+ // InlineUnmarshalers holds indexes to inlined fields that
+ // contain unmarshaler values.
+ InlineUnmarshalers [][]int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+var unmarshalerType reflect.Type
+
+func init() {
+ var v Unmarshaler
+ unmarshalerType = reflect.ValueOf(&v).Elem().Type()
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ inlineUnmarshalers := [][]int(nil)
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+					return nil, fmt.Errorf("unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct, reflect.Ptr:
+ ftype := field.Type
+ for ftype.Kind() == reflect.Ptr {
+ ftype = ftype.Elem()
+ }
+ if ftype.Kind() != reflect.Struct {
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ if reflect.PtrTo(ftype).Implements(unmarshalerType) {
+ inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
+ } else {
+ sinfo, err := getStructInfo(ftype)
+ if err != nil {
+ return nil, err
+ }
+ for _, index := range sinfo.InlineUnmarshalers {
+ inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ }
+ default:
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ InlineUnmarshalers: inlineUnmarshalers,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
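+//
+// For example (hypothetical type), a time.Time field tagged omitempty is
+// omitted when its own IsZero method reports true, rather than by a
+// field-by-field zero check:
+//
+//	type Event struct {
+//		At time.Time `yaml:"at,omitempty"`
+//	}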
+type IsZeroer interface {
+ IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go
new file mode 100644
index 00000000000..2719cfbb0b7
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlh.go
@@ -0,0 +1,805 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
+
+ yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+	yaml_TAIL_COMMENT_EVENT   // A TAIL-COMMENT event.
+)
+
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+ yaml_TAIL_COMMENT_EVENT: "tail comment",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+	// The sequence parameters (for yaml_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than len(buffer) bytes into the
+// buffer and return the number of bytes written.
+//
+// parser: the parser whose input is being read, as configured by
+// yaml_parser_set_input().
+// buffer: the buffer to fill with data from the source.
+//
+// On success, the handler returns the number of bytes read and a nil error.
+// At the end of the input it reports io.EOF through the returned error, as
+// an io.Reader would.
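+//
+// A minimal conforming handler backed by an io.Reader might look like this
+// (a sketch; the handlers actually used live elsewhere in this package):
+//
+//	func reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+//		return parser.input_reader.Read(buffer)
+//	}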
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ newlines int // The number of line breaks since last non-break/non-blank character
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Comments
+
+ head_comment []byte // The current head comments
+ line_comment []byte // The current line comments
+ foot_comment []byte // The current foot comments
+ tail_comment []byte // Foot comment that happens at the end of a block.
+ stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
+
+ comments []yaml_comment_t // The folded comments for all parsed tokens
+ comments_head int
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+ simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+type yaml_comment_t struct {
+
+ scan_mark yaml_mark_t // Position where scanning for comments started
+ token_mark yaml_mark_t // Position after which tokens will be associated with this comment
+ start_mark yaml_mark_t // Position of '#' comment mark
+ end_mark yaml_mark_t // Position where comment terminated
+
+ head []byte
+ line []byte
+ foot []byte
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write all of the bytes in the
+// buffer to the output.
+//
+// emitter: the emitter whose output is being written, as configured by
+// yaml_emitter_set_output().
+// buffer: the buffer with the bytes to be written.
+//
+// On success, the handler returns a nil error; a non-nil error indicates
+// that the write failed.
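+//
+// A minimal conforming handler backed by an io.Writer might look like this
+// (a sketch; the handlers actually used live elsewhere in this package):
+//
+//	func writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+//		_, err := emitter.output_writer.Write(buffer)
+//		return err
+//	}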
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+	canonical   bool         // Is the output in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+	whitespace bool // Was the last character a whitespace?
+	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
+	open_ended bool // Is an explicit document end required?
+
+	space_above bool // Is there an empty line above?
+ foot_indent int // The indent used to write the foot comment above, or -1 if none.
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+		flow_plain_allowed    bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ // Dumper stuff
+
+	opened bool // Has the stream already been opened?
+	closed bool // Has the stream already been closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+		serialized bool // Has the node been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
new file mode 100644
index 00000000000..e88f9c54aec
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
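+	// (A factor of three is comfortably sufficient: a two-byte UTF-16 code
+	// unit decodes to at most three bytes of UTF-8.)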
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return (
+ // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return (
+ // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return (
+ // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
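+// For example, width(0xE2) == 3: a rune such as U+2028 (LINE SEPARATOR) is
+// encoded as the three bytes 0xE2 0x80 0xA8, and its lead byte matches 0xE0
+// under the 0xF0 mask.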
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+	return 0
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 35f4507579a..fd385eefbdd 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,4 +1,4 @@
-# github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4
+# github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c
## explicit
github.com/Azure/go-ntlmssp
# github.com/DataDog/datadog-go v0.0.0-20180330214955-e67964b4021a
@@ -8,7 +8,7 @@ github.com/DataDog/datadog-go/statsd
## explicit
github.com/GeertJohan/go.rice
github.com/GeertJohan/go.rice/embedded
-# github.com/PuerkitoBio/goquery v1.3.0
+# github.com/PuerkitoBio/goquery v1.6.1
## explicit
github.com/PuerkitoBio/goquery
# github.com/Shopify/sarama v1.16.0
@@ -19,11 +19,10 @@ github.com/Shopify/sarama
# github.com/Soontao/goHttpDigestClient v0.0.0-20170320082612-6d28bb1415c5
## explicit
github.com/Soontao/goHttpDigestClient
-# github.com/andybalholm/brotli v0.0.0-20190704151324-71eb68cc467c
+# github.com/andybalholm/brotli v1.0.1
## explicit
github.com/andybalholm/brotli
-# github.com/andybalholm/cascadia v1.0.0
-## explicit
+# github.com/andybalholm/cascadia v1.1.0
github.com/andybalholm/cascadia
# github.com/daaku/go.zipexe v0.0.0-20150329023125-a5fe2436ffcb
## explicit
@@ -77,9 +76,6 @@ github.com/gin-gonic/gin/render
## explicit
github.com/go-sourcemap/sourcemap
github.com/go-sourcemap/sourcemap/internal/base64vlq
-# github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721
-github.com/golang/gddo/httputil
-github.com/golang/gddo/httputil/header
# github.com/golang/protobuf v1.4.2
## explicit
github.com/golang/protobuf/jsonpb
@@ -136,17 +132,13 @@ github.com/kardianos/osext
# github.com/kelseyhightower/envconfig v1.4.0
## explicit
github.com/kelseyhightower/envconfig
-# github.com/klauspost/compress v1.7.2
+# github.com/klauspost/compress v1.11.13
## explicit
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
github.com/klauspost/compress/snappy
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/klauspost/cpuid v1.3.1
-## explicit
-# github.com/konsorten/go-windows-terminal-sequences v1.0.3
-github.com/konsorten/go-windows-terminal-sequences
# github.com/kubernetes/helm v2.9.0+incompatible
## explicit
github.com/kubernetes/helm/pkg/strvals
@@ -157,7 +149,7 @@ github.com/labstack/echo
## explicit
github.com/labstack/gommon/color
github.com/labstack/gommon/log
-# github.com/mailru/easyjson v0.7.4-0.20200812114229-8ab5ff9cd8e4
+# github.com/mailru/easyjson v0.7.7
## explicit
github.com/mailru/easyjson
github.com/mailru/easyjson/buffer
@@ -187,7 +179,7 @@ github.com/mitchellh/mapstructure
github.com/nu7hatch/gouuid
# github.com/onsi/ginkgo v1.14.0
## explicit
-# github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2
+# github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c
## explicit
github.com/oxtoacart/bpool
# github.com/pierrec/lz4 v1.0.2-0.20171218195038-2fcda4cb7018
@@ -205,10 +197,10 @@ github.com/pmezard/go-difflib/difflib
# github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165
## explicit
github.com/rcrowley/go-metrics
-# github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516
+# github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e
## explicit
github.com/serenize/snaker
-# github.com/sirupsen/logrus v1.6.0
+# github.com/sirupsen/logrus v1.8.1
## explicit
github.com/sirupsen/logrus
github.com/sirupsen/logrus/hooks/test
@@ -222,16 +214,16 @@ github.com/spf13/cobra
# github.com/spf13/pflag v1.0.1
## explicit
github.com/spf13/pflag
-# github.com/stretchr/testify v1.5.1
+# github.com/stretchr/testify v1.7.0
## explicit
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
-# github.com/tidwall/gjson v1.6.1
+# github.com/tidwall/gjson v1.7.4
## explicit
github.com/tidwall/gjson
-# github.com/tidwall/match v1.0.1
+# github.com/tidwall/match v1.0.3
github.com/tidwall/match
-# github.com/tidwall/pretty v1.0.2
+# github.com/tidwall/pretty v1.1.0
## explicit
github.com/tidwall/pretty
# github.com/ugorji/go v1.1.7
@@ -402,3 +394,5 @@ gopkg.in/guregu/null.v3
# gopkg.in/yaml.v2 v2.3.0
## explicit
gopkg.in/yaml.v2
+# gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
+gopkg.in/yaml.v3