From e6d917df78b04cfd1d914e321b5aa947053e13e8 Mon Sep 17 00:00:00 2001 From: Craig Peterson Date: Fri, 2 Oct 2015 13:37:44 -0600 Subject: [PATCH 1/2] Updating a lot of dependencies --- .../p/draw2d/draw2d/advanced_path.go | 42 -- .../code.google.com/p/draw2d/draw2d/arc.go | 67 --- .../code.google.com/p/draw2d/draw2d/curves.go | 336 ------------ .../p/draw2d/draw2d/demux_converter.go | 23 - .../code.google.com/p/draw2d/draw2d/doc.go | 5 - .../code.google.com/p/draw2d/draw2d/gc.go | 55 -- .../code.google.com/p/draw2d/draw2d/image.go | 359 ------------- .../code.google.com/p/draw2d/draw2d/math.go | 52 -- .../code.google.com/p/draw2d/draw2d/paint.go | 92 ---- .../code.google.com/p/draw2d/draw2d/path.go | 27 - .../p/draw2d/draw2d/path_adder.go | 70 --- .../p/draw2d/draw2d/path_converter.go | 173 ------ .../p/draw2d/draw2d/path_storage.go | 190 ------- .../p/draw2d/draw2d/stroker.go | 135 ----- .../p/draw2d/draw2d/transform.go | 306 ----------- .../p/draw2d/draw2d/vertex2d.go | 19 - .../p/graphics-go/graphics/Makefile | 15 - .../p/graphics-go/graphics/affine.go | 174 ------ .../p/graphics-go/graphics/blur.go | 68 --- .../p/graphics-go/graphics/blur_test.go | 207 -------- .../p/graphics-go/graphics/convolve/Makefile | 11 - .../graphics-go/graphics/convolve/convolve.go | 274 ---------- .../graphics/convolve/convolve_test.go | 78 --- .../p/graphics-go/graphics/interp/Makefile | 13 - .../p/graphics-go/graphics/interp/bilinear.go | 206 -------- .../graphics/interp/bilinear_test.go | 143 ----- .../p/graphics-go/graphics/interp/doc.go | 25 - .../p/graphics-go/graphics/interp/interp.go | 29 - .../p/graphics-go/graphics/rotate.go | 35 -- .../p/graphics-go/graphics/rotate_test.go | 169 ------ .../p/graphics-go/graphics/scale.go | 31 -- .../p/graphics-go/graphics/scale_test.go | 153 ------ .../p/graphics-go/graphics/shared_test.go | 69 --- .../p/graphics-go/graphics/thumbnail.go | 41 -- .../p/graphics-go/graphics/thumbnail_test.go | 53 -- .../github.com/PuerkitoBio/goquery/array.go | 2 +- .../github.com/PuerkitoBio/goquery/expand.go | 2 +- .../github.com/PuerkitoBio/goquery/filter.go | 2 +- .../PuerkitoBio/goquery/iteration_test.go | 2 +- .../PuerkitoBio/goquery/manipulation.go | 2 +- .../PuerkitoBio/goquery/property.go | 2 +- .../github.com/PuerkitoBio/goquery/query.go | 2 +- .../PuerkitoBio/goquery/traversal.go | 2 +- .../github.com/PuerkitoBio/goquery/type.go | 8 +- .../PuerkitoBio/goquery/type_test.go | 2 +- .../PuerkitoBio/goquery/utilities.go | 2 +- .../StackExchange/httpunit/httpunit.go | 26 +- .../andybalholm/cascadia/benchmark_test.go | 2 +- .../github.com/andybalholm/cascadia/parser.go | 2 +- .../andybalholm/cascadia/selector.go | 2 +- .../andybalholm/cascadia/selector_test.go | 2 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/ec2/api.go | 162 +++++- .../aws-sdk-go/service/ec2/examples_test.go | 22 + .../aws/aws-sdk-go/service/ec2/service.go | 2 +- .../aymerick/douceur/css/declaration.go | 10 +- .../github.com/aymerick/douceur/css/rule.go | 31 +- .../aymerick/douceur/css/stylesheet.go | 4 +- .../aymerick/douceur/inliner/element.go | 17 +- .../aymerick/douceur/inliner/inliner.go | 30 +- .../douceur/inliner/style_declaration.go | 4 +- .../aymerick/douceur/inliner/style_rule.go | 18 +- .../aymerick/douceur/parser/parser.go | 51 +- .../aymerick/douceur/parser/parser_test.go | 122 ++--- _third_party/github.com/boltdb/bolt/README.md | 4 + .../github.com/boltdb/bolt/bolt_unix.go | 2 +- .../boltdb/bolt/bolt_unix_solaris.go | 2 +- 
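Reviewer note: everything under _third_party/ is vendored source imported through the bosun.org/_third_party prefix (visible in the import blocks of the deleted files further down), so a dependency bump surfaces as plain file churn in this stat list. A minimal sketch of how a vendored package is referenced; the two blank imports are illustrative picks from the list above:

package main

import (
    // Vendored import paths mirror the upstream paths under the
    // bosun.org/_third_party prefix.
    _ "bosun.org/_third_party/github.com/boltdb/bolt"
    _ "bosun.org/_third_party/github.com/gorilla/mux"
)

func main() {}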
_third_party/github.com/boltdb/bolt/bucket.go | 3 +- .../github.com/boltdb/bolt/db_test.go | 10 + _third_party/github.com/boltdb/bolt/tx.go | 8 +- .../github.com/garyburd/redigo/redis/conn.go | 4 +- .../github.com/garyburd/redigo/redis/pool.go | 7 +- .../github.com/garyburd/redigo/redis/reply.go | 4 +- .../github.com/garyburd/redigo/redis/scan.go | 7 + .../garyburd/redigo/redis/scan_test.go | 1 + .../github.com/go-ole/go-ole/ChangeLog.md | 19 +- .../github.com/go-ole/go-ole/README.md | 3 +- .../github.com/go-ole/go-ole/appveyor.yml | 40 +- _third_party/github.com/go-ole/go-ole/com.go | 120 ++++- .../github.com/go-ole/go-ole/com_func.go | 35 +- .../github.com/go-ole/go-ole/com_func_test.go | 10 + .../github.com/go-ole/go-ole/error_func.go | 1 + .../github.com/go-ole/go-ole/error_windows.go | 1 + _third_party/github.com/go-ole/go-ole/guid.go | 9 +- .../go-ole/go-ole/idispatch_windows.go | 4 + .../github.com/go-ole/go-ole/iunknown.go | 14 + .../github.com/go-ole/go-ole/iunknown_func.go | 4 + .../go-ole/go-ole/iunknown_windows.go | 14 + _third_party/github.com/go-ole/go-ole/ole.go | 1 + .../go-ole/go-ole/safearray_func.go | 93 ++++ .../go-ole/go-ole/safearray_test.go | 4 +- .../go-ole/go-ole/safearray_windows.go | 105 +++- .../go-ole/go-ole/safearrayslices.go | 17 +- .../github.com/go-ole/go-ole/variant.go | 10 +- .../github.com/go-ole/go-ole/winrt_doc.go | 3 + .../gogo/protobuf/proto/all_test.go | 52 ++ .../github.com/gogo/protobuf/proto/clone.go | 11 + .../gogo/protobuf/proto/clone_test.go | 22 + .../github.com/gogo/protobuf/proto/decode.go | 40 +- .../github.com/gogo/protobuf/proto/encode.go | 57 +- .../github.com/gogo/protobuf/proto/equal.go | 11 + .../gogo/protobuf/proto/equal_test.go | 18 + .../github.com/gogo/protobuf/proto/lib.go | 72 ++- .../gogo/protobuf/proto/properties.go | 65 ++- .../gogo/protobuf/proto/size_test.go | 5 + .../github.com/gogo/protobuf/proto/text.go | 61 ++- .../gogo/protobuf/proto/text_parser.go | 176 ++++--- .../gogo/protobuf/proto/text_parser_test.go | 12 + .../gogo/protobuf/proto/text_test.go | 24 + .../github.com/golang/freetype/AUTHORS | 18 + .../github.com/golang/freetype/CONTRIBUTORS | 36 ++ .../github.com/golang/freetype/LICENSE | 12 + .../github.com/golang/freetype/README | 21 + .../golang}/freetype/freetype.go | 162 +++--- .../golang}/freetype/freetype_test.go | 8 +- .../golang}/freetype/raster/geom.go | 179 +++---- .../golang}/freetype/raster/paint.go | 75 ++- .../golang}/freetype/raster/raster.go | 214 ++++---- .../golang}/freetype/raster/stroke.go | 207 ++++---- .../golang/freetype/truetype/face.go | 495 ++++++++++++++++++ .../golang/freetype/truetype/face_test.go | 48 ++ .../golang}/freetype/truetype/glyph.go | 281 +++++----- .../golang}/freetype/truetype/hint.go | 183 ++++--- .../golang}/freetype/truetype/hint_test.go | 4 +- .../golang}/freetype/truetype/opcodes.go | 0 .../golang}/freetype/truetype/truetype.go | 243 ++++++--- .../freetype/truetype/truetype_test.go | 136 +++-- _third_party/github.com/gorilla/mux/README.md | 232 +++++++- _third_party/github.com/gorilla/mux/doc.go | 12 +- _third_party/github.com/gorilla/mux/mux.go | 4 + .../github.com/gorilla/mux/mux_test.go | 139 +++++ .../github.com/gorilla/mux/old_test.go | 6 +- _third_party/github.com/gorilla/mux/regexp.go | 40 +- _third_party/github.com/gorilla/mux/route.go | 14 +- .../go-msgpack/codec/ext_dep_test.go | 2 +- .../github.com/hashicorp/raft/config.go | 9 + .../github.com/hashicorp/raft/integ_test.go | 2 +- .../github.com/hashicorp/raft/raft.go | 23 + 
.../github.com/hashicorp/raft/raft_test.go | 212 ++++++-- .../github.com/jordan-wright/email/README.md | 11 +- .../jordan-wright/email/email_test.go | 90 +++- .../github.com/llgcode/draw2d/AUTHORS | 2 + .../github.com/llgcode/draw2d/LICENSE | 18 + .../github.com/llgcode/draw2d/README.md | 124 +++++ .../github.com/llgcode/draw2d/draw2d.go | 228 ++++++++ .../llgcode/draw2d/draw2dbase/README.md | 7 + .../llgcode/draw2d/draw2dbase/curve.go | 161 ++++++ .../llgcode/draw2d/draw2dbase/curve_test.go | 134 +++++ .../llgcode/draw2d/draw2dbase}/dasher.go | 61 ++- .../draw2d/draw2dbase/demux_flattener.go | 35 ++ .../llgcode/draw2d/draw2dbase/flattener.go | 127 +++++ .../llgcode/draw2d/draw2dbase/line.go | 58 ++ .../llgcode/draw2d/draw2dbase}/stack_gc.go | 115 ++-- .../llgcode/draw2d/draw2dbase/stroker.go | 90 ++++ .../llgcode/draw2d/draw2dimg/README.md | 8 + .../llgcode/draw2d/draw2dimg/fileutil.go | 46 ++ .../llgcode/draw2d/draw2dimg/ftgc.go | 328 ++++++++++++ .../llgcode/draw2d/draw2dimg/ftpath.go | 30 ++ .../draw2d/draw2dimg}/rgba_interpolation.go | 27 +- .../llgcode/draw2d/draw2dimg/text.go | 82 +++ .../llgcode}/draw2d/font.go | 23 +- _third_party/github.com/llgcode/draw2d/gc.go | 63 +++ .../github.com/llgcode/draw2d/matrix.go | 222 ++++++++ .../github.com/llgcode/draw2d/path.go | 189 +++++++ .../github.com/llgcode/draw2d/samples_test.go | 60 +++ _third_party/github.com/llgcode/draw2d/test | 8 + .../github.com/llgcode/draw2d/test_test.go | 30 ++ .../github.com/olivere/elastic/CONTRIBUTORS | 4 + .../github.com/olivere/elastic/README.md | 14 +- .../github.com/olivere/elastic/client.go | 34 +- .../github.com/olivere/elastic/client_test.go | 17 +- .../github.com/olivere/elastic/errors.go | 7 +- .../github.com/olivere/elastic/errors_test.go | 29 + .../github.com/olivere/elastic/exists.go | 153 +++++- .../github.com/olivere/elastic/index_test.go | 5 + .../github.com/olivere/elastic/reindexer.go | 16 +- .../olivere/elastic/reindexer_test.go | 4 + .../github.com/olivere/elastic/scan.go | 127 +++-- .../github.com/olivere/elastic/scan_test.go | 188 ++++++- .../github.com/olivere/elastic/search.go | 34 +- .../olivere/elastic/search_facets_test.go | 59 ++- .../olivere/elastic/search_filters_range.go | 12 + .../elastic/search_filters_range_test.go | 16 + .../elastic/search_queries_more_like_this.go | 287 ++++++++-- .../search_queries_more_like_this_test.go | 48 +- .../olivere/elastic/search_queries_range.go | 18 +- .../elastic/search_queries_range_test.go | 16 + .../olivere/elastic/search_source.go | 59 ++- .../elastic/suggester_completion_fuzzy.go | 16 +- .../suggester_completion_fuzzy_test.go | 4 +- .../siddontang/go/bson/bson_test.go | 4 +- .../tatsushid/go-fastping/fastping.go | 52 +- .../tatsushid/go-fastping/fastping_test.go | 134 ++++- .../github.com/ugorji/go/codec/binc.go | 5 + .../github.com/ugorji/go/codec/cbor.go | 15 +- .../github.com/ugorji/go/codec/gen.go | 67 ++- .../github.com/ugorji/go/codec/helper.go | 75 ++- .../github.com/ugorji/go/codec/json.go | 33 +- .../github.com/ugorji/go/codec/msgpack.go | 5 + .../github.com/ugorji/go/codec/simple.go | 9 +- .../github.com/ugorji/go/codec/tests.sh | 15 +- .../github.com/vdobler/chart/imgg/image.go | 150 +++--- .../github.com/vdobler/chart/style.go | 8 +- .../golang.org/x/crypto/bcrypt/bcrypt.go | 2 +- .../golang.org/x/net/html/example_test.go | 2 +- _third_party/golang.org/x/net/html/node.go | 2 +- _third_party/golang.org/x/net/html/parse.go | 2 +- .../golang.org/x/net/html/parse_test.go | 2 +- _third_party/golang.org/x/net/html/token.go | 2 +- 
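Reviewer note: the bulk of this patch is a migration off code.google.com hosting: code.google.com/p/draw2d/draw2d becomes github.com/llgcode/draw2d (split into draw2dbase and draw2dimg subpackages) and code.google.com/p/freetype-go becomes github.com/golang/freetype, per the rename entries later in this stat section. A sketch of the resulting import rewrite, paths taken from those renames:

package main

// Import-path mapping introduced by this patch (from the rename list):
//
//  code.google.com/p/draw2d/draw2d   -> github.com/llgcode/draw2d (+ draw2dbase, draw2dimg)
//  code.google.com/p/freetype-go/... -> github.com/golang/freetype/...
import (
    _ "bosun.org/_third_party/github.com/golang/freetype/raster"
    _ "bosun.org/_third_party/github.com/llgcode/draw2d"
)

func main() {}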
.../golang.org/x/net/icmp/endpoint.go | 25 +- .../golang.org/x/net/icmp/example_test.go | 17 +- .../golang.org/x/net/icmp/extension_test.go | 2 +- .../golang.org/x/net/icmp/interface.go | 2 +- _third_party/golang.org/x/net/icmp/ipv4.go | 2 +- .../golang.org/x/net/icmp/ipv4_test.go | 2 +- _third_party/golang.org/x/net/icmp/ipv6.go | 2 +- .../golang.org/x/net/icmp/listen_posix.go | 10 +- _third_party/golang.org/x/net/icmp/message.go | 6 +- .../golang.org/x/net/icmp/message_test.go | 8 +- .../golang.org/x/net/icmp/multipart.go | 2 +- .../golang.org/x/net/icmp/multipart_test.go | 8 +- .../golang.org/x/net/icmp/paramprob.go | 2 +- .../golang.org/x/net/icmp/ping_test.go | 12 +- .../golang.org/x/net/ipv4/control_bsd.go | 2 +- .../golang.org/x/net/ipv4/control_pktinfo.go | 2 +- .../golang.org/x/net/ipv4/control_unix.go | 2 +- _third_party/golang.org/x/net/ipv4/doc.go | 4 +- .../golang.org/x/net/ipv4/example_test.go | 17 +- _third_party/golang.org/x/net/ipv4/icmp.go | 2 +- .../golang.org/x/net/ipv4/icmp_test.go | 4 +- .../golang.org/x/net/ipv4/multicast_test.go | 8 +- .../x/net/ipv4/multicastlistener_test.go | 4 +- .../x/net/ipv4/multicastsockopt_test.go | 4 +- .../golang.org/x/net/ipv4/readwrite_test.go | 4 +- .../x/net/ipv4/sockopt_asmreq_unix.go | 2 +- .../x/net/ipv4/sockopt_asmreq_windows.go | 2 +- .../x/net/ipv4/sockopt_asmreqn_unix.go | 2 +- .../x/net/ipv4/sockopt_ssmreq_unix.go | 2 +- .../golang.org/x/net/ipv4/sockopt_unix.go | 2 +- .../golang.org/x/net/ipv4/sockopt_windows.go | 2 +- .../golang.org/x/net/ipv4/unicast_test.go | 8 +- .../x/net/ipv4/unicastsockopt_test.go | 6 +- .../x/net/ipv6/control_rfc2292_unix.go | 2 +- .../x/net/ipv6/control_rfc3542_unix.go | 2 +- .../golang.org/x/net/ipv6/control_unix.go | 2 +- _third_party/golang.org/x/net/ipv6/doc.go | 4 +- .../golang.org/x/net/ipv6/example_test.go | 15 +- .../golang.org/x/net/ipv6/header_test.go | 4 +- _third_party/golang.org/x/net/ipv6/icmp.go | 2 +- .../golang.org/x/net/ipv6/icmp_test.go | 4 +- .../golang.org/x/net/ipv6/multicast_test.go | 8 +- .../x/net/ipv6/multicastlistener_test.go | 4 +- .../x/net/ipv6/multicastsockopt_test.go | 4 +- .../golang.org/x/net/ipv6/readwrite_test.go | 6 +- .../golang.org/x/net/ipv6/sockopt_test.go | 6 +- _third_party/golang.org/x/net/ipv6/sys_bsd.go | 2 +- .../golang.org/x/net/ipv6/sys_darwin.go | 2 +- .../golang.org/x/net/ipv6/sys_freebsd.go | 2 +- .../golang.org/x/net/ipv6/sys_linux.go | 2 +- .../golang.org/x/net/ipv6/sys_windows.go | 2 +- .../golang.org/x/net/ipv6/unicast_test.go | 8 +- .../x/net/ipv6/unicastsockopt_test.go | 6 +- .../golang.org/x/sys/unix/creds_test.go | 2 +- .../golang.org/x/sys/unix/mmap_unix_test.go | 2 +- .../golang.org/x/sys/unix/syscall_bsd_test.go | 2 +- .../golang.org/x/sys/unix/syscall_test.go | 2 +- .../x/sys/unix/syscall_unix_test.go | 2 +- .../x/sys/windows/registry/registry_test.go | 74 ++- .../x/sys/windows/registry/syscall.go | 5 + .../x/sys/windows/registry/value.go | 66 ++- .../sys/windows/registry/zsyscall_windows.go | 9 + .../x/sys/windows/svc/debug/service.go | 2 +- .../golang.org/x/sys/windows/svc/event.go | 2 +- .../x/sys/windows/svc/eventlog/install.go | 4 +- .../x/sys/windows/svc/eventlog/log.go | 2 +- .../x/sys/windows/svc/eventlog/log_test.go | 2 +- .../x/sys/windows/svc/mgr/config.go | 2 +- .../golang.org/x/sys/windows/svc/mgr/mgr.go | 2 +- .../x/sys/windows/svc/mgr/mgr_test.go | 2 +- .../x/sys/windows/svc/mgr/service.go | 4 +- .../golang.org/x/sys/windows/svc/security.go | 2 +- .../golang.org/x/sys/windows/svc/service.go | 2 +- 
.../golang.org/x/sys/windows/svc/svc_test.go | 4 +- .../golang.org/x/sys/windows/syscall_test.go | 2 +- .../x/sys/windows/syscall_windows_test.go | 2 +- _third_party/gopkg.in/yaml.v1/decode_test.go | 4 +- _third_party/gopkg.in/yaml.v1/encode_test.go | 4 +- _third_party/gopkg.in/yaml.v1/suite_test.go | 2 +- 288 files changed, 7492 insertions(+), 5594 deletions(-) delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/advanced_path.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/arc.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/curves.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/demux_converter.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/doc.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/gc.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/image.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/math.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/paint.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/path.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/path_adder.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/path_converter.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/path_storage.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/stroker.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/transform.go delete mode 100644 _third_party/code.google.com/p/draw2d/draw2d/vertex2d.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/Makefile delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/affine.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/blur.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/blur_test.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/convolve/Makefile delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/convolve/convolve.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/convolve/convolve_test.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/interp/Makefile delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/interp/bilinear.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/interp/bilinear_test.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/interp/doc.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/interp/interp.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/rotate.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/rotate_test.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/scale.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/scale_test.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/shared_test.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/thumbnail.go delete mode 100644 _third_party/code.google.com/p/graphics-go/graphics/thumbnail_test.go create mode 100644 _third_party/github.com/golang/freetype/AUTHORS create mode 100644 _third_party/github.com/golang/freetype/CONTRIBUTORS create mode 100644 _third_party/github.com/golang/freetype/LICENSE create mode 100644 _third_party/github.com/golang/freetype/README rename _third_party/{code.google.com/p/freetype-go => 
github.com/golang}/freetype/freetype.go (67%) rename _third_party/{code.google.com/p/freetype-go => github.com/golang}/freetype/freetype_test.go (88%) rename _third_party/{code.google.com/p/freetype-go => github.com/golang}/freetype/raster/geom.go (51%) rename _third_party/{code.google.com/p/freetype-go => github.com/golang}/freetype/raster/paint.go (80%) rename _third_party/{code.google.com/p/freetype-go => github.com/golang}/freetype/raster/raster.go (71%) rename _third_party/{code.google.com/p/freetype-go => github.com/golang}/freetype/raster/stroke.go (70%) create mode 100644 _third_party/github.com/golang/freetype/truetype/face.go create mode 100644 _third_party/github.com/golang/freetype/truetype/face_test.go rename _third_party/{code.google.com/p/freetype-go => github.com/golang}/freetype/truetype/glyph.go (63%) rename _third_party/{code.google.com/p/freetype-go => github.com/golang}/freetype/truetype/hint.go (89%) rename _third_party/{code.google.com/p/freetype-go => github.com/golang}/freetype/truetype/hint_test.go (99%) rename _third_party/{code.google.com/p/freetype-go => github.com/golang}/freetype/truetype/opcodes.go (100%) rename _third_party/{code.google.com/p/freetype-go => github.com/golang}/freetype/truetype/truetype.go (70%) rename _third_party/{code.google.com/p/freetype-go => github.com/golang}/freetype/truetype/truetype_test.go (72%) create mode 100644 _third_party/github.com/llgcode/draw2d/AUTHORS create mode 100644 _third_party/github.com/llgcode/draw2d/LICENSE create mode 100644 _third_party/github.com/llgcode/draw2d/README.md create mode 100644 _third_party/github.com/llgcode/draw2d/draw2d.go create mode 100644 _third_party/github.com/llgcode/draw2d/draw2dbase/README.md create mode 100644 _third_party/github.com/llgcode/draw2d/draw2dbase/curve.go create mode 100644 _third_party/github.com/llgcode/draw2d/draw2dbase/curve_test.go rename _third_party/{code.google.com/p/draw2d/draw2d => github.com/llgcode/draw2d/draw2dbase}/dasher.go (59%) create mode 100644 _third_party/github.com/llgcode/draw2d/draw2dbase/demux_flattener.go create mode 100644 _third_party/github.com/llgcode/draw2d/draw2dbase/flattener.go create mode 100644 _third_party/github.com/llgcode/draw2d/draw2dbase/line.go rename _third_party/{code.google.com/p/draw2d/draw2d => github.com/llgcode/draw2d/draw2dbase}/stack_gc.go (54%) create mode 100644 _third_party/github.com/llgcode/draw2d/draw2dbase/stroker.go create mode 100644 _third_party/github.com/llgcode/draw2d/draw2dimg/README.md create mode 100644 _third_party/github.com/llgcode/draw2d/draw2dimg/fileutil.go create mode 100644 _third_party/github.com/llgcode/draw2d/draw2dimg/ftgc.go create mode 100644 _third_party/github.com/llgcode/draw2d/draw2dimg/ftpath.go rename _third_party/{code.google.com/p/draw2d/draw2d => github.com/llgcode/draw2d/draw2dimg}/rgba_interpolation.go (86%) create mode 100644 _third_party/github.com/llgcode/draw2d/draw2dimg/text.go rename _third_party/{code.google.com/p/draw2d => github.com/llgcode}/draw2d/font.go (75%) create mode 100644 _third_party/github.com/llgcode/draw2d/gc.go create mode 100644 _third_party/github.com/llgcode/draw2d/matrix.go create mode 100644 _third_party/github.com/llgcode/draw2d/path.go create mode 100644 _third_party/github.com/llgcode/draw2d/samples_test.go create mode 100755 _third_party/github.com/llgcode/draw2d/test create mode 100644 _third_party/github.com/llgcode/draw2d/test_test.go diff --git a/_third_party/code.google.com/p/draw2d/draw2d/advanced_path.go 
b/_third_party/code.google.com/p/draw2d/draw2d/advanced_path.go deleted file mode 100644 index 68f1d782b7..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/advanced_path.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. -// created: 13/12/2010 by Laurent Le Goff - -package draw2d - -import ( - "math" -) - -//high level path creation - -func Rect(path Path, x1, y1, x2, y2 float64) { - path.MoveTo(x1, y1) - path.LineTo(x2, y1) - path.LineTo(x2, y2) - path.LineTo(x1, y2) - path.Close() -} - -func RoundRect(path Path, x1, y1, x2, y2, arcWidth, arcHeight float64) { - arcWidth = arcWidth / 2 - arcHeight = arcHeight / 2 - path.MoveTo(x1, y1+arcHeight) - path.QuadCurveTo(x1, y1, x1+arcWidth, y1) - path.LineTo(x2-arcWidth, y1) - path.QuadCurveTo(x2, y1, x2, y1+arcHeight) - path.LineTo(x2, y2-arcHeight) - path.QuadCurveTo(x2, y2, x2-arcWidth, y2) - path.LineTo(x1+arcWidth, y2) - path.QuadCurveTo(x1, y2, x1, y2-arcHeight) - path.Close() -} - -func Ellipse(path Path, cx, cy, rx, ry float64) { - path.ArcTo(cx, cy, rx, ry, 0, -math.Pi*2) - path.Close() -} - -func Circle(path Path, cx, cy, radius float64) { - path.ArcTo(cx, cy, radius, radius, 0, -math.Pi*2) - path.Close() -} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/arc.go b/_third_party/code.google.com/p/draw2d/draw2d/arc.go deleted file mode 100644 index 2de3abfdda..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/arc.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. -// created: 21/11/2010 by Laurent Le Goff - -package draw2d - -import ( - "bosun.org/_third_party/code.google.com/p/freetype-go/freetype/raster" - "math" -) - -func arc(t VertexConverter, x, y, rx, ry, start, angle, scale float64) (lastX, lastY float64) { - end := start + angle - clockWise := true - if angle < 0 { - clockWise = false - } - ra := (math.Abs(rx) + math.Abs(ry)) / 2 - da := math.Acos(ra/(ra+0.125/scale)) * 2 - //normalize - if !clockWise { - da = -da - } - angle = start + da - var curX, curY float64 - for { - if (angle < end-da/4) != clockWise { - curX = x + math.Cos(end)*rx - curY = y + math.Sin(end)*ry - return curX, curY - } - curX = x + math.Cos(angle)*rx - curY = y + math.Sin(angle)*ry - - angle += da - t.Vertex(curX, curY) - } - return curX, curY -} - -func arcAdder(adder raster.Adder, x, y, rx, ry, start, angle, scale float64) raster.Point { - end := start + angle - clockWise := true - if angle < 0 { - clockWise = false - } - ra := (math.Abs(rx) + math.Abs(ry)) / 2 - da := math.Acos(ra/(ra+0.125/scale)) * 2 - //normalize - if !clockWise { - da = -da - } - angle = start + da - var curX, curY float64 - for { - if (angle < end-da/4) != clockWise { - curX = x + math.Cos(end)*rx - curY = y + math.Sin(end)*ry - return raster.Point{raster.Fix32(curX * 256), raster.Fix32(curY * 256)} - } - curX = x + math.Cos(angle)*rx - curY = y + math.Sin(angle)*ry - - angle += da - adder.Add1(raster.Point{raster.Fix32(curX * 256), raster.Fix32(curY * 256)}) - } - return raster.Point{raster.Fix32(curX * 256), raster.Fix32(curY * 256)} -} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/curves.go b/_third_party/code.google.com/p/draw2d/draw2d/curves.go deleted file mode 100644 index 98c9a97507..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/curves.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. 
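Reviewer note: advanced_path.go, deleted above, layered shape helpers (Rect, RoundRect, Ellipse, Circle) on the Path interface that this patch also removes below; since the old GraphicContext interface embeds Path, a graphic context could be passed straight in. A minimal pre-patch sketch, assuming the usual RGBA setup:

package main

import (
    "image"

    draw2d "bosun.org/_third_party/code.google.com/p/draw2d/draw2d"
)

func main() {
    img := image.NewRGBA(image.Rect(0, 0, 120, 70))
    gc := draw2d.NewGraphicContext(img)
    // The helpers accept anything implementing Path; the old
    // GraphicContext interface embedded Path, so gc qualifies.
    draw2d.RoundRect(gc, 10, 10, 110, 60, 10, 10)
    draw2d.Circle(gc, 60, 35, 20)
    gc.FillStroke()
}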
-// created: 21/11/2010 by Laurent Le Goff - -package draw2d - -import ( - "math" -) - -var ( - CurveRecursionLimit = 32 - CurveCollinearityEpsilon = 1e-30 - CurveAngleToleranceEpsilon = 0.01 -) - -/* - The function has the following parameters: - approximationScale : - Eventually determines the approximation accuracy. In practice we need to transform points from the World coordinate system to the Screen one. - It always has some scaling coefficient. - The curves are usually processed in the World coordinates, while the approximation accuracy should be eventually in pixels. - Usually it looks as follows: - curved.approximationScale(transform.scale()); - where transform is the affine matrix that includes all the transformations, including viewport and zoom. - angleTolerance : - You set it in radians. - The less this value is the more accurate will be the approximation at sharp turns. - But 0 means that we don't consider angle conditions at all. - cuspLimit : - An angle in radians. - If 0, only the real cusps will have bevel cuts. - If more than 0, it will restrict the sharpness. - The more this value is the less sharp turns will be cut. - Typically it should not exceed 10-15 degrees. -*/ -func cubicBezier(v VertexConverter, x1, y1, x2, y2, x3, y3, x4, y4, approximationScale, angleTolerance, cuspLimit float64) { - cuspLimit = computeCuspLimit(cuspLimit) - distanceToleranceSquare := 0.5 / approximationScale - distanceToleranceSquare = distanceToleranceSquare * distanceToleranceSquare - recursiveCubicBezier(v, x1, y1, x2, y2, x3, y3, x4, y4, 0, distanceToleranceSquare, angleTolerance, cuspLimit) -} - -/* - * see cubicBezier comments for approximationScale and angleTolerance definition - */ -func quadraticBezier(v VertexConverter, x1, y1, x2, y2, x3, y3, approximationScale, angleTolerance float64) { - distanceToleranceSquare := 0.5 / approximationScale - distanceToleranceSquare = distanceToleranceSquare * distanceToleranceSquare - - recursiveQuadraticBezierBezier(v, x1, y1, x2, y2, x3, y3, 0, distanceToleranceSquare, angleTolerance) -} - -func computeCuspLimit(v float64) (r float64) { - if v == 0.0 { - r = 0.0 - } else { - r = math.Pi - v - } - return -} - -/** - * http://www.antigrain.com/research/adaptive_bezier/index.html - */ -func recursiveQuadraticBezierBezier(v VertexConverter, x1, y1, x2, y2, x3, y3 float64, level int, distanceToleranceSquare, angleTolerance float64) { - if level > CurveRecursionLimit { - return - } - - // Calculate all the mid-points of the line segments - //---------------------- - x12 := (x1 + x2) / 2 - y12 := (y1 + y2) / 2 - x23 := (x2 + x3) / 2 - y23 := (y2 + y3) / 2 - x123 := (x12 + x23) / 2 - y123 := (y12 + y23) / 2 - - dx := x3 - x1 - dy := y3 - y1 - d := math.Abs(((x2-x3)*dy - (y2-y3)*dx)) - - if d > CurveCollinearityEpsilon { - // Regular case - //----------------- - if d*d <= distanceToleranceSquare*(dx*dx+dy*dy) { - // If the curvature doesn't exceed the distanceTolerance value - // we tend to finish subdivisions. 
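Reviewer note: on the tolerance math documented above, cubicBezier and quadraticBezier stop subdividing once the deviation from the chord is within half a pixel at the current zoom, so the squared tolerance falls as approximationScale grows. A standalone worked example with an illustrative scale value:

package main

import "fmt"

func main() {
    // From the deleted curves.go: stop subdividing once the control
    // points sit within ~half a pixel of the chord at the current zoom.
    approximationScale := 2.0 // e.g. a 2x world-to-screen transform
    d := 0.5 / approximationScale
    distanceToleranceSquare := d * d
    fmt.Println(distanceToleranceSquare) // 0.0625
}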
- //---------------------- - if angleTolerance < CurveAngleToleranceEpsilon { - v.Vertex(x123, y123) - return - } - - // Angle & Cusp Condition - //---------------------- - da := math.Abs(math.Atan2(y3-y2, x3-x2) - math.Atan2(y2-y1, x2-x1)) - if da >= math.Pi { - da = 2*math.Pi - da - } - - if da < angleTolerance { - // Finally we can stop the recursion - //---------------------- - v.Vertex(x123, y123) - return - } - } - } else { - // Collinear case - //------------------ - da := dx*dx + dy*dy - if da == 0 { - d = squareDistance(x1, y1, x2, y2) - } else { - d = ((x2-x1)*dx + (y2-y1)*dy) / da - if d > 0 && d < 1 { - // Simple collinear case, 1---2---3 - // We can leave just two endpoints - return - } - if d <= 0 { - d = squareDistance(x2, y2, x1, y1) - } else if d >= 1 { - d = squareDistance(x2, y2, x3, y3) - } else { - d = squareDistance(x2, y2, x1+d*dx, y1+d*dy) - } - } - if d < distanceToleranceSquare { - v.Vertex(x2, y2) - return - } - } - - // Continue subdivision - //---------------------- - recursiveQuadraticBezierBezier(v, x1, y1, x12, y12, x123, y123, level+1, distanceToleranceSquare, angleTolerance) - recursiveQuadraticBezierBezier(v, x123, y123, x23, y23, x3, y3, level+1, distanceToleranceSquare, angleTolerance) -} - -/** - * http://www.antigrain.com/research/adaptive_bezier/index.html - */ -func recursiveCubicBezier(v VertexConverter, x1, y1, x2, y2, x3, y3, x4, y4 float64, level int, distanceToleranceSquare, angleTolerance, cuspLimit float64) { - if level > CurveRecursionLimit { - return - } - - // Calculate all the mid-points of the line segments - //---------------------- - x12 := (x1 + x2) / 2 - y12 := (y1 + y2) / 2 - x23 := (x2 + x3) / 2 - y23 := (y2 + y3) / 2 - x34 := (x3 + x4) / 2 - y34 := (y3 + y4) / 2 - x123 := (x12 + x23) / 2 - y123 := (y12 + y23) / 2 - x234 := (x23 + x34) / 2 - y234 := (y23 + y34) / 2 - x1234 := (x123 + x234) / 2 - y1234 := (y123 + y234) / 2 - - // Try to approximate the full cubic curve by a single straight line - //------------------ - dx := x4 - x1 - dy := y4 - y1 - - d2 := math.Abs(((x2-x4)*dy - (y2-y4)*dx)) - d3 := math.Abs(((x3-x4)*dy - (y3-y4)*dx)) - - switch { - case d2 <= CurveCollinearityEpsilon && d3 <= CurveCollinearityEpsilon: - // All collinear OR p1==p4 - //---------------------- - k := dx*dx + dy*dy - if k == 0 { - d2 = squareDistance(x1, y1, x2, y2) - d3 = squareDistance(x4, y4, x3, y3) - } else { - k = 1 / k - da1 := x2 - x1 - da2 := y2 - y1 - d2 = k * (da1*dx + da2*dy) - da1 = x3 - x1 - da2 = y3 - y1 - d3 = k * (da1*dx + da2*dy) - if d2 > 0 && d2 < 1 && d3 > 0 && d3 < 1 { - // Simple collinear case, 1---2---3---4 - // We can leave just two endpoints - return - } - if d2 <= 0 { - d2 = squareDistance(x2, y2, x1, y1) - } else if d2 >= 1 { - d2 = squareDistance(x2, y2, x4, y4) - } else { - d2 = squareDistance(x2, y2, x1+d2*dx, y1+d2*dy) - } - - if d3 <= 0 { - d3 = squareDistance(x3, y3, x1, y1) - } else if d3 >= 1 { - d3 = squareDistance(x3, y3, x4, y4) - } else { - d3 = squareDistance(x3, y3, x1+d3*dx, y1+d3*dy) - } - } - if d2 > d3 { - if d2 < distanceToleranceSquare { - v.Vertex(x2, y2) - return - } - } else { - if d3 < distanceToleranceSquare { - v.Vertex(x3, y3) - return - } - } - break - - case d2 <= CurveCollinearityEpsilon && d3 > CurveCollinearityEpsilon: - // p1,p2,p4 are collinear, p3 is significant - //---------------------- - if d3*d3 <= distanceToleranceSquare*(dx*dx+dy*dy) { - if angleTolerance < CurveAngleToleranceEpsilon { - v.Vertex(x23, y23) - return - } - - // Angle Condition - //---------------------- - da1 := 
math.Abs(math.Atan2(y4-y3, x4-x3) - math.Atan2(y3-y2, x3-x2)) - if da1 >= math.Pi { - da1 = 2*math.Pi - da1 - } - - if da1 < angleTolerance { - v.Vertex(x2, y2) - v.Vertex(x3, y3) - return - } - - if cuspLimit != 0.0 { - if da1 > cuspLimit { - v.Vertex(x3, y3) - return - } - } - } - break - - case d2 > CurveCollinearityEpsilon && d3 <= CurveCollinearityEpsilon: - // p1,p3,p4 are collinear, p2 is significant - //---------------------- - if d2*d2 <= distanceToleranceSquare*(dx*dx+dy*dy) { - if angleTolerance < CurveAngleToleranceEpsilon { - v.Vertex(x23, y23) - return - } - - // Angle Condition - //---------------------- - da1 := math.Abs(math.Atan2(y3-y2, x3-x2) - math.Atan2(y2-y1, x2-x1)) - if da1 >= math.Pi { - da1 = 2*math.Pi - da1 - } - - if da1 < angleTolerance { - v.Vertex(x2, y2) - v.Vertex(x3, y3) - return - } - - if cuspLimit != 0.0 { - if da1 > cuspLimit { - v.Vertex(x2, y2) - return - } - } - } - break - - case d2 > CurveCollinearityEpsilon && d3 > CurveCollinearityEpsilon: - // Regular case - //----------------- - if (d2+d3)*(d2+d3) <= distanceToleranceSquare*(dx*dx+dy*dy) { - // If the curvature doesn't exceed the distanceTolerance value - // we tend to finish subdivisions. - //---------------------- - if angleTolerance < CurveAngleToleranceEpsilon { - v.Vertex(x23, y23) - return - } - - // Angle & Cusp Condition - //---------------------- - k := math.Atan2(y3-y2, x3-x2) - da1 := math.Abs(k - math.Atan2(y2-y1, x2-x1)) - da2 := math.Abs(math.Atan2(y4-y3, x4-x3) - k) - if da1 >= math.Pi { - da1 = 2*math.Pi - da1 - } - if da2 >= math.Pi { - da2 = 2*math.Pi - da2 - } - - if da1+da2 < angleTolerance { - // Finally we can stop the recursion - //---------------------- - v.Vertex(x23, y23) - return - } - - if cuspLimit != 0.0 { - if da1 > cuspLimit { - v.Vertex(x2, y2) - return - } - - if da2 > cuspLimit { - v.Vertex(x3, y3) - return - } - } - } - break - } - - // Continue subdivision - //---------------------- - recursiveCubicBezier(v, x1, y1, x12, y12, x123, y123, x1234, y1234, level+1, distanceToleranceSquare, angleTolerance, cuspLimit) - recursiveCubicBezier(v, x1234, y1234, x234, y234, x34, y34, x4, y4, level+1, distanceToleranceSquare, angleTolerance, cuspLimit) - -} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/demux_converter.go b/_third_party/code.google.com/p/draw2d/draw2d/demux_converter.go deleted file mode 100644 index b5c871d2c6..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/demux_converter.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. -// created: 13/12/2010 by Laurent Le Goff - -package draw2d - -type DemuxConverter struct { - converters []VertexConverter -} - -func NewDemuxConverter(converters ...VertexConverter) *DemuxConverter { - return &DemuxConverter{converters} -} - -func (dc *DemuxConverter) NextCommand(cmd VertexCommand) { - for _, converter := range dc.converters { - converter.NextCommand(cmd) - } -} -func (dc *DemuxConverter) Vertex(x, y float64) { - for _, converter := range dc.converters { - converter.Vertex(x, y) - } -} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/doc.go b/_third_party/code.google.com/p/draw2d/draw2d/doc.go deleted file mode 100644 index 3baeffb4d3..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. -// created: 13/12/2010 by Laurent Le Goff - -// The package draw2d provide a Graphic Context that can draw vectorial figure on surface. 
-package draw2d diff --git a/_third_party/code.google.com/p/draw2d/draw2d/gc.go b/_third_party/code.google.com/p/draw2d/draw2d/gc.go deleted file mode 100644 index 66dc5088fc..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/gc.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. -// created: 21/11/2010 by Laurent Le Goff - -package draw2d - -import ( - "image" - "image/color" -) - -type FillRule int - -const ( - FillRuleEvenOdd FillRule = iota - FillRuleWinding -) - -type GraphicContext interface { - Path - // Create a new path - BeginPath() - GetMatrixTransform() MatrixTransform - SetMatrixTransform(tr MatrixTransform) - ComposeMatrixTransform(tr MatrixTransform) - Rotate(angle float64) - Translate(tx, ty float64) - Scale(sx, sy float64) - SetStrokeColor(c color.Color) - SetFillColor(c color.Color) - SetFillRule(f FillRule) - SetLineWidth(lineWidth float64) - SetLineCap(cap Cap) - SetLineJoin(join Join) - SetLineDash(dash []float64, dashOffset float64) - SetFontSize(fontSize float64) - GetFontSize() float64 - SetFontData(fontData FontData) - GetFontData() FontData - DrawImage(image image.Image) - Save() - Restore() - Clear() - ClearRect(x1, y1, x2, y2 int) - SetDPI(dpi int) - GetDPI() int - GetStringBounds(s string) (left, top, right, bottom float64) - CreateStringPath(text string, x, y float64) (cursor float64) - FillString(text string) (cursor float64) - FillStringAt(text string, x, y float64) (cursor float64) - StrokeString(text string) (cursor float64) - StrokeStringAt(text string, x, y float64) (cursor float64) - Stroke(paths ...*PathStorage) - Fill(paths ...*PathStorage) - FillStroke(paths ...*PathStorage) -} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/image.go b/_third_party/code.google.com/p/draw2d/draw2d/image.go deleted file mode 100644 index 741e76767c..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/image.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. 
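Reviewer note: the GraphicContext interface deleted above was the old package's whole drawing surface. For comparison with the replacement llgcode API, canonical pre-patch usage looked roughly like this (colors and coordinates illustrative):

package main

import (
    "image"
    "image/color"

    draw2d "bosun.org/_third_party/code.google.com/p/draw2d/draw2d"
)

func main() {
    img := image.NewRGBA(image.Rect(0, 0, 100, 100))
    gc := draw2d.NewGraphicContext(img) // panics for non-RGBA images, per image.go below
    gc.SetFillColor(color.RGBA{R: 0xff, A: 0xff})
    gc.SetLineWidth(2)
    gc.BeginPath()
    gc.MoveTo(10, 10)
    gc.LineTo(90, 50)
    gc.LineTo(10, 90)
    gc.Close()
    gc.FillStroke() // fills, then strokes, the current path
}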
-// created: 21/11/2010 by Laurent Le Goff - -package draw2d - -import ( - "bosun.org/_third_party/code.google.com/p/freetype-go/freetype/raster" - "bosun.org/_third_party/code.google.com/p/freetype-go/freetype/truetype" - "errors" - "image" - "image/color" - "image/draw" - "log" - "math" -) - -type Painter interface { - raster.Painter - SetColor(color color.Color) -} - -var ( - defaultFontData = FontData{"luxi", FontFamilySans, FontStyleNormal} -) - -type ImageGraphicContext struct { - *StackGraphicContext - img draw.Image - painter Painter - fillRasterizer *raster.Rasterizer - strokeRasterizer *raster.Rasterizer - glyphBuf *truetype.GlyphBuf - DPI int -} - -/** - * Create a new Graphic context from an image - */ -func NewGraphicContext(img draw.Image) *ImageGraphicContext { - var painter Painter - switch selectImage := img.(type) { - case *image.RGBA: - painter = raster.NewRGBAPainter(selectImage) - default: - panic("Image type not supported") - } - return NewGraphicContextWithPainter(img, painter) -} - -// Create a new Graphic context from an image and a Painter (see Freetype-go) -func NewGraphicContextWithPainter(img draw.Image, painter Painter) *ImageGraphicContext { - width, height := img.Bounds().Dx(), img.Bounds().Dy() - dpi := 92 - gc := &ImageGraphicContext{ - NewStackGraphicContext(), - img, - painter, - raster.NewRasterizer(width, height), - raster.NewRasterizer(width, height), - truetype.NewGlyphBuf(), - dpi, - } - return gc -} - -func (gc *ImageGraphicContext) GetDPI() int { - return gc.DPI -} - -func (gc *ImageGraphicContext) Clear() { - width, height := gc.img.Bounds().Dx(), gc.img.Bounds().Dy() - gc.ClearRect(0, 0, width, height) -} - -func (gc *ImageGraphicContext) ClearRect(x1, y1, x2, y2 int) { - imageColor := image.NewUniform(gc.Current.FillColor) - draw.Draw(gc.img, image.Rect(x1, y1, x2, y2), imageColor, image.ZP, draw.Over) -} - -func (gc *ImageGraphicContext) DrawImage(img image.Image) { - DrawImage(img, gc.img, gc.Current.Tr, draw.Over, BilinearFilter) -} - -func (gc *ImageGraphicContext) FillString(text string) (cursor float64) { - return gc.FillStringAt(text, 0, 0) -} - -func (gc *ImageGraphicContext) FillStringAt(text string, x, y float64) (cursor float64) { - width := gc.CreateStringPath(text, x, y) - gc.Fill() - return width -} - -func (gc *ImageGraphicContext) StrokeString(text string) (cursor float64) { - return gc.StrokeStringAt(text, 0, 0) -} - -func (gc *ImageGraphicContext) StrokeStringAt(text string, x, y float64) (cursor float64) { - width := gc.CreateStringPath(text, x, y) - gc.Stroke() - return width -} - -func (gc *ImageGraphicContext) loadCurrentFont() (*truetype.Font, error) { - font := GetFont(gc.Current.FontData) - if font == nil { - font = GetFont(defaultFontData) - } - if font == nil { - return nil, errors.New("No font set, and no default font available.") - } - gc.SetFont(font) - gc.SetFontSize(gc.Current.FontSize) - return font, nil -} - -func fUnitsToFloat64(x int32) float64 { - scaled := x << 2 - return float64(scaled/256) + float64(scaled%256)/256.0 -} - -// p is a truetype.Point measured in FUnits and positive Y going upwards. -// The returned value is the same thing measured in floating point and positive Y -// going downwards. -func pointToF64Point(p truetype.Point) (x, y float64) { - return fUnitsToFloat64(p.X), -fUnitsToFloat64(p.Y) -} - -// drawContour draws the given closed contour at the given sub-pixel offset. 
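Reviewer note: fUnitsToFloat64 above converts 26.6 fixed-point font units (1 unit = 1/64 pixel) to float64. The left shift re-expresses the value in 1/256ths, and the divide/modulo pair splits integer and fraction. A worked check for a non-negative input:

package main

import "fmt"

func main() {
    x := int32(160) // 2.5 in 26.6 fixed point (160/64)
    scaled := x << 2
    v := float64(scaled/256) + float64(scaled%256)/256.0
    fmt.Println(v, float64(x)/64) // 2.5 2.5
}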
-func (gc *ImageGraphicContext) drawContour(ps []truetype.Point, dx, dy float64) { - if len(ps) == 0 { - return - } - startX, startY := pointToF64Point(ps[0]) - gc.MoveTo(startX+dx, startY+dy) - q0X, q0Y, on0 := startX, startY, true - for _, p := range ps[1:] { - qX, qY := pointToF64Point(p) - on := p.Flags&0x01 != 0 - if on { - if on0 { - gc.LineTo(qX+dx, qY+dy) - } else { - gc.QuadCurveTo(q0X+dx, q0Y+dy, qX+dx, qY+dy) - } - } else { - if on0 { - // No-op. - } else { - midX := (q0X + qX) / 2 - midY := (q0Y + qY) / 2 - gc.QuadCurveTo(q0X+dx, q0Y+dy, midX+dx, midY+dy) - } - } - q0X, q0Y, on0 = qX, qY, on - } - // Close the curve. - if on0 { - gc.LineTo(startX+dx, startY+dy) - } else { - gc.QuadCurveTo(q0X+dx, q0Y+dy, startX+dx, startY+dy) - } -} - -func (gc *ImageGraphicContext) drawGlyph(glyph truetype.Index, dx, dy float64) error { - if err := gc.glyphBuf.Load(gc.Current.font, gc.Current.scale, glyph, truetype.NoHinting); err != nil { - return err - } - e0 := 0 - for _, e1 := range gc.glyphBuf.End { - gc.drawContour(gc.glyphBuf.Point[e0:e1], dx, dy) - e0 = e1 - } - return nil -} - -// CreateStringPath creates a path from the string s at x, y, and returns the string width. -// The text is placed so that the left edge of the em square of the first character of s -// and the baseline intersect at x, y. The majority of the affected pixels will be -// above and to the right of the point, but some may be below or to the left. -// For example, drawing a string that starts with a 'J' in an italic font may -// affect pixels below and left of the point. -func (gc *ImageGraphicContext) CreateStringPath(s string, x, y float64) float64 { - font, err := gc.loadCurrentFont() - if err != nil { - log.Println(err) - return 0.0 - } - startx := x - prev, hasPrev := truetype.Index(0), false - for _, rune := range s { - index := font.Index(rune) - if hasPrev { - x += fUnitsToFloat64(font.Kerning(gc.Current.scale, prev, index)) - } - err := gc.drawGlyph(index, x, y) - if err != nil { - log.Println(err) - return startx - x - } - x += fUnitsToFloat64(font.HMetric(gc.Current.scale, index).AdvanceWidth) - prev, hasPrev = index, true - } - return x - startx -} - -// GetStringBounds returns the approximate pixel bounds of the string s at x, y. -// The the left edge of the em square of the first character of s -// and the baseline intersect at 0, 0 in the returned coordinates. -// Therefore the top and left coordinates may well be negative. 
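Reviewer note: GetStringBounds and FillStringAt made up the old measure-then-draw text API. A hedged sketch; it assumes a font was registered with the package's font cache beforehand, since otherwise loadCurrentFont logs an error and the bounds come back zero:

package main

import (
    "image"

    draw2d "bosun.org/_third_party/code.google.com/p/draw2d/draw2d"
)

func main() {
    img := image.NewRGBA(image.Rect(0, 0, 300, 100))
    gc := draw2d.NewGraphicContext(img)
    gc.SetFontSize(12)
    left, top, right, bottom := gc.GetStringBounds("bosun")
    w, h := right-left, bottom-top // ink extents in pixels
    // top/left may be negative: the origin is the baseline at the em
    // square's left edge, as documented above.
    gc.FillStringAt("bosun", 10-left, 10-top)
    _, _ = w, h
}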
-func (gc *ImageGraphicContext) GetStringBounds(s string) (left, top, right, bottom float64) { - font, err := gc.loadCurrentFont() - if err != nil { - log.Println(err) - return 0, 0, 0, 0 - } - top, left, bottom, right = 10e6, 10e6, -10e6, -10e6 - cursor := 0.0 - prev, hasPrev := truetype.Index(0), false - for _, rune := range s { - index := font.Index(rune) - if hasPrev { - cursor += fUnitsToFloat64(font.Kerning(gc.Current.scale, prev, index)) - } - if err := gc.glyphBuf.Load(gc.Current.font, gc.Current.scale, index, truetype.NoHinting); err != nil { - log.Println(err) - return 0, 0, 0, 0 - } - e0 := 0 - for _, e1 := range gc.glyphBuf.End { - ps := gc.glyphBuf.Point[e0:e1] - for _, p := range ps { - x, y := pointToF64Point(p) - top = math.Min(top, y) - bottom = math.Max(bottom, y) - left = math.Min(left, x+cursor) - right = math.Max(right, x+cursor) - } - } - cursor += fUnitsToFloat64(font.HMetric(gc.Current.scale, index).AdvanceWidth) - prev, hasPrev = index, true - } - return left, top, right, bottom -} - -// recalc recalculates scale and bounds values from the font size, screen -// resolution and font metrics, and invalidates the glyph cache. -func (gc *ImageGraphicContext) recalc() { - gc.Current.scale = int32(gc.Current.FontSize * float64(gc.DPI) * (64.0 / 72.0)) -} - -// SetDPI sets the screen resolution in dots per inch. -func (gc *ImageGraphicContext) SetDPI(dpi int) { - gc.DPI = dpi - gc.recalc() -} - -// SetFont sets the font used to draw text. -func (gc *ImageGraphicContext) SetFont(font *truetype.Font) { - gc.Current.font = font -} - -// SetFontSize sets the font size in points (as in ``a 12 point font''). -func (gc *ImageGraphicContext) SetFontSize(fontSize float64) { - gc.Current.FontSize = fontSize - gc.recalc() -} - -func (gc *ImageGraphicContext) paint(rasterizer *raster.Rasterizer, color color.Color) { - gc.painter.SetColor(color) - rasterizer.Rasterize(gc.painter) - rasterizer.Clear() - gc.Current.Path.Clear() -} - -/**** second method ****/ -func (gc *ImageGraphicContext) Stroke(paths ...*PathStorage) { - paths = append(paths, gc.Current.Path) - gc.strokeRasterizer.UseNonZeroWinding = true - - stroker := NewLineStroker(gc.Current.Cap, gc.Current.Join, NewVertexMatrixTransform(gc.Current.Tr, NewVertexAdder(gc.strokeRasterizer))) - stroker.HalfLineWidth = gc.Current.LineWidth / 2 - var pathConverter *PathConverter - if gc.Current.Dash != nil && len(gc.Current.Dash) > 0 { - dasher := NewDashConverter(gc.Current.Dash, gc.Current.DashOffset, stroker) - pathConverter = NewPathConverter(dasher) - } else { - pathConverter = NewPathConverter(stroker) - } - pathConverter.ApproximationScale = gc.Current.Tr.GetScale() - pathConverter.Convert(paths...) - - gc.paint(gc.strokeRasterizer, gc.Current.StrokeColor) -} - -/**** second method ****/ -func (gc *ImageGraphicContext) Fill(paths ...*PathStorage) { - paths = append(paths, gc.Current.Path) - gc.fillRasterizer.UseNonZeroWinding = gc.Current.FillRule.UseNonZeroWinding() - - /**** first method ****/ - pathConverter := NewPathConverter(NewVertexMatrixTransform(gc.Current.Tr, NewVertexAdder(gc.fillRasterizer))) - pathConverter.ApproximationScale = gc.Current.Tr.GetScale() - pathConverter.Convert(paths...) 
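Reviewer note: recalc above converts a point size to a 26.6 fixed-point pixel scale, scale = size_pt * dpi * 64/72. Worked numbers using the default DPI of 92 that NewGraphicContextWithPainter sets:

package main

import "fmt"

func main() {
    size, dpi := 12.0, 92.0
    scale := int32(size * dpi * (64.0 / 72.0))
    fmt.Println(scale)                 // 981
    fmt.Println(float64(scale) / 64.0) // ~15.33 pixels per em
}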
- - gc.paint(gc.fillRasterizer, gc.Current.FillColor) -} - -/* second method */ -func (gc *ImageGraphicContext) FillStroke(paths ...*PathStorage) { - gc.fillRasterizer.UseNonZeroWinding = gc.Current.FillRule.UseNonZeroWinding() - gc.strokeRasterizer.UseNonZeroWinding = true - - filler := NewVertexMatrixTransform(gc.Current.Tr, NewVertexAdder(gc.fillRasterizer)) - - stroker := NewLineStroker(gc.Current.Cap, gc.Current.Join, NewVertexMatrixTransform(gc.Current.Tr, NewVertexAdder(gc.strokeRasterizer))) - stroker.HalfLineWidth = gc.Current.LineWidth / 2 - - demux := NewDemuxConverter(filler, stroker) - paths = append(paths, gc.Current.Path) - pathConverter := NewPathConverter(demux) - pathConverter.ApproximationScale = gc.Current.Tr.GetScale() - pathConverter.Convert(paths...) - - gc.paint(gc.fillRasterizer, gc.Current.FillColor) - gc.paint(gc.strokeRasterizer, gc.Current.StrokeColor) -} - -func (f FillRule) UseNonZeroWinding() bool { - switch f { - case FillRuleEvenOdd: - return false - case FillRuleWinding: - return true - } - return false -} - -func (c Cap) Convert() raster.Capper { - switch c { - case RoundCap: - return raster.RoundCapper - case ButtCap: - return raster.ButtCapper - case SquareCap: - return raster.SquareCapper - } - return raster.RoundCapper -} - -func (j Join) Convert() raster.Joiner { - switch j { - case RoundJoin: - return raster.RoundJoiner - case BevelJoin: - return raster.BevelJoiner - } - return raster.RoundJoiner -} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/math.go b/_third_party/code.google.com/p/draw2d/draw2d/math.go deleted file mode 100644 index c4bb761df0..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/math.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. -// created: 21/11/2010 by Laurent Le Goff - -package draw2d - -import ( - "math" -) - -func distance(x1, y1, x2, y2 float64) float64 { - dx := x2 - x1 - dy := y2 - y1 - return float64(math.Sqrt(dx*dx + dy*dy)) -} - -func vectorDistance(dx, dy float64) float64 { - return float64(math.Sqrt(dx*dx + dy*dy)) -} - -func squareDistance(x1, y1, x2, y2 float64) float64 { - dx := x2 - x1 - dy := y2 - y1 - return dx*dx + dy*dy -} - -func min(x, y float64) float64 { - if x < y { - return x - } - return y -} - -func max(x, y float64) float64 { - if x > y { - return x - } - return y -} - -func minMax(x, y float64) (min, max float64) { - if x > y { - return y, x - } - return x, y -} - -func minUint32(a, b uint32) uint32 { - if a < b { - return a - } - return b -} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/paint.go b/_third_party/code.google.com/p/draw2d/draw2d/paint.go deleted file mode 100644 index 885d993aed..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/paint.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. -// created: 21/11/2010 by Laurent Le Goff - -package draw2d - -/* -import ( - "image/draw" - "image" - "freetype-go.googlecode.com/hg/freetype/raster" -)*/ - -const M = 1<<16 - 1 - -/* -type NRGBAPainter struct { - // The image to compose onto. - Image *image.NRGBA - // The Porter-Duff composition operator. - Op draw.Op - // The 16-bit color to paint the spans. - cr, cg, cb, ca uint32 -} - -// Paint satisfies the Painter interface by painting ss onto an image.RGBA. 
-func (r *NRGBAPainter) Paint(ss []raster.Span, done bool) { - b := r.Image.Bounds() - for _, s := range ss { - if s.Y < b.Min.Y { - continue - } - if s.Y >= b.Max.Y { - return - } - if s.X0 < b.Min.X { - s.X0 = b.Min.X - } - if s.X1 > b.Max.X { - s.X1 = b.Max.X - } - if s.X0 >= s.X1 { - continue - } - base := s.Y * r.Image.Stride - p := r.Image.Pix[base+s.X0 : base+s.X1] - // This code is duplicated from drawGlyphOver in $GOROOT/src/pkg/image/draw/draw.go. - // TODO(nigeltao): Factor out common code into a utility function, once the compiler - // can inline such function calls. - ma := s.A >> 16 - if r.Op == draw.Over { - for i, nrgba := range p { - dr, dg, db, da := nrgba. - a := M - (r.ca*ma)/M - da = (da*a + r.ca*ma) / M - if da != 0 { - dr = minUint32(M, (dr*a+r.cr*ma)/da) - dg = minUint32(M, (dg*a+r.cg*ma)/da) - db = minUint32(M, (db*a+r.cb*ma)/da) - } else { - dr, dg, db = 0, 0, 0 - } - p[i] = image.NRGBAColor{uint8(dr >> 8), uint8(dg >> 8), uint8(db >> 8), uint8(da >> 8)} - } - } else { - for i, nrgba := range p { - dr, dg, db, da := nrgba.RGBA() - a := M - ma - da = (da*a + r.ca*ma) / M - if da != 0 { - dr = minUint32(M, (dr*a+r.cr*ma)/da) - dg = minUint32(M, (dg*a+r.cg*ma)/da) - db = minUint32(M, (db*a+r.cb*ma)/da) - } else { - dr, dg, db = 0, 0, 0 - } - p[i] = image.NRGBAColor{uint8(dr >> 8), uint8(dg >> 8), uint8(db >> 8), uint8(da >> 8)} - } - } - } - -} - -// SetColor sets the color to paint the spans. -func (r *NRGBAPainter) SetColor(c image.Color) { - r.cr, r.cg, r.cb, r.ca = c.RGBA() -} - -// NewRGBAPainter creates a new RGBAPainter for the given image. -func NewNRGBAPainter(m *image.NRGBA) *NRGBAPainter { - return &NRGBAPainter{Image: m} -} -*/ diff --git a/_third_party/code.google.com/p/draw2d/draw2d/path.go b/_third_party/code.google.com/p/draw2d/draw2d/path.go deleted file mode 100644 index 7167a7c45d..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/path.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. -// created: 21/11/2010 by Laurent Le Goff - -package draw2d - -type Path interface { - // Return the current point of the path - LastPoint() (x, y float64) - // Create a new subpath that start at the specified point - MoveTo(x, y float64) - // Create a new subpath that start at the specified point - // relative to the current point - RMoveTo(dx, dy float64) - // Add a line to the current subpath - LineTo(x, y float64) - // Add a line to the current subpath - // relative to the current point - RLineTo(dx, dy float64) - - QuadCurveTo(cx, cy, x, y float64) - RQuadCurveTo(dcx, dcy, dx, dy float64) - CubicCurveTo(cx1, cy1, cx2, cy2, x, y float64) - RCubicCurveTo(dcx1, dcy1, dcx2, dcy2, dx, dy float64) - ArcTo(cx, cy, rx, ry, startAngle, angle float64) - RArcTo(dcx, dcy, rx, ry, startAngle, angle float64) - Close() -} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/path_adder.go b/_third_party/code.google.com/p/draw2d/draw2d/path_adder.go deleted file mode 100644 index ee26809d83..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/path_adder.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. 
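Reviewer note: the Path interface from path.go above is the contract both the path buffers and the graphic contexts implement. A minimal custom implementation for illustration; tracePath is hypothetical, not part of the patch, and the var _ line is a compile-time conformance check against the old package:

package main

import (
    "fmt"

    draw2d "bosun.org/_third_party/code.google.com/p/draw2d/draw2d"
)

// tracePath logs each command; it tracks the current point only for the
// move/line/curve commands, which is enough for the relative variants.
type tracePath struct{ x, y float64 }

var _ draw2d.Path = (*tracePath)(nil) // compile-time interface check

func (t *tracePath) LastPoint() (x, y float64) { return t.x, t.y }
func (t *tracePath) MoveTo(x, y float64)       { t.x, t.y = x, y; fmt.Println("MoveTo", x, y) }
func (t *tracePath) RMoveTo(dx, dy float64)    { t.MoveTo(t.x+dx, t.y+dy) }
func (t *tracePath) LineTo(x, y float64)       { t.x, t.y = x, y; fmt.Println("LineTo", x, y) }
func (t *tracePath) RLineTo(dx, dy float64)    { t.LineTo(t.x+dx, t.y+dy) }
func (t *tracePath) QuadCurveTo(cx, cy, x, y float64) {
    t.x, t.y = x, y
    fmt.Println("QuadCurveTo", cx, cy, x, y)
}
func (t *tracePath) RQuadCurveTo(dcx, dcy, dx, dy float64) {
    t.QuadCurveTo(t.x+dcx, t.y+dcy, t.x+dx, t.y+dy)
}
func (t *tracePath) CubicCurveTo(cx1, cy1, cx2, cy2, x, y float64) {
    t.x, t.y = x, y
    fmt.Println("CubicCurveTo")
}
func (t *tracePath) RCubicCurveTo(dcx1, dcy1, dcx2, dcy2, dx, dy float64) {
    t.CubicCurveTo(t.x+dcx1, t.y+dcy1, t.x+dcx2, t.y+dcy2, t.x+dx, t.y+dy)
}
func (t *tracePath) ArcTo(cx, cy, rx, ry, startAngle, angle float64) { fmt.Println("ArcTo") }
func (t *tracePath) RArcTo(dcx, dcy, rx, ry, startAngle, angle float64) {
    t.ArcTo(t.x+dcx, t.y+dcy, rx, ry, startAngle, angle)
}
func (t *tracePath) Close() { fmt.Println("Close") }

func main() {
    var p draw2d.Path = &tracePath{}
    p.MoveTo(0, 0)
    p.RLineTo(10, 0)
    p.Close()
}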
-// created: 13/12/2010 by Laurent Le Goff - -package draw2d - -import ( - "bosun.org/_third_party/code.google.com/p/freetype-go/freetype/raster" -) - -type VertexAdder struct { - command VertexCommand - adder raster.Adder -} - -func NewVertexAdder(adder raster.Adder) *VertexAdder { - return &VertexAdder{VertexNoCommand, adder} -} - -func (vertexAdder *VertexAdder) NextCommand(cmd VertexCommand) { - vertexAdder.command = cmd -} - -func (vertexAdder *VertexAdder) Vertex(x, y float64) { - switch vertexAdder.command { - case VertexStartCommand: - vertexAdder.adder.Start(raster.Point{raster.Fix32(x * 256), raster.Fix32(y * 256)}) - default: - vertexAdder.adder.Add1(raster.Point{raster.Fix32(x * 256), raster.Fix32(y * 256)}) - } - vertexAdder.command = VertexNoCommand -} - -type PathAdder struct { - adder raster.Adder - firstPoint raster.Point - ApproximationScale float64 -} - -func NewPathAdder(adder raster.Adder) *PathAdder { - return &PathAdder{adder, raster.Point{0, 0}, 1} -} - -func (pathAdder *PathAdder) Convert(paths ...*PathStorage) { - for _, path := range paths { - j := 0 - for _, cmd := range path.commands { - switch cmd { - case MoveTo: - pathAdder.firstPoint = raster.Point{raster.Fix32(path.vertices[j] * 256), raster.Fix32(path.vertices[j+1] * 256)} - pathAdder.adder.Start(pathAdder.firstPoint) - j += 2 - case LineTo: - pathAdder.adder.Add1(raster.Point{raster.Fix32(path.vertices[j] * 256), raster.Fix32(path.vertices[j+1] * 256)}) - j += 2 - case QuadCurveTo: - pathAdder.adder.Add2(raster.Point{raster.Fix32(path.vertices[j] * 256), raster.Fix32(path.vertices[j+1] * 256)}, raster.Point{raster.Fix32(path.vertices[j+2] * 256), raster.Fix32(path.vertices[j+3] * 256)}) - j += 4 - case CubicCurveTo: - pathAdder.adder.Add3(raster.Point{raster.Fix32(path.vertices[j] * 256), raster.Fix32(path.vertices[j+1] * 256)}, raster.Point{raster.Fix32(path.vertices[j+2] * 256), raster.Fix32(path.vertices[j+3] * 256)}, raster.Point{raster.Fix32(path.vertices[j+4] * 256), raster.Fix32(path.vertices[j+5] * 256)}) - j += 6 - case ArcTo: - lastPoint := arcAdder(pathAdder.adder, path.vertices[j], path.vertices[j+1], path.vertices[j+2], path.vertices[j+3], path.vertices[j+4], path.vertices[j+5], pathAdder.ApproximationScale) - pathAdder.adder.Add1(lastPoint) - j += 6 - case Close: - pathAdder.adder.Add1(pathAdder.firstPoint) - } - } - } -} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/path_converter.go b/_third_party/code.google.com/p/draw2d/draw2d/path_converter.go deleted file mode 100644 index 0ef96b84db..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/path_converter.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. -// created: 06/12/2010 by Laurent Le Goff - -package draw2d - -import ( - "math" -) - -type PathConverter struct { - converter VertexConverter - ApproximationScale, AngleTolerance, CuspLimit float64 - startX, startY, x, y float64 -} - -func NewPathConverter(converter VertexConverter) *PathConverter { - return &PathConverter{converter, 1, 0, 0, 0, 0, 0, 0} -} - -func (c *PathConverter) Convert(paths ...*PathStorage) { - for _, path := range paths { - j := 0 - for _, cmd := range path.commands { - j = j + c.ConvertCommand(cmd, path.vertices[j:]...) 
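Reviewer note: the *256 conversions in path_adder.go above exist because the old freetype-go rasterizer consumes 24.8 fixed-point coordinates (raster.Fix32); its replacement, github.com/golang/freetype, uses the 26.6 fixed.Int26_6 type from golang.org/x/image instead. A standalone illustration of the 24.8 encoding:

package main

import "fmt"

func main() {
    x := 12.5
    fix := int32(x * 256) // 24.8 fixed point: high 24 bits integer, low 8 bits fraction
    fmt.Println(fix)                  // 3200
    fmt.Println(float64(fix) / 256.0) // 12.5 round-trips exactly
}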
- } - c.converter.NextCommand(VertexStopCommand) - } -} - -func (c *PathConverter) ConvertCommand(cmd PathCmd, vertices ...float64) int { - switch cmd { - case MoveTo: - c.x, c.y = vertices[0], vertices[1] - c.startX, c.startY = c.x, c.y - c.converter.NextCommand(VertexStopCommand) - c.converter.NextCommand(VertexStartCommand) - c.converter.Vertex(c.x, c.y) - return 2 - case LineTo: - c.x, c.y = vertices[0], vertices[1] - if c.startX == c.x && c.startY == c.y { - c.converter.NextCommand(VertexCloseCommand) - } - c.converter.Vertex(c.x, c.y) - c.converter.NextCommand(VertexJoinCommand) - return 2 - case QuadCurveTo: - quadraticBezier(c.converter, c.x, c.y, vertices[0], vertices[1], vertices[2], vertices[3], c.ApproximationScale, c.AngleTolerance) - c.x, c.y = vertices[2], vertices[3] - if c.startX == c.x && c.startY == c.y { - c.converter.NextCommand(VertexCloseCommand) - } - c.converter.Vertex(c.x, c.y) - return 4 - case CubicCurveTo: - cubicBezier(c.converter, c.x, c.y, vertices[0], vertices[1], vertices[2], vertices[3], vertices[4], vertices[5], c.ApproximationScale, c.AngleTolerance, c.CuspLimit) - c.x, c.y = vertices[4], vertices[5] - if c.startX == c.x && c.startY == c.y { - c.converter.NextCommand(VertexCloseCommand) - } - c.converter.Vertex(c.x, c.y) - return 6 - case ArcTo: - c.x, c.y = arc(c.converter, vertices[0], vertices[1], vertices[2], vertices[3], vertices[4], vertices[5], c.ApproximationScale) - if c.startX == c.x && c.startY == c.y { - c.converter.NextCommand(VertexCloseCommand) - } - c.converter.Vertex(c.x, c.y) - return 6 - case Close: - c.converter.NextCommand(VertexCloseCommand) - c.converter.Vertex(c.startX, c.startY) - return 0 - } - return 0 -} - -func (c *PathConverter) MoveTo(x, y float64) *PathConverter { - c.x, c.y = x, y - c.startX, c.startY = c.x, c.y - c.converter.NextCommand(VertexStopCommand) - c.converter.NextCommand(VertexStartCommand) - c.converter.Vertex(c.x, c.y) - return c -} - -func (c *PathConverter) RMoveTo(dx, dy float64) *PathConverter { - c.MoveTo(c.x+dx, c.y+dy) - return c -} - -func (c *PathConverter) LineTo(x, y float64) *PathConverter { - c.x, c.y = x, y - if c.startX == c.x && c.startY == c.y { - c.converter.NextCommand(VertexCloseCommand) - } - c.converter.Vertex(c.x, c.y) - c.converter.NextCommand(VertexJoinCommand) - return c -} - -func (c *PathConverter) RLineTo(dx, dy float64) *PathConverter { - c.LineTo(c.x+dx, c.y+dy) - return c -} - -func (c *PathConverter) QuadCurveTo(cx, cy, x, y float64) *PathConverter { - quadraticBezier(c.converter, c.x, c.y, cx, cy, x, y, c.ApproximationScale, c.AngleTolerance) - c.x, c.y = x, y - if c.startX == c.x && c.startY == c.y { - c.converter.NextCommand(VertexCloseCommand) - } - c.converter.Vertex(c.x, c.y) - return c -} - -func (c *PathConverter) RQuadCurveTo(dcx, dcy, dx, dy float64) *PathConverter { - c.QuadCurveTo(c.x+dcx, c.y+dcy, c.x+dx, c.y+dy) - return c -} - -func (c *PathConverter) CubicCurveTo(cx1, cy1, cx2, cy2, x, y float64) *PathConverter { - cubicBezier(c.converter, c.x, c.y, cx1, cy1, cx2, cy2, x, y, c.ApproximationScale, c.AngleTolerance, c.CuspLimit) - c.x, c.y = x, y - if c.startX == c.x && c.startY == c.y { - c.converter.NextCommand(VertexCloseCommand) - } - c.converter.Vertex(c.x, c.y) - return c -} - -func (c *PathConverter) RCubicCurveTo(dcx1, dcy1, dcx2, dcy2, dx, dy float64) *PathConverter { - c.CubicCurveTo(c.x+dcx1, c.y+dcy1, c.x+dcx2, c.y+dcy2, c.x+dx, c.y+dy) - return c -} - -func (c *PathConverter) ArcTo(cx, cy, rx, ry, startAngle, angle float64) *PathConverter { - 
endAngle := startAngle + angle - clockWise := true - if angle < 0 { - clockWise = false - } - // normalize - if clockWise { - for endAngle < startAngle { - endAngle += math.Pi * 2.0 - } - } else { - for startAngle < endAngle { - startAngle += math.Pi * 2.0 - } - } - startX := cx + math.Cos(startAngle)*rx - startY := cy + math.Sin(startAngle)*ry - c.MoveTo(startX, startY) - c.x, c.y = arc(c.converter, cx, cy, rx, ry, startAngle, angle, c.ApproximationScale) - if c.startX == c.x && c.startY == c.y { - c.converter.NextCommand(VertexCloseCommand) - } - c.converter.Vertex(c.x, c.y) - return c -} - -func (c *PathConverter) RArcTo(dcx, dcy, rx, ry, startAngle, angle float64) *PathConverter { - c.ArcTo(c.x+dcx, c.y+dcy, rx, ry, startAngle, angle) - return c -} - -func (c *PathConverter) Close() *PathConverter { - c.converter.NextCommand(VertexCloseCommand) - c.converter.Vertex(c.startX, c.startY) - return c -} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/path_storage.go b/_third_party/code.google.com/p/draw2d/draw2d/path_storage.go deleted file mode 100644 index c2a887037a..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/path_storage.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. -// created: 21/11/2010 by Laurent Le Goff - -package draw2d - -import ( - "fmt" - "math" -) - -type PathCmd int - -const ( - MoveTo PathCmd = iota - LineTo - QuadCurveTo - CubicCurveTo - ArcTo - Close -) - -type PathStorage struct { - commands []PathCmd - vertices []float64 - x, y float64 -} - -func NewPathStorage() (p *PathStorage) { - p = new(PathStorage) - p.commands = make([]PathCmd, 0, 256) - p.vertices = make([]float64, 0, 256) - return -} - -func (p *PathStorage) Clear() { - p.commands = p.commands[0:0] - p.vertices = p.vertices[0:0] - return -} - -func (p *PathStorage) appendToPath(cmd PathCmd, vertices ...float64) { - if cap(p.vertices) <= len(p.vertices)+6 { - a := make([]PathCmd, len(p.commands), cap(p.commands)+256) - b := make([]float64, len(p.vertices), cap(p.vertices)+256) - copy(a, p.commands) - p.commands = a - copy(b, p.vertices) - p.vertices = b - } - p.commands = p.commands[0 : len(p.commands)+1] - p.commands[len(p.commands)-1] = cmd - copy(p.vertices[len(p.vertices):len(p.vertices)+len(vertices)], vertices) - p.vertices = p.vertices[0 : len(p.vertices)+len(vertices)] -} - -func (src *PathStorage) Copy() (dest *PathStorage) { - dest = new(PathStorage) - dest.commands = make([]PathCmd, len(src.commands)) - copy(dest.commands, src.commands) - dest.vertices = make([]float64, len(src.vertices)) - copy(dest.vertices, src.vertices) - return dest -} - -func (p *PathStorage) LastPoint() (x, y float64) { - return p.x, p.y -} - -func (p *PathStorage) IsEmpty() bool { - return len(p.commands) == 0 -} - -func (p *PathStorage) Close() *PathStorage { - p.appendToPath(Close) - return p -} - -func (p *PathStorage) MoveTo(x, y float64) *PathStorage { - p.appendToPath(MoveTo, x, y) - p.x = x - p.y = y - return p -} - -func (p *PathStorage) RMoveTo(dx, dy float64) *PathStorage { - x, y := p.LastPoint() - p.MoveTo(x+dx, y+dy) - return p -} - -func (p *PathStorage) LineTo(x, y float64) *PathStorage { - p.appendToPath(LineTo, x, y) - p.x = x - p.y = y - return p -} - -func (p *PathStorage) RLineTo(dx, dy float64) *PathStorage { - x, y := p.LastPoint() - p.LineTo(x+dx, y+dy) - return p -} - -func (p *PathStorage) QuadCurveTo(cx, cy, x, y float64) *PathStorage { - p.appendToPath(QuadCurveTo, cx, cy, x, y) - p.x = x - p.y = y - return p -} - -func (p 
*PathStorage) RQuadCurveTo(dcx, dcy, dx, dy float64) *PathStorage { - x, y := p.LastPoint() - p.QuadCurveTo(x+dcx, y+dcy, x+dx, y+dy) - return p -} - -func (p *PathStorage) CubicCurveTo(cx1, cy1, cx2, cy2, x, y float64) *PathStorage { - p.appendToPath(CubicCurveTo, cx1, cy1, cx2, cy2, x, y) - p.x = x - p.y = y - return p -} - -func (p *PathStorage) RCubicCurveTo(dcx1, dcy1, dcx2, dcy2, dx, dy float64) *PathStorage { - x, y := p.LastPoint() - p.CubicCurveTo(x+dcx1, y+dcy1, x+dcx2, y+dcy2, x+dx, y+dy) - return p -} - -func (p *PathStorage) ArcTo(cx, cy, rx, ry, startAngle, angle float64) *PathStorage { - endAngle := startAngle + angle - clockWise := true - if angle < 0 { - clockWise = false - } - // normalize - if clockWise { - for endAngle < startAngle { - endAngle += math.Pi * 2.0 - } - } else { - for startAngle < endAngle { - startAngle += math.Pi * 2.0 - } - } - startX := cx + math.Cos(startAngle)*rx - startY := cy + math.Sin(startAngle)*ry - if len(p.commands) > 0 { - p.LineTo(startX, startY) - } else { - p.MoveTo(startX, startY) - } - p.appendToPath(ArcTo, cx, cy, rx, ry, startAngle, angle) - p.x = cx + math.Cos(endAngle)*rx - p.y = cy + math.Sin(endAngle)*ry - return p -} - -func (p *PathStorage) RArcTo(dcx, dcy, rx, ry, startAngle, angle float64) *PathStorage { - x, y := p.LastPoint() - p.ArcTo(x+dcx, y+dcy, rx, ry, startAngle, angle) - return p -} - -func (p *PathStorage) String() string { - s := "" - j := 0 - for _, cmd := range p.commands { - switch cmd { - case MoveTo: - s += fmt.Sprintf("MoveTo: %f, %f\n", p.vertices[j], p.vertices[j+1]) - j = j + 2 - case LineTo: - s += fmt.Sprintf("LineTo: %f, %f\n", p.vertices[j], p.vertices[j+1]) - j = j + 2 - case QuadCurveTo: - s += fmt.Sprintf("QuadCurveTo: %f, %f, %f, %f\n", p.vertices[j], p.vertices[j+1], p.vertices[j+2], p.vertices[j+3]) - j = j + 4 - case CubicCurveTo: - s += fmt.Sprintf("CubicCurveTo: %f, %f, %f, %f, %f, %f\n", p.vertices[j], p.vertices[j+1], p.vertices[j+2], p.vertices[j+3], p.vertices[j+4], p.vertices[j+5]) - j = j + 6 - case ArcTo: - s += fmt.Sprintf("ArcTo: %f, %f, %f, %f, %f, %f\n", p.vertices[j], p.vertices[j+1], p.vertices[j+2], p.vertices[j+3], p.vertices[j+4], p.vertices[j+5]) - j = j + 6 - case Close: - s += "Close\n" - } - } - return s -} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/stroker.go b/_third_party/code.google.com/p/draw2d/draw2d/stroker.go deleted file mode 100644 index 9e40361e98..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/stroker.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. 
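The PathStorage removed here flattens a path into two parallel slices: a command list and a packed float64 vertex array, where each command owns a fixed number of coordinates (MoveTo/LineTo 2, QuadCurveTo 4, CubicCurveTo and ArcTo 6, Close 0). A minimal sketch of walking such a stream, using illustrative names rather than the vendored API:

package main

import "fmt"

type pathCmd int

const (
	moveTo  pathCmd = iota // consumes 2 coordinates
	lineTo                 // consumes 2 coordinates
	cubicTo                // consumes 6 coordinates
)

var arity = map[pathCmd]int{moveTo: 2, lineTo: 2, cubicTo: 6}

// walk hands each command its slice of the packed vertex array,
// advancing the cursor by the command's arity, as Convert does above.
func walk(cmds []pathCmd, verts []float64) {
	j := 0
	for _, c := range cmds {
		n := arity[c]
		fmt.Println(c, verts[j:j+n])
		j += n
	}
}

func main() {
	walk([]pathCmd{moveTo, lineTo}, []float64{0, 0, 10, 5})
}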
-// created: 13/12/2010 by Laurent Le Goff - -package draw2d - -type Cap int - -const ( - RoundCap Cap = iota - ButtCap - SquareCap -) - -type Join int - -const ( - BevelJoin Join = iota - RoundJoin - MiterJoin -) - -type LineStroker struct { - Next VertexConverter - HalfLineWidth float64 - Cap Cap - Join Join - vertices []float64 - rewind []float64 - x, y, nx, ny float64 - command VertexCommand -} - -func NewLineStroker(c Cap, j Join, converter VertexConverter) *LineStroker { - l := new(LineStroker) - l.Next = converter - l.HalfLineWidth = 0.5 - l.vertices = make([]float64, 0, 256) - l.rewind = make([]float64, 0, 256) - l.Cap = c - l.Join = j - l.command = VertexNoCommand - return l -} - -func (l *LineStroker) NextCommand(command VertexCommand) { - l.command = command - if command == VertexStopCommand { - l.Next.NextCommand(VertexStartCommand) - for i, j := 0, 1; j < len(l.vertices); i, j = i+2, j+2 { - l.Next.Vertex(l.vertices[i], l.vertices[j]) - l.Next.NextCommand(VertexNoCommand) - } - for i, j := len(l.rewind)-2, len(l.rewind)-1; j > 0; i, j = i-2, j-2 { - l.Next.NextCommand(VertexNoCommand) - l.Next.Vertex(l.rewind[i], l.rewind[j]) - } - if len(l.vertices) > 1 { - l.Next.NextCommand(VertexNoCommand) - l.Next.Vertex(l.vertices[0], l.vertices[1]) - } - l.Next.NextCommand(VertexStopCommand) - // reinit vertices - l.vertices = l.vertices[0:0] - l.rewind = l.rewind[0:0] - l.x, l.y, l.nx, l.ny = 0, 0, 0, 0 - } -} - -func (l *LineStroker) Vertex(x, y float64) { - switch l.command { - case VertexNoCommand: - l.line(l.x, l.y, x, y) - case VertexJoinCommand: - l.joinLine(l.x, l.y, l.nx, l.ny, x, y) - case VertexStartCommand: - l.x, l.y = x, y - case VertexCloseCommand: - l.line(l.x, l.y, x, y) - l.joinLine(l.x, l.y, l.nx, l.ny, x, y) - l.closePolygon() - } - l.command = VertexNoCommand -} - -func (l *LineStroker) appendVertex(vertices ...float64) { - s := len(vertices) / 2 - if len(l.vertices)+s >= cap(l.vertices) { - v := make([]float64, len(l.vertices), cap(l.vertices)+128) - copy(v, l.vertices) - l.vertices = v - v = make([]float64, len(l.rewind), cap(l.rewind)+128) - copy(v, l.rewind) - l.rewind = v - } - - copy(l.vertices[len(l.vertices):len(l.vertices)+s], vertices[:s]) - l.vertices = l.vertices[0 : len(l.vertices)+s] - copy(l.rewind[len(l.rewind):len(l.rewind)+s], vertices[s:]) - l.rewind = l.rewind[0 : len(l.rewind)+s] - -} - -func (l *LineStroker) closePolygon() { - if len(l.vertices) > 1 { - l.appendVertex(l.vertices[0], l.vertices[1], l.rewind[0], l.rewind[1]) - } -} - -func (l *LineStroker) line(x1, y1, x2, y2 float64) { - dx := (x2 - x1) - dy := (y2 - y1) - d := vectorDistance(dx, dy) - if d != 0 { - nx := dy * l.HalfLineWidth / d - ny := -(dx * l.HalfLineWidth / d) - l.appendVertex(x1+nx, y1+ny, x2+nx, y2+ny, x1-nx, y1-ny, x2-nx, y2-ny) - l.x, l.y, l.nx, l.ny = x2, y2, nx, ny - } -} - -func (l *LineStroker) joinLine(x1, y1, nx1, ny1, x2, y2 float64) { - dx := (x2 - x1) - dy := (y2 - y1) - d := vectorDistance(dx, dy) - - if d != 0 { - nx := dy * l.HalfLineWidth / d - ny := -(dx * l.HalfLineWidth / d) - /* l.join(x1, y1, x1 + nx, y1 - ny, nx, ny, x1 + ny2, y1 + nx2, nx2, ny2) - l.join(x1, y1, x1 - ny1, y1 - nx1, nx1, ny1, x1 - ny2, y1 - nx2, nx2, ny2)*/ - - l.appendVertex(x1+nx, y1+ny, x2+nx, y2+ny, x1-nx, y1-ny, x2-nx, y2-ny) - l.x, l.y, l.nx, l.ny = x2, y2, nx, ny - } -} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/transform.go b/_third_party/code.google.com/p/draw2d/draw2d/transform.go deleted file mode 100644 index 7b1ae50b4b..0000000000 --- 
a/_third_party/code.google.com/p/draw2d/draw2d/transform.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. -// created: 21/11/2010 by Laurent Le Goff - -package draw2d - -import ( - "bosun.org/_third_party/code.google.com/p/freetype-go/freetype/raster" - "math" -) - -type MatrixTransform [6]float64 - -const ( - epsilon = 1e-6 -) - -func (tr MatrixTransform) Determinant() float64 { - return tr[0]*tr[3] - tr[1]*tr[2] -} - -func (tr MatrixTransform) Transform(points ...*float64) { - for i, j := 0, 1; j < len(points); i, j = i+2, j+2 { - x := *points[i] - y := *points[j] - *points[i] = x*tr[0] + y*tr[2] + tr[4] - *points[j] = x*tr[1] + y*tr[3] + tr[5] - } -} - -func (tr MatrixTransform) TransformArray(points []float64) { - for i, j := 0, 1; j < len(points); i, j = i+2, j+2 { - x := points[i] - y := points[j] - points[i] = x*tr[0] + y*tr[2] + tr[4] - points[j] = x*tr[1] + y*tr[3] + tr[5] - } -} - -func (tr MatrixTransform) TransformRectangle(x0, y0, x2, y2 *float64) { - x1 := *x2 - y1 := *y0 - x3 := *x0 - y3 := *y2 - tr.Transform(x0, y0, &x1, &y1, x2, y2, &x3, &y3) - *x0, x1 = minMax(*x0, x1) - *x2, x3 = minMax(*x2, x3) - *y0, y1 = minMax(*y0, y1) - *y2, y3 = minMax(*y2, y3) - - *x0 = min(*x0, *x2) - *y0 = min(*y0, *y2) - *x2 = max(x1, x3) - *y2 = max(y1, y3) -} - -func (tr MatrixTransform) TransformRasterPoint(points ...*raster.Point) { - for _, point := range points { - x := float64(point.X) / 256 - y := float64(point.Y) / 256 - point.X = raster.Fix32((x*tr[0] + y*tr[2] + tr[4]) * 256) - point.Y = raster.Fix32((x*tr[1] + y*tr[3] + tr[5]) * 256) - } -} - -func (tr MatrixTransform) InverseTransform(points ...*float64) { - d := tr.Determinant() // matrix determinant - for i, j := 0, 1; j < len(points); i, j = i+2, j+2 { - x := *points[i] - y := *points[j] - *points[i] = ((x-tr[4])*tr[3] - (y-tr[5])*tr[2]) / d - *points[j] = ((y-tr[5])*tr[0] - (x-tr[4])*tr[1]) / d - } -} - -// ******************** Vector transformations ******************** - -func (tr MatrixTransform) VectorTransform(points ...*float64) { - for i, j := 0, 1; j < len(points); i, j = i+2, j+2 { - x := *points[i] - y := *points[j] - *points[i] = x*tr[0] + y*tr[2] - *points[j] = x*tr[1] + y*tr[3] - } -} - -// ******************** Transformations creation ******************** - -/** Creates an identity transformation. */ -func NewIdentityMatrix() MatrixTransform { - return [6]float64{1, 0, 0, 1, 0, 0} -} - -/** - * Creates a transformation with a translation, that, - * transform point1 into point2. - */ -func NewTranslationMatrix(tx, ty float64) MatrixTransform { - return [6]float64{1, 0, 0, 1, tx, ty} -} - -/** - * Creates a transformation with a sx, sy scale factor - */ -func NewScaleMatrix(sx, sy float64) MatrixTransform { - return [6]float64{sx, 0, 0, sy, 0, 0} -} - -/** - * Creates a rotation transformation. - */ -func NewRotationMatrix(angle float64) MatrixTransform { - c := math.Cos(angle) - s := math.Sin(angle) - return [6]float64{c, s, -s, c, 0, 0} -} - -/** - * Creates a transformation, combining a scale and a translation, that transform rectangle1 into rectangle2. 
- */ -func NewMatrixTransform(rectangle1, rectangle2 [4]float64) MatrixTransform { - xScale := (rectangle2[2] - rectangle2[0]) / (rectangle1[2] - rectangle1[0]) - yScale := (rectangle2[3] - rectangle2[1]) / (rectangle1[3] - rectangle1[1]) - xOffset := rectangle2[0] - (rectangle1[0] * xScale) - yOffset := rectangle2[1] - (rectangle1[1] * yScale) - return [6]float64{xScale, 0, 0, yScale, xOffset, yOffset} -} - -// ******************** Transformations operations ******************** - -/** - * Returns a transformation that is the inverse of the given transformation. - */ -func (tr MatrixTransform) GetInverseTransformation() MatrixTransform { - d := tr.Determinant() // matrix determinant - return [6]float64{ - tr[3] / d, - -tr[1] / d, - -tr[2] / d, - tr[0] / d, - (tr[2]*tr[5] - tr[3]*tr[4]) / d, - (tr[1]*tr[4] - tr[0]*tr[5]) / d} -} - -func (tr1 MatrixTransform) Multiply(tr2 MatrixTransform) MatrixTransform { - return [6]float64{ - tr1[0]*tr2[0] + tr1[1]*tr2[2], - tr1[1]*tr2[3] + tr1[0]*tr2[1], - tr1[2]*tr2[0] + tr1[3]*tr2[2], - tr1[3]*tr2[3] + tr1[2]*tr2[1], - tr1[4]*tr2[0] + tr1[5]*tr2[2] + tr2[4], - tr1[5]*tr2[3] + tr1[4]*tr2[1] + tr2[5]} -} - -func (tr *MatrixTransform) Scale(sx, sy float64) *MatrixTransform { - tr[0] = sx * tr[0] - tr[1] = sx * tr[1] - tr[2] = sy * tr[2] - tr[3] = sy * tr[3] - return tr -} - -func (tr *MatrixTransform) Translate(tx, ty float64) *MatrixTransform { - tr[4] = tx*tr[0] + ty*tr[2] + tr[4] - tr[5] = ty*tr[3] + tx*tr[1] + tr[5] - return tr -} - -func (tr *MatrixTransform) Rotate(angle float64) *MatrixTransform { - c := math.Cos(angle) - s := math.Sin(angle) - t0 := c*tr[0] + s*tr[2] - t1 := s*tr[3] + c*tr[1] - t2 := c*tr[2] - s*tr[0] - t3 := c*tr[3] - s*tr[1] - tr[0] = t0 - tr[1] = t1 - tr[2] = t2 - tr[3] = t3 - return tr -} - -func (tr MatrixTransform) GetTranslation() (x, y float64) { - return tr[4], tr[5] -} - -func (tr MatrixTransform) GetScaling() (x, y float64) { - return tr[0], tr[3] -} - -func (tr MatrixTransform) GetScale() float64 { - x := 0.707106781*tr[0] + 0.707106781*tr[1] - y := 0.707106781*tr[2] + 0.707106781*tr[3] - return math.Sqrt(x*x + y*y) -} - -func (tr MatrixTransform) GetMaxAbsScaling() (s float64) { - sx := math.Abs(tr[0]) - sy := math.Abs(tr[3]) - if sx > sy { - return sx - } - return sy -} - -func (tr MatrixTransform) GetMinAbsScaling() (s float64) { - sx := math.Abs(tr[0]) - sy := math.Abs(tr[3]) - if sx > sy { - return sy - } - return sx -} - -// ******************** Testing ******************** - -/** - * Tests if a two transformation are equal. A tolerance is applied when - * comparing matrix elements. - */ -func (tr1 MatrixTransform) Equals(tr2 MatrixTransform) bool { - for i := 0; i < 6; i = i + 1 { - if !fequals(tr1[i], tr2[i]) { - return false - } - } - return true -} - -/** - * Tests if a transformation is the identity transformation. A tolerance - * is applied when comparing matrix elements. - */ -func (tr MatrixTransform) IsIdentity() bool { - return fequals(tr[4], 0) && fequals(tr[5], 0) && tr.IsTranslation() -} - -/** - * Tests if a transformation is is a pure translation. A tolerance - * is applied when comparing matrix elements. - */ -func (tr MatrixTransform) IsTranslation() bool { - return fequals(tr[0], 1) && fequals(tr[1], 0) && fequals(tr[2], 0) && fequals(tr[3], 1) -} - -/** - * Compares two floats. 
- * return true if the distance between the two floats is less than epsilon, false otherwise - */ -func fequals(float1, float2 float64) bool { - return math.Abs(float1-float2) <= epsilon -} - -// this VertexConverter apply the Matrix transformation tr -type VertexMatrixTransform struct { - tr MatrixTransform - Next VertexConverter -} - -func NewVertexMatrixTransform(tr MatrixTransform, converter VertexConverter) *VertexMatrixTransform { - return &VertexMatrixTransform{tr, converter} -} - -// Vertex Matrix Transform -func (vmt *VertexMatrixTransform) NextCommand(command VertexCommand) { - vmt.Next.NextCommand(command) -} - -func (vmt *VertexMatrixTransform) Vertex(x, y float64) { - u := x*vmt.tr[0] + y*vmt.tr[2] + vmt.tr[4] - v := x*vmt.tr[1] + y*vmt.tr[3] + vmt.tr[5] - vmt.Next.Vertex(u, v) -} - -// this adder apply a Matrix transformation to points -type MatrixTransformAdder struct { - tr MatrixTransform - next raster.Adder -} - -func NewMatrixTransformAdder(tr MatrixTransform, adder raster.Adder) *MatrixTransformAdder { - return &MatrixTransformAdder{tr, adder} -} - -// Start starts a new curve at the given point. -func (mta MatrixTransformAdder) Start(a raster.Point) { - mta.tr.TransformRasterPoint(&a) - mta.next.Start(a) -} - -// Add1 adds a linear segment to the current curve. -func (mta MatrixTransformAdder) Add1(b raster.Point) { - mta.tr.TransformRasterPoint(&b) - mta.next.Add1(b) -} - -// Add2 adds a quadratic segment to the current curve. -func (mta MatrixTransformAdder) Add2(b, c raster.Point) { - mta.tr.TransformRasterPoint(&b, &c) - mta.next.Add2(b, c) -} - -// Add3 adds a cubic segment to the current curve. -func (mta MatrixTransformAdder) Add3(b, c, d raster.Point) { - mta.tr.TransformRasterPoint(&b, &c, &d) - mta.next.Add3(b, c, d) -} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/vertex2d.go b/_third_party/code.google.com/p/draw2d/draw2d/vertex2d.go deleted file mode 100644 index 4e4d4fd83e..0000000000 --- a/_third_party/code.google.com/p/draw2d/draw2d/vertex2d.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2010 The draw2d Authors. All rights reserved. -// created: 21/11/2010 by Laurent Le Goff - -package draw2d - -type VertexCommand byte - -const ( - VertexNoCommand VertexCommand = iota - VertexStartCommand - VertexJoinCommand - VertexCloseCommand - VertexStopCommand -) - -type VertexConverter interface { - NextCommand(cmd VertexCommand) - Vertex(x, y float64) -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/Makefile b/_third_party/code.google.com/p/graphics-go/graphics/Makefile deleted file mode 100644 index 28a06f0e84..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2011 The Graphics-Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -include $(GOROOT)/src/Make.inc - -TARG=code.google.com/p/graphics-go/graphics -GOFILES=\ - affine.go\ - blur.go\ - rotate.go\ - scale.go\ - thumbnail.go\ - -include $(GOROOT)/src/Make.pkg diff --git a/_third_party/code.google.com/p/graphics-go/graphics/affine.go b/_third_party/code.google.com/p/graphics-go/graphics/affine.go deleted file mode 100644 index 7a71ba5756..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/affine.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2011 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
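The deleted transform.go encodes a 2-D affine map as [6]float64 in a, b, c, d, e, f order: Transform computes x' = a*x + c*y + e and y' = b*x + d*y + f, and InverseTransform divides through by the determinant a*d - b*c. (The graphics-go Affine that follows stores a full row-major 3x3 instead.) A compact sketch of the 6-element convention, with illustrative names:

package main

import "fmt"

type matrix [6]float64 // [a b c d e f]

// apply maps (x, y) to (a*x + c*y + e, b*x + d*y + f),
// the same formula as MatrixTransform.Transform.
func (tr matrix) apply(x, y float64) (float64, float64) {
	return x*tr[0] + y*tr[2] + tr[4], x*tr[1] + y*tr[3] + tr[5]
}

func main() {
	scale := matrix{2, 0, 0, 2, 0, 0}     // uniform 2x scale
	translate := matrix{1, 0, 0, 1, 5, 7} // shift by (5, 7)
	x, y := scale.apply(3, 4)
	x, y = translate.apply(x, y)
	fmt.Println(x, y) // 11 15
}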
- -package graphics - -import ( - "bosun.org/_third_party/code.google.com/p/graphics-go/graphics/interp" - "errors" - "image" - "image/draw" - "math" -) - -// I is the identity Affine transform matrix. -var I = Affine{ - 1, 0, 0, - 0, 1, 0, - 0, 0, 1, -} - -// Affine is a 3x3 2D affine transform matrix. -// M(i,j) is Affine[i*3+j]. -type Affine [9]float64 - -// Mul returns the multiplication of two affine transform matrices. -func (a Affine) Mul(b Affine) Affine { - return Affine{ - a[0]*b[0] + a[1]*b[3] + a[2]*b[6], - a[0]*b[1] + a[1]*b[4] + a[2]*b[7], - a[0]*b[2] + a[1]*b[5] + a[2]*b[8], - a[3]*b[0] + a[4]*b[3] + a[5]*b[6], - a[3]*b[1] + a[4]*b[4] + a[5]*b[7], - a[3]*b[2] + a[4]*b[5] + a[5]*b[8], - a[6]*b[0] + a[7]*b[3] + a[8]*b[6], - a[6]*b[1] + a[7]*b[4] + a[8]*b[7], - a[6]*b[2] + a[7]*b[5] + a[8]*b[8], - } -} - -func (a Affine) transformRGBA(dst *image.RGBA, src *image.RGBA, i interp.RGBA) error { - srcb := src.Bounds() - b := dst.Bounds() - for y := b.Min.Y; y < b.Max.Y; y++ { - for x := b.Min.X; x < b.Max.X; x++ { - sx, sy := a.pt(x, y) - if inBounds(srcb, sx, sy) { - c := i.RGBA(src, sx, sy) - off := (y-dst.Rect.Min.Y)*dst.Stride + (x-dst.Rect.Min.X)*4 - dst.Pix[off+0] = c.R - dst.Pix[off+1] = c.G - dst.Pix[off+2] = c.B - dst.Pix[off+3] = c.A - } - } - } - return nil -} - -// Transform applies the affine transform to src and produces dst. -func (a Affine) Transform(dst draw.Image, src image.Image, i interp.Interp) error { - if dst == nil { - return errors.New("graphics: dst is nil") - } - if src == nil { - return errors.New("graphics: src is nil") - } - - // RGBA fast path. - dstRGBA, dstOk := dst.(*image.RGBA) - srcRGBA, srcOk := src.(*image.RGBA) - interpRGBA, interpOk := i.(interp.RGBA) - if dstOk && srcOk && interpOk { - return a.transformRGBA(dstRGBA, srcRGBA, interpRGBA) - } - - srcb := src.Bounds() - b := dst.Bounds() - for y := b.Min.Y; y < b.Max.Y; y++ { - for x := b.Min.X; x < b.Max.X; x++ { - sx, sy := a.pt(x, y) - if inBounds(srcb, sx, sy) { - dst.Set(x, y, i.Interp(src, sx, sy)) - } - } - } - return nil -} - -func inBounds(b image.Rectangle, x, y float64) bool { - if x < float64(b.Min.X) || x >= float64(b.Max.X) { - return false - } - if y < float64(b.Min.Y) || y >= float64(b.Max.Y) { - return false - } - return true -} - -func (a Affine) pt(x0, y0 int) (x1, y1 float64) { - fx := float64(x0) + 0.5 - fy := float64(y0) + 0.5 - x1 = fx*a[0] + fy*a[1] + a[2] - y1 = fx*a[3] + fy*a[4] + a[5] - return x1, y1 -} - -// TransformCenter applies the affine transform to src and produces dst. -// Equivalent to -// a.CenterFit(dst, src).Transform(dst, src, i). -func (a Affine) TransformCenter(dst draw.Image, src image.Image, i interp.Interp) error { - if dst == nil { - return errors.New("graphics: dst is nil") - } - if src == nil { - return errors.New("graphics: src is nil") - } - - return a.CenterFit(dst.Bounds(), src.Bounds()).Transform(dst, src, i) -} - -// Scale produces a scaling transform of factors x and y. -func (a Affine) Scale(x, y float64) Affine { - return a.Mul(Affine{ - 1 / x, 0, 0, - 0, 1 / y, 0, - 0, 0, 1, - }) -} - -// Rotate produces a clockwise rotation transform of angle, in radians. -func (a Affine) Rotate(angle float64) Affine { - s, c := math.Sincos(angle) - return a.Mul(Affine{ - +c, +s, +0, - -s, +c, +0, - +0, +0, +1, - }) -} - -// Shear produces a shear transform by the slopes x and y. 
-func (a Affine) Shear(x, y float64) Affine { - d := 1 - x*y - return a.Mul(Affine{ - +1 / d, -x / d, 0, - -y / d, +1 / d, 0, - 0, 0, 1, - }) -} - -// Translate produces a translation transform with pixel distances x and y. -func (a Affine) Translate(x, y float64) Affine { - return a.Mul(Affine{ - 1, 0, -x, - 0, 1, -y, - 0, 0, +1, - }) -} - -// Center produces the affine transform, centered around the provided point. -func (a Affine) Center(x, y float64) Affine { - return I.Translate(-x, -y).Mul(a).Translate(x, y) -} - -// CenterFit produces the affine transform, centered around the rectangles. -// It is equivalent to -// I.Translate(-
sx, -sy).Mul(a).Translate(dx, dy
) -func (a Affine) CenterFit(dst, src image.Rectangle) Affine { - dx := float64(dst.Min.X) + float64(dst.Dx())/2 - dy := float64(dst.Min.Y) + float64(dst.Dy())/2 - sx := float64(src.Min.X) + float64(src.Dx())/2 - sy := float64(src.Min.Y) + float64(src.Dy())/2 - return I.Translate(-sx, -sy).Mul(a).Translate(dx, dy) -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/blur.go b/_third_party/code.google.com/p/graphics-go/graphics/blur.go deleted file mode 100644 index afa41f94b3..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/blur.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2011 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package graphics - -import ( - "bosun.org/_third_party/code.google.com/p/graphics-go/graphics/convolve" - "errors" - "image" - "image/draw" - "math" -) - -// DefaultStdDev is the default blurring parameter. -var DefaultStdDev = 0.5 - -// BlurOptions are the blurring parameters. -// StdDev is the standard deviation of the normal, higher is blurrier. -// Size is the size of the kernel. If zero, it is set to Ceil(6 * StdDev). -type BlurOptions struct { - StdDev float64 - Size int -} - -// Blur produces a blurred version of the image, using a Gaussian blur. -func Blur(dst draw.Image, src image.Image, opt *BlurOptions) error { - if dst == nil { - return errors.New("graphics: dst is nil") - } - if src == nil { - return errors.New("graphics: src is nil") - } - - sd := DefaultStdDev - size := 0 - - if opt != nil { - sd = opt.StdDev - size = opt.Size - } - - if size < 1 { - size = int(math.Ceil(sd * 6)) - } - - kernel := make([]float64, 2*size+1) - for i := 0; i <= size; i++ { - x := float64(i) / sd - x = math.Pow(1/math.SqrtE, x*x) - kernel[size-i] = x - kernel[size+i] = x - } - - // Normalize the weights to sum to 1.0. - kSum := 0.0 - for _, k := range kernel { - kSum += k - } - for i, k := range kernel { - kernel[i] = k / kSum - } - - return convolve.Convolve(dst, src, &convolve.SeparableKernel{ - X: kernel, - Y: kernel, - }) -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/blur_test.go b/_third_party/code.google.com/p/graphics-go/graphics/blur_test.go deleted file mode 100644 index 9bef261574..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/blur_test.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2011 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
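Blur above builds its 1-D Gaussian weights with math.Pow(1/math.SqrtE, x*x), which is exactly exp(-x*x/2), then normalizes them to sum to 1 so the blur preserves overall brightness. A standalone sketch of the same kernel construction:

package main

import (
	"fmt"
	"math"
)

// gaussianKernel returns 2*size+1 weights for a normal with the given
// standard deviation, normalized so they sum to 1.0.
func gaussianKernel(stdDev float64, size int) []float64 {
	k := make([]float64, 2*size+1)
	sum := 0.0
	for i := -size; i <= size; i++ {
		x := float64(i) / stdDev
		v := math.Exp(-x * x / 2) // equals Pow(1/SqrtE, x*x)
		k[i+size] = v
		sum += v
	}
	for i := range k {
		k[i] /= sum
	}
	return k
}

func main() {
	fmt.Println(gaussianKernel(0.5, 2))
}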
- -package graphics - -import ( - "bosun.org/_third_party/code.google.com/p/graphics-go/graphics/graphicstest" - "image" - "image/color" - "testing" - - _ "image/png" -) - -var blurOneColorTests = []transformOneColorTest{ - { - "1x1-blank", 1, 1, 1, 1, - &BlurOptions{0.83, 1}, - []uint8{0xff}, - []uint8{0xff}, - }, - { - "1x1-spreadblank", 1, 1, 1, 1, - &BlurOptions{0.83, 2}, - []uint8{0xff}, - []uint8{0xff}, - }, - { - "3x3-blank", 3, 3, 3, 3, - &BlurOptions{0.83, 2}, - []uint8{ - 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, - }, - []uint8{ - 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, - }, - }, - { - "3x3-dot", 3, 3, 3, 3, - &BlurOptions{0.34, 1}, - []uint8{ - 0x00, 0x00, 0x00, - 0x00, 0xff, 0x00, - 0x00, 0x00, 0x00, - }, - []uint8{ - 0x00, 0x03, 0x00, - 0x03, 0xf2, 0x03, - 0x00, 0x03, 0x00, - }, - }, - { - "5x5-dot", 5, 5, 5, 5, - &BlurOptions{0.34, 1}, - []uint8{ - 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0xff, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, - }, - []uint8{ - 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x03, 0x00, 0x00, - 0x00, 0x03, 0xf2, 0x03, 0x00, - 0x00, 0x00, 0x03, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, - }, - }, - { - "5x5-dot-spread", 5, 5, 5, 5, - &BlurOptions{0.85, 1}, - []uint8{ - 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0xff, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, - }, - []uint8{ - 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x10, 0x20, 0x10, 0x00, - 0x00, 0x20, 0x40, 0x20, 0x00, - 0x00, 0x10, 0x20, 0x10, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, - }, - }, - { - "4x4-box", 4, 4, 4, 4, - &BlurOptions{0.34, 1}, - []uint8{ - 0x00, 0x00, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x00, - 0x00, 0xff, 0xff, 0x00, - 0x00, 0x00, 0x00, 0x00, - }, - []uint8{ - 0x00, 0x03, 0x03, 0x00, - 0x03, 0xf8, 0xf8, 0x03, - 0x03, 0xf8, 0xf8, 0x03, - 0x00, 0x03, 0x03, 0x00, - }, - }, - { - "5x5-twodots", 5, 5, 5, 5, - &BlurOptions{0.34, 1}, - []uint8{ - 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x96, 0x00, 0x96, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, - }, - []uint8{ - 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x02, 0x00, 0x02, 0x00, - 0x02, 0x8e, 0x04, 0x8e, 0x02, - 0x00, 0x02, 0x00, 0x02, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, - }, - }, -} - -func TestBlurOneColor(t *testing.T) { - for _, oc := range blurOneColorTests { - dst := oc.newDst() - src := oc.newSrc() - opt := oc.opt.(*BlurOptions) - if err := Blur(dst, src, opt); err != nil { - t.Fatal(err) - } - - if !checkTransformTest(t, &oc, dst) { - continue - } - } -} - -func TestBlurEmpty(t *testing.T) { - empty := image.NewRGBA(image.Rect(0, 0, 0, 0)) - if err := Blur(empty, empty, nil); err != nil { - t.Fatal(err) - } -} - -func TestBlurGopher(t *testing.T) { - src, err := graphicstest.LoadImage("../testdata/gopher.png") - if err != nil { - t.Fatal(err) - } - - dst := image.NewRGBA(src.Bounds()) - if err = Blur(dst, src, &BlurOptions{StdDev: 1.1}); err != nil { - t.Fatal(err) - } - - cmp, err := graphicstest.LoadImage("../testdata/gopher-blur.png") - if err != nil { - t.Fatal(err) - } - err = graphicstest.ImageWithinTolerance(dst, cmp, 0x101) - if err != nil { - t.Fatal(err) - } -} - -func benchBlur(b *testing.B, bounds image.Rectangle) { - b.StopTimer() - - // Construct a fuzzy image. 
- src := image.NewRGBA(bounds) - for y := bounds.Min.Y; y < bounds.Max.Y; y++ { - for x := bounds.Min.X; x < bounds.Max.X; x++ { - src.SetRGBA(x, y, color.RGBA{ - uint8(5 * x % 0x100), - uint8(7 * y % 0x100), - uint8((7*x + 5*y) % 0x100), - 0xff, - }) - } - } - dst := image.NewRGBA(bounds) - - b.StartTimer() - for i := 0; i < b.N; i++ { - Blur(dst, src, &BlurOptions{0.84, 3}) - } -} - -func BenchmarkBlur400x400x3(b *testing.B) { - benchBlur(b, image.Rect(0, 0, 400, 400)) -} - -// Exactly twice the pixel count of 400x400. -func BenchmarkBlur400x800x3(b *testing.B) { - benchBlur(b, image.Rect(0, 0, 400, 800)) -} - -// Exactly twice the pixel count of 400x800 -func BenchmarkBlur400x1600x3(b *testing.B) { - benchBlur(b, image.Rect(0, 0, 400, 1600)) -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/convolve/Makefile b/_third_party/code.google.com/p/graphics-go/graphics/convolve/Makefile deleted file mode 100644 index a5691fa30a..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/convolve/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright 2011 The Graphics-Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -include $(GOROOT)/src/Make.inc - -TARG=code.google.com/p/graphics-go/graphics/convolve -GOFILES=\ - convolve.go\ - -include $(GOROOT)/src/Make.pkg diff --git a/_third_party/code.google.com/p/graphics-go/graphics/convolve/convolve.go b/_third_party/code.google.com/p/graphics-go/graphics/convolve/convolve.go deleted file mode 100644 index da69496d05..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/convolve/convolve.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2011 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package convolve - -import ( - "errors" - "fmt" - "image" - "image/draw" - "math" -) - -// clamp clamps x to the range [x0, x1]. -func clamp(x, x0, x1 float64) float64 { - if x < x0 { - return x0 - } - if x > x1 { - return x1 - } - return x -} - -// Kernel is a square matrix that defines a convolution. -type Kernel interface { - // Weights returns the square matrix of weights in row major order. - Weights() []float64 -} - -// SeparableKernel is a linearly separable, square convolution kernel. -// X and Y are the per-axis weights. Each slice must be the same length, and -// have an odd length. The middle element of each slice is the weight for the -// central pixel. For example, the horizontal Sobel kernel is: -// sobelX := &SeparableKernel{ -// X: []float64{-1, 0, +1}, -// Y: []float64{1, 2, 1}, -// } -type SeparableKernel struct { - X, Y []float64 -} - -func (k *SeparableKernel) Weights() []float64 { - n := len(k.X) - w := make([]float64, n*n) - for y := 0; y < n; y++ { - for x := 0; x < n; x++ { - w[y*n+x] = k.X[x] * k.Y[y] - } - } - return w -} - -// fullKernel is a square convolution kernel. -type fullKernel []float64 - -func (k fullKernel) Weights() []float64 { return k } - -func kernelSize(w []float64) (size int, err error) { - size = int(math.Sqrt(float64(len(w)))) - if size*size != len(w) { - return 0, errors.New("graphics: kernel is not square") - } - if size%2 != 1 { - return 0, errors.New("graphics: kernel size is not odd") - } - return size, nil -} - -// NewKernel returns a square convolution kernel. 
-func NewKernel(w []float64) (Kernel, error) { - if _, err := kernelSize(w); err != nil { - return nil, err - } - return fullKernel(w), nil -} - -func convolveRGBASep(dst *image.RGBA, src image.Image, k *SeparableKernel) error { - if len(k.X) != len(k.Y) { - return fmt.Errorf("graphics: kernel not square (x %d, y %d)", len(k.X), len(k.Y)) - } - if len(k.X)%2 != 1 { - return fmt.Errorf("graphics: kernel length (%d) not odd", len(k.X)) - } - radius := (len(k.X) - 1) / 2 - - // buf holds the result of vertically blurring src. - bounds := dst.Bounds() - width, height := bounds.Dx(), bounds.Dy() - buf := make([]float64, width*height*4) - for y := bounds.Min.Y; y < bounds.Max.Y; y++ { - for x := bounds.Min.X; x < bounds.Max.X; x++ { - var r, g, b, a float64 - // k0 is the kernel weight for the center pixel. This may be greater - // than kernel[0], near the boundary of the source image, to avoid - // vignetting. - k0 := k.X[radius] - - // Add the pixels from above. - for i := 1; i <= radius; i++ { - f := k.Y[radius-i] - if y-i < bounds.Min.Y { - k0 += f - } else { - or, og, ob, oa := src.At(x, y-i).RGBA() - r += float64(or>>8) * f - g += float64(og>>8) * f - b += float64(ob>>8) * f - a += float64(oa>>8) * f - } - } - - // Add the pixels from below. - for i := 1; i <= radius; i++ { - f := k.Y[radius+i] - if y+i >= bounds.Max.Y { - k0 += f - } else { - or, og, ob, oa := src.At(x, y+i).RGBA() - r += float64(or>>8) * f - g += float64(og>>8) * f - b += float64(ob>>8) * f - a += float64(oa>>8) * f - } - } - - // Add the central pixel. - or, og, ob, oa := src.At(x, y).RGBA() - r += float64(or>>8) * k0 - g += float64(og>>8) * k0 - b += float64(ob>>8) * k0 - a += float64(oa>>8) * k0 - - // Write to buf. - o := (y-bounds.Min.Y)*width*4 + (x-bounds.Min.X)*4 - buf[o+0] = r - buf[o+1] = g - buf[o+2] = b - buf[o+3] = a - } - } - - // dst holds the result of horizontally blurring buf. - for y := 0; y < height; y++ { - for x := 0; x < width; x++ { - var r, g, b, a float64 - k0, off := k.X[radius], y*width*4+x*4 - - // Add the pixels from the left. - for i := 1; i <= radius; i++ { - f := k.X[radius-i] - if x-i < 0 { - k0 += f - } else { - o := off - i*4 - r += buf[o+0] * f - g += buf[o+1] * f - b += buf[o+2] * f - a += buf[o+3] * f - } - } - - // Add the pixels from the right. - for i := 1; i <= radius; i++ { - f := k.X[radius+i] - if x+i >= width { - k0 += f - } else { - o := off + i*4 - r += buf[o+0] * f - g += buf[o+1] * f - b += buf[o+2] * f - a += buf[o+3] * f - } - } - - // Add the central pixel. - r += buf[off+0] * k0 - g += buf[off+1] * k0 - b += buf[off+2] * k0 - a += buf[off+3] * k0 - - // Write to dst, clamping to the range [0, 255]. 
- dstOff := (y-dst.Rect.Min.Y)*dst.Stride + (x-dst.Rect.Min.X)*4 - dst.Pix[dstOff+0] = uint8(clamp(r+0.5, 0, 255)) - dst.Pix[dstOff+1] = uint8(clamp(g+0.5, 0, 255)) - dst.Pix[dstOff+2] = uint8(clamp(b+0.5, 0, 255)) - dst.Pix[dstOff+3] = uint8(clamp(a+0.5, 0, 255)) - } - } - - return nil -} - -func convolveRGBA(dst *image.RGBA, src image.Image, k Kernel) error { - b := dst.Bounds() - bs := src.Bounds() - w := k.Weights() - size, err := kernelSize(w) - if err != nil { - return err - } - radius := (size - 1) / 2 - - for y := b.Min.Y; y < b.Max.Y; y++ { - for x := b.Min.X; x < b.Max.X; x++ { - if !image.Pt(x, y).In(bs) { - continue - } - - var r, g, b, a, adj float64 - for cy := y - radius; cy <= y+radius; cy++ { - for cx := x - radius; cx <= x+radius; cx++ { - factor := w[(cy-y+radius)*size+cx-x+radius] - if !image.Pt(cx, cy).In(bs) { - adj += factor - } else { - sr, sg, sb, sa := src.At(cx, cy).RGBA() - r += float64(sr>>8) * factor - g += float64(sg>>8) * factor - b += float64(sb>>8) * factor - a += float64(sa>>8) * factor - } - } - } - - if adj != 0 { - sr, sg, sb, sa := src.At(x, y).RGBA() - r += float64(sr>>8) * adj - g += float64(sg>>8) * adj - b += float64(sb>>8) * adj - a += float64(sa>>8) * adj - } - - off := (y-dst.Rect.Min.Y)*dst.Stride + (x-dst.Rect.Min.X)*4 - dst.Pix[off+0] = uint8(clamp(r+0.5, 0, 0xff)) - dst.Pix[off+1] = uint8(clamp(g+0.5, 0, 0xff)) - dst.Pix[off+2] = uint8(clamp(b+0.5, 0, 0xff)) - dst.Pix[off+3] = uint8(clamp(a+0.5, 0, 0xff)) - } - } - - return nil -} - -// Convolve produces dst by applying the convolution kernel k to src. -func Convolve(dst draw.Image, src image.Image, k Kernel) (err error) { - if dst == nil || src == nil || k == nil { - return nil - } - - b := dst.Bounds() - dstRgba, ok := dst.(*image.RGBA) - if !ok { - dstRgba = image.NewRGBA(b) - } - - switch k := k.(type) { - case *SeparableKernel: - err = convolveRGBASep(dstRgba, src, k) - default: - err = convolveRGBA(dstRgba, src, k) - } - - if err != nil { - return err - } - - if !ok { - draw.Draw(dst, b, dstRgba, b.Min, draw.Src) - } - return nil -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/convolve/convolve_test.go b/_third_party/code.google.com/p/graphics-go/graphics/convolve/convolve_test.go deleted file mode 100644 index 3b969cdda7..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/convolve/convolve_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2011 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
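SeparableKernel.Weights above materializes the full square matrix as the outer product of the per-axis weights; that separability is what lets convolveRGBASep make two O(n) passes per pixel instead of one O(n^2) pass. A small sketch verifying the outer product for the horizontal Sobel kernel:

package main

import "fmt"

// outer builds the n*n row-major matrix w[y*n+x] = xw[x] * yw[y],
// matching SeparableKernel.Weights.
func outer(xw, yw []float64) []float64 {
	n := len(xw)
	w := make([]float64, n*n)
	for y := 0; y < n; y++ {
		for x := 0; x < n; x++ {
			w[y*n+x] = xw[x] * yw[y]
		}
	}
	return w
}

func main() {
	// Horizontal Sobel: X = [-1 0 +1], Y = [1 2 1].
	fmt.Println(outer([]float64{-1, 0, 1}, []float64{1, 2, 1}))
	// [-1 0 1 -2 0 2 -1 0 1]
}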
- -package convolve - -import ( - "bosun.org/_third_party/code.google.com/p/graphics-go/graphics/graphicstest" - "image" - "reflect" - "testing" - - _ "image/png" -) - -func TestSeparableWeights(t *testing.T) { - sobelXFull := []float64{ - -1, 0, 1, - -2, 0, 2, - -1, 0, 1, - } - sobelXSep := &SeparableKernel{ - X: []float64{-1, 0, +1}, - Y: []float64{1, 2, 1}, - } - w := sobelXSep.Weights() - if !reflect.DeepEqual(w, sobelXFull) { - t.Errorf("got %v want %v", w, sobelXFull) - } -} - -func TestConvolve(t *testing.T) { - kernFull, err := NewKernel([]float64{ - 0, 0, 0, - 1, 1, 1, - 0, 0, 0, - }) - if err != nil { - t.Fatal(err) - } - - kernSep := &SeparableKernel{ - X: []float64{1, 1, 1}, - Y: []float64{0, 1, 0}, - } - - src, err := graphicstest.LoadImage("../../testdata/gopher.png") - if err != nil { - t.Fatal(err) - } - b := src.Bounds() - - sep := image.NewRGBA(b) - if err = Convolve(sep, src, kernSep); err != nil { - t.Fatal(err) - } - - full := image.NewRGBA(b) - Convolve(full, src, kernFull) - - err = graphicstest.ImageWithinTolerance(sep, full, 0x101) - if err != nil { - t.Fatal(err) - } -} - -func TestConvolveNil(t *testing.T) { - if err := Convolve(nil, nil, nil); err != nil { - t.Fatal(err) - } -} - -func TestConvolveEmpty(t *testing.T) { - empty := image.NewRGBA(image.Rect(0, 0, 0, 0)) - if err := Convolve(empty, empty, nil); err != nil { - t.Fatal(err) - } -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/interp/Makefile b/_third_party/code.google.com/p/graphics-go/graphics/interp/Makefile deleted file mode 100644 index 4d8f524fb7..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/interp/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2012 The Graphics-Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -include $(GOROOT)/src/Make.inc - -TARG=code.google.com/p/graphics-go/graphics/interp -GOFILES=\ - bilinear.go\ - doc.go\ - interp.go\ - -include $(GOROOT)/src/Make.pkg diff --git a/_third_party/code.google.com/p/graphics-go/graphics/interp/bilinear.go b/_third_party/code.google.com/p/graphics-go/graphics/interp/bilinear.go deleted file mode 100644 index e18321a153..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/interp/bilinear.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2012 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package interp - -import ( - "image" - "image/color" - "math" -) - -// Bilinear implements bilinear interpolation. 
-var Bilinear Interp = bilinear{} - -type bilinear struct{} - -func (i bilinear) Interp(src image.Image, x, y float64) color.Color { - if src, ok := src.(*image.RGBA); ok { - return i.RGBA(src, x, y) - } - return bilinearGeneral(src, x, y) -} - -func bilinearGeneral(src image.Image, x, y float64) color.Color { - p := findLinearSrc(src.Bounds(), x, y) - var fr, fg, fb, fa float64 - var r, g, b, a uint32 - - r, g, b, a = src.At(p.low.X, p.low.Y).RGBA() - fr += float64(r) * p.frac00 - fg += float64(g) * p.frac00 - fb += float64(b) * p.frac00 - fa += float64(a) * p.frac00 - - r, g, b, a = src.At(p.high.X, p.low.Y).RGBA() - fr += float64(r) * p.frac01 - fg += float64(g) * p.frac01 - fb += float64(b) * p.frac01 - fa += float64(a) * p.frac01 - - r, g, b, a = src.At(p.low.X, p.high.Y).RGBA() - fr += float64(r) * p.frac10 - fg += float64(g) * p.frac10 - fb += float64(b) * p.frac10 - fa += float64(a) * p.frac10 - - r, g, b, a = src.At(p.high.X, p.high.Y).RGBA() - fr += float64(r) * p.frac11 - fg += float64(g) * p.frac11 - fb += float64(b) * p.frac11 - fa += float64(a) * p.frac11 - - var c color.RGBA64 - c.R = uint16(fr + 0.5) - c.G = uint16(fg + 0.5) - c.B = uint16(fb + 0.5) - c.A = uint16(fa + 0.5) - return c -} - -func (bilinear) RGBA(src *image.RGBA, x, y float64) color.RGBA { - p := findLinearSrc(src.Bounds(), x, y) - - // Array offsets for the surrounding pixels. - off00 := offRGBA(src, p.low.X, p.low.Y) - off01 := offRGBA(src, p.high.X, p.low.Y) - off10 := offRGBA(src, p.low.X, p.high.Y) - off11 := offRGBA(src, p.high.X, p.high.Y) - - var fr, fg, fb, fa float64 - - fr += float64(src.Pix[off00+0]) * p.frac00 - fg += float64(src.Pix[off00+1]) * p.frac00 - fb += float64(src.Pix[off00+2]) * p.frac00 - fa += float64(src.Pix[off00+3]) * p.frac00 - - fr += float64(src.Pix[off01+0]) * p.frac01 - fg += float64(src.Pix[off01+1]) * p.frac01 - fb += float64(src.Pix[off01+2]) * p.frac01 - fa += float64(src.Pix[off01+3]) * p.frac01 - - fr += float64(src.Pix[off10+0]) * p.frac10 - fg += float64(src.Pix[off10+1]) * p.frac10 - fb += float64(src.Pix[off10+2]) * p.frac10 - fa += float64(src.Pix[off10+3]) * p.frac10 - - fr += float64(src.Pix[off11+0]) * p.frac11 - fg += float64(src.Pix[off11+1]) * p.frac11 - fb += float64(src.Pix[off11+2]) * p.frac11 - fa += float64(src.Pix[off11+3]) * p.frac11 - - var c color.RGBA - c.R = uint8(fr + 0.5) - c.G = uint8(fg + 0.5) - c.B = uint8(fb + 0.5) - c.A = uint8(fa + 0.5) - return c -} - -func (bilinear) Gray(src *image.Gray, x, y float64) color.Gray { - p := findLinearSrc(src.Bounds(), x, y) - - // Array offsets for the surrounding pixels. - off00 := offGray(src, p.low.X, p.low.Y) - off01 := offGray(src, p.high.X, p.low.Y) - off10 := offGray(src, p.low.X, p.high.Y) - off11 := offGray(src, p.high.X, p.high.Y) - - var fc float64 - fc += float64(src.Pix[off00]) * p.frac00 - fc += float64(src.Pix[off01]) * p.frac01 - fc += float64(src.Pix[off10]) * p.frac10 - fc += float64(src.Pix[off11]) * p.frac11 - - var c color.Gray - c.Y = uint8(fc + 0.5) - return c -} - -type bilinearSrc struct { - // Top-left and bottom-right interpolation sources - low, high image.Point - // Fraction of each pixel to take. The 0 suffix indicates - // top/left, and the 1 suffix indicates bottom/right. 
- frac00, frac01, frac10, frac11 float64 -} - -func findLinearSrc(b image.Rectangle, sx, sy float64) bilinearSrc { - maxX := float64(b.Max.X) - maxY := float64(b.Max.Y) - minX := float64(b.Min.X) - minY := float64(b.Min.Y) - lowX := math.Floor(sx - 0.5) - lowY := math.Floor(sy - 0.5) - if lowX < minX { - lowX = minX - } - if lowY < minY { - lowY = minY - } - - highX := math.Ceil(sx - 0.5) - highY := math.Ceil(sy - 0.5) - if highX >= maxX { - highX = maxX - 1 - } - if highY >= maxY { - highY = maxY - 1 - } - - // In the variables below, the 0 suffix indicates top/left, and the - // 1 suffix indicates bottom/right. - - // Center of each surrounding pixel. - x00 := lowX + 0.5 - y00 := lowY + 0.5 - x01 := highX + 0.5 - y01 := lowY + 0.5 - x10 := lowX + 0.5 - y10 := highY + 0.5 - x11 := highX + 0.5 - y11 := highY + 0.5 - - p := bilinearSrc{ - low: image.Pt(int(lowX), int(lowY)), - high: image.Pt(int(highX), int(highY)), - } - - // Literally, edge cases. If we are close enough to the edge of - // the image, curtail the interpolation sources. - if lowX == highX && lowY == highY { - p.frac00 = 1.0 - } else if sy-minY <= 0.5 && sx-minX <= 0.5 { - p.frac00 = 1.0 - } else if maxY-sy <= 0.5 && maxX-sx <= 0.5 { - p.frac11 = 1.0 - } else if sy-minY <= 0.5 || lowY == highY { - p.frac00 = x01 - sx - p.frac01 = sx - x00 - } else if sx-minX <= 0.5 || lowX == highX { - p.frac00 = y10 - sy - p.frac10 = sy - y00 - } else if maxY-sy <= 0.5 { - p.frac10 = x11 - sx - p.frac11 = sx - x10 - } else if maxX-sx <= 0.5 { - p.frac01 = y11 - sy - p.frac11 = sy - y01 - } else { - p.frac00 = (x01 - sx) * (y10 - sy) - p.frac01 = (sx - x00) * (y11 - sy) - p.frac10 = (x11 - sx) * (sy - y00) - p.frac11 = (sx - x10) * (sy - y01) - } - - return p -} - -// TODO(crawshaw): When we have inlining, consider func (p *RGBA) Off(x, y) int -func offRGBA(src *image.RGBA, x, y int) int { - return (y-src.Rect.Min.Y)*src.Stride + (x-src.Rect.Min.X)*4 -} -func offGray(src *image.Gray, x, y int) int { - return (y-src.Rect.Min.Y)*src.Stride + (x - src.Rect.Min.X) -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/interp/bilinear_test.go b/_third_party/code.google.com/p/graphics-go/graphics/interp/bilinear_test.go deleted file mode 100644 index 242d70546f..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/interp/bilinear_test.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2012 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
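findLinearSrc above reduces bilinear sampling to four corner weights (frac00 through frac11) derived from the sample point's fractional distance to the surrounding pixel centers. The same arithmetic on a bare 2x2 grid, expressed as two nested linear interpolations:

package main

import "fmt"

func lerp(a, b, t float64) float64 { return a*(1-t) + b*t }

// bilinear samples v[row][col] at fractional offsets fx, fy in [0, 1],
// where 0 is the top/left pixel center and 1 the bottom/right one.
func bilinear(v [2][2]float64, fx, fy float64) float64 {
	top := lerp(v[0][0], v[0][1], fx)
	bottom := lerp(v[1][0], v[1][1], fx)
	return lerp(top, bottom, fy)
}

func main() {
	grid := [2][2]float64{{0x00, 0xff}, {0xff, 0x00}}
	// Dead center weights all four pixels equally: (0+255+255+0)/4.
	fmt.Println(bilinear(grid, 0.5, 0.5)) // 127.5
}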
- -package interp - -import ( - "image" - "image/color" - "testing" -) - -type interpTest struct { - desc string - src []uint8 - srcWidth int - x, y float64 - expect uint8 -} - -func (p *interpTest) newSrc() *image.RGBA { - b := image.Rect(0, 0, p.srcWidth, len(p.src)/p.srcWidth) - src := image.NewRGBA(b) - i := 0 - for y := b.Min.Y; y < b.Max.Y; y++ { - for x := b.Min.X; x < b.Max.X; x++ { - src.SetRGBA(x, y, color.RGBA{ - R: p.src[i], - G: p.src[i], - B: p.src[i], - A: 0xff, - }) - i++ - } - } - return src -} - -var interpTests = []interpTest{ - { - desc: "center of a single white pixel should match that pixel", - src: []uint8{0x00}, - srcWidth: 1, - x: 0.5, - y: 0.5, - expect: 0x00, - }, - { - desc: "middle of a square is equally weighted", - src: []uint8{ - 0x00, 0xff, - 0xff, 0x00, - }, - srcWidth: 2, - x: 1.0, - y: 1.0, - expect: 0x80, - }, - { - desc: "center of a pixel is just that pixel", - src: []uint8{ - 0x00, 0xff, - 0xff, 0x00, - }, - srcWidth: 2, - x: 1.5, - y: 0.5, - expect: 0xff, - }, - { - desc: "asymmetry abounds", - src: []uint8{ - 0xaa, 0x11, 0x55, - 0xff, 0x95, 0xdd, - }, - srcWidth: 3, - x: 2.0, - y: 1.0, - expect: 0x76, // (0x11 + 0x55 + 0x95 + 0xdd) / 4 - }, -} - -func TestBilinearRGBA(t *testing.T) { - for _, p := range interpTests { - src := p.newSrc() - - // Fast path. - c := Bilinear.(RGBA).RGBA(src, p.x, p.y) - if c.R != c.G || c.R != c.B || c.A != 0xff { - t.Errorf("expect channels to match, got %v", c) - continue - } - if c.R != p.expect { - t.Errorf("%s: got 0x%02x want 0x%02x", p.desc, c.R, p.expect) - continue - } - - // Standard Interp should use the fast path. - cStd := Bilinear.Interp(src, p.x, p.y) - if cStd != c { - t.Errorf("%s: standard mismatch got %v want %v", p.desc, cStd, c) - continue - } - - // General case should match the fast path. - cGen := color.RGBAModel.Convert(bilinearGeneral(src, p.x, p.y)) - r0, g0, b0, a0 := c.RGBA() - r1, g1, b1, a1 := cGen.RGBA() - if r0 != r1 || g0 != g1 || b0 != b1 || a0 != a1 { - t.Errorf("%s: general case mismatch got %v want %v", p.desc, c, cGen) - continue - } - } -} - -func TestBilinearSubImage(t *testing.T) { - b0 := image.Rect(0, 0, 4, 4) - src0 := image.NewRGBA(b0) - b1 := image.Rect(1, 1, 3, 3) - src1 := src0.SubImage(b1).(*image.RGBA) - src1.Set(1, 1, color.RGBA{0x11, 0, 0, 0xff}) - src1.Set(2, 1, color.RGBA{0x22, 0, 0, 0xff}) - src1.Set(1, 2, color.RGBA{0x33, 0, 0, 0xff}) - src1.Set(2, 2, color.RGBA{0x44, 0, 0, 0xff}) - - tests := []struct { - x, y float64 - want uint8 - }{ - {1, 1, 0x11}, - {3, 1, 0x22}, - {1, 3, 0x33}, - {3, 3, 0x44}, - {2, 2, 0x2b}, - } - - for _, p := range tests { - c := Bilinear.(RGBA).RGBA(src1, p.x, p.y) - if c.R != p.want { - t.Errorf("(%.0f, %.0f): got 0x%02x want 0x%02x", p.x, p.y, c.R, p.want) - } - } -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/interp/doc.go b/_third_party/code.google.com/p/graphics-go/graphics/interp/doc.go deleted file mode 100644 index bc2b503c72..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/interp/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2012 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package interp implements image interpolation. 
- -An interpolator provides the Interp interface, which can be used -to interpolate a pixel: - - c := interp.Bilinear.Interp(src, 1.2, 1.8) - -To interpolate a large number of RGBA or Gray pixels, an implementation -may provide a fast-path by implementing the RGBA or Gray interfaces. - - i1, ok := i.(interp.RGBA) - if ok { - c := i1.RGBA(src, 1.2, 1.8) - // use c.R, c.G, etc - return - } - c := i.Interp(src, 1.2, 1.8) - // use generic color.Color -*/ -package interp diff --git a/_third_party/code.google.com/p/graphics-go/graphics/interp/interp.go b/_third_party/code.google.com/p/graphics-go/graphics/interp/interp.go deleted file mode 100644 index 560637d4a9..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/interp/interp.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2012 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package interp - -import ( - "image" - "image/color" -) - -// Interp interpolates an image's color at fractional co-ordinates. -type Interp interface { - // Interp interpolates (x, y). - Interp(src image.Image, x, y float64) color.Color -} - -// RGBA is a fast-path interpolation implementation for image.RGBA. -// It is common for an Interp to also implement RGBA. -type RGBA interface { - // RGBA interpolates (x, y). - RGBA(src *image.RGBA, x, y float64) color.RGBA -} - -// Gray is a fast-path interpolation implementation for image.Gray. -type Gray interface { - // Gray interpolates (x, y). - Gray(src *image.Gray, x, y float64) color.Gray -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/rotate.go b/_third_party/code.google.com/p/graphics-go/graphics/rotate.go deleted file mode 100644 index f40d288dc1..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/rotate.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2011 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package graphics - -import ( - "bosun.org/_third_party/code.google.com/p/graphics-go/graphics/interp" - "errors" - "image" - "image/draw" -) - -// RotateOptions are the rotation parameters. -// Angle is the angle, in radians, to rotate the image clockwise. -type RotateOptions struct { - Angle float64 -} - -// Rotate produces a rotated version of src, drawn onto dst. -func Rotate(dst draw.Image, src image.Image, opt *RotateOptions) error { - if dst == nil { - return errors.New("graphics: dst is nil") - } - if src == nil { - return errors.New("graphics: src is nil") - } - - angle := 0.0 - if opt != nil { - angle = opt.Angle - } - - return I.Rotate(angle).TransformCenter(dst, src, interp.Bilinear) -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/rotate_test.go b/_third_party/code.google.com/p/graphics-go/graphics/rotate_test.go deleted file mode 100644 index 17a605631e..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/rotate_test.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2011 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
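Rotate above is just the affine I.Rotate(angle) applied about the image center; Affine.Rotate composes the matrix {c, s, 0, -s, c, 0, 0, 0, 1}, which pt() turns into x' = c*x + s*y, y' = -s*x + c*y (Transform then uses it to map destination pixels back into the source). A sketch of that matrix acting on a single point:

package main

import (
	"fmt"
	"math"
)

// rotatePoint applies the clockwise rotation used by Affine.Rotate;
// in image coordinates y grows downward, matching pt() above.
func rotatePoint(x, y, angle float64) (float64, float64) {
	s, c := math.Sincos(angle)
	return c*x + s*y, -s*x + c*y
}

func main() {
	fmt.Println(rotatePoint(1, 0, math.Pi/2)) // approximately (0, -1)
}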
- -package graphics - -import ( - "bosun.org/_third_party/code.google.com/p/graphics-go/graphics/graphicstest" - "image" - "math" - "testing" - - _ "image/png" -) - -var rotateOneColorTests = []transformOneColorTest{ - { - "onepixel-onequarter", 1, 1, 1, 1, - &RotateOptions{math.Pi / 2}, - []uint8{0xff}, - []uint8{0xff}, - }, - { - "onepixel-partial", 1, 1, 1, 1, - &RotateOptions{math.Pi * 2.0 / 3.0}, - []uint8{0xff}, - []uint8{0xff}, - }, - { - "onepixel-complete", 1, 1, 1, 1, - &RotateOptions{2 * math.Pi}, - []uint8{0xff}, - []uint8{0xff}, - }, - { - "even-onequarter", 2, 2, 2, 2, - &RotateOptions{math.Pi / 2.0}, - []uint8{ - 0xff, 0x00, - 0x00, 0xff, - }, - []uint8{ - 0x00, 0xff, - 0xff, 0x00, - }, - }, - { - "even-complete", 2, 2, 2, 2, - &RotateOptions{2.0 * math.Pi}, - []uint8{ - 0xff, 0x00, - 0x00, 0xff, - }, - []uint8{ - 0xff, 0x00, - 0x00, 0xff, - }, - }, - { - "line-partial", 3, 3, 3, 3, - &RotateOptions{math.Pi * 1.0 / 3.0}, - []uint8{ - 0x00, 0x00, 0x00, - 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, - }, - []uint8{ - 0xa2, 0x80, 0x00, - 0x22, 0xff, 0x22, - 0x00, 0x80, 0xa2, - }, - }, - { - "line-offset-partial", 3, 3, 3, 3, - &RotateOptions{math.Pi * 3 / 2}, - []uint8{ - 0x00, 0x00, 0x00, - 0x00, 0xff, 0xff, - 0x00, 0x00, 0x00, - }, - []uint8{ - 0x00, 0xff, 0x00, - 0x00, 0xff, 0x00, - 0x00, 0x00, 0x00, - }, - }, - { - "dot-partial", 4, 4, 4, 4, - &RotateOptions{math.Pi}, - []uint8{ - 0x00, 0x00, 0x00, 0x00, - 0x00, 0xff, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - }, - []uint8{ - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0xff, 0x00, - 0x00, 0x00, 0x00, 0x00, - }, - }, -} - -func TestRotateOneColor(t *testing.T) { - for _, oc := range rotateOneColorTests { - src := oc.newSrc() - dst := oc.newDst() - - if err := Rotate(dst, src, oc.opt.(*RotateOptions)); err != nil { - t.Errorf("rotate %s: %v", oc.desc, err) - continue - } - if !checkTransformTest(t, &oc, dst) { - continue - } - } -} - -func TestRotateEmpty(t *testing.T) { - empty := image.NewRGBA(image.Rect(0, 0, 0, 0)) - if err := Rotate(empty, empty, nil); err != nil { - t.Fatal(err) - } -} - -func TestRotateGopherSide(t *testing.T) { - src, err := graphicstest.LoadImage("../testdata/gopher.png") - if err != nil { - t.Fatal(err) - } - - srcb := src.Bounds() - dst := image.NewRGBA(image.Rect(0, 0, srcb.Dy(), srcb.Dx())) - if err := Rotate(dst, src, &RotateOptions{math.Pi / 2.0}); err != nil { - t.Fatal(err) - } - - cmp, err := graphicstest.LoadImage("../testdata/gopher-rotate-side.png") - if err != nil { - t.Fatal(err) - } - err = graphicstest.ImageWithinTolerance(dst, cmp, 0x101) - if err != nil { - t.Fatal(err) - } -} - -func TestRotateGopherPartial(t *testing.T) { - src, err := graphicstest.LoadImage("../testdata/gopher.png") - if err != nil { - t.Fatal(err) - } - - srcb := src.Bounds() - dst := image.NewRGBA(image.Rect(0, 0, srcb.Dx(), srcb.Dy())) - if err := Rotate(dst, src, &RotateOptions{math.Pi / 3.0}); err != nil { - t.Fatal(err) - } - - cmp, err := graphicstest.LoadImage("../testdata/gopher-rotate-partial.png") - if err != nil { - t.Fatal(err) - } - err = graphicstest.ImageWithinTolerance(dst, cmp, 0x101) - if err != nil { - t.Fatal(err) - } -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/scale.go b/_third_party/code.google.com/p/graphics-go/graphics/scale.go deleted file mode 100644 index 70f6d5a78e..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/scale.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2011 The Graphics-Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package graphics - -import ( - "bosun.org/_third_party/code.google.com/p/graphics-go/graphics/interp" - "errors" - "image" - "image/draw" -) - -// Scale produces a scaled version of the image using bilinear interpolation. -func Scale(dst draw.Image, src image.Image) error { - if dst == nil { - return errors.New("graphics: dst is nil") - } - if src == nil { - return errors.New("graphics: src is nil") - } - - b := dst.Bounds() - srcb := src.Bounds() - if b.Empty() || srcb.Empty() { - return nil - } - sx := float64(b.Dx()) / float64(srcb.Dx()) - sy := float64(b.Dy()) / float64(srcb.Dy()) - return I.Scale(sx, sy).Transform(dst, src, interp.Bilinear) -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/scale_test.go b/_third_party/code.google.com/p/graphics-go/graphics/scale_test.go deleted file mode 100644 index 7e70f0e2db..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/scale_test.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2011 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package graphics - -import ( - "bosun.org/_third_party/code.google.com/p/graphics-go/graphics/graphicstest" - "image" - "testing" - - _ "image/png" -) - -var scaleOneColorTests = []transformOneColorTest{ - { - "down-half", - 1, 1, - 2, 2, - nil, - []uint8{ - 0x80, 0x00, - 0x00, 0x80, - }, - []uint8{ - 0x40, - }, - }, - { - "up-double", - 4, 4, - 2, 2, - nil, - []uint8{ - 0x80, 0x00, - 0x00, 0x80, - }, - []uint8{ - 0x80, 0x60, 0x20, 0x00, - 0x60, 0x50, 0x30, 0x20, - 0x20, 0x30, 0x50, 0x60, - 0x00, 0x20, 0x60, 0x80, - }, - }, - { - "up-doublewidth", - 4, 2, - 2, 2, - nil, - []uint8{ - 0x80, 0x00, - 0x00, 0x80, - }, - []uint8{ - 0x80, 0x60, 0x20, 0x00, - 0x00, 0x20, 0x60, 0x80, - }, - }, - { - "up-doubleheight", - 2, 4, - 2, 2, - nil, - []uint8{ - 0x80, 0x00, - 0x00, 0x80, - }, - []uint8{ - 0x80, 0x00, - 0x60, 0x20, - 0x20, 0x60, - 0x00, 0x80, - }, - }, - { - "up-partial", - 3, 3, - 2, 2, - nil, - []uint8{ - 0x80, 0x00, - 0x00, 0x80, - }, - []uint8{ - 0x80, 0x40, 0x00, - 0x40, 0x40, 0x40, - 0x00, 0x40, 0x80, - }, - }, -} - -func TestScaleOneColor(t *testing.T) { - for _, oc := range scaleOneColorTests { - dst := oc.newDst() - src := oc.newSrc() - if err := Scale(dst, src); err != nil { - t.Errorf("scale %s: %v", oc.desc, err) - continue - } - - if !checkTransformTest(t, &oc, dst) { - continue - } - } -} - -func TestScaleEmpty(t *testing.T) { - empty := image.NewRGBA(image.Rect(0, 0, 0, 0)) - if err := Scale(empty, empty); err != nil { - t.Fatal(err) - } -} - -func TestScaleGopher(t *testing.T) { - dst := image.NewRGBA(image.Rect(0, 0, 100, 150)) - - src, err := graphicstest.LoadImage("../testdata/gopher.png") - if err != nil { - t.Error(err) - return - } - - // Down-sample. - if err := Scale(dst, src); err != nil { - t.Fatal(err) - } - cmp, err := graphicstest.LoadImage("../testdata/gopher-100x150.png") - if err != nil { - t.Error(err) - return - } - err = graphicstest.ImageWithinTolerance(dst, cmp, 0) - if err != nil { - t.Error(err) - return - } - - // Up-sample. 
- dst = image.NewRGBA(image.Rect(0, 0, 500, 750)) - if err := Scale(dst, src); err != nil { - t.Fatal(err) - } - cmp, err = graphicstest.LoadImage("../testdata/gopher-500x750.png") - if err != nil { - t.Error(err) - return - } - err = graphicstest.ImageWithinTolerance(dst, cmp, 0) - if err != nil { - t.Error(err) - return - } -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/shared_test.go b/_third_party/code.google.com/p/graphics-go/graphics/shared_test.go deleted file mode 100644 index 93e632bd59..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/shared_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2011 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package graphics - -import ( - "bosun.org/_third_party/code.google.com/p/graphics-go/graphics/graphicstest" - "bytes" - "image" - "image/color" - "testing" -) - -type transformOneColorTest struct { - desc string - dstWidth int - dstHeight int - srcWidth int - srcHeight int - opt interface{} - src []uint8 - res []uint8 -} - -func (oc *transformOneColorTest) newSrc() *image.RGBA { - b := image.Rect(0, 0, oc.srcWidth, oc.srcHeight) - src := image.NewRGBA(b) - i := 0 - for y := b.Min.Y; y < b.Max.Y; y++ { - for x := b.Min.X; x < b.Max.X; x++ { - src.SetRGBA(x, y, color.RGBA{ - R: oc.src[i], - G: oc.src[i], - B: oc.src[i], - A: oc.src[i], - }) - i++ - } - } - return src -} - -func (oc *transformOneColorTest) newDst() *image.RGBA { - return image.NewRGBA(image.Rect(0, 0, oc.dstWidth, oc.dstHeight)) -} - -func checkTransformTest(t *testing.T, oc *transformOneColorTest, dst *image.RGBA) bool { - for ch := 0; ch < 4; ch++ { - i := 0 - res := make([]byte, len(oc.res)) - for y := 0; y < oc.dstHeight; y++ { - for x := 0; x < oc.dstWidth; x++ { - off := (y-dst.Rect.Min.Y)*dst.Stride + (x-dst.Rect.Min.X)*4 - res[i] = dst.Pix[off+ch] - i++ - } - } - - if !bytes.Equal(res, oc.res) { - got := graphicstest.SprintBox(res, oc.dstWidth, oc.dstHeight) - want := graphicstest.SprintBox(oc.res, oc.dstWidth, oc.dstHeight) - t.Errorf("%s: ch=%d\n got\n%s\n want\n%s", oc.desc, ch, got, want) - return false - } - } - - return true -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/thumbnail.go b/_third_party/code.google.com/p/graphics-go/graphics/thumbnail.go deleted file mode 100644 index d3ad7e8f7d..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/thumbnail.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2011 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package graphics - -import ( - "image" - "image/draw" -) - -// Thumbnail scales and crops src so it fits in dst. -func Thumbnail(dst draw.Image, src image.Image) error { - // Scale down src in the dimension that is closer to dst. - sb := src.Bounds() - db := dst.Bounds() - rx := float64(sb.Dx()) / float64(db.Dx()) - ry := float64(sb.Dy()) / float64(db.Dy()) - var b image.Rectangle - if rx < ry { - b = image.Rect(0, 0, db.Dx(), int(float64(sb.Dy())/rx)) - } else { - b = image.Rect(0, 0, int(float64(sb.Dx())/ry), db.Dy()) - } - - buf := image.NewRGBA(b) - if err := Scale(buf, src); err != nil { - return err - } - - // Crop. - // TODO(crawshaw): improve on center-alignment. 
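-	// Worked example of the crop offset (illustrative numbers, not from the
-	// original source): thumbnailing a 400x600 src into an 80x80 dst gives
-	// rx = 400/80 = 5 and ry = 600/80 = 7.5; since rx < ry, buf is scaled to
-	// 80x120 (int(600/5) = 120), and the centered crop below uses
-	// pt.Y = (120-80)/2 = 20, trimming 20 rows from the top and the bottom.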
- var pt image.Point - if rx < ry { - pt.Y = (b.Dy() - db.Dy()) / 2 - } else { - pt.X = (b.Dx() - db.Dx()) / 2 - } - draw.Draw(dst, db, buf, pt, draw.Src) - return nil -} diff --git a/_third_party/code.google.com/p/graphics-go/graphics/thumbnail_test.go b/_third_party/code.google.com/p/graphics-go/graphics/thumbnail_test.go deleted file mode 100644 index 8e59480f98..0000000000 --- a/_third_party/code.google.com/p/graphics-go/graphics/thumbnail_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2011 The Graphics-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package graphics - -import ( - "bosun.org/_third_party/code.google.com/p/graphics-go/graphics/graphicstest" - "image" - "testing" - - _ "image/png" -) - -func TestThumbnailGopher(t *testing.T) { - dst := image.NewRGBA(image.Rect(0, 0, 80, 80)) - - src, err := graphicstest.LoadImage("../testdata/gopher.png") - if err != nil { - t.Fatal(err) - } - if err := Thumbnail(dst, src); err != nil { - t.Fatal(err) - } - cmp, err := graphicstest.LoadImage("../testdata/gopher-thumb-80x80.png") - if err != nil { - t.Fatal(err) - } - err = graphicstest.ImageWithinTolerance(dst, cmp, 0) - if err != nil { - t.Error(err) - } -} - -func TestThumbnailLongGopher(t *testing.T) { - dst := image.NewRGBA(image.Rect(0, 0, 50, 150)) - - src, err := graphicstest.LoadImage("../testdata/gopher.png") - if err != nil { - t.Fatal(err) - } - if err := Thumbnail(dst, src); err != nil { - t.Fatal(err) - } - cmp, err := graphicstest.LoadImage("../testdata/gopher-thumb-50x150.png") - if err != nil { - t.Fatal(err) - } - err = graphicstest.ImageWithinTolerance(dst, cmp, 0) - if err != nil { - t.Error(err) - } -} diff --git a/_third_party/github.com/PuerkitoBio/goquery/array.go b/_third_party/github.com/PuerkitoBio/goquery/array.go index 78646539a5..d7af5eee1c 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/array.go +++ b/_third_party/github.com/PuerkitoBio/goquery/array.go @@ -1,7 +1,7 @@ package goquery import ( - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) // First reduces the set of matched elements to the first in the set. diff --git a/_third_party/github.com/PuerkitoBio/goquery/expand.go b/_third_party/github.com/PuerkitoBio/goquery/expand.go index c42c75e58b..286ee284ab 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/expand.go +++ b/_third_party/github.com/PuerkitoBio/goquery/expand.go @@ -2,7 +2,7 @@ package goquery import ( "bosun.org/_third_party/github.com/andybalholm/cascadia" - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) // Add adds the selector string's matching nodes to those in the current diff --git a/_third_party/github.com/PuerkitoBio/goquery/filter.go b/_third_party/github.com/PuerkitoBio/goquery/filter.go index 5b4da99efe..75da9c32c4 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/filter.go +++ b/_third_party/github.com/PuerkitoBio/goquery/filter.go @@ -2,7 +2,7 @@ package goquery import ( "bosun.org/_third_party/github.com/andybalholm/cascadia" - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) // Filter reduces the set of matched elements to those that match the selector string. 
diff --git a/_third_party/github.com/PuerkitoBio/goquery/iteration_test.go b/_third_party/github.com/PuerkitoBio/goquery/iteration_test.go index 33434047db..9b6aafb7b6 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/iteration_test.go +++ b/_third_party/github.com/PuerkitoBio/goquery/iteration_test.go @@ -3,7 +3,7 @@ package goquery import ( "testing" - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) func TestEach(t *testing.T) { diff --git a/_third_party/github.com/PuerkitoBio/goquery/manipulation.go b/_third_party/github.com/PuerkitoBio/goquery/manipulation.go index 8b278f0d49..e673334f4e 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/manipulation.go +++ b/_third_party/github.com/PuerkitoBio/goquery/manipulation.go @@ -4,7 +4,7 @@ import ( "strings" "bosun.org/_third_party/github.com/andybalholm/cascadia" - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) // After applies the selector from the root document and inserts the matched elements diff --git a/_third_party/github.com/PuerkitoBio/goquery/property.go b/_third_party/github.com/PuerkitoBio/goquery/property.go index ca8641a84c..af3a9eacb5 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/property.go +++ b/_third_party/github.com/PuerkitoBio/goquery/property.go @@ -5,7 +5,7 @@ import ( "regexp" "strings" - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) var rxClassTrim = regexp.MustCompile("[\t\r\n]") diff --git a/_third_party/github.com/PuerkitoBio/goquery/query.go b/_third_party/github.com/PuerkitoBio/goquery/query.go index 98b81dccfe..d16e323341 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/query.go +++ b/_third_party/github.com/PuerkitoBio/goquery/query.go @@ -2,7 +2,7 @@ package goquery import ( "bosun.org/_third_party/github.com/andybalholm/cascadia" - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) // Is checks the current matched set of elements against a selector and diff --git a/_third_party/github.com/PuerkitoBio/goquery/traversal.go b/_third_party/github.com/PuerkitoBio/goquery/traversal.go index 72f645ae28..f039755cd5 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/traversal.go +++ b/_third_party/github.com/PuerkitoBio/goquery/traversal.go @@ -2,7 +2,7 @@ package goquery import ( "bosun.org/_third_party/github.com/andybalholm/cascadia" - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) type siblingType int diff --git a/_third_party/github.com/PuerkitoBio/goquery/type.go b/_third_party/github.com/PuerkitoBio/goquery/type.go index 762bba8f6f..2f871204e2 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/type.go +++ b/_third_party/github.com/PuerkitoBio/goquery/type.go @@ -6,7 +6,7 @@ import ( "net/http" "net/url" - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) // Document represents an HTML document to be manipulated. Unlike jQuery, which @@ -56,10 +56,12 @@ func NewDocumentFromReader(r io.Reader) (*Document, error) { // node, ready to be manipulated. The response's body is closed on return. 
func NewDocumentFromResponse(res *http.Response) (*Document, error) { if res == nil { - return nil, errors.New("Response is nil pointer") + return nil, errors.New("Response is nil") } - defer res.Body.Close() + if res.Request == nil { + return nil, errors.New("Response.Request is nil") + } // Parse the HTML into nodes root, e := html.Parse(res.Body) diff --git a/_third_party/github.com/PuerkitoBio/goquery/type_test.go b/_third_party/github.com/PuerkitoBio/goquery/type_test.go index 36b142bdfd..98ee3a64d1 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/type_test.go +++ b/_third_party/github.com/PuerkitoBio/goquery/type_test.go @@ -6,7 +6,7 @@ import ( "os" "testing" - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) // Test helper functions and members diff --git a/_third_party/github.com/PuerkitoBio/goquery/utilities.go b/_third_party/github.com/PuerkitoBio/goquery/utilities.go index 29f8ec4abd..aa509e6ab3 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/utilities.go +++ b/_third_party/github.com/PuerkitoBio/goquery/utilities.go @@ -1,7 +1,7 @@ package goquery import ( - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) func getChildren(n *html.Node) (result []*html.Node) { diff --git a/_third_party/github.com/StackExchange/httpunit/httpunit.go b/_third_party/github.com/StackExchange/httpunit/httpunit.go index 6d7cd702f7..876a5c1ad0 100644 --- a/_third_party/github.com/StackExchange/httpunit/httpunit.go +++ b/_third_party/github.com/StackExchange/httpunit/httpunit.go @@ -393,6 +393,7 @@ type TestResult struct { GotText bool GotRegex bool InvalidCert bool + TimeTotal time.Duration } func (c *TestCase) addr() string { @@ -416,6 +417,10 @@ func (c *TestCase) Test() *TestResult { func (c *TestCase) testConnect() (r *TestResult) { r = new(TestResult) + t := time.Now() + defer func() { + r.TimeTotal = time.Now().Sub(t) + }() conn, err := net.DialTimeout(c.URL.Scheme, c.addr(), Timeout) if err != nil { r.Result = err @@ -428,6 +433,10 @@ func (c *TestCase) testConnect() (r *TestResult) { func (c *TestCase) testHTTP() (r *TestResult) { r = new(TestResult) + t := time.Now() + defer func() { + r.TimeTotal = time.Now().Sub(t) + }() tr := &http.Transport{ Dial: func(network, a string) (net.Conn, error) { conn, err := net.DialTimeout(network, c.addr(), Timeout) @@ -443,16 +452,23 @@ func (c *TestCase) testHTTP() (r *TestResult) { r.Result = err return } - time.AfterFunc(Timeout, func() { + timedOut := false + timout := time.AfterFunc(Timeout, func() { + timedOut = true r.Connected = false tr.CancelRequest(req) }) + defer timout.Stop() resp, err := tr.RoundTrip(req) if err != nil { if strings.HasPrefix(err.Error(), "x509") { r.InvalidCert = true } - r.Result = err + if timedOut { + r.Result = fmt.Errorf("i/o timeout") + } else { + r.Result = err + } return } defer resp.Body.Close() @@ -464,7 +480,11 @@ func (c *TestCase) testHTTP() (r *TestResult) { } text, err := ioutil.ReadAll(resp.Body) if err != nil { - r.Result = err + if timedOut { + r.Result = fmt.Errorf("i/o timeout") + } else { + r.Result = err + } return } short := text diff --git a/_third_party/github.com/andybalholm/cascadia/benchmark_test.go b/_third_party/github.com/andybalholm/cascadia/benchmark_test.go index df1c4c2bf6..42bf50006b 100644 --- a/_third_party/github.com/andybalholm/cascadia/benchmark_test.go +++ b/_third_party/github.com/andybalholm/cascadia/benchmark_test.go @@ -4,7 +4,7 @@ import ( "strings" "testing" - "bosun.org/_third_party/golang.org/x/net/html" + 
"golang.org/x/net/html" ) func MustParseHTML(doc string) *html.Node { diff --git a/_third_party/github.com/andybalholm/cascadia/parser.go b/_third_party/github.com/andybalholm/cascadia/parser.go index 3973f959b6..42af28c5bb 100644 --- a/_third_party/github.com/andybalholm/cascadia/parser.go +++ b/_third_party/github.com/andybalholm/cascadia/parser.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) // a parser for CSS selectors diff --git a/_third_party/github.com/andybalholm/cascadia/selector.go b/_third_party/github.com/andybalholm/cascadia/selector.go index 7331709fff..aeffdbd54d 100644 --- a/_third_party/github.com/andybalholm/cascadia/selector.go +++ b/_third_party/github.com/andybalholm/cascadia/selector.go @@ -6,7 +6,7 @@ import ( "regexp" "strings" - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) // the Selector type, and functions for creating them diff --git a/_third_party/github.com/andybalholm/cascadia/selector_test.go b/_third_party/github.com/andybalholm/cascadia/selector_test.go index 62b5417f42..8438d384bd 100644 --- a/_third_party/github.com/andybalholm/cascadia/selector_test.go +++ b/_third_party/github.com/andybalholm/cascadia/selector_test.go @@ -4,7 +4,7 @@ import ( "strings" "testing" - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) type selectorTest struct { diff --git a/_third_party/github.com/aws/aws-sdk-go/aws/version.go b/_third_party/github.com/aws/aws-sdk-go/aws/version.go index f89f1e112f..8a086b3a82 100644 --- a/_third_party/github.com/aws/aws-sdk-go/aws/version.go +++ b/_third_party/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "0.9.9" +const SDKVersion = "0.9.11" diff --git a/_third_party/github.com/aws/aws-sdk-go/service/ec2/api.go b/_third_party/github.com/aws/aws-sdk-go/service/ec2/api.go index 318c21cd20..3de5563248 100644 --- a/_third_party/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/_third_party/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -696,6 +696,13 @@ func (c *EC2) CancelSpotFleetRequestsRequest(input *CancelSpotFleetRequestsInput } // Cancels the specified Spot fleet requests. +// +// After you cancel a Spot fleet request, the Spot fleet launches no new Spot +// instances. You must specify whether the Spot fleet should also terminate +// its Spot instances. If you terminate the instances, the Spot fleet request +// enters the cancelled_terminating state. Otherwise, the Spot fleet request +// enters the cancelled_running state and the instances continue to run until +// they are interrupted or you terminate them manually. func (c *EC2) CancelSpotFleetRequests(input *CancelSpotFleetRequestsInput) (*CancelSpotFleetRequestsOutput, error) { req, out := c.CancelSpotFleetRequestsRequest(input) err := req.Send() @@ -5169,6 +5176,52 @@ func (c *EC2) ModifySnapshotAttribute(input *ModifySnapshotAttributeInput) (*Mod return out, err } +const opModifySpotFleetRequest = "ModifySpotFleetRequest" + +// ModifySpotFleetRequestRequest generates a request for the ModifySpotFleetRequest operation. 
+func (c *EC2) ModifySpotFleetRequestRequest(input *ModifySpotFleetRequestInput) (req *request.Request, output *ModifySpotFleetRequestOutput) { + op := &request.Operation{ + Name: opModifySpotFleetRequest, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySpotFleetRequestInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifySpotFleetRequestOutput{} + req.Data = output + return +} + +// Modifies the specified Spot fleet request. +// +// While the Spot fleet request is being modified, it is in the modifying state. +// +// To scale up your Spot fleet, increase its target capacity. The Spot fleet +// launches the additional Spot instances according to the allocation strategy +// for the Spot fleet request. If the allocation strategy is lowestPrice, the +// Spot fleet launches instances using the Spot pool with the lowest price. +// If the allocation strategy is diversified, the Spot fleet distributes the +// instances across the Spot pools. +// +// To scale down your Spot fleet, decrease its target capacity. First, the +// Spot fleet cancels any open bids that exceed the new target capacity. You +// can request that the Spot fleet terminate Spot instances until the size of +// the fleet no longer exceeds the new target capacity. If the allocation strategy +// is lowestPrice, the Spot fleet terminates the instances with the highest +// price per unit. If the allocation strategy is diversified, the Spot fleet +// terminates instances across the Spot pools. Alternatively, you can request +// that the Spot fleet keep the fleet at its current size, but not replace any +// Spot instances that are interrupted or that you terminate manually. +func (c *EC2) ModifySpotFleetRequest(input *ModifySpotFleetRequestInput) (*ModifySpotFleetRequestOutput, error) { + req, out := c.ModifySpotFleetRequestRequest(input) + err := req.Send() + return out, err +} + const opModifySubnetAttribute = "ModifySubnetAttribute" // ModifySubnetAttributeRequest generates a request for the ModifySubnetAttribute operation. @@ -7196,7 +7249,7 @@ type AvailabilityZone struct { // The name of the region. RegionName *string `locationName:"regionName" type:"string"` - // The state of the Availability Zone (available | impaired | unavailable). + // The state of the Availability Zone. State *string `locationName:"zoneState" type:"string" enum:"AvailabilityZoneState"` // The name of the Availability Zone. @@ -10995,7 +11048,8 @@ type DescribeAvailabilityZonesInput struct { // region-name - The name of the region for the Availability Zone (for example, // us-east-1). // - // state - The state of the Availability Zone (available | impaired | unavailable). + // state - The state of the Availability Zone (available | information | + // impaired | unavailable). // // zone-name - The name of the Availability Zone (for example, us-east-1a). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -16201,11 +16255,15 @@ type EventInformation struct { // // The following are the error events. // - // iamFleetRoleInvalid - Spot fleet did not have the required permissions + // iamFleetRoleInvalid - The Spot fleet did not have the required permissions // either to launch or terminate an instance. // + // launchSpecTemporarilyBlacklisted - The configuration is not valid and + // several attempts to launch instances have failed. For more information, see + // the description of the event. + // // spotFleetRequestConfigurationInvalid - The configuration is not valid. 
- // For more information, see the description. + // For more information, see the description of the event. // // spotInstanceCountLimitExceeded - You've reached the limit on the number // of Spot instances that you can launch. @@ -16229,6 +16287,11 @@ type EventInformation struct { // that the instances were terminated, if the request was created with TerminateInstancesWithExpiration // set. // + // modify_in_progress - A request to modify the Spot fleet request was accepted + // and is in progress. + // + // modify_successful - The Spot fleet request was modified. + // // price_update - The bid price for a launch configuration was adjusted because // it was too high. This change is permanent. // @@ -18358,10 +18421,9 @@ type LaunchSpecification struct { // The ID of the RAM disk. RamdiskId *string `locationName:"ramdiskId" type:"string"` - // One or more security groups. To request an instance in a nondefault VPC, - // you must specify the ID of the security group. To request an instance in - // EC2-Classic or a default VPC, you can specify the name or the ID of the security - // group. + // One or more security groups. When requesting instances in a VPC, you must + // specify the IDs of the security groups. When requesting instances in EC2-Classic, + // you can specify the names or the IDs of the security groups. SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` // The ID of the subnet in which to launch the instance. @@ -18751,6 +18813,58 @@ func (s ModifySnapshotAttributeOutput) GoString() string { return s.String() } +// Contains the parameters for ModifySpotFleetRequest. +type ModifySpotFleetRequestInput struct { + // Indicates whether running Spot instances should be terminated if the target + // capacity of the Spot fleet request is decreased below the current size of + // the Spot fleet. + ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The size of the fleet. + TargetCapacity *int64 `locationName:"targetCapacity" type:"integer"` + + metadataModifySpotFleetRequestInput `json:"-" xml:"-"` +} + +type metadataModifySpotFleetRequestInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifySpotFleetRequestInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySpotFleetRequestInput) GoString() string { + return s.String() +} + +// Contains the output of ModifySpotFleetRequest. +type ModifySpotFleetRequestOutput struct { + // Is true if the request succeeds, and an error otherwise. + Return *bool `locationName:"return" type:"boolean"` + + metadataModifySpotFleetRequestOutput `json:"-" xml:"-"` +} + +type metadataModifySpotFleetRequestOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifySpotFleetRequestOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySpotFleetRequestOutput) GoString() string { + return s.String() +} + type ModifySubnetAttributeInput struct { // Specify true to indicate that instances launched into the specified subnet // should be assigned public IP address. 
@@ -22060,10 +22174,9 @@ type SpotFleetLaunchSpecification struct { // The ID of the RAM disk. RamdiskId *string `locationName:"ramdiskId" type:"string"` - // One or more security groups. To request an instance in a nondefault VPC, - // you must specify the ID of the security group. To request an instance in - // EC2-Classic or a default VPC, you can specify the name or the ID of the security - // group. + // One or more security groups. When requesting instances in a VPC, you must + // specify the IDs of the security groups. When requesting instances in EC2-Classic, + // you can specify the names or the IDs of the security groups. SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` // The bid price per unit hour for the specified instance type. If this value @@ -22130,6 +22243,9 @@ func (s SpotFleetMonitoring) GoString() string { // Describes a Spot fleet request. type SpotFleetRequestConfig struct { + // The creation date and time of the request. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + // Information about the configuration of the Spot fleet request. SpotFleetRequestConfig *SpotFleetRequestConfigData `locationName:"spotFleetRequestConfig" type:"structure" required:"true"` @@ -22158,7 +22274,7 @@ func (s SpotFleetRequestConfig) GoString() string { // Describes the configuration of a Spot fleet request. type SpotFleetRequestConfigData struct { - // Determines how to allocate the target capacity across the Spot pools specified + // Indicates how to allocate the target capacity across the Spot pools specified // by the Spot fleet request. The default is lowestPrice. AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"` @@ -22167,6 +22283,11 @@ type SpotFleetRequestConfigData struct { // see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `locationName:"clientToken" type:"string"` + // Indicates whether running Spot instances should be terminated if the target + // capacity of the Spot fleet request is decreased below the current size of + // the Spot fleet. + ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` + // Grants the Spot fleet permission to terminate Spot instances on your behalf // when you cancel its Spot fleet request using CancelSpotFleetRequests or when // the Spot fleet request expires, if you set terminateInstancesWithExpiration. 
@@ -23721,6 +23842,12 @@ const ( const ( // @enum AvailabilityZoneState AvailabilityZoneStateAvailable = "available" + // @enum AvailabilityZoneState + AvailabilityZoneStateInformation = "information" + // @enum AvailabilityZoneState + AvailabilityZoneStateImpaired = "impaired" + // @enum AvailabilityZoneState + AvailabilityZoneStateUnavailable = "unavailable" ) const ( @@ -23736,6 +23863,8 @@ const ( BatchStateCancelledRunning = "cancelled_running" // @enum BatchState BatchStateCancelledTerminating = "cancelled_terminating" + // @enum BatchState + BatchStateModifying = "modifying" ) const ( @@ -23852,6 +23981,13 @@ const ( EventTypeError = "error" ) +const ( + // @enum ExcessCapacityTerminationPolicy + ExcessCapacityTerminationPolicyNoTermination = "noTermination" + // @enum ExcessCapacityTerminationPolicy + ExcessCapacityTerminationPolicyDefault = "default" +) + const ( // @enum ExportEnvironment ExportEnvironmentCitrix = "citrix" diff --git a/_third_party/github.com/aws/aws-sdk-go/service/ec2/examples_test.go b/_third_party/github.com/aws/aws-sdk-go/service/ec2/examples_test.go index 86605a4606..1da2b20bbd 100644 --- a/_third_party/github.com/aws/aws-sdk-go/service/ec2/examples_test.go +++ b/_third_party/github.com/aws/aws-sdk-go/service/ec2/examples_test.go @@ -4132,6 +4132,27 @@ func ExampleEC2_ModifySnapshotAttribute() { fmt.Println(resp) } +func ExampleEC2_ModifySpotFleetRequest() { + svc := ec2.New(nil) + + params := &ec2.ModifySpotFleetRequestInput{ + SpotFleetRequestId: aws.String("String"), // Required + ExcessCapacityTerminationPolicy: aws.String("ExcessCapacityTerminationPolicy"), + TargetCapacity: aws.Int64(1), + } + resp, err := svc.ModifySpotFleetRequest(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + func ExampleEC2_ModifySubnetAttribute() { svc := ec2.New(nil) @@ -4623,6 +4644,7 @@ func ExampleEC2_RequestSpotFleet() { TargetCapacity: aws.Int64(1), // Required AllocationStrategy: aws.String("AllocationStrategy"), ClientToken: aws.String("String"), + ExcessCapacityTerminationPolicy: aws.String("ExcessCapacityTerminationPolicy"), TerminateInstancesWithExpiration: aws.Bool(true), ValidFrom: aws.Time(time.Now()), ValidUntil: aws.Time(time.Now()), diff --git a/_third_party/github.com/aws/aws-sdk-go/service/ec2/service.go b/_third_party/github.com/aws/aws-sdk-go/service/ec2/service.go index d46eb5f324..46b17c22dd 100644 --- a/_third_party/github.com/aws/aws-sdk-go/service/ec2/service.go +++ b/_third_party/github.com/aws/aws-sdk-go/service/ec2/service.go @@ -32,7 +32,7 @@ func New(config *aws.Config) *EC2 { ServiceInfo: serviceinfo.ServiceInfo{ Config: defaults.DefaultConfig.Merge(config), ServiceName: "ec2", - APIVersion: "2015-04-15", + APIVersion: "2015-10-01", }, } service.Initialize() diff --git a/_third_party/github.com/aymerick/douceur/css/declaration.go b/_third_party/github.com/aymerick/douceur/css/declaration.go index 9d435707a4..61d29d3359 100644 --- a/_third_party/github.com/aymerick/douceur/css/declaration.go +++ b/_third_party/github.com/aymerick/douceur/css/declaration.go @@ -2,14 +2,14 @@ package css import "fmt" -// A parsed style property +// Declaration represents a parsed style property type Declaration struct { Property string Value string Important bool } -// Instanciate a new Declaration +// NewDeclaration instanciates a new Declaration func NewDeclaration() *Declaration { return &Declaration{} } @@ -19,7 +19,7 @@ func (decl *Declaration) String() string { return decl.StringWithImportant(true) } -// Returns string representation with optional !important part +// StringWithImportant returns string representation with optional !important part func (decl *Declaration) StringWithImportant(option bool) string { result := fmt.Sprintf("%s: %s", decl.Property, decl.Value) @@ -32,7 +32,7 @@ func (decl *Declaration) StringWithImportant(option bool) string { return result } -// Returns true if both Declarations are equals +// Equal returns true if both Declarations are equals func (decl *Declaration) Equal(other *Declaration) bool { return (decl.Property == other.Property) && (decl.Value == other.Value) && (decl.Important == other.Important) } @@ -41,7 +41,7 @@ func (decl *Declaration) Equal(other *Declaration) bool { // DeclarationsByProperty // -// Sortable style declarations +// DeclarationsByProperty represents sortable style declarations type DeclarationsByProperty []*Declaration // Implements sort.Interface diff --git a/_third_party/github.com/aymerick/douceur/css/rule.go b/_third_party/github.com/aymerick/douceur/css/rule.go index a9240a6948..b5a44b5429 100644 --- a/_third_party/github.com/aymerick/douceur/css/rule.go +++ b/_third_party/github.com/aymerick/douceur/css/rule.go @@ -6,15 +6,16 @@ import ( ) const ( - IDENT_SPACES = 2 + indentSpace = 2 ) +// RuleKind represents a Rule kind type RuleKind int // Rule kinds const ( - QUALIFIED_RULE RuleKind = iota - AT_RULE + QualifiedRule RuleKind = iota + AtRule ) // At Rules than have Rules inside their block instead of Declarations @@ -22,7 +23,7 @@ var atRulesWithRulesBlock = []string{ "@document", "@font-feature-values", "@keyframes", "@media", "@supports", } -// A parsed CSS rule +// Rule represents a parsed CSS rule type Rule struct { Kind RuleKind @@ -45,7 +46,7 @@ type Rule struct { EmbedLevel 
int
}

-// Instanciate a new Rule
+// NewRule instantiates a new Rule
 func NewRule(kind RuleKind) *Rule {
 	return &Rule{
 		Kind: kind,
@@ -55,18 +56,18 @@
 // Returns string representation of rule kind
 func (kind RuleKind) String() string {
 	switch kind {
-	case QUALIFIED_RULE:
+	case QualifiedRule:
 		return "Qualified Rule"
-	case AT_RULE:
+	case AtRule:
 		return "At Rule"
 	default:
 		return "WAT"
 	}
 }

-// Returns true if this rule embeds another rules
+// EmbedsRules returns true if this rule embeds other rules
 func (rule *Rule) EmbedsRules() bool {
-	if rule.Kind == AT_RULE {
+	if rule.Kind == AtRule {
 		for _, atRuleName := range atRulesWithRulesBlock {
 			if rule.Name == atRuleName {
 				return true
@@ -77,7 +78,7 @@
 	return false
 }

-// Returns true if both rules are equals
+// Equal returns true if both rules are equal
 func (rule *Rule) Equal(other *Rule) bool {
 	if (rule.Kind != other.Kind) ||
 		(rule.Prelude != other.Prelude) ||
@@ -112,7 +113,7 @@
 	return true
 }

-// Returns a string representation of rules differences
+// Diff returns a string representation of rules differences
 func (rule *Rule) Diff(other *Rule) []string {
 	result := []string{}
@@ -166,7 +167,7 @@
 func (rule *Rule) String() string {
 	result := ""

-	if rule.Kind == QUALIFIED_RULE {
+	if rule.Kind == QualifiedRule {
 		for i, sel := range rule.Selectors {
 			if i != 0 {
 				result += ", "
@@ -174,7 +175,7 @@
 			result += sel
 		}
 	} else {
-		// AT_RULE
+		// AtRule
 		result += fmt.Sprintf("%s", rule.Name)

 		if rule.Prelude != "" {
@@ -210,7 +211,7 @@
 func (rule *Rule) indent() string {
 	result := ""

-	for i := 0; i < ((rule.EmbedLevel + 1) * IDENT_SPACES); i++ {
+	for i := 0; i < ((rule.EmbedLevel + 1) * indentSpace); i++ {
 		result += " "
 	}
@@ -221,7 +222,7 @@
 func (rule *Rule) indentEndBlock() string {
 	result := ""

-	for i := 0; i < (rule.EmbedLevel * IDENT_SPACES); i++ {
+	for i := 0; i < (rule.EmbedLevel * indentSpace); i++ {
 		result += " "
 	}

diff --git a/_third_party/github.com/aymerick/douceur/css/stylesheet.go b/_third_party/github.com/aymerick/douceur/css/stylesheet.go
index 02f409b077..6b32c2ec90 100644
--- a/_third_party/github.com/aymerick/douceur/css/stylesheet.go
+++ b/_third_party/github.com/aymerick/douceur/css/stylesheet.go
@@ -1,11 +1,11 @@
 package css

-// A Parsed stylesheet
+// Stylesheet represents a parsed stylesheet
 type Stylesheet struct {
 	Rules []*Rule
 }

-// Instanciate a new Stylesheet
+// NewStylesheet instantiates a new Stylesheet
 func NewStylesheet() *Stylesheet {
 	return &Stylesheet{}
 }
diff --git a/_third_party/github.com/aymerick/douceur/inliner/element.go b/_third_party/github.com/aymerick/douceur/inliner/element.go
index 5691d247ad..806f58b05a 100644
--- a/_third_party/github.com/aymerick/douceur/inliner/element.go
+++ b/_third_party/github.com/aymerick/douceur/inliner/element.go
@@ -9,7 +9,7 @@ import (
 	"bosun.org/_third_party/github.com/aymerick/douceur/parser"
 )

-// An HTML element with matching CSS rules
+// Element represents an HTML element with matching CSS rules
 type Element struct {
 	// The goquery handler
 	elt *goquery.Selection
@@ -18,6 +18,7 @@ type Element struct {
 	styleRules []*StyleRule
 }

+// ElementAttr represents an HTML element attribute
 type ElementAttr struct {
 	attr     string
 	elements []string
@@ -30,23 +31,23 @@ func init() {
 	// Borrowed from premailer:
// https://github.com/premailer/premailer/blob/master/lib/premailer/premailer.rb styleToAttr = map[string]*ElementAttr{ - "text-align": &ElementAttr{ + "text-align": { "align", []string{"h1", "h2", "h3", "h4", "h5", "h6", "p", "div", "blockquote", "tr", "th", "td"}, }, - "background-color": &ElementAttr{ + "background-color": { "bgcolor", []string{"body", "table", "tr", "th", "td"}, }, - "background-image": &ElementAttr{ + "background-image": { "background", []string{"table"}, }, - "vertical-align": &ElementAttr{ + "vertical-align": { "valign", []string{"th", "td"}, }, - "float": &ElementAttr{ + "float": { "align", []string{"img"}, }, @@ -54,7 +55,7 @@ func init() { } } -// Instanciate a new element +// NewElement instanciates a new element func NewElement(elt *goquery.Selection) *Element { return &Element{ elt: elt, @@ -128,7 +129,7 @@ func (element *Element) parseInlineStyle() ([]*StyleRule, error) { return result, err } - result = append(result, NewStyleRule(INLINE_FAKE_SELECTOR, declarations)) + result = append(result, NewStyleRule(inlineFakeSelector, declarations)) return result, nil } diff --git a/_third_party/github.com/aymerick/douceur/inliner/inliner.go b/_third_party/github.com/aymerick/douceur/inliner/inliner.go index 4088f6112d..43dc73d231 100644 --- a/_third_party/github.com/aymerick/douceur/inliner/inliner.go +++ b/_third_party/github.com/aymerick/douceur/inliner/inliner.go @@ -8,11 +8,11 @@ import ( "bosun.org/_third_party/github.com/PuerkitoBio/goquery" "bosun.org/_third_party/github.com/aymerick/douceur/css" "bosun.org/_third_party/github.com/aymerick/douceur/parser" - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) const ( - ELT_MARKER_ATTR = "douceur-mark" + eltMarkerAttr = "douceur-mark" ) var unsupportedSelectors = []string{ @@ -20,7 +20,7 @@ var unsupportedSelectors = []string{ ":first-line", ":first-letter", ":focus", ":hover", ":invalid", ":in-range", ":lang", ":link", ":root", ":selection", ":target", ":valid", ":visited"} -// CSS Inliner +// Inliner presents a CSS Inliner type Inliner struct { // Raw HTML html string @@ -44,7 +44,7 @@ type Inliner struct { eltMarker int } -// Instanciate a new Inliner +// NewInliner instanciates a new Inliner func NewInliner(html string) *Inliner { return &Inliner{ html: html, @@ -52,17 +52,17 @@ func NewInliner(html string) *Inliner { } } -// Inlines css into html document +// Inline inlines css into html document func Inline(html string) (string, error) { result, err := NewInliner(html).Inline() if err != nil { return "", err - } else { - return result, nil } + + return result, nil } -// Inlines CSS and returns HTML +// Inline inlines CSS and returns HTML func (inliner *Inliner) Inline() (string, error) { // parse HTML document if err := inliner.parseHTML(); err != nil { @@ -127,7 +127,7 @@ func (inliner *Inliner) parseStylesheets() error { func (inliner *Inliner) collectElementsAndRules() { for _, stylesheet := range inliner.stylesheets { for _, rule := range stylesheet.Rules { - if rule.Kind == css.QUALIFIED_RULE { + if rule.Kind == css.QualifiedRule { // Let's go! 
inliner.handleQualifiedRule(rule) } else { @@ -144,12 +144,12 @@ func (inliner *Inliner) handleQualifiedRule(rule *css.Rule) { if Inlinable(selector) { inliner.doc.Find(selector).Each(func(i int, s *goquery.Selection) { // get marker - eltMarker, exists := s.Attr(ELT_MARKER_ATTR) + eltMarker, exists := s.Attr(eltMarkerAttr) if !exists { // mark element eltMarker = strconv.Itoa(inliner.eltMarker) - s.SetAttr(ELT_MARKER_ATTR, eltMarker) - inliner.eltMarker += 1 + s.SetAttr(eltMarkerAttr, eltMarker) + inliner.eltMarker++ // add new element inliner.elements[eltMarker] = NewElement(s) @@ -169,7 +169,7 @@ func (inliner *Inliner) handleQualifiedRule(rule *css.Rule) { func (inliner *Inliner) inlineStyleRules() error { for _, element := range inliner.elements { // remove marker - element.elt.RemoveAttr(ELT_MARKER_ATTR) + element.elt.RemoveAttr(eltMarkerAttr) // inline element err := element.inline() @@ -206,7 +206,7 @@ func (inliner *Inliner) insertRawStylesheet() { styleNode := &html.Node{ Type: html.ElementNode, Data: "style", - Attr: []html.Attribute{html.Attribute{Key: "type", Val: "text/css"}}, + Attr: []html.Attribute{{Key: "type", Val: "text/css"}}, } styleNode.AppendChild(cssNode) @@ -227,7 +227,7 @@ func (inliner *Inliner) genHTML() (string, error) { return inliner.doc.Html() } -// Returns true if given selector is inlinable +// Inlinable returns true if given selector is inlinable func Inlinable(selector string) bool { if strings.Contains(selector, "::") { return false diff --git a/_third_party/github.com/aymerick/douceur/inliner/style_declaration.go b/_third_party/github.com/aymerick/douceur/inliner/style_declaration.go index 8210c544dc..bf52fa72cf 100644 --- a/_third_party/github.com/aymerick/douceur/inliner/style_declaration.go +++ b/_third_party/github.com/aymerick/douceur/inliner/style_declaration.go @@ -2,11 +2,13 @@ package inliner import "bosun.org/_third_party/github.com/aymerick/douceur/css" +// StyleDeclaration represents a style declaration type StyleDeclaration struct { StyleRule *StyleRule Declaration *css.Declaration } +// NewStyleDeclaration instanciates a new StyleDeclaration func NewStyleDeclaration(styleRule *StyleRule, declaration *css.Declaration) *StyleDeclaration { return &StyleDeclaration{ StyleRule: styleRule, @@ -14,7 +16,7 @@ func NewStyleDeclaration(styleRule *StyleRule, declaration *css.Declaration) *St } } -// Computes style declaration specificity +// Specificity computes style declaration specificity func (styleDecl *StyleDeclaration) Specificity() int { if styleDecl.Declaration.Important { return 10000 diff --git a/_third_party/github.com/aymerick/douceur/inliner/style_rule.go b/_third_party/github.com/aymerick/douceur/inliner/style_rule.go index fc2ef6ea5a..8517e8fd65 100644 --- a/_third_party/github.com/aymerick/douceur/inliner/style_rule.go +++ b/_third_party/github.com/aymerick/douceur/inliner/style_rule.go @@ -9,12 +9,12 @@ import ( ) const ( - INLINE_FAKE_SELECTOR = "*INLINE*" + inlineFakeSelector = "*INLINE*" // Regular expressions borrowed from premailer: // https://github.com/premailer/css_parser/blob/master/lib/css_parser/regexps.rb - NON_ID_ATTRIBUTES_AND_PSEUDO_CLASSES_REGEXP = `(?i)(\.[\w]+)|\[(\w+)|(\:(link|visited|active|hover|focus|lang|target|enabled|disabled|checked|indeterminate|root|nth-child|nth-last-child|nth-of-type|nth-last-of-type|first-child|last-child|first-of-type|last-of-type|only-child|only-of-type|empty|contains))` - ELEMENTS_AND_PSEUDO_ELEMENTS_REGEXP = 
`(?i)((^|[\s\+\>\~]+)[\w]+|\:{1,2}(after|before|first-letter|first-line|selection))`
+	nonIDAttributesAndPseudoClassesRegexpConst = `(?i)(\.[\w]+)|\[(\w+)|(\:(link|visited|active|hover|focus|lang|target|enabled|disabled|checked|indeterminate|root|nth-child|nth-last-child|nth-of-type|nth-last-of-type|first-child|last-child|first-of-type|last-of-type|only-child|only-of-type|empty|contains))`
+	elementsAndPseudoElementsRegexpConst = `(?i)((^|[\s\+\>\~]+)[\w]+|\:{1,2}(after|before|first-letter|first-line|selection))`
 )

 var (
@@ -22,7 +22,7 @@ var (
 	elementsAndPseudoElementsRegexp *regexp.Regexp
 )

-// A Qualifier Rule for a uniq selector
+// StyleRule represents a Qualified Rule for a unique selector
 type StyleRule struct {
 	// The style rule selector
 	Selector string
@@ -35,11 +35,11 @@ type StyleRule struct {
 }

 func init() {
-	nonIDAttrAndPseudoClassesRegexp, _ = regexp.Compile(NON_ID_ATTRIBUTES_AND_PSEUDO_CLASSES_REGEXP)
-	elementsAndPseudoElementsRegexp, _ = regexp.Compile(ELEMENTS_AND_PSEUDO_ELEMENTS_REGEXP)
+	nonIDAttrAndPseudoClassesRegexp, _ = regexp.Compile(nonIDAttributesAndPseudoClassesRegexpConst)
+	elementsAndPseudoElementsRegexp, _ = regexp.Compile(elementsAndPseudoElementsRegexpConst)
 }

-// Instanciate a new StyleRule
+// NewStyleRule instantiates a new StyleRule
 func NewStyleRule(selector string, declarations []*css.Declaration) *StyleRule {
 	return &StyleRule{
 		Selector:     selector,
@@ -69,13 +69,13 @@
 	return result
 }

-// Computes style rule specificity
+// ComputeSpecificity computes style rule specificity
 //
 // cf. http://www.w3.org/TR/selectors/#specificity
 func ComputeSpecificity(selector string) int {
 	result := 0

-	if selector == INLINE_FAKE_SELECTOR {
+	if selector == inlineFakeSelector {
 		result += 1000
 	}
diff --git a/_third_party/github.com/aymerick/douceur/parser/parser.go b/_third_party/github.com/aymerick/douceur/parser/parser.go
index a558c3914b..8803158bed 100644
--- a/_third_party/github.com/aymerick/douceur/parser/parser.go
+++ b/_third_party/github.com/aymerick/douceur/parser/parser.go
@@ -12,13 +12,14 @@ import (
 )

 const (
-	IMPORTANT_SUFFIX_REGEXP = `(?i)\s*!important\s*$`
+	importantSuffixRegexp = `(?i)\s*!important\s*$`
 )

 var (
 	importantRegexp *regexp.Regexp
 )

+// Parser represents a CSS parser
 type Parser struct {
 	scan *scanner.Scanner // Tokenizer
@@ -30,37 +31,37 @@ func init() {
-	importantRegexp = regexp.MustCompile(IMPORTANT_SUFFIX_REGEXP)
+	importantRegexp = regexp.MustCompile(importantSuffixRegexp)
 }

-// Instanciate a new parser
+// NewParser instantiates a new parser
 func NewParser(txt string) *Parser {
 	return &Parser{
 		scan: scanner.New(txt),
 	}
 }

-// Parse a whole stylesheet
+// Parse parses a whole stylesheet
 func Parse(text string) (*css.Stylesheet, error) {
 	result, err := NewParser(text).ParseStylesheet()
 	if err != nil {
 		return nil, err
-	} else {
-		return result, nil
 	}
+
+	return result, nil
 }

-// Parse CSS declarations
+// ParseDeclarations parses CSS declarations
 func ParseDeclarations(text string) ([]*css.Declaration, error) {
 	result, err := NewParser(text).ParseDeclarations()
 	if err != nil {
 		return nil, err
-	} else {
-		return result, nil
 	}
+
+	return result, nil
 }

-// Parse a stylesheet
+// ParseStylesheet parses a stylesheet
 func (parser *Parser) ParseStylesheet() (*css.Stylesheet, error) {
 	result := css.NewStylesheet()
@@ -80,7 +81,7 @@
 	return result, nil
 }

-// Parse a list of rules
+// ParseRules parses a list of
rules func (parser *Parser) ParseRules() ([]*css.Rule, error) { result := []*css.Rule{} @@ -88,7 +89,7 @@ func (parser *Parser) ParseRules() ([]*css.Rule, error) { if parser.tokenChar("{") { // parsing a block of rules inBlock = true - parser.embedLevel += 1 + parser.embedLevel++ parser.shiftToken() } @@ -103,7 +104,7 @@ func (parser *Parser) ParseRules() ([]*css.Rule, error) { } parser.shiftToken() - parser.embedLevel -= 1 + parser.embedLevel-- // finished break @@ -121,16 +122,16 @@ func (parser *Parser) ParseRules() ([]*css.Rule, error) { return result, parser.err() } -// Parse a rule +// ParseRule parses a rule func (parser *Parser) ParseRule() (*css.Rule, error) { if parser.tokenAtKeyword() { return parser.parseAtRule() - } else { - return parser.parseQualifiedRule() } + + return parser.parseQualifiedRule() } -// Parse a list of declarations +// ParseDeclarations parses a list of declarations func (parser *Parser) ParseDeclarations() ([]*css.Declaration, error) { result := []*css.Declaration{} @@ -158,7 +159,7 @@ func (parser *Parser) ParseDeclarations() ([]*css.Declaration, error) { return result, parser.err() } -// Parse a declaration +// ParseDeclaration parses a declaration func (parser *Parser) ParseDeclaration() (*css.Declaration, error) { result := css.NewDeclaration() curValue := "" @@ -204,7 +205,7 @@ func (parser *Parser) parseAtRule() (*css.Rule, error) { // parse rule name (eg: "@import") token := parser.shiftToken() - result := css.NewRule(css.AT_RULE) + result := css.NewRule(css.AtRule) result.Name = token.Value for parser.tokenParsable() { @@ -252,7 +253,7 @@ func (parser *Parser) parseAtRule() (*css.Rule, error) { // Parse a Qualified Rule func (parser *Parser) parseQualifiedRule() (*css.Rule, error) { - result := css.NewRule(css.QUALIFIED_RULE) + result := css.NewRule(css.QualifiedRule) for parser.tokenParsable() { if parser.tokenChar("{") { @@ -313,9 +314,9 @@ func (parser *Parser) parseBOM() (bool, error) { if parser.nextToken().Type == scanner.TokenBOM { parser.shiftToken() return true, nil - } else { - return false, parser.err() } + + return false, parser.err() } // Returns next token without removing it from tokens buffer @@ -345,10 +346,10 @@ func (parser *Parser) shiftToken() *scanner.Token { func (parser *Parser) err() error { if parser.tokenError() { token := parser.nextToken() - return errors.New(fmt.Sprintf("Tokenizer error: %s", token.String())) - } else { - return nil + return fmt.Errorf("Tokenizer error: %s", token.String()) } + + return nil } // Returns true if next token is Error diff --git a/_third_party/github.com/aymerick/douceur/parser/parser_test.go b/_third_party/github.com/aymerick/douceur/parser/parser_test.go index c57c63edc7..07f9366dfc 100644 --- a/_third_party/github.com/aymerick/douceur/parser/parser_test.go +++ b/_third_party/github.com/aymerick/douceur/parser/parser_test.go @@ -43,15 +43,15 @@ p > a { }` expectedRule := &css.Rule{ - Kind: css.QUALIFIED_RULE, + Kind: css.QualifiedRule, Prelude: "p > a", Selectors: []string{"p > a"}, Declarations: []*css.Declaration{ - &css.Declaration{ + { Property: "color", Value: "blue", }, - &css.Declaration{ + { Property: "text-decoration", Value: "underline", }, @@ -80,21 +80,21 @@ p > a { }` expectedRule := &css.Rule{ - Kind: css.QUALIFIED_RULE, + Kind: css.QualifiedRule, Prelude: "p > a", Selectors: []string{"p > a"}, Declarations: []*css.Declaration{ - &css.Declaration{ + { Property: "color", Value: "blue", Important: false, }, - &css.Declaration{ + { Property: "text-decoration", Value: 
"underline", Important: true, }, - &css.Declaration{ + { Property: "font-weight", Value: "normal", Important: true, @@ -128,11 +128,11 @@ body, }` expectedRule1 := &css.Rule{ - Kind: css.QUALIFIED_RULE, + Kind: css.QualifiedRule, Prelude: "table, tr, td", Selectors: []string{"table", "tr", "td"}, Declarations: []*css.Declaration{ - &css.Declaration{ + { Property: "padding", Value: "0", }, @@ -140,13 +140,13 @@ body, } expectedRule2 := &css.Rule{ - Kind: css.QUALIFIED_RULE, + Kind: css.QualifiedRule, Prelude: `body, h1, h2, h3`, Selectors: []string{"body", "h1", "h2", "h3"}, Declarations: []*css.Declaration{ - &css.Declaration{ + { Property: "color", Value: "#fff", }, @@ -172,7 +172,7 @@ func TestAtRuleCharset(t *testing.T) { input := `@charset "UTF-8";` expectedRule := &css.Rule{ - Kind: css.AT_RULE, + Kind: css.AtRule, Name: "@charset", Prelude: "\"UTF-8\"", } @@ -195,19 +195,19 @@ func TestAtRuleCounterStyle(t *testing.T) { }` expectedRule := &css.Rule{ - Kind: css.AT_RULE, + Kind: css.AtRule, Name: "@counter-style", Prelude: "footnote", Declarations: []*css.Declaration{ - &css.Declaration{ + { Property: "system", Value: "symbolic", }, - &css.Declaration{ + { Property: "symbols", Value: "'*' ⁑ † ‡", }, - &css.Declaration{ + { Property: "suffix", Value: "''", }, @@ -240,23 +240,23 @@ func TestAtRuleDocument(t *testing.T) { }` expectedRule := &css.Rule{ - Kind: css.AT_RULE, + Kind: css.AtRule, Name: "@document", Prelude: `url(http://www.w3.org/), url-prefix(http://www.w3.org/Style/), domain(mozilla.org), regexp("https:.*")`, Rules: []*css.Rule{ - &css.Rule{ - Kind: css.QUALIFIED_RULE, + { + Kind: css.QualifiedRule, Prelude: "body", Selectors: []string{"body"}, Declarations: []*css.Declaration{ - &css.Declaration{ + { Property: "color", Value: "purple", }, - &css.Declaration{ + { Property: "background", Value: "yellow", }, @@ -293,20 +293,20 @@ func TestAtRuleFontFace(t *testing.T) { }` expectedRule := &css.Rule{ - Kind: css.AT_RULE, + Kind: css.AtRule, Name: "@font-face", Declarations: []*css.Declaration{ - &css.Declaration{ + { Property: "font-family", Value: "MyHelvetica", }, - &css.Declaration{ + { Property: "src", Value: `local("Helvetica Neue Bold"), local("HelveticaNeue-Bold"), url(MgOpenModernaBold.ttf)`, }, - &css.Declaration{ + { Property: "font-weight", Value: "bold", }, @@ -328,15 +328,15 @@ func TestAtRuleFontFeatureValues(t *testing.T) { } }` expectedRule := &css.Rule{ - Kind: css.AT_RULE, + Kind: css.AtRule, Name: "@font-feature-values", Prelude: "Font Two", Rules: []*css.Rule{ - &css.Rule{ - Kind: css.AT_RULE, + { + Kind: css.AtRule, Name: "@styleset", Declarations: []*css.Declaration{ - &css.Declaration{ + { Property: "nice-style", Value: "4", }, @@ -364,13 +364,13 @@ func TestAtRuleImport(t *testing.T) { @import url('landscape.css') screen and (orientation:landscape);` expectedRule1 := &css.Rule{ - Kind: css.AT_RULE, + Kind: css.AtRule, Name: "@import", Prelude: "\"my-styles.css\"", } expectedRule2 := &css.Rule{ - Kind: css.AT_RULE, + Kind: css.AtRule, Name: "@import", Prelude: "url('landscape.css') screen and (orientation:landscape)", } @@ -389,35 +389,35 @@ func TestAtRuleKeyframes(t *testing.T) { 100% { top: 100px; left: 100%; } }` expectedRule := &css.Rule{ - Kind: css.AT_RULE, + Kind: css.AtRule, Name: "@keyframes", Prelude: "identifier", Rules: []*css.Rule{ - &css.Rule{ - Kind: css.QUALIFIED_RULE, + { + Kind: css.QualifiedRule, Prelude: "0%", Selectors: []string{"0%"}, Declarations: []*css.Declaration{ - &css.Declaration{ + { Property: "top", Value: "0", }, - 
&css.Declaration{ + { Property: "left", Value: "0", }, }, }, - &css.Rule{ - Kind: css.QUALIFIED_RULE, + { + Kind: css.QualifiedRule, Prelude: "100%", Selectors: []string{"100%"}, Declarations: []*css.Declaration{ - &css.Declaration{ + { Property: "top", Value: "100px", }, - &css.Declaration{ + { Property: "left", Value: "100%", }, @@ -450,16 +450,16 @@ func TestAtRuleMedia(t *testing.T) { body { line-height: 1.2 } }` expectedRule := &css.Rule{ - Kind: css.AT_RULE, + Kind: css.AtRule, Name: "@media", Prelude: "screen, print", Rules: []*css.Rule{ - &css.Rule{ - Kind: css.QUALIFIED_RULE, + { + Kind: css.QualifiedRule, Prelude: "body", Selectors: []string{"body"}, Declarations: []*css.Declaration{ - &css.Declaration{ + { Property: "line-height", Value: "1.2", }, @@ -485,7 +485,7 @@ func TestAtRuleMedia(t *testing.T) { func TestAtRuleNamespace(t *testing.T) { input := `@namespace svg url(http://www.w3.org/2000/svg);` expectedRule := &css.Rule{ - Kind: css.AT_RULE, + Kind: css.AtRule, Name: "@namespace", Prelude: "svg url(http://www.w3.org/2000/svg)", } @@ -504,15 +504,15 @@ func TestAtRulePage(t *testing.T) { margin-right: 3cm; }` expectedRule := &css.Rule{ - Kind: css.AT_RULE, + Kind: css.AtRule, Name: "@page", Prelude: ":left", Declarations: []*css.Declaration{ - &css.Declaration{ + { Property: "margin-left", Value: "4cm", }, - &css.Declaration{ + { Property: "margin-right", Value: "3cm", }, @@ -536,39 +536,39 @@ func TestAtRuleSupports(t *testing.T) { } }` expectedRule := &css.Rule{ - Kind: css.AT_RULE, + Kind: css.AtRule, Name: "@supports", Prelude: "(animation-name: test)", Rules: []*css.Rule{ - &css.Rule{ - Kind: css.AT_RULE, + { + Kind: css.AtRule, Name: "@keyframes", Rules: []*css.Rule{ - &css.Rule{ - Kind: css.QUALIFIED_RULE, + { + Kind: css.QualifiedRule, Prelude: "0%", Selectors: []string{"0%"}, Declarations: []*css.Declaration{ - &css.Declaration{ + { Property: "top", Value: "0", }, - &css.Declaration{ + { Property: "left", Value: "0", }, }, }, - &css.Rule{ - Kind: css.QUALIFIED_RULE, + { + Kind: css.QualifiedRule, Prelude: "100%", Selectors: []string{"100%"}, Declarations: []*css.Declaration{ - &css.Declaration{ + { Property: "top", Value: "100px", }, - &css.Declaration{ + { Property: "left", Value: "100%", }, @@ -609,11 +609,11 @@ func TestParseDeclarations(t *testing.T) { } expectedOutput := []*css.Declaration{ - &css.Declaration{ + { Property: "color", Value: "blue", }, - &css.Declaration{ + { Property: "text-decoration", Value: "underline", }, diff --git a/_third_party/github.com/boltdb/bolt/README.md b/_third_party/github.com/boltdb/bolt/README.md index 00fad6afb8..3c9e46517f 100644 --- a/_third_party/github.com/boltdb/bolt/README.md +++ b/_third_party/github.com/boltdb/bolt/README.md @@ -617,5 +617,9 @@ Below is a list of public, open source projects that use Bolt: * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. * [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistant, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. * [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [stow](https://github.com/djherbis/stow) - a persistence manager for objects + backed by boltdb. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. 
If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/_third_party/github.com/boltdb/bolt/bolt_unix.go b/_third_party/github.com/boltdb/bolt/bolt_unix.go index 17ca318bf7..6eef6b2203 100644 --- a/_third_party/github.com/boltdb/bolt/bolt_unix.go +++ b/_third_party/github.com/boltdb/bolt/bolt_unix.go @@ -1,4 +1,4 @@ -// +build !windows,!plan9 +// +build !windows,!plan9,!solaris package bolt diff --git a/_third_party/github.com/boltdb/bolt/bolt_unix_solaris.go b/_third_party/github.com/boltdb/bolt/bolt_unix_solaris.go index 6134ea9539..3360e4ff89 100644 --- a/_third_party/github.com/boltdb/bolt/bolt_unix_solaris.go +++ b/_third_party/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -1,8 +1,8 @@ package bolt import ( - "bosun.org/_third_party/golang.org/x/sys/unix" "fmt" + "golang.org/x/sys/unix" "os" "syscall" "time" diff --git a/_third_party/github.com/boltdb/bolt/bucket.go b/_third_party/github.com/boltdb/bolt/bucket.go index 6766992100..8c3edae86a 100644 --- a/_third_party/github.com/boltdb/bolt/bucket.go +++ b/_third_party/github.com/boltdb/bolt/bucket.go @@ -346,7 +346,8 @@ func (b *Bucket) NextSequence() (uint64, error) { // ForEach executes a function for each key/value pair in a bucket. // If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. +// the error is returned to the caller. The provided function must not modify +// the bucket; this will result in undefined behavior. func (b *Bucket) ForEach(fn func(k, v []byte) error) error { if b.tx.db == nil { return ErrTxClosed diff --git a/_third_party/github.com/boltdb/bolt/db_test.go b/_third_party/github.com/boltdb/bolt/db_test.go index 918906dbe3..210fce8868 100644 --- a/_third_party/github.com/boltdb/bolt/db_test.go +++ b/_third_party/github.com/boltdb/bolt/db_test.go @@ -42,6 +42,9 @@ func TestOpen_Timeout(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("timeout not supported on windows") } + if runtime.GOOS == "solaris" { + t.Skip("solaris fcntl locks don't support intra-process locking") + } path := tempfile() defer os.Remove(path) @@ -66,6 +69,9 @@ func TestOpen_Wait(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("timeout not supported on windows") } + if runtime.GOOS == "solaris" { + t.Skip("solaris fcntl locks don't support intra-process locking") + } path := tempfile() defer os.Remove(path) @@ -228,6 +234,10 @@ func TestDB_Open_FileTooSmall(t *testing.T) { // and that a database can not be opened in read-write mode and in read-only // mode at the same time. func TestOpen_ReadOnly(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip("solaris fcntl locks don't support intra-process locking") + } + bucket, key, value := []byte(`bucket`), []byte(`key`), []byte(`value`) path := tempfile() diff --git a/_third_party/github.com/boltdb/bolt/tx.go b/_third_party/github.com/boltdb/bolt/tx.go index 6b52b2c896..cb60149ee5 100644 --- a/_third_party/github.com/boltdb/bolt/tx.go +++ b/_third_party/github.com/boltdb/bolt/tx.go @@ -236,7 +236,8 @@ func (tx *Tx) close() { var freelistPendingN = tx.db.freelist.pending_count() var freelistAlloc = tx.db.freelist.size() - // Remove writer lock. + // Remove transaction ref & writer lock. + tx.db.rwtx = nil tx.db.rwlock.Unlock() // Merge statistics. @@ -250,7 +251,12 @@ func (tx *Tx) close() { } else { tx.db.removeTx(tx) } + + // Clear all references. tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil } // Copy writes the entire database to a writer. 
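The Bucket.ForEach documentation added above now forbids modifying the bucket from inside the callback. Below is a minimal sketch, illustrative only and not part of the patch, of the collect-then-delete pattern that satisfies that contract; the helper name deleteStale, the bucket name "cache", and the staleness predicate are hypothetical, and the canonical boltdb import path is used rather than this repo's vendored one.

package main

import (
	"log"

	"github.com/boltdb/bolt"
)

// deleteStale gathers matching keys during ForEach and removes them only
// after iteration finishes, since the callback must not modify the bucket.
func deleteStale(db *bolt.DB, bucket []byte, isStale func(k, v []byte) bool) error {
	return db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucket)
		if b == nil {
			return nil // bucket never created; nothing to delete
		}
		var stale [][]byte
		if err := b.ForEach(func(k, v []byte) error {
			if isStale(k, v) {
				// Copy k: slices handed to the callback are only valid
				// for the life of the transaction.
				stale = append(stale, append([]byte(nil), k...))
			}
			return nil
		}); err != nil {
			return err
		}
		for _, k := range stale {
			if err := b.Delete(k); err != nil {
				return err
			}
		}
		return nil
	})
}

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	// Example predicate: treat empty (non-bucket) values as stale.
	if err := deleteStale(db, []byte("cache"), func(k, v []byte) bool {
		return v != nil && len(v) == 0
	}); err != nil {
		log.Fatal(err)
	}
}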
diff --git a/_third_party/github.com/garyburd/redigo/redis/conn.go b/_third_party/github.com/garyburd/redigo/redis/conn.go
index 310ac76236..6a3819f1d2 100644
--- a/_third_party/github.com/garyburd/redigo/redis/conn.go
+++ b/_third_party/github.com/garyburd/redigo/redis/conn.go
@@ -56,7 +56,7 @@ type conn struct {
 // DialTimeout acts like Dial but takes timeouts for establishing the
 // connection to the server, writing a command and reading a reply.
 //
-// DialTimeout is deprecated.
+// Deprecated: Use Dial with options instead.
 func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) {
 	return Dial(network, address,
 		DialConnectTimeout(connectTimeout),
@@ -162,7 +162,7 @@ func Dial(network, address string, options ...DialOption) (Conn, error) {
 	return c, nil
 }
 
-var pathDBRegexp = regexp.MustCompile(`/(\d)\z`)
+var pathDBRegexp = regexp.MustCompile(`/(\d+)\z`)
 
 // DialURL connects to a Redis server at the given URL using the Redis
 // URI scheme. URLs should follow the draft IANA specification for the
diff --git a/_third_party/github.com/garyburd/redigo/redis/pool.go b/_third_party/github.com/garyburd/redigo/redis/pool.go
index fa486676b6..f3a93e4e19 100644
--- a/_third_party/github.com/garyburd/redigo/redis/pool.go
+++ b/_third_party/github.com/garyburd/redigo/redis/pool.go
@@ -116,7 +116,7 @@ type Pool struct {
 	// the timeout to a value less than the server's timeout.
 	IdleTimeout time.Duration
 
-	// If Wait is true and the pool is at the MaxIdle limit, then Get() waits
+	// If Wait is true and the pool is at the MaxActive limit, then Get() waits
 	// for a connection to be returned to the pool before returning.
 	Wait bool
 
@@ -135,8 +135,9 @@ type idleConn struct {
 	t time.Time
 }
 
-// NewPool creates a new pool. This function is deprecated. Applications should
-// initialize the Pool fields directly as shown in example.
+// NewPool creates a new pool.
+//
+// Deprecated: Initialize the Pool directly as shown in the example.
 func NewPool(newFn func() (Conn, error), maxIdle int) *Pool {
 	return &Pool{Dial: newFn, MaxIdle: maxIdle}
 }
diff --git a/_third_party/github.com/garyburd/redigo/redis/reply.go b/_third_party/github.com/garyburd/redigo/redis/reply.go
index 4dfb24aabd..348a080417 100644
--- a/_third_party/github.com/garyburd/redigo/redis/reply.go
+++ b/_third_party/github.com/garyburd/redigo/redis/reply.go
@@ -215,7 +215,9 @@ func Bool(reply interface{}, err error) (bool, error) {
 	return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply)
 }
 
-// MultiBulk is deprecated. Use Values.
+// MultiBulk is a helper that converts an array command reply to a []interface{}.
+//
+// Deprecated: Use Values instead.
 func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) }
 
 // Values is a helper that converts an array command reply to a []interface{}.
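The redigo changes above deprecate DialTimeout and NewPool in favor of Dial options and direct Pool initialization, correct the Wait doc to reference MaxActive, and widen pathDBRegexp so DialURL accepts database numbers above 9. A minimal sketch of the replacement usage, not part of the patch, assuming a Redis server at localhost:6379 and using the canonical import path rather than the vendored one:

package main

import (
	"log"
	"time"

	"github.com/garyburd/redigo/redis"
)

func main() {
	// Dial with DialOptions replaces the deprecated DialTimeout.
	c, err := redis.Dial("tcp", "localhost:6379",
		redis.DialConnectTimeout(2*time.Second),
		redis.DialReadTimeout(time.Second),
		redis.DialWriteTimeout(time.Second))
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Initializing the Pool directly replaces the deprecated NewPool; with
	// Wait set, Get() blocks at the MaxActive limit per the corrected doc.
	pool := &redis.Pool{
		MaxIdle:     3,
		MaxActive:   10,
		IdleTimeout: 240 * time.Second,
		Wait:        true,
		Dial:        func() (redis.Conn, error) { return redis.Dial("tcp", "localhost:6379") },
	}
	conn := pool.Get()
	defer conn.Close()

	// With the /(\d+)\z pattern, multi-digit database selectors now parse.
	c2, err := redis.DialURL("redis://localhost:6379/12")
	if err != nil {
		log.Fatal(err)
	}
	defer c2.Close()
}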
diff --git a/_third_party/github.com/garyburd/redigo/redis/scan.go b/_third_party/github.com/garyburd/redigo/redis/scan.go index 6bb0e4945a..f9f4a9af49 100644 --- a/_third_party/github.com/garyburd/redigo/redis/scan.go +++ b/_third_party/github.com/garyburd/redigo/redis/scan.go @@ -183,6 +183,13 @@ func convertAssign(d interface{}, s interface{}) (err error) { err = convertAssignInt(d.Elem(), s) } } + case string: + switch d := d.(type) { + case *string: + *d = string(s) + default: + err = cannotConvert(reflect.ValueOf(d), s) + } case []interface{}: switch d := d.(type) { case *[]interface{}: diff --git a/_third_party/github.com/garyburd/redigo/redis/scan_test.go b/_third_party/github.com/garyburd/redigo/redis/scan_test.go index f324276b12..a7af80e116 100644 --- a/_third_party/github.com/garyburd/redigo/redis/scan_test.go +++ b/_third_party/github.com/garyburd/redigo/redis/scan_test.go @@ -47,6 +47,7 @@ var scanConversionTests = []struct { {[]byte("1"), true}, {int64(1), true}, {[]byte("t"), true}, + {"hello", "hello"}, {[]byte("hello"), "hello"}, {[]byte("world"), []byte("world")}, {[]interface{}{[]byte("foo")}, []interface{}{[]byte("foo")}}, diff --git a/_third_party/github.com/go-ole/go-ole/ChangeLog.md b/_third_party/github.com/go-ole/go-ole/ChangeLog.md index 64d1945e8f..a67438f6a2 100644 --- a/_third_party/github.com/go-ole/go-ole/ChangeLog.md +++ b/_third_party/github.com/go-ole/go-ole/ChangeLog.md @@ -1,8 +1,19 @@ -# Version 1.1.X +# Version 1.x.x - * Add CI configuration for Travis-CI and AppVeyor. - * Add test InterfaceID and ClassID for the COM Test Server project. - * **Add more test cases and reference new test COM server project.** (Placeholder for future additions) +* **Add more test cases and reference new test COM server project.** (Placeholder for future additions) + +# Version 1.2.0-alphaX + +**Minimum supported version is now Go 1.4. Go 1.1 support is deprecated, but should still build.** + + * Added CI configuration for Travis-CI and AppVeyor. + * Added test InterfaceID and ClassID for the COM Test Server project. + * Added more inline documentation (#83). + * Added IEnumVARIANT implementation (#88). + * Added support for retrieving `time.Time` from VARIANT (#92). + * Added test case for IUnknown (#64). + * Added test case for IDispatch (#64). + * Added test cases for scalar variants (#64, #76). # Version 1.1.1 diff --git a/_third_party/github.com/go-ole/go-ole/README.md b/_third_party/github.com/go-ole/go-ole/README.md index e905aa7845..0ea9db33c7 100644 --- a/_third_party/github.com/go-ole/go-ole/README.md +++ b/_third_party/github.com/go-ole/go-ole/README.md @@ -1,7 +1,8 @@ #Go OLE +[![Build status](https://ci.appveyor.com/api/projects/status/qr0u2sf7q43us9fj?svg=true)](https://ci.appveyor.com/project/jacobsantos/go-ole-jgs28) +[![Build Status](https://travis-ci.org/go-ole/go-ole.svg?branch=master)](https://travis-ci.org/go-ole/go-ole) [![GoDoc](https://godoc.org/github.com/go-ole/go-ole?status.svg)](https://godoc.org/github.com/go-ole/go-ole) -[![Build Status](https://travis-ci.org/go-ole/go-ole.svg)](https://travis-ci.org/go-ole/go-ole) Go bindings for Windows COM using shared libraries instead of cgo. diff --git a/_third_party/github.com/go-ole/go-ole/appveyor.yml b/_third_party/github.com/go-ole/go-ole/appveyor.yml index 74071bc529..cec0b24727 100644 --- a/_third_party/github.com/go-ole/go-ole/appveyor.yml +++ b/_third_party/github.com/go-ole/go-ole/appveyor.yml @@ -4,7 +4,7 @@ # - All section names are case-sensitive. 
# - Section names should be unique on each level. -version: "{branch}.{build}" +version: "1.2.0.{build}-alpha-{branch}" os: Windows Server 2012 R2 @@ -23,20 +23,48 @@ environment: matrix: - GOARCH: amd64 GOVERSION: 1.4 + GOROOT: c:\go + DOWNLOADPLATFORM: "x64" + - GOARCH: 386 + GOVERSION: 1.4 + GOROOT: c:\go + DOWNLOADPLATFORM: "x86" + +matrix: + fast_finish: true + allow_failures: + - GOARCH: 386 + GOVERSION: 1.4 + GOROOT: c:\go + DOWNLOADPLATFORM: "x86" install: - # - echo %PATH% - - echo %GOPATH% + - choco install mingw + - SET PATH=c:\tools\mingw64\bin;%PATH% + # - Download COM Server + - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.0/test-com-server-${env:DOWNLOADPLATFORM}.zip" + - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL + - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat # - set - go version - go env - - go get -v -t ./... + - c:\gopath\src\github.com\go-ole\go-ole\build\compile-go.bat + - go tool dist install -v cmd/8a + - go tool dist install -v cmd/8c + - go tool dist install -v cmd/8g + - go tool dist install -v cmd/8l + - go tool dist install -v cmd/6a + - go tool dist install -v cmd/6c + - go tool dist install -v cmd/6g + - go tool dist install -v cmd/6l - go get -u golang.org/x/tools/cmd/cover -# - go get -u golang.org/x/tools/cmd/vet + - go get -u golang.org/x/tools/cmd/godoc + - go get -u golang.org/x/tools/cmd/stringer build_script: + - cd c:\gopath\src\github.com\go-ole\go-ole + - go get -v -t ./... - go build - - go test -race ./... - go test -v -cover ./... # disable automatic tests diff --git a/_third_party/github.com/go-ole/go-ole/com.go b/_third_party/github.com/go-ole/go-ole/com.go index e596b0c66b..06696087e4 100644 --- a/_third_party/github.com/go-ole/go-ole/com.go +++ b/_third_party/github.com/go-ole/go-ole/com.go @@ -3,38 +3,50 @@ package ole import ( + "errors" "syscall" + "time" "unicode/utf16" "unsafe" ) var ( - procCoInitialize, _ = modole32.FindProc("CoInitialize") - procCoInitializeEx, _ = modole32.FindProc("CoInitializeEx") - procCoUninitialize, _ = modole32.FindProc("CoUninitialize") - procCoCreateInstance, _ = modole32.FindProc("CoCreateInstance") - procCoTaskMemFree, _ = modole32.FindProc("CoTaskMemFree") - procCLSIDFromProgID, _ = modole32.FindProc("CLSIDFromProgID") - procCLSIDFromString, _ = modole32.FindProc("CLSIDFromString") - procStringFromCLSID, _ = modole32.FindProc("StringFromCLSID") - procStringFromIID, _ = modole32.FindProc("StringFromIID") - procIIDFromString, _ = modole32.FindProc("IIDFromString") - procGetUserDefaultLCID, _ = modkernel32.FindProc("GetUserDefaultLCID") - procCopyMemory, _ = modkernel32.FindProc("RtlMoveMemory") - procVariantInit, _ = modoleaut32.FindProc("VariantInit") - procVariantClear, _ = modoleaut32.FindProc("VariantClear") - procSysAllocString, _ = modoleaut32.FindProc("SysAllocString") - procSysAllocStringLen, _ = modoleaut32.FindProc("SysAllocStringLen") - procSysFreeString, _ = modoleaut32.FindProc("SysFreeString") - procSysStringLen, _ = modoleaut32.FindProc("SysStringLen") - procCreateDispTypeInfo, _ = modoleaut32.FindProc("CreateDispTypeInfo") - procCreateStdDispatch, _ = modoleaut32.FindProc("CreateStdDispatch") - procGetActiveObject, _ = modoleaut32.FindProc("GetActiveObject") + procCoInitialize, _ = modole32.FindProc("CoInitialize") + procCoInitializeEx, _ = modole32.FindProc("CoInitializeEx") + procCoUninitialize, _ = modole32.FindProc("CoUninitialize") + procCoCreateInstance, _ = 
modole32.FindProc("CoCreateInstance") + procCoTaskMemFree, _ = modole32.FindProc("CoTaskMemFree") + procCLSIDFromProgID, _ = modole32.FindProc("CLSIDFromProgID") + procCLSIDFromString, _ = modole32.FindProc("CLSIDFromString") + procStringFromCLSID, _ = modole32.FindProc("StringFromCLSID") + procStringFromIID, _ = modole32.FindProc("StringFromIID") + procIIDFromString, _ = modole32.FindProc("IIDFromString") + procGetUserDefaultLCID, _ = modkernel32.FindProc("GetUserDefaultLCID") + procCopyMemory, _ = modkernel32.FindProc("RtlMoveMemory") + procVariantInit, _ = modoleaut32.FindProc("VariantInit") + procVariantClear, _ = modoleaut32.FindProc("VariantClear") + procVariantTimeToSystemTime, _ = modoleaut32.FindProc("VariantTimeToSystemTime") + procSysAllocString, _ = modoleaut32.FindProc("SysAllocString") + procSysAllocStringLen, _ = modoleaut32.FindProc("SysAllocStringLen") + procSysFreeString, _ = modoleaut32.FindProc("SysFreeString") + procSysStringLen, _ = modoleaut32.FindProc("SysStringLen") + procCreateDispTypeInfo, _ = modoleaut32.FindProc("CreateDispTypeInfo") + procCreateStdDispatch, _ = modoleaut32.FindProc("CreateStdDispatch") + procGetActiveObject, _ = modoleaut32.FindProc("GetActiveObject") procGetMessageW, _ = moduser32.FindProc("GetMessageW") procDispatchMessageW, _ = moduser32.FindProc("DispatchMessageW") ) +// coInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. If you are experiencing threading issues, then use +// CoInitializeEx(). func coInitialize() (err error) { // http://msdn.microsoft.com/en-us/library/windows/desktop/ms678543(v=vs.85).aspx // Suggests that no value should be passed to CoInitialized. @@ -46,6 +58,7 @@ func coInitialize() (err error) { return } +// coInitializeEx initializes COM library with concurrency model. func coInitializeEx(coinit uint32) (err error) { // http://msdn.microsoft.com/en-us/library/windows/desktop/ms695279(v=vs.85).aspx // Suggests that the first parameter is not only optional but should always be NULL. @@ -56,6 +69,15 @@ func coInitializeEx(coinit uint32) (err error) { return } +// CoInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. If you are experiencing threading issues, then use +// CoInitializeEx(). func CoInitialize(p uintptr) (err error) { // p is ignored and won't be used. // Avoid any variable not used errors. @@ -63,20 +85,36 @@ func CoInitialize(p uintptr) (err error) { return coInitialize() } +// CoInitializeEx initializes COM library with concurrency model. func CoInitializeEx(p uintptr, coinit uint32) (err error) { // Avoid any variable not used errors. p = uintptr(0) return coInitializeEx(coinit) } +// CoUninitialize uninitializes COM Library. func CoUninitialize() { procCoUninitialize.Call() } +// CoTaskMemFree frees memory pointer. func CoTaskMemFree(memptr uintptr) { procCoTaskMemFree.Call(memptr) } +// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier. 
+//
+// The Programmatic Identifier must be registered, because it will be looked up
+// in the Windows Registry. The registry entry has the following keys: CLSID,
+// Insertable, Protocol and Shell
+// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx).
+//
+// programID identifies the class id with less precision and is not guaranteed
+// to be unique. These are usually found in the registry under
+// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of
+// "Program.Component.Version" with version being optional.
+//
+// CLSIDFromProgID in Windows API.
 func CLSIDFromProgID(progId string) (clsid *GUID, err error) {
 	var guid GUID
 	lpszProgID := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
@@ -88,6 +126,12 @@ func CLSIDFromProgID(progId string) (clsid *GUID, err error) {
 	return
 }
 
+// CLSIDFromString retrieves Class ID from string representation.
+//
+// This is technically the string version of the GUID and will convert the
+// string to object.
+//
+// CLSIDFromString in Windows API.
 func CLSIDFromString(str string) (clsid *GUID, err error) {
 	var guid GUID
 	lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(str)))
@@ -99,6 +143,7 @@ func CLSIDFromString(str string) (clsid *GUID, err error) {
 	return
 }
 
+// StringFromCLSID returns GUID formatted string from GUID object.
 func StringFromCLSID(clsid *GUID) (str string, err error) {
 	var p *uint16
 	hr, _, _ := procStringFromCLSID.Call(uintptr(unsafe.Pointer(clsid)), uintptr(unsafe.Pointer(&p)))
@@ -109,6 +154,7 @@ func StringFromCLSID(clsid *GUID) (str string, err error) {
 	return
 }
 
+// IIDFromString returns GUID from program ID.
 func IIDFromString(progId string) (clsid *GUID, err error) {
 	var guid GUID
 	lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
@@ -120,6 +166,7 @@ func IIDFromString(progId string) (clsid *GUID, err error) {
 	return
 }
 
+// StringFromIID returns GUID formatted string from GUID object.
 func StringFromIID(iid *GUID) (str string, err error) {
 	var p *uint16
 	hr, _, _ := procStringFromIID.Call(uintptr(unsafe.Pointer(iid)), uintptr(unsafe.Pointer(&p)))
@@ -130,6 +177,7 @@ func StringFromIID(iid *GUID) (str string, err error) {
 	return
 }
 
+// CreateInstance of single uninitialized object with GUID.
 func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
 	if iid == nil {
 		iid = IID_IUnknown
@@ -146,6 +194,7 @@ func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
 	return
 }
 
+// GetActiveObject retrieves pointer to active object.
 func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
 	if iid == nil {
 		iid = IID_IUnknown
@@ -160,6 +209,7 @@ func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
 	return
 }
 
+// VariantInit initializes variant.
 func VariantInit(v *VARIANT) (err error) {
 	hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v)))
 	if hr != 0 {
@@ -168,6 +218,7 @@ func VariantInit(v *VARIANT) (err error) {
 	return
 }
 
+// VariantClear clears value in Variant settings to VT_EMPTY.
 func VariantClear(v *VARIANT) (err error) {
 	hr, _, _ := procVariantClear.Call(uintptr(unsafe.Pointer(v)))
 	if hr != 0 {
@@ -176,12 +227,14 @@ func VariantClear(v *VARIANT) (err error) {
 	return
 }
 
+// SysAllocString allocates memory for string and copies string into memory.
 func SysAllocString(v string) (ss *int16) {
 	pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v))))
 	ss = (*int16)(unsafe.Pointer(pss))
 	return
 }
 
+// SysAllocStringLen copies up to length of given string returning pointer.
 func SysAllocStringLen(v string) (ss *int16) {
 	utf16 := utf16.Encode([]rune(v + "\x00"))
 	ptr := &utf16[0]
 
@@ -191,6 +244,7 @@
 	return
 }
 
+// SysFreeString frees string system memory. This must be called with SysAllocString.
 func SysFreeString(v *int16) (err error) {
 	hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v)))
 	if hr != 0 {
@@ -199,11 +253,17 @@
 	return
 }
 
+// SysStringLen is the length of the system allocated string.
 func SysStringLen(v *int16) uint32 {
 	l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v)))
 	return uint32(l)
 }
 
+// CreateStdDispatch provides default IDispatch implementation for IUnknown.
+//
+// This handles default IDispatch implementation for objects. It has a few
+// limitations, supporting only one language. It will also only return
+// default exception codes.
 func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispatch, err error) {
 	hr, _, _ := procCreateStdDispatch.Call(
 		uintptr(unsafe.Pointer(unk)),
@@ -216,6 +276,9 @@ func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispa
 	return
 }
 
+// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch.
+//
+// This will not handle the full implementation of the interface.
 func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) {
 	hr, _, _ := procCreateDispTypeInfo.Call(
 		uintptr(unsafe.Pointer(idata)),
@@ -227,24 +290,39 @@ func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) {
 	return
 }
 
+// copyMemory moves location of a block of memory.
 func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {
 	procCopyMemory.Call(uintptr(dest), uintptr(src), uintptr(length))
 }
 
+// GetUserDefaultLCID retrieves current user default locale.
 func GetUserDefaultLCID() (lcid uint32) {
 	ret, _, _ := procGetUserDefaultLCID.Call()
 	lcid = uint32(ret)
 	return
 }
 
+// GetMessage in message queue from runtime.
+//
+// This function appears to block. PeekMessage does not block.
 func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, err error) {
 	r0, _, err := procGetMessageW.Call(uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax))
 	ret = int32(r0)
 	return
 }
 
+// DispatchMessage to window procedure.
 func DispatchMessage(msg *Msg) (ret int32) {
 	r0, _, _ := procDispatchMessageW.Call(uintptr(unsafe.Pointer(msg)))
 	ret = int32(r0)
 	return
 }
+
+// GetVariantDate converts a COM VT_DATE value to time.Time.
+func GetVariantDate(value float64) (time.Time, error) {
+	var st syscall.Systemtime
+	r, _, _ := procVariantTimeToSystemTime.Call(uintptr(unsafe.Pointer(&value)), uintptr(unsafe.Pointer(&st)))
+	if r != 0 {
+		// Systemtime carries milliseconds; time.Date takes nanoseconds, and a
+		// nil *time.Location would panic, so convert and use time.UTC.
+		return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds)*1000000, time.UTC), nil
+	}
+	return time.Now(), errors.New("could not convert to time, passing current time")
+}
diff --git a/_third_party/github.com/go-ole/go-ole/com_func.go b/_third_party/github.com/go-ole/go-ole/com_func.go
index f3aebf0324..425aad3233 100644
--- a/_third_party/github.com/go-ole/go-ole/com_func.go
+++ b/_third_party/github.com/go-ole/go-ole/com_func.go
@@ -2,7 +2,10 @@
 
 package ole
 
-import "unsafe"
+import (
+	"time"
+	"unsafe"
+)
 
 // coInitialize initializes COM library on current thread.
 //
@@ -88,62 +91,84 @@ func StringFromIID(iid *GUID) (string, error) {
 	return "", NewError(E_NOTIMPL)
 }
 
+// CreateInstance of single uninitialized object with GUID.
 func CreateInstance(clsid *GUID, iid *GUID) (*IUnknown, error) {
 	return nil, NewError(E_NOTIMPL)
 }
 
+// GetActiveObject retrieves pointer to active object.
 func GetActiveObject(clsid *GUID, iid *GUID) (*IUnknown, error) {
 	return nil, NewError(E_NOTIMPL)
 }
 
+// VariantInit initializes variant.
 func VariantInit(v *VARIANT) error {
 	return NewError(E_NOTIMPL)
 }
 
+// VariantClear clears value in Variant settings to VT_EMPTY.
 func VariantClear(v *VARIANT) error {
 	return NewError(E_NOTIMPL)
 }
 
+// SysAllocString allocates memory for string and copies string into memory.
 func SysAllocString(v string) *int16 {
 	u := int16(0)
 	return &u
 }
 
+// SysAllocStringLen copies up to length of given string returning pointer.
 func SysAllocStringLen(v string) *int16 {
 	u := int16(0)
 	return &u
 }
 
+// SysFreeString frees string system memory. This must be called with SysAllocString.
 func SysFreeString(v *int16) error {
 	return NewError(E_NOTIMPL)
 }
 
+// SysStringLen is the length of the system allocated string.
 func SysStringLen(v *int16) uint32 {
 	return uint32(0)
 }
 
+// CreateStdDispatch provides default IDispatch implementation for IUnknown.
+//
+// This handles default IDispatch implementation for objects. It has a few
+// limitations, supporting only one language. It will also only return
+// default exception codes.
 func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (*IDispatch, error) {
 	return nil, NewError(E_NOTIMPL)
 }
 
+// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch.
+//
+// This will not handle the full implementation of the interface.
 func CreateDispTypeInfo(idata *INTERFACEDATA) (*IUnknown, error) {
 	return nil, NewError(E_NOTIMPL)
 }
 
+// copyMemory moves location of a block of memory.
 func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {}
 
-// GetUserDefaultLCID retrieves current user Locale ID for COM servers that are
-// localized.
+// GetUserDefaultLCID retrieves current user default locale.
 func GetUserDefaultLCID() uint32 {
 	return uint32(0)
 }
 
-// GetMessage from Runtime.
+// GetMessage in message queue from runtime.
+//
+// This function appears to block. PeekMessage does not block.
 func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (int32, error) {
 	return int32(0), NewError(E_NOTIMPL)
 }
 
-// DispatchMessage to Runtime.
+// DispatchMessage to window procedure.
 func DispatchMessage(msg *Msg) int32 {
 	return int32(0)
 }
+
+func GetVariantDate(value float64) (time.Time, error) {
+	return time.Now(), NewError(E_NOTIMPL)
+}
diff --git a/_third_party/github.com/go-ole/go-ole/com_func_test.go b/_third_party/github.com/go-ole/go-ole/com_func_test.go
index 6ad6cbafaf..151898e59f 100644
--- a/_third_party/github.com/go-ole/go-ole/com_func_test.go
+++ b/_third_party/github.com/go-ole/go-ole/com_func_test.go
@@ -4,6 +4,7 @@
 
 import "testing"
 
+// TestComSetupAndShutDown tests that API fails on Linux.
 func TestComSetupAndShutDown(t *testing.T) {
 	defer func() {
 		if r := recover(); r != nil {
@@ -21,6 +22,7 @@ func TestComSetupAndShutDown(t *testing.T) {
 	CoUninitialize()
 }
 
+// TestComPublicSetupAndShutDown tests that API fails on Linux.
 func TestComPublicSetupAndShutDown(t *testing.T) {
 	defer func() {
 		if r := recover(); r != nil {
@@ -38,6 +40,7 @@ func TestComPublicSetupAndShutDown(t *testing.T) {
 	CoUninitialize()
 }
 
+// TestComPublicSetupAndShutDown_WithValue tests that API fails on Linux.
func TestComPublicSetupAndShutDown_WithValue(t *testing.T) { defer func() { if r := recover(); r != nil { @@ -55,6 +58,7 @@ func TestComPublicSetupAndShutDown_WithValue(t *testing.T) { CoUninitialize() } +// TestComExSetupAndShutDown tests that API fails on Linux. func TestComExSetupAndShutDown(t *testing.T) { defer func() { if r := recover(); r != nil { @@ -72,6 +76,7 @@ func TestComExSetupAndShutDown(t *testing.T) { CoUninitialize() } +// TestComPublicExSetupAndShutDown tests that API fails on Linux. func TestComPublicExSetupAndShutDown(t *testing.T) { defer func() { if r := recover(); r != nil { @@ -89,6 +94,7 @@ func TestComPublicExSetupAndShutDown(t *testing.T) { CoUninitialize() } +// TestComPublicExSetupAndShutDown_WithValue tests that API fails on Linux. func TestComPublicExSetupAndShutDown_WithValue(t *testing.T) { defer func() { if r := recover(); r != nil { @@ -106,6 +112,7 @@ func TestComPublicExSetupAndShutDown_WithValue(t *testing.T) { CoUninitialize() } +// TestClsidFromProgID_WindowsMediaNSSManager tests that API fails on Linux. func TestClsidFromProgID_WindowsMediaNSSManager(t *testing.T) { defer func() { if r := recover(); r != nil { @@ -123,6 +130,7 @@ func TestClsidFromProgID_WindowsMediaNSSManager(t *testing.T) { } } +// TestClsidFromString_WindowsMediaNSSManager tests that API fails on Linux. func TestClsidFromString_WindowsMediaNSSManager(t *testing.T) { defer func() { if r := recover(); r != nil { @@ -141,6 +149,7 @@ func TestClsidFromString_WindowsMediaNSSManager(t *testing.T) { } } +// TestCreateInstance_WindowsMediaNSSManager tests that API fails on Linux. func TestCreateInstance_WindowsMediaNSSManager(t *testing.T) { defer func() { if r := recover(); r != nil { @@ -159,6 +168,7 @@ func TestCreateInstance_WindowsMediaNSSManager(t *testing.T) { } } +// TestError tests that API fails on Linux. func TestError(t *testing.T) { defer func() { if r := recover(); r != nil { diff --git a/_third_party/github.com/go-ole/go-ole/error_func.go b/_third_party/github.com/go-ole/go-ole/error_func.go index 1b8de2de61..8a2ffaa272 100644 --- a/_third_party/github.com/go-ole/go-ole/error_func.go +++ b/_third_party/github.com/go-ole/go-ole/error_func.go @@ -2,6 +2,7 @@ package ole +// errstr converts error code to string. func errstr(errno int) string { return "" } diff --git a/_third_party/github.com/go-ole/go-ole/error_windows.go b/_third_party/github.com/go-ole/go-ole/error_windows.go index fbb8f70ccf..d0e8e68595 100644 --- a/_third_party/github.com/go-ole/go-ole/error_windows.go +++ b/_third_party/github.com/go-ole/go-ole/error_windows.go @@ -8,6 +8,7 @@ import ( "unicode/utf16" ) +// errstr converts error code to string. func errstr(errno int) string { // ask windows for the remaining errors var flags uint32 = syscall.FORMAT_MESSAGE_FROM_SYSTEM | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS diff --git a/_third_party/github.com/go-ole/go-ole/guid.go b/_third_party/github.com/go-ole/go-ole/guid.go index 3ced2dd534..7b3e33d8a9 100644 --- a/_third_party/github.com/go-ole/go-ole/guid.go +++ b/_third_party/github.com/go-ole/go-ole/guid.go @@ -65,16 +65,21 @@ var ( // {D530E7A6-4EE8-40D1-8931-3D63B8605001} IID_ICOMTestBoolean = &GUID{0xd530e7a6, 0x4ee8, 0x40d1, [8]byte{0x89, 0x31, 0x3d, 0x63, 0xb8, 0x60, 0x50, 0x10}} - // IID_ICOMTestObject is for ICOMTestObject interfaces. + // IID_ICOMEchoTestObject is for ICOMEchoTestObject interfaces. 
// // {6485B1EF-D780-4834-A4FE-1EBB51746CA3} - IID_ICOMTestObject = &GUID{0x6485b1ef, 0xd780, 0x4834, [8]byte{0xa4, 0xfe, 0x1e, 0xbb, 0x51, 0x74, 0x6c, 0xa3}} + IID_ICOMEchoTestObject = &GUID{0x6485b1ef, 0xd780, 0x4834, [8]byte{0xa4, 0xfe, 0x1e, 0xbb, 0x51, 0x74, 0x6c, 0xa3}} // IID_ICOMTestTypes is for ICOMTestTypes interfaces. // // {CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0} IID_ICOMTestTypes = &GUID{0xcca8d7ae, 0x91c0, 0x4277, [8]byte{0xa8, 0xb3, 0xff, 0x4e, 0xdf, 0x28, 0xd3, 0xc0}} + // CLSID_COMEchoTestObject is for COMEchoTestObject class. + // + // {3C24506A-AE9E-4D50-9157-EF317281F1B0} + CLSID_COMEchoTestObject = &GUID{0x3c24506a, 0xae9e, 0x4d50, [8]byte{0x91, 0x57, 0xef, 0x31, 0x72, 0x81, 0xf1, 0xb0}} + // CLSID_COMTestScalarClass is for COMTestScalarClass class. // // {865B85C5-0334-4AC6-9EF6-AACEC8FC5E86} diff --git a/_third_party/github.com/go-ole/go-ole/idispatch_windows.go b/_third_party/github.com/go-ole/go-ole/idispatch_windows.go index 15893f8a12..d698b1e31f 100644 --- a/_third_party/github.com/go-ole/go-ole/idispatch_windows.go +++ b/_third_party/github.com/go-ole/go-ole/idispatch_windows.go @@ -138,6 +138,10 @@ func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{} safeByteArray := safeArrayFromByteSlice(v.([]byte)) vargs[n] = NewVariant(VT_ARRAY|VT_UI1, int64(uintptr(unsafe.Pointer(safeByteArray)))) defer VariantClear(&vargs[n]) + case []string: + safeByteArray := safeArrayFromStringSlice(v.([]string)) + vargs[n] = NewVariant(VT_ARRAY|VT_BSTR, int64(uintptr(unsafe.Pointer(safeByteArray)))) + defer VariantClear(&vargs[n]) default: panic("unknown type") } diff --git a/_third_party/github.com/go-ole/go-ole/iunknown.go b/_third_party/github.com/go-ole/go-ole/iunknown.go index 24f344cefe..26d996345d 100644 --- a/_third_party/github.com/go-ole/go-ole/iunknown.go +++ b/_third_party/github.com/go-ole/go-ole/iunknown.go @@ -22,6 +22,20 @@ func (v *IUnknown) VTable() *IUnknownVtbl { return (*IUnknownVtbl)(unsafe.Pointer(v.RawVTable)) } +func (v *IUnknown) PutQueryInterface(interfaceID *GUID, obj interface{}) error { + return reflectQueryInterface(v, v.VTable().QueryInterface, interfaceID, &obj) +} + +func (v *IUnknown) IDispatch(interfaceID *GUID) (dispatch *IDispatch, err error) { + err = v.PutQueryInterface(interfaceID, &dispatch) + return +} + +func (v *IUnknown) IEnumVARIANT(interfaceID *GUID) (enum *IEnumVARIANT, err error) { + err = v.PutQueryInterface(interfaceID, &enum) + return +} + func (v *IUnknown) QueryInterface(iid *GUID) (*IDispatch, error) { return queryInterface(v, iid) } diff --git a/_third_party/github.com/go-ole/go-ole/iunknown_func.go b/_third_party/github.com/go-ole/go-ole/iunknown_func.go index a1478d61af..d0a62cfd73 100644 --- a/_third_party/github.com/go-ole/go-ole/iunknown_func.go +++ b/_third_party/github.com/go-ole/go-ole/iunknown_func.go @@ -2,6 +2,10 @@ package ole +func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { + return NewError(E_NOTIMPL) +} + func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { return nil, NewError(E_NOTIMPL) } diff --git a/_third_party/github.com/go-ole/go-ole/iunknown_windows.go b/_third_party/github.com/go-ole/go-ole/iunknown_windows.go index f69fae263e..eea5042c7c 100644 --- a/_third_party/github.com/go-ole/go-ole/iunknown_windows.go +++ b/_third_party/github.com/go-ole/go-ole/iunknown_windows.go @@ -3,10 +3,24 @@ package ole import ( + "reflect" "syscall" "unsafe" ) +func reflectQueryInterface(self interface{}, method 
uintptr, interfaceID *GUID, obj interface{}) (err error) { + hr, _, _ := syscall.Syscall( + method, + 3, + reflect.ValueOf(self).UnsafeAddr(), + uintptr(unsafe.Pointer(interfaceID)), + reflect.ValueOf(obj).UnsafeAddr()) + if hr != 0 { + err = NewError(hr) + } + return +} + func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { hr, _, _ := syscall.Syscall( unk.VTable().QueryInterface, diff --git a/_third_party/github.com/go-ole/go-ole/ole.go b/_third_party/github.com/go-ole/go-ole/ole.go index 9e726689da..b92b4ea189 100644 --- a/_third_party/github.com/go-ole/go-ole/ole.go +++ b/_third_party/github.com/go-ole/go-ole/ole.go @@ -5,6 +5,7 @@ import ( "strings" ) +// DISPPARAMS are the arguments that passed to methods or property. type DISPPARAMS struct { rgvarg uintptr rgdispidNamedArgs uintptr diff --git a/_third_party/github.com/go-ole/go-ole/safearray_func.go b/_third_party/github.com/go-ole/go-ole/safearray_func.go index 61fe3aef32..c261a0078c 100644 --- a/_third_party/github.com/go-ole/go-ole/safearray_func.go +++ b/_third_party/github.com/go-ole/go-ole/safearray_func.go @@ -3,112 +3,205 @@ package ole // safeArrayAccessData returns raw array pointer. +// +// AKA: SafeArrayAccessData in Windows API. func safeArrayAccessData(safearray *SafeArray) (uintptr, error) { return uintptr(0), NewError(E_NOTIMPL) } +// safeArrayUnaccessData releases raw array. +// +// AKA: SafeArrayUnaccessData in Windows API. func safeArrayUnaccessData(safearray *SafeArray) error { return NewError(E_NOTIMPL) } +// safeArrayAllocData allocates SafeArray. +// +// AKA: SafeArrayAllocData in Windows API. func safeArrayAllocData(safearray *SafeArray) error { return NewError(E_NOTIMPL) } +// safeArrayAllocDescriptor allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptor in Windows API. func safeArrayAllocDescriptor(dimensions uint32) (*SafeArray, error) { return nil, NewError(E_NOTIMPL) } +// safeArrayAllocDescriptorEx allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptorEx in Windows API. func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (*SafeArray, error) { return nil, NewError(E_NOTIMPL) } +// safeArrayCopy returns copy of SafeArray. +// +// AKA: SafeArrayCopy in Windows API. func safeArrayCopy(original *SafeArray) (*SafeArray, error) { return nil, NewError(E_NOTIMPL) } +// safeArrayCopyData duplicates SafeArray into another SafeArray object. +// +// AKA: SafeArrayCopyData in Windows API. func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) error { return NewError(E_NOTIMPL) } +// safeArrayCreate creates SafeArray. +// +// AKA: SafeArrayCreate in Windows API. func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (*SafeArray, error) { return nil, NewError(E_NOTIMPL) } +// safeArrayCreateEx creates SafeArray. +// +// AKA: SafeArrayCreateEx in Windows API. func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (*SafeArray, error) { return nil, NewError(E_NOTIMPL) } +// safeArrayCreateVector creates SafeArray. +// +// AKA: SafeArrayCreateVector in Windows API. func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (*SafeArray, error) { return nil, NewError(E_NOTIMPL) } +// safeArrayCreateVectorEx creates SafeArray. +// +// AKA: SafeArrayCreateVectorEx in Windows API. func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (*SafeArray, error) { return nil, NewError(E_NOTIMPL) } +// safeArrayDestroy destroys SafeArray object. 
+// +// AKA: SafeArrayDestroy in Windows API. func safeArrayDestroy(safearray *SafeArray) error { return NewError(E_NOTIMPL) } +// safeArrayDestroyData destroys SafeArray object. +// +// AKA: SafeArrayDestroyData in Windows API. func safeArrayDestroyData(safearray *SafeArray) error { return NewError(E_NOTIMPL) } +// safeArrayDestroyDescriptor destroys SafeArray object. +// +// AKA: SafeArrayDestroyDescriptor in Windows API. func safeArrayDestroyDescriptor(safearray *SafeArray) error { return NewError(E_NOTIMPL) } +// safeArrayGetDim is the amount of dimensions in the SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetDim in Windows API. func safeArrayGetDim(safearray *SafeArray) (*uint32, error) { u := uint32(0) return &u, NewError(E_NOTIMPL) } +// safeArrayGetElementSize is the element size in bytes. +// +// AKA: SafeArrayGetElemsize in Windows API. func safeArrayGetElementSize(safearray *SafeArray) (*uint32, error) { u := uint32(0) return &u, NewError(E_NOTIMPL) } +// safeArrayGetElement retrieves element at given index. func safeArrayGetElement(safearray *SafeArray, index int64) (uintptr, error) { return uintptr(0), NewError(E_NOTIMPL) } +// safeArrayGetElement retrieves element at given index and converts to string. func safeArrayGetElementString(safearray *SafeArray, index int64) (string, error) { return "", NewError(E_NOTIMPL) } +// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. +// +// AKA: SafeArrayGetIID in Windows API. func safeArrayGetIID(safearray *SafeArray) (*GUID, error) { return nil, NewError(E_NOTIMPL) } +// safeArrayGetLBound returns lower bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetLBound in Windows API. func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (int64, error) { return int64(0), NewError(E_NOTIMPL) } +// safeArrayGetUBound returns upper bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetUBound in Windows API. func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (int64, error) { return int64(0), NewError(E_NOTIMPL) } +// safeArrayGetVartype returns data type of SafeArray. +// +// AKA: SafeArrayGetVartype in Windows API. func safeArrayGetVartype(safearray *SafeArray) (uint16, error) { return uint16(0), NewError(E_NOTIMPL) } +// safeArrayLock locks SafeArray for reading to modify SafeArray. +// +// This must be called during some calls to ensure that another process does not +// read or write to the SafeArray during editing. +// +// AKA: SafeArrayLock in Windows API. func safeArrayLock(safearray *SafeArray) error { return NewError(E_NOTIMPL) } +// safeArrayUnlock unlocks SafeArray for reading. +// +// AKA: SafeArrayUnlock in Windows API. func safeArrayUnlock(safearray *SafeArray) error { return NewError(E_NOTIMPL) } +// safeArrayPutElement stores the data element at the specified location in the +// array. +// +// AKA: SafeArrayPutElement in Windows API. func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) error { return NewError(E_NOTIMPL) } +// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. +// +// AKA: SafeArrayGetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. 
func safeArrayGetRecordInfo(safearray *SafeArray) (interface{}, error) { return nil, NewError(E_NOTIMPL) } +// safeArraySetRecordInfo mutates IRecordInfo info for custom types. +// +// AKA: SafeArraySetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) error { return NewError(E_NOTIMPL) } diff --git a/_third_party/github.com/go-ole/go-ole/safearray_test.go b/_third_party/github.com/go-ole/go-ole/safearray_test.go index 7b551418d6..31409cec06 100644 --- a/_third_party/github.com/go-ole/go-ole/safearray_test.go +++ b/_third_party/github.com/go-ole/go-ole/safearray_test.go @@ -1,7 +1,7 @@ package ole -// This tests more than one function. It tests all of the functions needed in order to retrieve an -// SafeArray populated with Strings. +// This tests more than one function. It tests all of the functions needed in +// order to retrieve an SafeArray populated with Strings. func Example_safeArrayGetElementString() { CoInitialize(0) defer CoUninitialize() diff --git a/_third_party/github.com/go-ole/go-ole/safearray_windows.go b/_third_party/github.com/go-ole/go-ole/safearray_windows.go index 17233e3153..456b8c9b94 100644 --- a/_third_party/github.com/go-ole/go-ole/safearray_windows.go +++ b/_third_party/github.com/go-ole/go-ole/safearray_windows.go @@ -34,11 +34,13 @@ var ( procSafeArrayPutElement, _ = modoleaut32.FindProc("SafeArrayPutElement") //procSafeArrayRedim, _ = modoleaut32.FindProc("SafeArrayRedim") // TODO //procSafeArraySetIID, _ = modoleaut32.FindProc("SafeArraySetIID") // TODO - //procSafeArrayGetRecordInfo, _ = modoleaut32.FindProc("SafeArrayGetRecordInfo") // TODO - //procSafeArraySetRecordInfo, _ = modoleaut32.FindProc("SafeArraySetRecordInfo") // TODO + procSafeArrayGetRecordInfo, _ = modoleaut32.FindProc("SafeArrayGetRecordInfo") + procSafeArraySetRecordInfo, _ = modoleaut32.FindProc("SafeArraySetRecordInfo") ) -// Returns Raw Array +// safeArrayAccessData returns raw array pointer. +// +// AKA: SafeArrayAccessData in Windows API. // Todo: Test func safeArrayAccessData(safearray *SafeArray) (element uintptr, err error) { err = convertHresultToError( @@ -48,22 +50,34 @@ func safeArrayAccessData(safearray *SafeArray) (element uintptr, err error) { return } +// safeArrayUnaccessData releases raw array. +// +// AKA: SafeArrayUnaccessData in Windows API. func safeArrayUnaccessData(safearray *SafeArray) (err error) { err = convertHresultToError(procSafeArrayUnaccessData.Call(uintptr(unsafe.Pointer(safearray)))) return } +// safeArrayAllocData allocates SafeArray. +// +// AKA: SafeArrayAllocData in Windows API. func safeArrayAllocData(safearray *SafeArray) (err error) { err = convertHresultToError(procSafeArrayAllocData.Call(uintptr(unsafe.Pointer(safearray)))) return } +// safeArrayAllocDescriptor allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptor in Windows API. func safeArrayAllocDescriptor(dimensions uint32) (safearray *SafeArray, err error) { err = convertHresultToError( procSafeArrayAllocDescriptor.Call(uintptr(dimensions), uintptr(unsafe.Pointer(&safearray)))) return } +// safeArrayAllocDescriptorEx allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptorEx in Windows API. 
func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (safearray *SafeArray, err error) { err = convertHresultToError( procSafeArrayAllocDescriptorEx.Call( @@ -73,6 +87,9 @@ func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (safearray *S return } +// safeArrayCopy returns copy of SafeArray. +// +// AKA: SafeArrayCopy in Windows API. func safeArrayCopy(original *SafeArray) (safearray *SafeArray, err error) { err = convertHresultToError( procSafeArrayCopy.Call( @@ -81,6 +98,9 @@ func safeArrayCopy(original *SafeArray) (safearray *SafeArray, err error) { return } +// safeArrayCopyData duplicates SafeArray into another SafeArray object. +// +// AKA: SafeArrayCopyData in Windows API. func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) (err error) { err = convertHresultToError( procSafeArrayCopyData.Call( @@ -89,6 +109,9 @@ func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) (err error) { return } +// safeArrayCreate creates SafeArray. +// +// AKA: SafeArrayCreate in Windows API. func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (safearray *SafeArray, err error) { sa, _, err := procSafeArrayCreate.Call( uintptr(variantType), @@ -98,6 +121,9 @@ func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) return } +// safeArrayCreateEx creates SafeArray. +// +// AKA: SafeArrayCreateEx in Windows API. func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (safearray *SafeArray, err error) { sa, _, err := procSafeArrayCreateEx.Call( uintptr(variantType), @@ -108,6 +134,9 @@ func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound return } +// safeArrayCreateVector creates SafeArray. +// +// AKA: SafeArrayCreateVector in Windows API. func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (safearray *SafeArray, err error) { sa, _, err := procSafeArrayCreateVector.Call( uintptr(variantType), @@ -117,6 +146,9 @@ func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (saf return } +// safeArrayCreateVectorEx creates SafeArray. +// +// AKA: SafeArrayCreateVectorEx in Windows API. func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (safearray *SafeArray, err error) { sa, _, err := procSafeArrayCreateVectorEx.Call( uintptr(variantType), @@ -127,33 +159,52 @@ func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, ex return } +// safeArrayDestroy destroys SafeArray object. +// +// AKA: SafeArrayDestroy in Windows API. func safeArrayDestroy(safearray *SafeArray) (err error) { err = convertHresultToError(procSafeArrayDestroy.Call(uintptr(unsafe.Pointer(safearray)))) return } +// safeArrayDestroyData destroys SafeArray object. +// +// AKA: SafeArrayDestroyData in Windows API. func safeArrayDestroyData(safearray *SafeArray) (err error) { err = convertHresultToError(procSafeArrayDestroyData.Call(uintptr(unsafe.Pointer(safearray)))) return } +// safeArrayDestroyDescriptor destroys SafeArray object. +// +// AKA: SafeArrayDestroyDescriptor in Windows API. func safeArrayDestroyDescriptor(safearray *SafeArray) (err error) { err = convertHresultToError(procSafeArrayDestroyDescriptor.Call(uintptr(unsafe.Pointer(safearray)))) return } +// safeArrayGetDim is the amount of dimensions in the SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetDim in Windows API. 
func safeArrayGetDim(safearray *SafeArray) (dimensions *uint32, err error) { l, _, err := procSafeArrayGetDim.Call(uintptr(unsafe.Pointer(safearray))) dimensions = (*uint32)(unsafe.Pointer(l)) return } +// safeArrayGetElementSize is the element size in bytes. +// +// AKA: SafeArrayGetElemsize in Windows API. func safeArrayGetElementSize(safearray *SafeArray) (length *uint32, err error) { l, _, err := procSafeArrayGetElemsize.Call(uintptr(unsafe.Pointer(safearray))) length = (*uint32)(unsafe.Pointer(l)) return } +// safeArrayGetElement retrieves element at given index. func safeArrayGetElement(safearray *SafeArray, index int64) (element uintptr, err error) { err = convertHresultToError( procSafeArrayGetElement.Call( @@ -163,6 +214,7 @@ func safeArrayGetElement(safearray *SafeArray, index int64) (element uintptr, er return } +// safeArrayGetElement retrieves element at given index and converts to string. func safeArrayGetElementString(safearray *SafeArray, index int64) (str string, err error) { var element *int16 err = convertHresultToError( @@ -175,6 +227,9 @@ func safeArrayGetElementString(safearray *SafeArray, index int64) (str string, e return } +// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. +// +// AKA: SafeArrayGetIID in Windows API. func safeArrayGetIID(safearray *SafeArray) (guid *GUID, err error) { err = convertHresultToError( procSafeArrayGetIID.Call( @@ -183,6 +238,12 @@ func safeArrayGetIID(safearray *SafeArray) (guid *GUID, err error) { return } +// safeArrayGetLBound returns lower bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetLBound in Windows API. func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int64, err error) { err = convertHresultToError( procSafeArrayGetLBound.Call( @@ -192,6 +253,12 @@ func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int6 return } +// safeArrayGetUBound returns upper bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetUBound in Windows API. func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (upperBound int64, err error) { err = convertHresultToError( procSafeArrayGetUBound.Call( @@ -201,6 +268,9 @@ func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (upperBound int6 return } +// safeArrayGetVartype returns data type of SafeArray. +// +// AKA: SafeArrayGetVartype in Windows API. func safeArrayGetVartype(safearray *SafeArray) (varType uint16, err error) { err = convertHresultToError( procSafeArrayGetVartype.Call( @@ -209,16 +279,29 @@ func safeArrayGetVartype(safearray *SafeArray) (varType uint16, err error) { return } +// safeArrayLock locks SafeArray for reading to modify SafeArray. +// +// This must be called during some calls to ensure that another process does not +// read or write to the SafeArray during editing. +// +// AKA: SafeArrayLock in Windows API. func safeArrayLock(safearray *SafeArray) (err error) { err = convertHresultToError(procSafeArrayLock.Call(uintptr(unsafe.Pointer(safearray)))) return } +// safeArrayUnlock unlocks SafeArray for reading. +// +// AKA: SafeArrayUnlock in Windows API. func safeArrayUnlock(safearray *SafeArray) (err error) { err = convertHresultToError(procSafeArrayUnlock.Call(uintptr(unsafe.Pointer(safearray)))) return } +// safeArrayPutElement stores the data element at the specified location in the +// array. 
+// +// AKA: SafeArrayPutElement in Windows API. func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) (err error) { err = convertHresultToError( procSafeArrayPutElement.Call( @@ -228,8 +311,11 @@ func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) (er return } -/* -// TODO: Must implement IRecordInfo interface for this to return. +// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. +// +// AKA: SafeArrayGetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. func safeArrayGetRecordInfo(safearray *SafeArray) (recordInfo interface{}, err error) { err = convertHresultToError( procSafeArrayGetRecordInfo.Call( @@ -238,12 +324,15 @@ func safeArrayGetRecordInfo(safearray *SafeArray) (recordInfo interface{}, err e return } -// TODO: Must implement IRecordInfo interface for this to work. +// safeArraySetRecordInfo mutates IRecordInfo info for custom types. +// +// AKA: SafeArraySetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) (err error) { err = convertHresultToError( procSafeArraySetRecordInfo.Call( uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(recordInfo)))) + uintptr(unsafe.Pointer(&recordInfo)))) return } -*/ diff --git a/_third_party/github.com/go-ole/go-ole/safearrayslices.go b/_third_party/github.com/go-ole/go-ole/safearrayslices.go index 5e24d87b5e..a9fa885f1d 100644 --- a/_third_party/github.com/go-ole/go-ole/safearrayslices.go +++ b/_third_party/github.com/go-ole/go-ole/safearrayslices.go @@ -2,7 +2,9 @@ package ole -import "unsafe" +import ( + "unsafe" +) func safeArrayFromByteSlice(slice []byte) *SafeArray { array, _ := safeArrayCreateVector(VT_UI1, 0, uint32(len(slice))) @@ -16,3 +18,16 @@ func safeArrayFromByteSlice(slice []byte) *SafeArray { } return array } + +func safeArrayFromStringSlice(slice []string) *SafeArray { + array, _ := safeArrayCreateVector(VT_BSTR, 0, uint32(len(slice))) + + if array == nil { + panic("Could not convert []string to SAFEARRAY") + } + // SysAllocStringLen(s) + for i, v := range slice { + safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(SysAllocStringLen(v)))) + } + return array +} diff --git a/_third_party/github.com/go-ole/go-ole/variant.go b/_third_party/github.com/go-ole/go-ole/variant.go index 9d7f722dd7..62b47fb772 100644 --- a/_third_party/github.com/go-ole/go-ole/variant.go +++ b/_third_party/github.com/go-ole/go-ole/variant.go @@ -82,8 +82,14 @@ func (v *VARIANT) Value() interface{} { return float64(v.Val) case VT_BSTR: return v.ToString() - //case VT_DATE: - // return v.ToIDispatch() // TODO: use VariantTimeToSystemTime + case VT_DATE: + // VT_DATE type will either return float64 or time.Time. 
+ d := float64(v.Val) + date, err := GetVariantDate(d) + if err != nil { + return d + } + return date case VT_UNKNOWN: return v.ToIUnknown() case VT_DISPATCH: diff --git a/_third_party/github.com/go-ole/go-ole/winrt_doc.go b/_third_party/github.com/go-ole/go-ole/winrt_doc.go index e59c77613b..52e6d74c9a 100644 --- a/_third_party/github.com/go-ole/go-ole/winrt_doc.go +++ b/_third_party/github.com/go-ole/go-ole/winrt_doc.go @@ -2,14 +2,17 @@ package ole +// RoInitialize func RoInitialize(thread_type uint32) (err error) { return NewError(E_NOTIMPL) } +// RoActivateInstance func RoActivateInstance(clsid string) (ins *IInspectable, err error) { return nil, NewError(E_NOTIMPL) } +// RoGetActivationFactory func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { return nil, NewError(E_NOTIMPL) } diff --git a/_third_party/github.com/gogo/protobuf/proto/all_test.go b/_third_party/github.com/gogo/protobuf/proto/all_test.go index 0e966ecac8..20fc4cd231 100644 --- a/_third_party/github.com/gogo/protobuf/proto/all_test.go +++ b/_third_party/github.com/gogo/protobuf/proto/all_test.go @@ -1958,6 +1958,58 @@ func TestMapFieldWithNil(t *testing.T) { } } +func TestOneof(t *testing.T) { + m := &Communique{} + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal of empty message with oneof: %v", err) + } + if len(b) != 0 { + t.Errorf("Marshal of empty message yielded too many bytes: %v", b) + } + + m = &Communique{ + Union: &Communique_Name{"Barry"}, + } + + // Round-trip. + b, err = Marshal(m) + if err != nil { + t.Fatalf("Marshal of message with oneof: %v", err) + } + if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5) + t.Errorf("Incorrect marshal of message with oneof: %v", b) + } + m.Reset() + if err := Unmarshal(b, m); err != nil { + t.Fatalf("Unmarshal of message with oneof: %v", err) + } + if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" { + t.Errorf("After round trip, Union = %+v", m.Union) + } + if name := m.GetName(); name != "Barry" { + t.Errorf("After round trip, GetName = %q, want %q", name, "Barry") + } + + // Let's try with a message in the oneof. + m.Union = &Communique_Msg{&Strings{StringField: String("deep deep string")}} + b, err = Marshal(m) + if err != nil { + t.Fatalf("Marshal of message with oneof set to message: %v", err) + } + if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16) + t.Errorf("Incorrect marshal of message with oneof set to message: %v", b) + } + m.Reset() + if err := Unmarshal(b, m); err != nil { + t.Fatalf("Unmarshal of message with oneof set to message: %v", err) + } + ss, ok := m.Union.(*Communique_Msg) + if !ok || ss.Msg.GetStringField() != "deep deep string" { + t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union) + } +} + // Benchmarks func testMsg() *GoTest { diff --git a/_third_party/github.com/gogo/protobuf/proto/clone.go b/_third_party/github.com/gogo/protobuf/proto/clone.go index 57297947be..4ff1ff59e7 100644 --- a/_third_party/github.com/gogo/protobuf/proto/clone.go +++ b/_third_party/github.com/gogo/protobuf/proto/clone.go @@ -125,6 +125,17 @@ func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { return } out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. 
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) case reflect.Map: if in.Len() == 0 { return diff --git a/_third_party/github.com/gogo/protobuf/proto/clone_test.go b/_third_party/github.com/gogo/protobuf/proto/clone_test.go index 668206b75f..0ca281c3ae 100644 --- a/_third_party/github.com/gogo/protobuf/proto/clone_test.go +++ b/_third_party/github.com/gogo/protobuf/proto/clone_test.go @@ -232,6 +232,28 @@ var mergeTests = []struct { Data: []byte("texas!"), }, }, + // Oneof fields should merge by assignment. + { + src: &pb.Communique{ + Union: &pb.Communique_Number{Number: 41}, + }, + dst: &pb.Communique{ + Union: &pb.Communique_Name{Name: "Bobby Tables"}, + }, + want: &pb.Communique{ + Union: &pb.Communique_Number{Number: 41}, + }, + }, + // Oneof nil is the same as not set. + { + src: &pb.Communique{}, + dst: &pb.Communique{ + Union: &pb.Communique_Name{Name: "Bobby Tables"}, + }, + want: &pb.Communique{ + Union: &pb.Communique_Name{Name: "Bobby Tables"}, + }, + }, } func TestMerge(t *testing.T) { diff --git a/_third_party/github.com/gogo/protobuf/proto/decode.go b/_third_party/github.com/gogo/protobuf/proto/decode.go index f7b1884b3c..34258942ae 100644 --- a/_third_party/github.com/gogo/protobuf/proto/decode.go +++ b/_third_party/github.com/gogo/protobuf/proto/decode.go @@ -46,6 +46,10 @@ import ( // errOverflow is returned when an integer is too large to be represented. var errOverflow = errors.New("proto: integer overflow") +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + // The fundamental decoders that interpret bytes on the wire. // Those that take integer types all return uint64 and are // therefore of type valueDecoder. @@ -314,6 +318,24 @@ func UnmarshalMerge(buf []byte, pb Message) error { return NewBuffer(buf).Unmarshal(pb) } +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + // Unmarshal parses the protocol buffer representation in the // Buffer and places the decoded result in pb. If the struct // underlying pb does not match the data in the buffer, the results can be @@ -370,11 +392,11 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group if prop.extendable { if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { if err = o.skip(st, tag, wire); err == nil { - if ee, ok := e.(extensionsMap); ok { + if ee, eok := e.(extensionsMap); eok { ext := ee.ExtensionMap()[int32(tag)] // may be missing ext.enc = append(ext.enc, o.buf[oi:o.index]...) ee.ExtensionMap()[int32(tag)] = ext - } else if ee, ok := e.(extensionsBytes); ok { + } else if ee, eok := e.(extensionsBytes); eok { ext := ee.GetExtensions() *ext = append(*ext, o.buf[oi:o.index]...) 
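Just below, unmarshalType gains a oneof fallback: prop.oneofUnmarshaler reports whether it consumed the tag, and ErrInternalBadWireType is mapped to a descriptive error. A hedged sketch of what a generated unmarshaler matching the func(Message, int, int, *Buffer) (bool, error) contract looks like, borrowing the Test/Test_Name types from the lib.go documentation further down:

    // Hypothetical generated code for `string name = 7` inside a oneof.
    func unmarshalTestUnion(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
        m := msg.(*Test)
        switch tag {
        case 7: // name
            if wire != proto.WireBytes {
                return true, proto.ErrInternalBadWireType
            }
            s, err := b.DecodeStringBytes()
            m.Union = &Test_Name{Name: s}
            return true, err
        default:
            // Not a oneof tag: the caller falls through to skipAndSave.
            return false, nil
        }
    }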
} @@ -382,6 +404,20 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group continue } } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. + err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } err = o.skipAndSave(st, tag, wire, base, prop.unrecField) continue } diff --git a/_third_party/github.com/gogo/protobuf/proto/encode.go b/_third_party/github.com/gogo/protobuf/proto/encode.go index 91f3f0784d..89d0caa826 100644 --- a/_third_party/github.com/gogo/protobuf/proto/encode.go +++ b/_third_party/github.com/gogo/protobuf/proto/encode.go @@ -228,6 +228,20 @@ func Marshal(pb Message) ([]byte, error) { return p.buf, err } +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + // Marshal takes the protocol buffer // and encodes it into the wire format, writing the result to the // Buffer. @@ -318,7 +332,7 @@ func size_bool(p *Properties, base structPointer) int { func size_proto3_bool(p *Properties, base structPointer) int { v := *structPointer_BoolVal(base, p.field) - if !v { + if !v && !p.oneof { return 0 } return len(p.tagcode) + 1 // each bool takes exactly one byte @@ -361,7 +375,7 @@ func size_int32(p *Properties, base structPointer) (n int) { func size_proto3_int32(p *Properties, base structPointer) (n int) { v := structPointer_Word32Val(base, p.field) x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { + if x == 0 && !p.oneof { return 0 } n += len(p.tagcode) @@ -407,7 +421,7 @@ func size_uint32(p *Properties, base structPointer) (n int) { func size_proto3_uint32(p *Properties, base structPointer) (n int) { v := structPointer_Word32Val(base, p.field) x := word32Val_Get(v) - if x == 0 { + if x == 0 && !p.oneof { return 0 } n += len(p.tagcode) @@ -452,7 +466,7 @@ func size_int64(p *Properties, base structPointer) (n int) { func size_proto3_int64(p *Properties, base structPointer) (n int) { v := structPointer_Word64Val(base, p.field) x := word64Val_Get(v) - if x == 0 { + if x == 0 && !p.oneof { return 0 } n += len(p.tagcode) @@ -495,7 +509,7 @@ func size_string(p *Properties, base structPointer) (n int) { func size_proto3_string(p *Properties, base structPointer) (n int) { v := *structPointer_StringVal(base, p.field) - if v == "" { + if v == "" && !p.oneof { return 0 } n += len(p.tagcode) @@ -667,7 +681,7 @@ func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error func size_slice_byte(p *Properties, base structPointer) (n int) { s := *structPointer_Bytes(base, p.field) - if s == nil { + if s == nil && !p.oneof { return 0 } n += len(p.tagcode) @@ -677,7 +691,7 @@ func size_slice_byte(p *Properties, base structPointer) (n int) { func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { + if len(s) == 0 && !p.oneof { return 0 } n += len(p.tagcode) @@ -1201,6 +1215,14 @@ func 
(o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { } } + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err != nil { + return err + } + } + // Add unrecognized fields at the end. if prop.unrecField.IsValid() { v := *structPointer_Bytes(base, prop.unrecField) @@ -1226,6 +1248,27 @@ func size_struct(prop *StructProperties, base structPointer) (n int) { n += len(v) } + // Factor in any oneof fields. + // TODO: This could be faster and use less reflection. + if prop.oneofMarshaler != nil { + sv := reflect.ValueOf(structPointer_Interface(base, prop.stype)).Elem() + for i := 0; i < prop.stype.NumField(); i++ { + fv := sv.Field(i) + if fv.Kind() != reflect.Interface || fv.IsNil() { + continue + } + if prop.stype.Field(i).Tag.Get("protobuf_oneof") == "" { + continue + } + spv := fv.Elem() // interface -> *T + sv := spv.Elem() // *T -> T + sf := sv.Type().Field(0) // StructField inside T + var prop Properties + prop.Init(sf.Type, "whatever", sf.Tag.Get("protobuf"), &sf) + n += prop.size(&prop, toStructPointer(spv)) + } + } + return } diff --git a/_third_party/github.com/gogo/protobuf/proto/equal.go b/_third_party/github.com/gogo/protobuf/proto/equal.go index d8673a3e97..5475c3d959 100644 --- a/_third_party/github.com/gogo/protobuf/proto/equal.go +++ b/_third_party/github.com/gogo/protobuf/proto/equal.go @@ -154,6 +154,17 @@ func equalAny(v1, v2 reflect.Value) bool { return v1.Float() == v2.Float() case reflect.Int32, reflect.Int64: return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2) case reflect.Map: if v1.Len() != v2.Len() { return false diff --git a/_third_party/github.com/gogo/protobuf/proto/equal_test.go b/_third_party/github.com/gogo/protobuf/proto/equal_test.go index aa9a3dbe42..05619e3a76 100644 --- a/_third_party/github.com/gogo/protobuf/proto/equal_test.go +++ b/_third_party/github.com/gogo/protobuf/proto/equal_test.go @@ -180,6 +180,24 @@ var EqualTests = []struct { &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}}, false, }, + { + "oneof same", + &pb.Communique{Union: &pb.Communique_Number{Number: 41}}, + &pb.Communique{Union: &pb.Communique_Number{Number: 41}}, + true, + }, + { + "oneof one nil", + &pb.Communique{Union: &pb.Communique_Number{Number: 41}}, + &pb.Communique{}, + false, + }, + { + "oneof different", + &pb.Communique{Union: &pb.Communique_Number{Number: 41}}, + &pb.Communique{Union: &pb.Communique_Name{Name: "Bobby Tables"}}, + false, + }, } func TestEqual(t *testing.T) { diff --git a/_third_party/github.com/gogo/protobuf/proto/lib.go b/_third_party/github.com/gogo/protobuf/proto/lib.go index d36f9ad129..b964734c77 100644 --- a/_third_party/github.com/gogo/protobuf/proto/lib.go +++ b/_third_party/github.com/gogo/protobuf/proto/lib.go @@ -66,6 +66,8 @@ for a protocol buffer variable v: that contain it (if any) followed by the CamelCased name of the extension field itself. HasExtension, ClearExtension, GetExtension and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. - Marshal and Unmarshal are functions to encode and decode the wire format. 
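Taken together, the new Buffer.EncodeMessage and Buffer.DecodeMessage above give varint-length-delimited framing for streams of messages. A hedged usage sketch, assuming the vendored proto package, "io", and pb.Communique as used in the tests in this patch:

    func writeAll(msgs []*pb.Communique) ([]byte, error) {
        buf := proto.NewBuffer(nil)
        for _, m := range msgs {
            // EncodeMessage writes a varint length prefix, then the body.
            if err := buf.EncodeMessage(m); err != nil {
                return nil, err
            }
        }
        return buf.Bytes(), nil
    }

    func readAll(data []byte) ([]*pb.Communique, error) {
        buf := proto.NewBuffer(data)
        var msgs []*pb.Communique
        for {
            m := new(pb.Communique)
            if err := buf.DecodeMessage(m); err != nil {
                if err == io.ErrUnexpectedEOF {
                    return msgs, nil // assumption: clean end of input
                }
                return nil, err
            }
            msgs = append(msgs, m)
        }
    }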
The simplest way to describe this is to see an example. @@ -82,6 +84,10 @@ Given file test.proto, containing optional group OptionalGroup = 4 { required string RequiredField = 5; } + oneof union { + int32 number = 6; + string name = 7; + } } The resulting file, test.pb.go, is: @@ -120,15 +126,40 @@ The resulting file, test.pb.go, is: } type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` } func (m *Test) Reset() { *m = Test{} } func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } const Default_Test_Type int32 = 77 func (m *Test) GetLabel() string { @@ -165,6 +196,20 @@ The resulting file, test.pb.go, is: return "" } + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + func init() { proto.RegisterEnum("example.FOO", FOO_name, FOO_value) } @@ -187,6 +232,7 @@ package main Optionalgroup: &pb.Test_OptionalGroup{ RequiredField: proto.String("good bye"), }, + Union: &pb.Test_Name{"fred"}, } data, err := proto.Marshal(test) if err != nil { @@ -201,6 +247,11 @@ package main if test.GetLabel() != newTest.GetLabel() { log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } // etc. 
} */ @@ -460,7 +511,6 @@ out: break out } fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - break case WireVarint: u, err = p.DecodeVarint() @@ -471,19 +521,11 @@ out: fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) case WireStartGroup: - if err != nil { - fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err) - break out - } fmt.Printf("%3d: t=%3d start\n", index, tag) depth++ case WireEndGroup: depth-- - if err != nil { - fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err) - break out - } fmt.Printf("%3d: t=%3d end\n", index, tag) } } diff --git a/_third_party/github.com/gogo/protobuf/proto/properties.go b/_third_party/github.com/gogo/protobuf/proto/properties.go index 13245c00df..1bb17a26ab 100644 --- a/_third_party/github.com/gogo/protobuf/proto/properties.go +++ b/_third_party/github.com/gogo/protobuf/proto/properties.go @@ -89,6 +89,12 @@ type decoder func(p *Buffer, prop *Properties, base structPointer) error // A valueDecoder decodes a single integer in a particular encoding. type valueDecoder func(o *Buffer) (x uint64, err error) +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. @@ -137,6 +143,21 @@ type StructProperties struct { order []int // list of struct field numbers in tag order unrecField field // field id of the XXX_unrecognized []byte field extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties } // Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. 
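StructProperties now exposes per-oneof metadata in OneofTypes, keyed by each field's original name. A hedged sketch of inspecting it at runtime, again assuming the pb.Communique test type:

    props := proto.GetProperties(reflect.TypeOf(pb.Communique{}))
    for name, oop := range props.OneofTypes {
        // oop.Type is the *T wrapper, oop.Field the index of the interface
        // field inside Communique, oop.Prop the parsed tag of T's one field.
        fmt.Printf("oneof %q -> %v (field #%d, tag %d)\n",
            name, oop.Type, oop.Field, oop.Prop.Tag)
    }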
@@ -161,6 +182,7 @@ type Properties struct { Packed bool // relevant for repeated primitives only Enum string // set for enum types only proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field Default string // default value HasDefault bool // whether an explicit default was provided @@ -216,6 +238,9 @@ func (p *Properties) String() string { if p.proto3 { s += ",proto3" } + if p.oneof { + s += ",oneof" + } if len(p.Enum) > 0 { s += ",enum=" + p.Enum } @@ -292,6 +317,8 @@ func (p *Properties) Parse(s string) { p.Enum = f[5:] case f == "proto3": p.proto3 = true + case f == "oneof": + p.oneof = true case strings.HasPrefix(f, "def="): p.HasDefault = true p.Default = f[4:] // rest of string @@ -733,6 +760,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { if f.Name == "XXX_unrecognized" { // special case prop.unrecField = toField(&f) } + oneof := f.Tag.Get("protobuf_oneof") != "" // special case prop.Prop[i] = p prop.order[i] = i if debug { @@ -742,7 +770,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") { + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") } } @@ -750,6 +778,41 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { // Re-order prop.order. sort.Sort(prop) + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. 
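Parse and String stay symmetric about the new flag: a generated tag carrying ",oneof" sets the unexported bit, and String re-emits it. Roughly:

    var p proto.Properties
    p.Parse("varint,6,opt,name=number,oneof")
    fmt.Println(p.String()) // approximately: varint,6,opt,name=number,oneof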
+ for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + // build required counts // build tags reqCount := 0 diff --git a/_third_party/github.com/gogo/protobuf/proto/size_test.go b/_third_party/github.com/gogo/protobuf/proto/size_test.go index c3ad3163e8..a5ed9ad868 100644 --- a/_third_party/github.com/gogo/protobuf/proto/size_test.go +++ b/_third_party/github.com/gogo/protobuf/proto/size_test.go @@ -124,6 +124,11 @@ var SizeTests = []struct { {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, + + {"oneof not set", &pb.Communique{}}, + {"oneof zero int32", &pb.Communique{Union: &pb.Communique_Number{Number: 0}}}, + {"oneof int32", &pb.Communique{Union: &pb.Communique_Number{Number: 3}}}, + {"oneof string", &pb.Communique{Union: &pb.Communique_Name{Name: "Rhythmic Fman"}}}, } func TestSize(t *testing.T) { diff --git a/_third_party/github.com/gogo/protobuf/proto/text.go b/_third_party/github.com/gogo/protobuf/proto/text.go index 3652424418..c12caecafb 100644 --- a/_third_party/github.com/gogo/protobuf/proto/text.go +++ b/_third_party/github.com/gogo/protobuf/proto/text.go @@ -42,6 +42,7 @@ import ( "bufio" "bytes" "encoding" + "errors" "fmt" "io" "log" @@ -331,6 +332,32 @@ func writeStruct(w *textWriter, sv reflect.Value) error { } } + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props.Parse(tag) // Overwrite the outer props. + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + if err := writeName(w, props); err != nil { return err } @@ -396,15 +423,17 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) if props != nil && len(props.CustomType) > 0 { - var custom Marshaler = v.Interface().(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if err := writeString(w, string(data)); err != nil { - return err + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil } - return nil } // Floats have special cases. 
@@ -533,8 +562,8 @@ func writeMessageSet(w *textWriter, ms *MessageSet) error { pb := reflect.New(msd.t.Elem()) if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { - if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { - return err + if _, ferr := fmt.Fprintf(w, "/* bad message: %v */\n", err); ferr != nil { + return ferr } } else { if err := writeStruct(w, pb.Elem()); err != nil { @@ -569,19 +598,19 @@ func writeUnknownStruct(w *textWriter, data []byte) error { for b.index < len(b.buf) { x, err := b.DecodeVarint() if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr } wire, tag := x&7, x>>3 if wire == WireEndGroup { w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr } continue } - if _, err := fmt.Fprint(w, tag); err != nil { - return err + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr } if wire != WireStartGroup { if err := w.WriteByte(':'); err != nil { diff --git a/_third_party/github.com/gogo/protobuf/proto/text_parser.go b/_third_party/github.com/gogo/protobuf/proto/text_parser.go index 9b2fab5935..acc001fd27 100644 --- a/_third_party/github.com/gogo/protobuf/proto/text_parser.go +++ b/_third_party/github.com/gogo/protobuf/proto/text_parser.go @@ -390,8 +390,7 @@ func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSet } // Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) { - sprops := GetProperties(st) +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { i, ok := sprops.decoderOrigNames[name] if ok { return i, sprops.Prop[i], true @@ -443,7 +442,8 @@ func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseEr func (p *textParser) readStruct(sv reflect.Value, terminator string) error { st := sv.Type() - reqCount := GetProperties(st).reqCount + sprops := GetProperties(st) + reqCount := sprops.reqCount var reqFieldErr error fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of @@ -525,95 +525,107 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { sl = reflect.Append(sl, ext) SetExtension(ep, desc, sl.Interface()) } - } else { - // This is a normal, non-extension field. - name := tok.value - fi, props, ok := structFieldByName(st, name) - if !ok { - return p.errorf("unknown field name %q in %v", name, st) + if err := p.consumeOptionalSeparator(); err != nil { + return err } + continue + } - dst := sv.Field(fi) - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // Technically the "key" and "value" could come in any order, - // but in practice they won't. 
- - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - if err := p.consumeToken("key"); err != nil { - return err - } - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken("value"); err != nil { - return err - } - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken(terminator); err != nil { - return err - } + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + sv.Field(oop.Field).Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } - dst.SetMapIndex(key, val) - continue + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err } - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) } - - if err := p.checkForColon(props, st.Field(fi).Type); err != nil { + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // Technically the "key" and "value" could come in any order, + // but in practice they won't. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + if err := p.consumeToken("key"); err != nil { + return err + } + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken("value"); err != nil { + return err + } + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken(terminator); err != nil { return err } - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } else if props.Required { - reqCount-- + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. 
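With the writer change above and this parser change, oneof fields now round-trip through the text format; the tests below pin the exact strings. A hedged sketch:

    m := &pb.Communique{Union: &pb.Communique_Number{Number: 4}}
    s := proto.CompactTextString(m) // "number:4" plus a trailing space
    var out pb.Communique
    if err := proto.UnmarshalText(s, &out); err != nil {
        log.Fatal(err)
    }
    // proto.Equal(m, &out) == true, via the new Interface case in equalAny.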
+ fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err } + reqFieldErr = err + } else if props.Required { + reqCount-- } if err := p.consumeOptionalSeparator(); err != nil { diff --git a/_third_party/github.com/gogo/protobuf/proto/text_parser_test.go b/_third_party/github.com/gogo/protobuf/proto/text_parser_test.go index c08e01e6b2..f32ee09b9e 100644 --- a/_third_party/github.com/gogo/protobuf/proto/text_parser_test.go +++ b/_third_party/github.com/gogo/protobuf/proto/text_parser_test.go @@ -486,6 +486,18 @@ func TestMapParsing(t *testing.T) { } } +func TestOneofParsing(t *testing.T) { + const in = `name:"Shrek"` + m := new(Communique) + want := &Communique{Union: &Communique_Name{"Shrek"}} + if err := UnmarshalText(in, m); err != nil { + t.Fatal(err) + } + if !Equal(m, want) { + t.Errorf("\n got %v\nwant %v", m, want) + } +} + var benchInput string func init() { diff --git a/_third_party/github.com/gogo/protobuf/proto/text_test.go b/_third_party/github.com/gogo/protobuf/proto/text_test.go index 35f9ccc2c7..f3e81c1ebd 100644 --- a/_third_party/github.com/gogo/protobuf/proto/text_test.go +++ b/_third_party/github.com/gogo/protobuf/proto/text_test.go @@ -208,6 +208,30 @@ func TestMarshalTextUnknownEnum(t *testing.T) { } } +func TestTextOneof(t *testing.T) { + tests := []struct { + m proto.Message + want string + }{ + // zero message + {&pb.Communique{}, ``}, + // scalar field + {&pb.Communique{Union: &pb.Communique_Number{Number: 4}}, `number:4`}, + // message field + {&pb.Communique{Union: &pb.Communique_Msg{ + Msg: &pb.Strings{StringField: proto.String("why hello!")}, + }}, `msg:`}, + // bad oneof (should not panic) + {&pb.Communique{Union: &pb.Communique_Msg{Msg: nil}}, `msg:/* nil */`}, + } + for _, test := range tests { + got := strings.TrimSpace(test.m.String()) + if got != test.want { + t.Errorf("\n got %s\nwant %s", got, test.want) + } + } +} + func BenchmarkMarshalTextBuffered(b *testing.B) { buf := new(bytes.Buffer) m := newTestMessage() diff --git a/_third_party/github.com/golang/freetype/AUTHORS b/_third_party/github.com/golang/freetype/AUTHORS new file mode 100644 index 0000000000..7b70f7768f --- /dev/null +++ b/_third_party/github.com/golang/freetype/AUTHORS @@ -0,0 +1,18 @@ +# This is the official list of Freetype-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. +# +# Freetype-Go is derived from Freetype, which is written in C. The latter +# is copyright 1996-2010 David Turner, Robert Wilhelm, and Werner Lemberg. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Google Inc. +Jeff R. Allen +Rémy Oudompheng +Roger Peppe +Steven Edwards diff --git a/_third_party/github.com/golang/freetype/CONTRIBUTORS b/_third_party/github.com/golang/freetype/CONTRIBUTORS new file mode 100644 index 0000000000..7494b12c36 --- /dev/null +++ b/_third_party/github.com/golang/freetype/CONTRIBUTORS @@ -0,0 +1,36 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Freetype-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. 
+# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Andrew Gerrand +Jeff R. Allen +Nigel Tao +Rémy Oudompheng +Rob Pike +Roger Peppe +Russ Cox +Steven Edwards diff --git a/_third_party/github.com/golang/freetype/LICENSE b/_third_party/github.com/golang/freetype/LICENSE new file mode 100644 index 0000000000..e854ba5dba --- /dev/null +++ b/_third_party/github.com/golang/freetype/LICENSE @@ -0,0 +1,12 @@ +Use of the Freetype-Go software is subject to your choice of exactly one of +the following two licenses: + * The FreeType License, which is similar to the original BSD license with + an advertising clause, or + * The GNU General Public License (GPL), version 2 or later. + +The text of these licenses are available in the licenses/ftl.txt and the +licenses/gpl.txt files respectively. They are also available at +http://freetype.sourceforge.net/license.html + +The Luxi fonts in the testdata directory are licensed separately. See the +testdata/COPYING file for details. diff --git a/_third_party/github.com/golang/freetype/README b/_third_party/github.com/golang/freetype/README new file mode 100644 index 0000000000..39b3d82506 --- /dev/null +++ b/_third_party/github.com/golang/freetype/README @@ -0,0 +1,21 @@ +The Freetype font rasterizer in the Go programming language. + +To download and install from source: +$ go get github.com/golang/freetype + +It is an incomplete port: + * It only supports TrueType fonts, and not Type 1 fonts nor bitmap fonts. + * It only supports the Unicode encoding. + +There are also some implementation differences: + * It uses a 26.6 fixed point co-ordinate system everywhere internally, + as opposed to the original Freetype's mix of 26.6 (or 10.6 for 16-bit + systems) in some places, and 24.8 in the "smooth" rasterizer. + +Freetype-Go is derived from Freetype, which is written in C. Freetype is +copyright 1996-2010 David Turner, Robert Wilhelm, and Werner Lemberg. +Freetype-Go is copyright The Freetype-Go Authors, who are listed in the +AUTHORS file. + +Unless otherwise noted, the Freetype-Go source files are distributed +under the BSD-style license found in the LICENSE file. diff --git a/_third_party/code.google.com/p/freetype-go/freetype/freetype.go b/_third_party/github.com/golang/freetype/freetype.go similarity index 67% rename from _third_party/code.google.com/p/freetype-go/freetype/freetype.go rename to _third_party/github.com/golang/freetype/freetype.go index 67bc24407d..a2d385c795 100644 --- a/_third_party/code.google.com/p/freetype-go/freetype/freetype.go +++ b/_third_party/github.com/golang/freetype/freetype.go @@ -6,15 +6,17 @@ // The freetype package provides a convenient API to draw text onto an image. 
// Use the freetype/raster and freetype/truetype packages for lower level // control over rasterization and TrueType parsing. -package freetype +package freetype // import "bosun.org/_third_party/github.com/golang/freetype" import ( "errors" "image" "image/draw" - "bosun.org/_third_party/code.google.com/p/freetype-go/freetype/raster" - "bosun.org/_third_party/code.google.com/p/freetype-go/freetype/truetype" + "bosun.org/_third_party/github.com/golang/freetype/raster" + "bosun.org/_third_party/github.com/golang/freetype/truetype" + "golang.org/x/image/font" + "golang.org/x/image/math/fixed" ) // These constants determine the size of the glyph cache. The cache is keyed @@ -33,7 +35,7 @@ const ( type cacheEntry struct { valid bool glyph truetype.Index - advanceWidth raster.Fix32 + advanceWidth fixed.Int26_6 mask *image.Alpha offset image.Point } @@ -45,30 +47,20 @@ func ParseFont(b []byte) (*truetype.Font, error) { return truetype.Parse(b) } -// Pt converts from a co-ordinate pair measured in pixels to a raster.Point -// co-ordinate pair measured in raster.Fix32 units. -func Pt(x, y int) raster.Point { - return raster.Point{ - X: raster.Fix32(x << 8), - Y: raster.Fix32(y << 8), +// Pt converts from a co-ordinate pair measured in pixels to a fixed.Point26_6 +// co-ordinate pair measured in fixed.Int26_6 units. +func Pt(x, y int) fixed.Point26_6 { + return fixed.Point26_6{ + X: fixed.Int26_6(x << 6), + Y: fixed.Int26_6(y << 6), } } -// Hinting is the policy for snapping a glyph's contours to pixel boundaries. -type Hinting int32 - -const ( - // NoHinting means to not perform any hinting. - NoHinting = Hinting(truetype.NoHinting) - // FullHinting means to use the font's hinting instructions. - FullHinting = Hinting(truetype.FullHinting) -) - // A Context holds the state for drawing text in a given font and size. type Context struct { r *raster.Rasterizer - font *truetype.Font - glyphBuf *truetype.GlyphBuf + f *truetype.Font + glyphBuf truetype.GlyphBuf // clip is the clip rectangle for drawing. clip image.Rectangle // dst and src are the destination and source images for drawing. @@ -77,20 +69,20 @@ type Context struct { // fontSize and dpi are used to calculate scale. scale is the number of // 26.6 fixed point units in 1 em. hinting is the hinting policy. fontSize, dpi float64 - scale int32 - hinting Hinting + scale fixed.Int26_6 + hinting font.Hinting // cache is the glyph cache. cache [nGlyphs * nXFractions * nYFractions]cacheEntry } -// PointToFix32 converts the given number of points (as in ``a 12 point font'') -// into fixed point units. -func (c *Context) PointToFix32(x float64) raster.Fix32 { - return raster.Fix32(x * float64(c.dpi) * (256.0 / 72.0)) +// PointToFixed converts the given number of points (as in "a 12 point font") +// into a 26.6 fixed point number of pixels. +func (c *Context) PointToFixed(x float64) fixed.Int26_6 { + return fixed.Int26_6(x * float64(c.dpi) * (64.0 / 72.0)) } // drawContour draws the given closed contour with the given offset. -func (c *Context) drawContour(ps []truetype.Point, dx, dy raster.Fix32) { +func (c *Context) drawContour(ps []truetype.Point, dx, dy fixed.Int26_6) { if len(ps) == 0 { return } @@ -105,23 +97,23 @@ func (c *Context) drawContour(ps []truetype.Point, dx, dy raster.Fix32) { // ps[0] is a truetype.Point measured in FUnits and positive Y going // upwards. start is the same thing measured in fixed point units and // positive Y going downwards, and offset by (dx, dy). 
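The conversions above replace the old 24.8 Fix32 units with 26.6 fixed point from golang.org/x/image/math/fixed: one pixel is now 64 fixed.Int26_6 units (x<<6), and PointToFixed scales points to pixels at dpi/72 (so 64.0/72.0 units per point). A small sketch:

    c := freetype.NewContext() // dpi defaults to 72
    p := freetype.Pt(10, 20)   // pixels -> fixed.Point26_6
    adv := c.PointToFixed(12)  // 12pt at 72dpi == 12px
    fmt.Println(int32(p.X), int32(p.Y), int32(adv)) // 640 1280 768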
- start := raster.Point{ - X: dx + raster.Fix32(ps[0].X<<2), - Y: dy - raster.Fix32(ps[0].Y<<2), + start := fixed.Point26_6{ + X: dx + ps[0].X, + Y: dy - ps[0].Y, } others := []truetype.Point(nil) if ps[0].Flags&0x01 != 0 { others = ps[1:] } else { - last := raster.Point{ - X: dx + raster.Fix32(ps[len(ps)-1].X<<2), - Y: dy - raster.Fix32(ps[len(ps)-1].Y<<2), + last := fixed.Point26_6{ + X: dx + ps[len(ps)-1].X, + Y: dy - ps[len(ps)-1].Y, } if ps[len(ps)-1].Flags&0x01 != 0 { start = last others = ps[:len(ps)-1] } else { - start = raster.Point{ + start = fixed.Point26_6{ X: (start.X + last.X) / 2, Y: (start.Y + last.Y) / 2, } @@ -131,9 +123,9 @@ func (c *Context) drawContour(ps []truetype.Point, dx, dy raster.Fix32) { c.r.Start(start) q0, on0 := start, true for _, p := range others { - q := raster.Point{ - X: dx + raster.Fix32(p.X<<2), - Y: dy - raster.Fix32(p.Y<<2), + q := fixed.Point26_6{ + X: dx + p.X, + Y: dy - p.Y, } on := p.Flags&0x01 != 0 if on { @@ -146,7 +138,7 @@ func (c *Context) drawContour(ps []truetype.Point, dx, dy raster.Fix32) { if on0 { // No-op. } else { - mid := raster.Point{ + mid := fixed.Point26_6{ X: (q0.X + q.X) / 2, Y: (q0.Y + q.Y) / 2, } @@ -165,54 +157,54 @@ func (c *Context) drawContour(ps []truetype.Point, dx, dy raster.Fix32) { // rasterize returns the advance width, glyph mask and integer-pixel offset // to render the given glyph at the given sub-pixel offsets. -// The 24.8 fixed point arguments fx and fy must be in the range [0, 1). -func (c *Context) rasterize(glyph truetype.Index, fx, fy raster.Fix32) ( - raster.Fix32, *image.Alpha, image.Point, error) { +// The 26.6 fixed point arguments fx and fy must be in the range [0, 1). +func (c *Context) rasterize(glyph truetype.Index, fx, fy fixed.Int26_6) ( + fixed.Int26_6, *image.Alpha, image.Point, error) { - if err := c.glyphBuf.Load(c.font, c.scale, glyph, truetype.Hinting(c.hinting)); err != nil { + if err := c.glyphBuf.Load(c.f, c.scale, glyph, c.hinting); err != nil { return 0, nil, image.Point{}, err } // Calculate the integer-pixel bounds for the glyph. - xmin := int(fx+raster.Fix32(c.glyphBuf.B.XMin<<2)) >> 8 - ymin := int(fy-raster.Fix32(c.glyphBuf.B.YMax<<2)) >> 8 - xmax := int(fx+raster.Fix32(c.glyphBuf.B.XMax<<2)+0xff) >> 8 - ymax := int(fy-raster.Fix32(c.glyphBuf.B.YMin<<2)+0xff) >> 8 + xmin := int(fx+c.glyphBuf.Bounds.Min.X) >> 6 + ymin := int(fy-c.glyphBuf.Bounds.Max.Y) >> 6 + xmax := int(fx+c.glyphBuf.Bounds.Max.X+0x3f) >> 6 + ymax := int(fy-c.glyphBuf.Bounds.Min.Y+0x3f) >> 6 if xmin > xmax || ymin > ymax { return 0, nil, image.Point{}, errors.New("freetype: negative sized glyph") } // A TrueType's glyph's nodes can have negative co-ordinates, but the - // rasterizer clips anything left of x=0 or above y=0. xmin and ymin - // are the pixel offsets, based on the font's FUnit metrics, that let - // a negative co-ordinate in TrueType space be non-negative in - // rasterizer space. xmin and ymin are typically <= 0. - fx += raster.Fix32(-xmin << 8) - fy += raster.Fix32(-ymin << 8) + // rasterizer clips anything left of x=0 or above y=0. xmin and ymin are + // the pixel offsets, based on the font's FUnit metrics, that let a + // negative co-ordinate in TrueType space be non-negative in rasterizer + // space. xmin and ymin are typically <= 0. + fx -= fixed.Int26_6(xmin << 6) + fy -= fixed.Int26_6(ymin << 6) // Rasterize the glyph's vectors. 
c.r.Clear() e0 := 0 - for _, e1 := range c.glyphBuf.End { - c.drawContour(c.glyphBuf.Point[e0:e1], fx, fy) + for _, e1 := range c.glyphBuf.Ends { + c.drawContour(c.glyphBuf.Points[e0:e1], fx, fy) e0 = e1 } a := image.NewAlpha(image.Rect(0, 0, xmax-xmin, ymax-ymin)) c.r.Rasterize(raster.NewAlphaSrcPainter(a)) - return raster.Fix32(c.glyphBuf.AdvanceWidth << 2), a, image.Point{xmin, ymin}, nil + return c.glyphBuf.AdvanceWidth, a, image.Point{xmin, ymin}, nil } // glyph returns the advance width, glyph mask and integer-pixel offset to // render the given glyph at the given sub-pixel point. It is a cache for the // rasterize method. Unlike rasterize, p's co-ordinates do not have to be in // the range [0, 1). -func (c *Context) glyph(glyph truetype.Index, p raster.Point) ( - raster.Fix32, *image.Alpha, image.Point, error) { +func (c *Context) glyph(glyph truetype.Index, p fixed.Point26_6) ( + fixed.Int26_6, *image.Alpha, image.Point, error) { // Split p.X and p.Y into their integer and fractional parts. - ix, fx := int(p.X>>8), p.X&0xff - iy, fy := int(p.Y>>8), p.Y&0xff + ix, fx := int(p.X>>6), p.X&0x3f + iy, fy := int(p.Y>>6), p.Y&0x3f // Calculate the index t into the cache array. tg := int(glyph) % nGlyphs - tx := int(fx) / (256 / nXFractions) - ty := int(fy) / (256 / nYFractions) + tx := int(fx) / (64 / nXFractions) + ty := int(fy) / (64 / nYFractions) t := ((tg*nXFractions)+tx)*nYFractions + ty // Check for a cache hit. if e := c.cache[t]; e.valid && e.glyph == glyph { @@ -233,24 +225,25 @@ func (c *Context) glyph(glyph truetype.Index, p raster.Point) ( // above and to the right of the point, but some may be below or to the left. // For example, drawing a string that starts with a 'J' in an italic font may // affect pixels below and left of the point. -// p is a raster.Point and can therefore represent sub-pixel positions. -func (c *Context) DrawString(s string, p raster.Point) (raster.Point, error) { - if c.font == nil { - return raster.Point{}, errors.New("freetype: DrawText called with a nil font") +// +// p is a fixed.Point26_6 and can therefore represent sub-pixel positions. +func (c *Context) DrawString(s string, p fixed.Point26_6) (fixed.Point26_6, error) { + if c.f == nil { + return fixed.Point26_6{}, errors.New("freetype: DrawText called with a nil font") } prev, hasPrev := truetype.Index(0), false for _, rune := range s { - index := c.font.Index(rune) + index := c.f.Index(rune) if hasPrev { - kern := raster.Fix32(c.font.Kerning(c.scale, prev, index)) << 2 - if c.hinting != NoHinting { - kern = (kern + 128) &^ 255 + kern := c.f.Kern(c.scale, prev, index) + if c.hinting != font.HintingNone { + kern = (kern + 32) &^ 63 } p.X += kern } advanceWidth, mask, offset, err := c.glyph(index, p) if err != nil { - return raster.Point{}, err + return fixed.Point26_6{}, err } p.X += advanceWidth glyphRect := mask.Bounds().Add(offset) @@ -267,16 +260,16 @@ func (c *Context) DrawString(s string, p raster.Point) (raster.Point, error) { // recalc recalculates scale and bounds values from the font size, screen // resolution and font metrics, and invalidates the glyph cache. func (c *Context) recalc() { - c.scale = int32(c.fontSize * c.dpi * (64.0 / 72.0)) - if c.font == nil { + c.scale = fixed.Int26_6(c.fontSize * c.dpi * (64.0 / 72.0)) + if c.f == nil { c.r.SetBounds(0, 0) } else { // Set the rasterizer's bounds to be big enough to handle the largest glyph. 
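The glyph cache above splits each 26.6 coordinate into whole pixels and a sub-pixel fraction, then buckets the fraction into nXFractions x nYFractions cells. The split on its own:

    x := fixed.Int26_6(1000)    // 15.625 pixels
    ix, fx := int(x>>6), x&0x3f // whole pixels, sub-pixel fraction
    fmt.Println(ix, fx)         // 15 40 (40/64 == 0.625)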
- b := c.font.Bounds(c.scale) - xmin := +int(b.XMin) >> 6 - ymin := -int(b.YMax) >> 6 - xmax := +int(b.XMax+63) >> 6 - ymax := -int(b.YMin-63) >> 6 + b := c.f.Bounds(c.scale) + xmin := +int(b.Min.X) >> 6 + ymin := -int(b.Max.Y) >> 6 + xmax := +int(b.Max.X+63) >> 6 + ymax := -int(b.Min.Y-63) >> 6 c.r.SetBounds(xmax-xmin, ymax-ymin) } for i := range c.cache { @@ -294,15 +287,15 @@ func (c *Context) SetDPI(dpi float64) { } // SetFont sets the font used to draw text. -func (c *Context) SetFont(font *truetype.Font) { - if c.font == font { +func (c *Context) SetFont(f *truetype.Font) { + if c.f == f { return } - c.font = font + c.f = f c.recalc() } -// SetFontSize sets the font size in points (as in ``a 12 point font''). +// SetFontSize sets the font size in points (as in "a 12 point font"). func (c *Context) SetFontSize(fontSize float64) { if c.fontSize == fontSize { return @@ -312,7 +305,7 @@ func (c *Context) SetFontSize(fontSize float64) { } // SetHinting sets the hinting policy. -func (c *Context) SetHinting(hinting Hinting) { +func (c *Context) SetHinting(hinting font.Hinting) { c.hinting = hinting for i := range c.cache { c.cache[i] = cacheEntry{} @@ -341,7 +334,6 @@ func (c *Context) SetClip(clip image.Rectangle) { func NewContext() *Context { return &Context{ r: raster.NewRasterizer(0, 0), - glyphBuf: truetype.NewGlyphBuf(), fontSize: 12, dpi: 72, scale: 12 << 6, diff --git a/_third_party/code.google.com/p/freetype-go/freetype/freetype_test.go b/_third_party/github.com/golang/freetype/freetype_test.go similarity index 88% rename from _third_party/code.google.com/p/freetype-go/freetype/freetype_test.go rename to _third_party/github.com/golang/freetype/freetype_test.go index 39f56b36d9..348c411ab2 100644 --- a/_third_party/code.google.com/p/freetype-go/freetype/freetype_test.go +++ b/_third_party/github.com/golang/freetype/freetype_test.go @@ -15,17 +15,17 @@ import ( ) func BenchmarkDrawString(b *testing.B) { - data, err := ioutil.ReadFile("../licenses/gpl.txt") + data, err := ioutil.ReadFile("licenses/gpl.txt") if err != nil { b.Fatal(err) } lines := strings.Split(string(data), "\n") - data, err = ioutil.ReadFile("../testdata/luxisr.ttf") + data, err = ioutil.ReadFile("testdata/luxisr.ttf") if err != nil { b.Fatal(err) } - font, err := ParseFont(data) + f, err := ParseFont(data) if err != nil { b.Fatal(err) } @@ -37,7 +37,7 @@ func BenchmarkDrawString(b *testing.B) { c.SetDst(dst) c.SetClip(dst.Bounds()) c.SetSrc(image.Black) - c.SetFont(font) + c.SetFont(f) var ms runtime.MemStats runtime.ReadMemStats(&ms) diff --git a/_third_party/code.google.com/p/freetype-go/freetype/raster/geom.go b/_third_party/github.com/golang/freetype/raster/geom.go similarity index 51% rename from _third_party/code.google.com/p/freetype-go/freetype/raster/geom.go rename to _third_party/github.com/golang/freetype/raster/geom.go index 63c86e6ab0..f3696ea983 100644 --- a/_third_party/code.google.com/p/freetype-go/freetype/raster/geom.go +++ b/_third_party/github.com/golang/freetype/raster/geom.go @@ -8,36 +8,12 @@ package raster import ( "fmt" "math" -) - -// A Fix32 is a 24.8 fixed point number. -type Fix32 int32 - -// A Fix64 is a 48.16 fixed point number. -type Fix64 int64 - -// String returns a human-readable representation of a 24.8 fixed point number. -// For example, the number one-and-a-quarter becomes "1:064". 
-func (x Fix32) String() string { - if x < 0 { - x = -x - return fmt.Sprintf("-%d:%03d", int32(x/256), int32(x%256)) - } - return fmt.Sprintf("%d:%03d", int32(x/256), int32(x%256)) -} -// String returns a human-readable representation of a 48.16 fixed point number. -// For example, the number one-and-a-quarter becomes "1:16384". -func (x Fix64) String() string { - if x < 0 { - x = -x - return fmt.Sprintf("-%d:%05d", int64(x/65536), int64(x%65536)) - } - return fmt.Sprintf("%d:%05d", int64(x/65536), int64(x%65536)) -} + "golang.org/x/image/math/fixed" +) // maxAbs returns the maximum of abs(a) and abs(b). -func maxAbs(a, b Fix32) Fix32 { +func maxAbs(a, b fixed.Int26_6) fixed.Int26_6 { if a < 0 { a = -a } @@ -50,132 +26,112 @@ func maxAbs(a, b Fix32) Fix32 { return a } -// A Point represents a two-dimensional point or vector, in 24.8 fixed point -// format. -type Point struct { - X, Y Fix32 -} - -// String returns a human-readable representation of a Point. -func (p Point) String() string { - return "(" + p.X.String() + ", " + p.Y.String() + ")" -} - -// Add returns the vector p + q. -func (p Point) Add(q Point) Point { - return Point{p.X + q.X, p.Y + q.Y} -} - -// Sub returns the vector p - q. -func (p Point) Sub(q Point) Point { - return Point{p.X - q.X, p.Y - q.Y} -} - -// Mul returns the vector k * p. -func (p Point) Mul(k Fix32) Point { - return Point{p.X * k / 256, p.Y * k / 256} -} - -// Neg returns the vector -p, or equivalently p rotated by 180 degrees. -func (p Point) Neg() Point { - return Point{-p.X, -p.Y} +// pNeg returns the vector -p, or equivalently p rotated by 180 degrees. +func pNeg(p fixed.Point26_6) fixed.Point26_6 { + return fixed.Point26_6{-p.X, -p.Y} } -// Dot returns the dot product p·q. -func (p Point) Dot(q Point) Fix64 { +// pDot returns the dot product p·q. +func pDot(p fixed.Point26_6, q fixed.Point26_6) fixed.Int52_12 { px, py := int64(p.X), int64(p.Y) qx, qy := int64(q.X), int64(q.Y) - return Fix64(px*qx + py*qy) + return fixed.Int52_12(px*qx + py*qy) } -// Len returns the length of the vector p. -func (p Point) Len() Fix32 { +// pLen returns the length of the vector p. +func pLen(p fixed.Point26_6) fixed.Int26_6 { // TODO(nigeltao): use fixed point math. x := float64(p.X) y := float64(p.Y) - return Fix32(math.Sqrt(x*x + y*y)) + return fixed.Int26_6(math.Sqrt(x*x + y*y)) } -// Norm returns the vector p normalized to the given length, or the zero Point -// if p is degenerate. -func (p Point) Norm(length Fix32) Point { - d := p.Len() +// pNorm returns the vector p normalized to the given length, or zero if p is +// degenerate. +func pNorm(p fixed.Point26_6, length fixed.Int26_6) fixed.Point26_6 { + d := pLen(p) if d == 0 { - return Point{} + return fixed.Point26_6{} } s, t := int64(length), int64(d) x := int64(p.X) * s / t y := int64(p.Y) * s / t - return Point{Fix32(x), Fix32(y)} + return fixed.Point26_6{fixed.Int26_6(x), fixed.Int26_6(y)} } -// Rot45CW returns the vector p rotated clockwise by 45 degrees. +// pRot45CW returns the vector p rotated clockwise by 45 degrees. +// // Note that the Y-axis grows downwards, so {1, 0}.Rot45CW is {1/√2, 1/√2}. -func (p Point) Rot45CW() Point { +func pRot45CW(p fixed.Point26_6) fixed.Point26_6 { // 181/256 is approximately 1/√2, or sin(π/4). px, py := int64(p.X), int64(p.Y) qx := (+px - py) * 181 / 256 qy := (+px + py) * 181 / 256 - return Point{Fix32(qx), Fix32(qy)} + return fixed.Point26_6{fixed.Int26_6(qx), fixed.Int26_6(qy)} } -// Rot90CW returns the vector p rotated clockwise by 90 degrees. 
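All the pRot45/pRot135 helpers here use 181/256 ≈ 0.70703 as an integer stand-in for 1/√2 ≈ 0.70711. Rotating the one-pixel vector (64, 0) clockwise by 45 degrees:

    px, py := int64(64), int64(0) // one pixel along the X axis
    qx := (px - py) * 181 / 256   // 45
    qy := (px + py) * 181 / 256   // 45
    // |(45, 45)| ≈ 63.6 of the original 64; most of the shortfall comes
    // from the truncating integer division, not the 181/256 constant.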
+// pRot90CW returns the vector p rotated clockwise by 90 degrees. +// // Note that the Y-axis grows downwards, so {1, 0}.Rot90CW is {0, 1}. -func (p Point) Rot90CW() Point { - return Point{-p.Y, p.X} +func pRot90CW(p fixed.Point26_6) fixed.Point26_6 { + return fixed.Point26_6{-p.Y, p.X} } -// Rot135CW returns the vector p rotated clockwise by 135 degrees. +// pRot135CW returns the vector p rotated clockwise by 135 degrees. +// // Note that the Y-axis grows downwards, so {1, 0}.Rot135CW is {-1/√2, 1/√2}. -func (p Point) Rot135CW() Point { +func pRot135CW(p fixed.Point26_6) fixed.Point26_6 { // 181/256 is approximately 1/√2, or sin(π/4). px, py := int64(p.X), int64(p.Y) qx := (-px - py) * 181 / 256 qy := (+px - py) * 181 / 256 - return Point{Fix32(qx), Fix32(qy)} + return fixed.Point26_6{fixed.Int26_6(qx), fixed.Int26_6(qy)} } -// Rot45CCW returns the vector p rotated counter-clockwise by 45 degrees. +// pRot45CCW returns the vector p rotated counter-clockwise by 45 degrees. +// // Note that the Y-axis grows downwards, so {1, 0}.Rot45CCW is {1/√2, -1/√2}. -func (p Point) Rot45CCW() Point { +func pRot45CCW(p fixed.Point26_6) fixed.Point26_6 { // 181/256 is approximately 1/√2, or sin(π/4). px, py := int64(p.X), int64(p.Y) qx := (+px + py) * 181 / 256 qy := (-px + py) * 181 / 256 - return Point{Fix32(qx), Fix32(qy)} + return fixed.Point26_6{fixed.Int26_6(qx), fixed.Int26_6(qy)} } -// Rot90CCW returns the vector p rotated counter-clockwise by 90 degrees. +// pRot90CCW returns the vector p rotated counter-clockwise by 90 degrees. +// // Note that the Y-axis grows downwards, so {1, 0}.Rot90CCW is {0, -1}. -func (p Point) Rot90CCW() Point { - return Point{p.Y, -p.X} +func pRot90CCW(p fixed.Point26_6) fixed.Point26_6 { + return fixed.Point26_6{p.Y, -p.X} } -// Rot135CCW returns the vector p rotated counter-clockwise by 135 degrees. +// pRot135CCW returns the vector p rotated counter-clockwise by 135 degrees. +// // Note that the Y-axis grows downwards, so {1, 0}.Rot135CCW is {-1/√2, -1/√2}. -func (p Point) Rot135CCW() Point { +func pRot135CCW(p fixed.Point26_6) fixed.Point26_6 { // 181/256 is approximately 1/√2, or sin(π/4). px, py := int64(p.X), int64(p.Y) qx := (-px + py) * 181 / 256 qy := (-px - py) * 181 / 256 - return Point{Fix32(qx), Fix32(qy)} + return fixed.Point26_6{fixed.Int26_6(qx), fixed.Int26_6(qy)} } // An Adder accumulates points on a curve. type Adder interface { // Start starts a new curve at the given point. - Start(a Point) + Start(a fixed.Point26_6) // Add1 adds a linear segment to the current curve. - Add1(b Point) + Add1(b fixed.Point26_6) // Add2 adds a quadratic segment to the current curve. - Add2(b, c Point) + Add2(b, c fixed.Point26_6) // Add3 adds a cubic segment to the current curve. - Add3(b, c, d Point) + Add3(b, c, d fixed.Point26_6) } // A Path is a sequence of curves, and a curve is a start point followed by a // sequence of linear, quadratic or cubic segments. -type Path []Fix32 +type Path []fixed.Int26_6 // String returns a human-readable representation of a Path. 
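A Path is a flat []fixed.Int26_6 in which, as the String and Add methods below show, every segment is framed by its opcode on both ends (0 start, 1 line, 2 quadratic, 3 cubic); the trailing copy of the opcode is what lets addPathReversed walk the slice backwards. For example:

    var p raster.Path
    p.Start(fixed.P(0, 0)) // appends 0, 0, 0, 0
    p.Add1(fixed.P(1, 1))  // appends 1, 64, 64, 1
    // p is now [0 0 0 0 1 64 64 1]: opcode, coordinates, opcode again.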
func (p Path) String() string { @@ -186,16 +142,16 @@ func (p Path) String() string { } switch p[i] { case 0: - s += "S0" + fmt.Sprint([]Fix32(p[i+1:i+3])) + s += "S0" + fmt.Sprint([]fixed.Int26_6(p[i+1:i+3])) i += 4 case 1: - s += "A1" + fmt.Sprint([]Fix32(p[i+1:i+3])) + s += "A1" + fmt.Sprint([]fixed.Int26_6(p[i+1:i+3])) i += 4 case 2: - s += "A2" + fmt.Sprint([]Fix32(p[i+1:i+5])) + s += "A2" + fmt.Sprint([]fixed.Int26_6(p[i+1:i+5])) i += 6 case 3: - s += "A3" + fmt.Sprint([]Fix32(p[i+1:i+7])) + s += "A3" + fmt.Sprint([]fixed.Int26_6(p[i+1:i+7])) i += 8 default: panic("freetype/raster: bad path") @@ -210,22 +166,22 @@ func (p *Path) Clear() { } // Start starts a new curve at the given point. -func (p *Path) Start(a Point) { +func (p *Path) Start(a fixed.Point26_6) { *p = append(*p, 0, a.X, a.Y, 0) } // Add1 adds a linear segment to the current curve. -func (p *Path) Add1(b Point) { +func (p *Path) Add1(b fixed.Point26_6) { *p = append(*p, 1, b.X, b.Y, 1) } // Add2 adds a quadratic segment to the current curve. -func (p *Path) Add2(b, c Point) { +func (p *Path) Add2(b, c fixed.Point26_6) { *p = append(*p, 2, b.X, b.Y, c.X, c.Y, 2) } // Add3 adds a cubic segment to the current curve. -func (p *Path) Add3(b, c, d Point) { +func (p *Path) Add3(b, c, d fixed.Point26_6) { *p = append(*p, 3, b.X, b.Y, c.X, c.Y, d.X, d.Y, 3) } @@ -235,18 +191,18 @@ func (p *Path) AddPath(q Path) { } // AddStroke adds a stroked Path. -func (p *Path) AddStroke(q Path, width Fix32, cr Capper, jr Joiner) { +func (p *Path) AddStroke(q Path, width fixed.Int26_6, cr Capper, jr Joiner) { Stroke(p, q, width, cr, jr) } // firstPoint returns the first point in a non-empty Path. -func (p Path) firstPoint() Point { - return Point{p[1], p[2]} +func (p Path) firstPoint() fixed.Point26_6 { + return fixed.Point26_6{p[1], p[2]} } // lastPoint returns the last point in a non-empty Path. -func (p Path) lastPoint() Point { - return Point{p[len(p)-3], p[len(p)-2]} +func (p Path) lastPoint() fixed.Point26_6 { + return fixed.Point26_6{p[len(p)-3], p[len(p)-2]} } // addPathReversed adds q reversed to p. @@ -266,13 +222,22 @@ func addPathReversed(p Adder, q Path) { return case 1: i -= 4 - p.Add1(Point{q[i-2], q[i-1]}) + p.Add1( + fixed.Point26_6{q[i-2], q[i-1]}, + ) case 2: i -= 6 - p.Add2(Point{q[i+2], q[i+3]}, Point{q[i-2], q[i-1]}) + p.Add2( + fixed.Point26_6{q[i+2], q[i+3]}, + fixed.Point26_6{q[i-2], q[i-1]}, + ) case 3: i -= 8 - p.Add3(Point{q[i+4], q[i+5]}, Point{q[i+2], q[i+3]}, Point{q[i-2], q[i-1]}) + p.Add3( + fixed.Point26_6{q[i+4], q[i+5]}, + fixed.Point26_6{q[i+2], q[i+3]}, + fixed.Point26_6{q[i-2], q[i-1]}, + ) default: panic("freetype/raster: bad path") } diff --git a/_third_party/code.google.com/p/freetype-go/freetype/raster/paint.go b/_third_party/github.com/golang/freetype/raster/paint.go similarity index 80% rename from _third_party/code.google.com/p/freetype-go/freetype/raster/paint.go rename to _third_party/github.com/golang/freetype/raster/paint.go index 13cccc1926..185d36a8e5 100644 --- a/_third_party/code.google.com/p/freetype-go/freetype/raster/paint.go +++ b/_third_party/github.com/golang/freetype/raster/paint.go @@ -13,17 +13,17 @@ import ( ) // A Span is a horizontal segment of pixels with constant alpha. X0 is an -// inclusive bound and X1 is exclusive, the same as for slices. A fully -// opaque Span has A == 1<<32 - 1. +// inclusive bound and X1 is exclusive, the same as for slices. A fully opaque +// Span has Alpha == 0xffff. 
type Span struct { Y, X0, X1 int - A uint32 + Alpha uint32 } // A Painter knows how to paint a batch of Spans. Rasterization may involve -// Painting multiple batches, and done will be true for the final batch. -// The Spans' Y values are monotonically increasing during a rasterization. -// Paint may use all of ss as scratch space during the call. +// Painting multiple batches, and done will be true for the final batch. The +// Spans' Y values are monotonically increasing during a rasterization. Paint +// may use all of ss as scratch space during the call. type Painter interface { Paint(ss []Span, done bool) } @@ -34,13 +34,13 @@ type PainterFunc func(ss []Span, done bool) // Paint just delegates the call to f. func (f PainterFunc) Paint(ss []Span, done bool) { f(ss, done) } -// An AlphaOverPainter is a Painter that paints Spans onto an image.Alpha -// using the Over Porter-Duff composition operator. +// An AlphaOverPainter is a Painter that paints Spans onto a *image.Alpha using +// the Over Porter-Duff composition operator. type AlphaOverPainter struct { Image *image.Alpha } -// Paint satisfies the Painter interface by painting ss onto an image.Alpha. +// Paint satisfies the Painter interface. func (r AlphaOverPainter) Paint(ss []Span, done bool) { b := r.Image.Bounds() for _, s := range ss { @@ -61,7 +61,7 @@ func (r AlphaOverPainter) Paint(ss []Span, done bool) { } base := (s.Y-r.Image.Rect.Min.Y)*r.Image.Stride - r.Image.Rect.Min.X p := r.Image.Pix[base+s.X0 : base+s.X1] - a := int(s.A >> 24) + a := int(s.Alpha >> 8) for i, c := range p { v := int(c) p[i] = uint8((v*255 + (255-v)*a) / 255) @@ -74,13 +74,13 @@ func NewAlphaOverPainter(m *image.Alpha) AlphaOverPainter { return AlphaOverPainter{m} } -// An AlphaSrcPainter is a Painter that paints Spans onto an image.Alpha -// using the Src Porter-Duff composition operator. +// An AlphaSrcPainter is a Painter that paints Spans onto a *image.Alpha using +// the Src Porter-Duff composition operator. type AlphaSrcPainter struct { Image *image.Alpha } -// Paint satisfies the Painter interface by painting ss onto an image.Alpha. +// Paint satisfies the Painter interface. func (r AlphaSrcPainter) Paint(ss []Span, done bool) { b := r.Image.Bounds() for _, s := range ss { @@ -101,7 +101,7 @@ func (r AlphaSrcPainter) Paint(ss []Span, done bool) { } base := (s.Y-r.Image.Rect.Min.Y)*r.Image.Stride - r.Image.Rect.Min.X p := r.Image.Pix[base+s.X0 : base+s.X1] - color := uint8(s.A >> 24) + color := uint8(s.Alpha >> 8) for i := range p { p[i] = color } @@ -113,16 +113,17 @@ func NewAlphaSrcPainter(m *image.Alpha) AlphaSrcPainter { return AlphaSrcPainter{m} } +// An RGBAPainter is a Painter that paints Spans onto a *image.RGBA. type RGBAPainter struct { - // The image to compose onto. + // Image is the image to compose onto. Image *image.RGBA - // The Porter-Duff composition operator. + // Op is the Porter-Duff composition operator. Op draw.Op - // The 16-bit color to paint the spans. + // cr, cg, cb and ca are the 16-bit color to paint the spans. cr, cg, cb, ca uint32 } -// Paint satisfies the Painter interface by painting ss onto an image.RGBA. +// Paint satisfies the Painter interface. func (r *RGBAPainter) Paint(ss []Span, done bool) { b := r.Image.Bounds() for _, s := range ss { @@ -141,8 +142,8 @@ func (r *RGBAPainter) Paint(ss []Span, done bool) { if s.X0 >= s.X1 { continue } - // This code is similar to drawGlyphOver in $GOROOT/src/pkg/image/draw/draw.go. - ma := s.A >> 16 + // This code mimics drawGlyphOver in $GOROOT/src/image/draw/draw.go. 
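+		// Restated for clarity (not upstream text): for a fully opaque source
+		// color, Over reduces per channel to dst' = (dst*(m-ma) + src*ma) / m,
+		// with m == 0xffff and ma the 16-bit mask alpha below.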
+ ma := s.Alpha const m = 1<<16 - 1 i0 := (s.Y-r.Image.Rect.Min.Y)*r.Image.Stride + (s.X0-r.Image.Rect.Min.X)*4 i1 := i0 + (s.X1-s.X0)*4 @@ -192,7 +193,7 @@ func (m *MonochromePainter) Paint(ss []Span, done bool) { // We compact the ss slice, discarding any Spans whose alpha quantizes to zero. j := 0 for _, s := range ss { - if s.A >= 1<<31 { + if s.Alpha >= 0x8000 { if m.y == s.Y && m.x1 == s.X0 { m.x1 = s.X1 } else { @@ -237,33 +238,28 @@ func NewMonochromePainter(p Painter) *MonochromePainter { // A GammaCorrectionPainter wraps another Painter, performing gamma-correction // on each Span's alpha value. type GammaCorrectionPainter struct { - // The wrapped Painter. + // Painter is the wrapped Painter. Painter Painter - // Precomputed alpha values for linear interpolation, with fully opaque == 1<<16-1. + // a is the precomputed alpha values for linear interpolation, with fully + // opaque == 0xffff. a [256]uint16 - // Whether gamma correction is a no-op. + // gammaIsOne is whether gamma correction is a no-op. gammaIsOne bool } -// Paint delegates to the wrapped Painter after performing gamma-correction -// on each Span. +// Paint delegates to the wrapped Painter after performing gamma-correction on +// each Span. func (g *GammaCorrectionPainter) Paint(ss []Span, done bool) { if !g.gammaIsOne { - const ( - M = 0x1010101 // 255*M == 1<<32-1 - N = 0x8080 // N = M>>9, and N < 1<<16-1 - ) + const n = 0x101 for i, s := range ss { - if s.A == 0 || s.A == 1<<32-1 { + if s.Alpha == 0 || s.Alpha == 0xffff { continue } - p, q := s.A/M, (s.A%M)>>9 + p, q := s.Alpha/n, s.Alpha%n // The resultant alpha is a linear interpolation of g.a[p] and g.a[p+1]. - a := uint32(g.a[p])*(N-q) + uint32(g.a[p+1])*q - a = (a + N/2) / N - // Convert the alpha from 16-bit (which is g.a's range) to 32-bit. - a |= a << 16 - ss[i].A = a + a := uint32(g.a[p])*(n-q) + uint32(g.a[p+1])*q + ss[i].Alpha = (a + n/2) / n } } g.Painter.Paint(ss, done) @@ -271,11 +267,10 @@ func (g *GammaCorrectionPainter) Paint(ss []Span, done bool) { // SetGamma sets the gamma value. func (g *GammaCorrectionPainter) SetGamma(gamma float64) { - if gamma == 1.0 { - g.gammaIsOne = true + g.gammaIsOne = gamma == 1 + if g.gammaIsOne { return } - g.gammaIsOne = false for i := 0; i < 256; i++ { a := float64(i) / 0xff a = math.Pow(a, gamma) diff --git a/_third_party/code.google.com/p/freetype-go/freetype/raster/raster.go b/_third_party/github.com/golang/freetype/raster/raster.go similarity index 71% rename from _third_party/code.google.com/p/freetype-go/freetype/raster/raster.go rename to _third_party/github.com/golang/freetype/raster/raster.go index 45af7eaa20..3503b650eb 100644 --- a/_third_party/code.google.com/p/freetype-go/freetype/raster/raster.go +++ b/_third_party/github.com/golang/freetype/raster/raster.go @@ -3,20 +3,22 @@ // FreeType License or the GNU General Public License version 2 (or // any later version), both of which can be found in the LICENSE file. -// The raster package provides an anti-aliasing 2-D rasterizer. +// Package raster provides an anti-aliasing 2-D rasterizer. // -// It is part of the larger Freetype-Go suite of font-related packages, -// but the raster package is not specific to font rasterization, and can -// be used standalone without any other Freetype-Go package. +// It is part of the larger Freetype suite of font-related packages, but the +// raster package is not specific to font rasterization, and can be used +// standalone without any other Freetype package. 
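+//
+// A minimal use might look like the following sketch (illustrative; it
+// assumes the package's NewRasterizer constructor and the painters declared
+// in paint.go):
+//
+//	r := raster.NewRasterizer(32, 32)
+//	r.UseNonZeroWinding = true
+//	r.Start(fixed.P(4, 4))
+//	r.Add1(fixed.P(28, 16))
+//	r.Add1(fixed.P(4, 28))
+//	r.Add1(fixed.P(4, 4))
+//	mask := image.NewAlpha(image.Rect(0, 0, 32, 32))
+//	r.Rasterize(raster.NewAlphaSrcPainter(mask))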
// -// Rasterization is done by the same area/coverage accumulation algorithm -// as the Freetype "smooth" module, and the Anti-Grain Geometry library. -// A description of the area/coverage algorithm is at +// Rasterization is done by the same area/coverage accumulation algorithm as +// the Freetype "smooth" module, and the Anti-Grain Geometry library. A +// description of the area/coverage algorithm is at // http://projects.tuxee.net/cl-vectors/section-the-cl-aa-algorithm -package raster +package raster // import "bosun.org/_third_party/github.com/golang/freetype/raster" import ( "strconv" + + "golang.org/x/image/math/fixed" ) // A cell is part of a linked list (for a given yi co-ordinate) of accumulated @@ -41,7 +43,7 @@ type Rasterizer struct { splitScale2, splitScale3 int // The current pen position. - a Point + a fixed.Point26_6 // The current cell and its area/coverage being accumulated. xi, yi int area, cover int @@ -114,14 +116,14 @@ func (r *Rasterizer) setCell(xi, yi int) { } // scan accumulates area/coverage for the yi'th scanline, going from -// x0 to x1 in the horizontal direction (in 24.8 fixed point co-ordinates) +// x0 to x1 in the horizontal direction (in 26.6 fixed point co-ordinates) // and from y0f to y1f fractional vertical units within that scanline. -func (r *Rasterizer) scan(yi int, x0, y0f, x1, y1f Fix32) { - // Break the 24.8 fixed point X co-ordinates into integral and fractional parts. - x0i := int(x0) / 256 - x0f := x0 - Fix32(256*x0i) - x1i := int(x1) / 256 - x1f := x1 - Fix32(256*x1i) +func (r *Rasterizer) scan(yi int, x0, y0f, x1, y1f fixed.Int26_6) { + // Break the 26.6 fixed point X co-ordinates into integral and fractional parts. + x0i := int(x0) / 64 + x0f := x0 - fixed.Int26_6(64*x0i) + x1i := int(x1) / 64 + x1f := x1 - fixed.Int26_6(64*x1i) // A perfectly horizontal scan. if y0f == y1f { @@ -137,17 +139,17 @@ func (r *Rasterizer) scan(yi int, x0, y0f, x1, y1f Fix32) { } // There are at least two cells. Apart from the first and last cells, // all intermediate cells go through the full width of the cell, - // or 256 units in 24.8 fixed point format. + // or 64 units in 26.6 fixed point format. var ( - p, q, edge0, edge1 Fix32 + p, q, edge0, edge1 fixed.Int26_6 xiDelta int ) if dx > 0 { - p, q = (256-x0f)*dy, dx - edge0, edge1, xiDelta = 0, 256, 1 + p, q = (64-x0f)*dy, dx + edge0, edge1, xiDelta = 0, 64, 1 } else { p, q = x0f*dy, -dx - edge0, edge1, xiDelta = 256, 0, -1 + edge0, edge1, xiDelta = 64, 0, -1 } yDelta, yRem := p/q, p%q if yRem < 0 { @@ -162,7 +164,7 @@ func (r *Rasterizer) scan(yi int, x0, y0f, x1, y1f Fix32) { r.setCell(xi, yi) if xi != x1i { // Do all the intermediate cells. - p = 256 * (y1f - y + yDelta) + p = 64 * (y1f - y + yDelta) fullDelta, fullRem := p/q, p%q if fullRem < 0 { fullDelta -= 1 @@ -176,7 +178,7 @@ func (r *Rasterizer) scan(yi int, x0, y0f, x1, y1f Fix32) { yDelta += 1 yRem -= q } - r.area += int(256 * yDelta) + r.area += int(64 * yDelta) r.cover += int(yDelta) xi, y = xi+xiDelta, y+yDelta r.setCell(xi, yi) @@ -189,21 +191,22 @@ func (r *Rasterizer) scan(yi int, x0, y0f, x1, y1f Fix32) { } // Start starts a new curve at the given point. -func (r *Rasterizer) Start(a Point) { - r.setCell(int(a.X/256), int(a.Y/256)) +func (r *Rasterizer) Start(a fixed.Point26_6) { + r.setCell(int(a.X/64), int(a.Y/64)) r.a = a } // Add1 adds a linear segment to the current curve. 
-func (r *Rasterizer) Add1(b Point) { +func (r *Rasterizer) Add1(b fixed.Point26_6) { x0, y0 := r.a.X, r.a.Y x1, y1 := b.X, b.Y dx, dy := x1-x0, y1-y0 - // Break the 24.8 fixed point Y co-ordinates into integral and fractional parts. - y0i := int(y0) / 256 - y0f := y0 - Fix32(256*y0i) - y1i := int(y1) / 256 - y1f := y1 - Fix32(256*y1i) + // Break the 26.6 fixed point Y co-ordinates into integral and fractional + // parts. + y0i := int(y0) / 64 + y0f := y0 - fixed.Int26_6(64*y0i) + y1i := int(y1) / 64 + y1f := y1 - fixed.Int26_6(64*y1i) if y0i == y1i { // There is only one scanline. @@ -213,16 +216,16 @@ func (r *Rasterizer) Add1(b Point) { // This is a vertical line segment. We avoid calling r.scan and instead // manipulate r.area and r.cover directly. var ( - edge0, edge1 Fix32 + edge0, edge1 fixed.Int26_6 yiDelta int ) if dy > 0 { - edge0, edge1, yiDelta = 0, 256, 1 + edge0, edge1, yiDelta = 0, 64, 1 } else { - edge0, edge1, yiDelta = 256, 0, -1 + edge0, edge1, yiDelta = 64, 0, -1 } - x0i, yi := int(x0)/256, y0i - x0fTimes2 := (int(x0) - (256 * x0i)) * 2 + x0i, yi := int(x0)/64, y0i + x0fTimes2 := (int(x0) - (64 * x0i)) * 2 // Do the first pixel. dcover := int(edge1 - y0f) darea := int(x0fTimes2 * dcover) @@ -246,19 +249,19 @@ func (r *Rasterizer) Add1(b Point) { r.cover += dcover } else { - // There are at least two scanlines. Apart from the first and last scanlines, - // all intermediate scanlines go through the full height of the row, or 256 - // units in 24.8 fixed point format. + // There are at least two scanlines. Apart from the first and last + // scanlines, all intermediate scanlines go through the full height of + // the row, or 64 units in 26.6 fixed point format. var ( - p, q, edge0, edge1 Fix32 + p, q, edge0, edge1 fixed.Int26_6 yiDelta int ) if dy > 0 { - p, q = (256-y0f)*dx, dy - edge0, edge1, yiDelta = 0, 256, 1 + p, q = (64-y0f)*dx, dy + edge0, edge1, yiDelta = 0, 64, 1 } else { p, q = y0f*dx, -dy - edge0, edge1, yiDelta = 256, 0, -1 + edge0, edge1, yiDelta = 64, 0, -1 } xDelta, xRem := p/q, p%q if xRem < 0 { @@ -269,10 +272,10 @@ func (r *Rasterizer) Add1(b Point) { x, yi := x0, y0i r.scan(yi, x, y0f, x+xDelta, edge1) x, yi = x+xDelta, yi+yiDelta - r.setCell(int(x)/256, yi) + r.setCell(int(x)/64, yi) if yi != y1i { // Do all the intermediate scanlines. - p = 256 * dx + p = 64 * dx fullDelta, fullRem := p/q, p%q if fullRem < 0 { fullDelta -= 1 @@ -288,7 +291,7 @@ func (r *Rasterizer) Add1(b Point) { } r.scan(yi, x, edge0, x+xDelta, edge1) x, yi = x+xDelta, yi+yiDelta - r.setCell(int(x)/256, yi) + r.setCell(int(x)/64, yi) } } // Do the last scanline. @@ -299,23 +302,25 @@ func (r *Rasterizer) Add1(b Point) { } // Add2 adds a quadratic segment to the current curve. -func (r *Rasterizer) Add2(b, c Point) { - // Calculate nSplit (the number of recursive decompositions) based on how `curvy' it is. - // Specifically, how much the middle point b deviates from (a+c)/2. - dev := maxAbs(r.a.X-2*b.X+c.X, r.a.Y-2*b.Y+c.Y) / Fix32(r.splitScale2) +func (r *Rasterizer) Add2(b, c fixed.Point26_6) { + // Calculate nSplit (the number of recursive decompositions) based on how + // 'curvy' it is. Specifically, how much the middle point b deviates from + // (a+c)/2. + dev := maxAbs(r.a.X-2*b.X+c.X, r.a.Y-2*b.Y+c.Y) / fixed.Int26_6(r.splitScale2) nsplit := 0 for dev > 0 { dev /= 4 nsplit++ } - // dev is 32-bit, and nsplit++ every time we shift off 2 bits, so maxNsplit is 16. + // dev is 32-bit, and nsplit++ every time we shift off 2 bits, so maxNsplit + // is 16. 
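+	// As a worked example with illustrative numbers: a post-division dev of
+	// 100 shrinks 100 -> 25 -> 6 -> 1 -> 0, so nsplit == 4 and the curve is
+	// decomposed into 2^4 == 16 level-0 quadratics, each of which is then
+	// approximated by two linear segments.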
const maxNsplit = 16 if nsplit > maxNsplit { panic("freetype/raster: Add2 nsplit too large: " + strconv.Itoa(nsplit)) } // Recursively decompose the curve nSplit levels deep. var ( - pStack [2*maxNsplit + 3]Point + pStack [2*maxNsplit + 3]fixed.Point26_6 sStack [maxNsplit + 1]int i int ) @@ -327,8 +332,9 @@ func (r *Rasterizer) Add2(b, c Point) { s := sStack[i] p := pStack[2*i:] if s > 0 { - // Split the quadratic curve p[:3] into an equivalent set of two shorter curves: - // p[:3] and p[2:5]. The new p[4] is the old p[2], and p[0] is unchanged. + // Split the quadratic curve p[:3] into an equivalent set of two + // shorter curves: p[:3] and p[2:5]. The new p[4] is the old p[2], + // and p[0] is unchanged. mx := p[1].X p[4].X = p[2].X p[3].X = (p[4].X + mx) / 2 @@ -344,10 +350,11 @@ func (r *Rasterizer) Add2(b, c Point) { sStack[i+1] = s - 1 i++ } else { - // Replace the level-0 quadratic with a two-linear-piece approximation. + // Replace the level-0 quadratic with a two-linear-piece + // approximation. midx := (p[0].X + 2*p[1].X + p[2].X) / 4 midy := (p[0].Y + 2*p[1].Y + p[2].Y) / 4 - r.Add1(Point{midx, midy}) + r.Add1(fixed.Point26_6{midx, midy}) r.Add1(p[0]) i-- } @@ -355,24 +362,26 @@ func (r *Rasterizer) Add2(b, c Point) { } // Add3 adds a cubic segment to the current curve. -func (r *Rasterizer) Add3(b, c, d Point) { - // Calculate nSplit (the number of recursive decompositions) based on how `curvy' it is. - dev2 := maxAbs(r.a.X-3*(b.X+c.X)+d.X, r.a.Y-3*(b.Y+c.Y)+d.Y) / Fix32(r.splitScale2) - dev3 := maxAbs(r.a.X-2*b.X+d.X, r.a.Y-2*b.Y+d.Y) / Fix32(r.splitScale3) +func (r *Rasterizer) Add3(b, c, d fixed.Point26_6) { + // Calculate nSplit (the number of recursive decompositions) based on how + // 'curvy' it is. + dev2 := maxAbs(r.a.X-3*(b.X+c.X)+d.X, r.a.Y-3*(b.Y+c.Y)+d.Y) / fixed.Int26_6(r.splitScale2) + dev3 := maxAbs(r.a.X-2*b.X+d.X, r.a.Y-2*b.Y+d.Y) / fixed.Int26_6(r.splitScale3) nsplit := 0 for dev2 > 0 || dev3 > 0 { dev2 /= 8 dev3 /= 4 nsplit++ } - // devN is 32-bit, and nsplit++ every time we shift off 2 bits, so maxNsplit is 16. + // devN is 32-bit, and nsplit++ every time we shift off 2 bits, so + // maxNsplit is 16. const maxNsplit = 16 if nsplit > maxNsplit { panic("freetype/raster: Add3 nsplit too large: " + strconv.Itoa(nsplit)) } // Recursively decompose the curve nSplit levels deep. var ( - pStack [3*maxNsplit + 4]Point + pStack [3*maxNsplit + 4]fixed.Point26_6 sStack [maxNsplit + 1]int i int ) @@ -385,8 +394,9 @@ func (r *Rasterizer) Add3(b, c, d Point) { s := sStack[i] p := pStack[3*i:] if s > 0 { - // Split the cubic curve p[:4] into an equivalent set of two shorter curves: - // p[:4] and p[3:7]. The new p[6] is the old p[3], and p[0] is unchanged. + // Split the cubic curve p[:4] into an equivalent set of two + // shorter curves: p[:4] and p[3:7]. The new p[6] is the old p[3], + // and p[0] is unchanged. m01x := (p[0].X + p[1].X) / 2 m12x := (p[1].X + p[2].X) / 2 m23x := (p[2].X + p[3].X) / 2 @@ -413,7 +423,7 @@ func (r *Rasterizer) Add3(b, c, d Point) { // Replace the level-0 cubic with a two-linear-piece approximation. 
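			// The midpoint below is the cubic Bézier evaluated at t = 1/2:
			// B(1/2) = (p0 + 3*p1 + 3*p2 + p3) / 8, by de Casteljau's algorithm.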
midx := (p[0].X + 3*(p[1].X+p[2].X) + p[3].X) / 8 midy := (p[0].Y + 3*(p[1].Y+p[2].Y) + p[3].Y) / 8 - r.Add1(Point{midx, midy}) + r.Add1(fixed.Point26_6{midx, midy}) r.Add1(p[0]) i-- } @@ -425,16 +435,27 @@ func (r *Rasterizer) AddPath(p Path) { for i := 0; i < len(p); { switch p[i] { case 0: - r.Start(Point{p[i+1], p[i+2]}) + r.Start( + fixed.Point26_6{p[i+1], p[i+2]}, + ) i += 4 case 1: - r.Add1(Point{p[i+1], p[i+2]}) + r.Add1( + fixed.Point26_6{p[i+1], p[i+2]}, + ) i += 4 case 2: - r.Add2(Point{p[i+1], p[i+2]}, Point{p[i+3], p[i+4]}) + r.Add2( + fixed.Point26_6{p[i+1], p[i+2]}, + fixed.Point26_6{p[i+3], p[i+4]}, + ) i += 6 case 3: - r.Add3(Point{p[i+1], p[i+2]}, Point{p[i+3], p[i+4]}, Point{p[i+5], p[i+6]}) + r.Add3( + fixed.Point26_6{p[i+1], p[i+2]}, + fixed.Point26_6{p[i+3], p[i+4]}, + fixed.Point26_6{p[i+5], p[i+6]}, + ) i += 8 default: panic("freetype/raster: bad path") @@ -443,43 +464,45 @@ func (r *Rasterizer) AddPath(p Path) { } // AddStroke adds a stroked Path. -func (r *Rasterizer) AddStroke(q Path, width Fix32, cr Capper, jr Joiner) { +func (r *Rasterizer) AddStroke(q Path, width fixed.Int26_6, cr Capper, jr Joiner) { Stroke(r, q, width, cr, jr) } -// Converts an area value to a uint32 alpha value. A completely filled pixel -// corresponds to an area of 256*256*2, and an alpha of 1<<32-1. The +// areaToAlpha converts an area value to a uint32 alpha value. A completely +// filled pixel corresponds to an area of 64*64*2, and an alpha of 0xffff. The // conversion of area values greater than this depends on the winding rule: // even-odd or non-zero. func (r *Rasterizer) areaToAlpha(area int) uint32 { - // The C Freetype implementation (version 2.3.12) does "alpha := area>>1" without - // the +1. Round-to-nearest gives a more symmetric result than round-down. - // The C implementation also returns 8-bit alpha, not 32-bit alpha. + // The C Freetype implementation (version 2.3.12) does "alpha := area>>1" + // without the +1. Round-to-nearest gives a more symmetric result than + // round-down. The C implementation also returns 8-bit alpha, not 16-bit + // alpha. a := (area + 1) >> 1 if a < 0 { a = -a } alpha := uint32(a) if r.UseNonZeroWinding { - if alpha > 0xffff { - alpha = 0xffff + if alpha > 0x0fff { + alpha = 0x0fff } } else { - alpha &= 0x1ffff - if alpha > 0x10000 { - alpha = 0x20000 - alpha - } else if alpha == 0x10000 { - alpha = 0x0ffff + alpha &= 0x1fff + if alpha > 0x1000 { + alpha = 0x2000 - alpha + } else if alpha == 0x1000 { + alpha = 0x0fff } } - alpha |= alpha << 16 - return alpha + // alpha is now in the range [0x0000, 0x0fff]. Convert that 12-bit alpha to + // 16-bit alpha. + return alpha<<4 | alpha>>8 } -// Rasterize converts r's accumulated curves into Spans for p. The Spans -// passed to p are non-overlapping, and sorted by Y and then X. They all -// have non-zero width (and 0 <= X0 < X1 <= r.width) and non-zero A, except -// for the final Span, which has Y, X0, X1 and A all equal to zero. +// Rasterize converts r's accumulated curves into Spans for p. The Spans passed +// to p are non-overlapping, and sorted by Y and then X. They all have non-zero +// width (and 0 <= X0 < X1 <= r.width) and non-zero A, except for the final +// Span, which has Y, X0, X1 and A all equal to zero. 
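// As a sanity check of areaToAlpha above: a fully covered cell has area
// 64*64*2 == 0x2000, so a == 0x1000; both winding rules map that to 0x0fff,
// and the 12-to-16-bit widening alpha<<4 | alpha>>8 yields 0xffff, i.e. fully
// opaque.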
func (r *Rasterizer) Rasterize(p Painter) { r.saveCell() s := 0 @@ -487,7 +510,7 @@ func (r *Rasterizer) Rasterize(p Painter) { xi, cover := 0, 0 for c := r.cellIndex[yi]; c != -1; c = r.cell[c].next { if cover != 0 && r.cell[c].xi > xi { - alpha := r.areaToAlpha(cover * 256 * 2) + alpha := r.areaToAlpha(cover * 64 * 2) if alpha != 0 { xi0, xi1 := xi, r.cell[c].xi if xi0 < 0 { @@ -503,7 +526,7 @@ func (r *Rasterizer) Rasterize(p Painter) { } } cover += r.cell[c].cover - alpha := r.areaToAlpha(cover*256*2 - r.cell[c].area) + alpha := r.areaToAlpha(cover*64*2 - r.cell[c].area) xi = r.cell[c].xi + 1 if alpha != 0 { xi0, xi1 := r.cell[c].xi, xi @@ -529,7 +552,7 @@ func (r *Rasterizer) Rasterize(p Painter) { // Clear cancels any previous calls to r.Start or r.AddXxx. func (r *Rasterizer) Clear() { - r.a = Point{} + r.a = fixed.Point26_6{} r.xi = 0 r.yi = 0 r.area = 0 @@ -541,7 +564,7 @@ func (r *Rasterizer) Clear() { } // SetBounds sets the maximum width and height of the rasterized image and -// calls Clear. The width and height are in pixels, not Fix32 units. +// calls Clear. The width and height are in pixels, not fixed.Int26_6 units. func (r *Rasterizer) SetBounds(width, height int) { if width < 0 { width = 0 @@ -549,10 +572,9 @@ func (r *Rasterizer) SetBounds(width, height int) { if height < 0 { height = 0 } - // Use the same ssN heuristic as the C Freetype implementation. - // The C implementation uses the values 32, 16, but those are in - // 26.6 fixed point units, and we use 24.8 fixed point everywhere. - ss2, ss3 := 128, 64 + // Use the same ssN heuristic as the C Freetype (version 2.4.0) + // implementation. + ss2, ss3 := 32, 16 if width > 24 || height > 24 { ss2, ss3 = 2*ss2, 2*ss3 if width > 120 || height > 120 { diff --git a/_third_party/code.google.com/p/freetype-go/freetype/raster/stroke.go b/_third_party/github.com/golang/freetype/raster/stroke.go similarity index 70% rename from _third_party/code.google.com/p/freetype-go/freetype/raster/stroke.go rename to _third_party/github.com/golang/freetype/raster/stroke.go index d49b1cee9d..8d43797573 100644 --- a/_third_party/code.google.com/p/freetype-go/freetype/raster/stroke.go +++ b/_third_party/github.com/golang/freetype/raster/stroke.go @@ -5,21 +5,25 @@ package raster +import ( + "golang.org/x/image/math/fixed" +) + // Two points are considered practically equal if the square of the distance -// between them is less than one quarter (i.e. 16384 / 65536 in Fix64). -const epsilon = 16384 +// between them is less than one quarter (i.e. 1024 / 4096). +const epsilon = fixed.Int52_12(1024) // A Capper signifies how to begin or end a stroked path. type Capper interface { // Cap adds a cap to p given a pivot point and the normal vector of a // terminal segment. The normal's length is half of the stroke width. - Cap(p Adder, halfWidth Fix32, pivot, n1 Point) + Cap(p Adder, halfWidth fixed.Int26_6, pivot, n1 fixed.Point26_6) } // The CapperFunc type adapts an ordinary function to be a Capper. -type CapperFunc func(Adder, Fix32, Point, Point) +type CapperFunc func(Adder, fixed.Int26_6, fixed.Point26_6, fixed.Point26_6) -func (f CapperFunc) Cap(p Adder, halfWidth Fix32, pivot, n1 Point) { +func (f CapperFunc) Cap(p Adder, halfWidth fixed.Int26_6, pivot, n1 fixed.Point26_6) { f(p, halfWidth, pivot, n1) } @@ -28,24 +32,24 @@ type Joiner interface { // Join adds a join to the two sides of a stroked path given a pivot // point and the normal vectors of the trailing and leading segments. 
// Both normals have length equal to half of the stroke width. - Join(lhs, rhs Adder, halfWidth Fix32, pivot, n0, n1 Point) + Join(lhs, rhs Adder, halfWidth fixed.Int26_6, pivot, n0, n1 fixed.Point26_6) } // The JoinerFunc type adapts an ordinary function to be a Joiner. -type JoinerFunc func(lhs, rhs Adder, halfWidth Fix32, pivot, n0, n1 Point) +type JoinerFunc func(lhs, rhs Adder, halfWidth fixed.Int26_6, pivot, n0, n1 fixed.Point26_6) -func (f JoinerFunc) Join(lhs, rhs Adder, halfWidth Fix32, pivot, n0, n1 Point) { +func (f JoinerFunc) Join(lhs, rhs Adder, halfWidth fixed.Int26_6, pivot, n0, n1 fixed.Point26_6) { f(lhs, rhs, halfWidth, pivot, n0, n1) } // RoundCapper adds round caps to a stroked path. var RoundCapper Capper = CapperFunc(roundCapper) -func roundCapper(p Adder, halfWidth Fix32, pivot, n1 Point) { +func roundCapper(p Adder, halfWidth fixed.Int26_6, pivot, n1 fixed.Point26_6) { // The cubic Bézier approximation to a circle involves the magic number // (√2 - 1) * 4/3, which is approximately 141/256. const k = 141 - e := n1.Rot90CCW() + e := pRot90CCW(n1) side := pivot.Add(e) start, end := pivot.Sub(n1), pivot.Add(n1) d, e := n1.Mul(k), e.Mul(k) @@ -56,15 +60,15 @@ func roundCapper(p Adder, halfWidth Fix32, pivot, n1 Point) { // ButtCapper adds butt caps to a stroked path. var ButtCapper Capper = CapperFunc(buttCapper) -func buttCapper(p Adder, halfWidth Fix32, pivot, n1 Point) { +func buttCapper(p Adder, halfWidth fixed.Int26_6, pivot, n1 fixed.Point26_6) { p.Add1(pivot.Add(n1)) } // SquareCapper adds square caps to a stroked path. var SquareCapper Capper = CapperFunc(squareCapper) -func squareCapper(p Adder, halfWidth Fix32, pivot, n1 Point) { - e := n1.Rot90CCW() +func squareCapper(p Adder, halfWidth fixed.Int26_6, pivot, n1 fixed.Point26_6) { + e := pRot90CCW(n1) side := pivot.Add(e) p.Add1(side.Sub(n1)) p.Add1(side.Add(n1)) @@ -74,50 +78,51 @@ // RoundJoiner adds round joins to a stroked path. var RoundJoiner Joiner = JoinerFunc(roundJoiner) -func roundJoiner(lhs, rhs Adder, haflWidth Fix32, pivot, n0, n1 Point) { - dot := n0.Rot90CW().Dot(n1) +func roundJoiner(lhs, rhs Adder, halfWidth fixed.Int26_6, pivot, n0, n1 fixed.Point26_6) { + dot := pDot(pRot90CW(n0), n1) if dot >= 0 { addArc(lhs, pivot, n0, n1) rhs.Add1(pivot.Sub(n1)) } else { lhs.Add1(pivot.Add(n1)) - addArc(rhs, pivot, n0.Neg(), n1.Neg()) + addArc(rhs, pivot, pNeg(n0), pNeg(n1)) } } // BevelJoiner adds bevel joins to a stroked path. var BevelJoiner Joiner = JoinerFunc(bevelJoiner) -func bevelJoiner(lhs, rhs Adder, haflWidth Fix32, pivot, n0, n1 Point) { +func bevelJoiner(lhs, rhs Adder, halfWidth fixed.Int26_6, pivot, n0, n1 fixed.Point26_6) { lhs.Add1(pivot.Add(n1)) rhs.Add1(pivot.Sub(n1)) } // addArc adds a circular arc from pivot+n0 to pivot+n1 to p. The shorter of -// the two possible arcs is taken, i.e. the one spanning <= 180 degrees. -// The two vectors n0 and n1 must be of equal length. -func addArc(p Adder, pivot, n0, n1 Point) { +// the two possible arcs is taken, i.e. the one spanning <= 180 degrees. The +// two vectors n0 and n1 must be of equal length. +func addArc(p Adder, pivot, n0, n1 fixed.Point26_6) { // r2 is the square of the length of n0. - r2 := n0.Dot(n0) + r2 := pDot(n0, n0) if r2 < epsilon { // The arc radius is so small that we collapse to a straight line. p.Add1(pivot.Add(n1)) return } // We approximate the arc by 0, 1, 2 or 3 45-degree quadratic segments plus - // a final quadratic segment from s to n1.
Each 45-degree segment has control - // points {1, 0}, {1, tan(π/8)} and {1/√2, 1/√2} suitably scaled, rotated and - // translated. tan(π/8) is approximately 106/256. + // a final quadratic segment from s to n1. Each 45-degree segment has + // control points {1, 0}, {1, tan(π/8)} and {1/√2, 1/√2} suitably scaled, + // rotated and translated. tan(π/8) is approximately 106/256. const tpo8 = 106 - var s Point - // We determine which octant the angle between n0 and n1 is in via three dot products. - // m0, m1 and m2 are n0 rotated clockwise by 45, 90 and 135 degrees. - m0 := n0.Rot45CW() - m1 := n0.Rot90CW() - m2 := m0.Rot90CW() - if m1.Dot(n1) >= 0 { - if n0.Dot(n1) >= 0 { - if m2.Dot(n1) <= 0 { + var s fixed.Point26_6 + // We determine which octant the angle between n0 and n1 is in via three + // dot products. m0, m1 and m2 are n0 rotated clockwise by 45, 90 and 135 + // degrees. + m0 := pRot45CW(n0) + m1 := pRot90CW(n0) + m2 := pRot90CW(m0) + if pDot(m1, n1) >= 0 { + if pDot(n0, n1) >= 0 { + if pDot(m2, n1) <= 0 { // n1 is between 0 and 45 degrees clockwise of n0. s = n0 } else { @@ -129,7 +134,7 @@ func addArc(p Adder, pivot, n0, n1 Point) { pm1, n0t := pivot.Add(m1), n0.Mul(tpo8) p.Add2(pivot.Add(n0).Add(m1.Mul(tpo8)), pivot.Add(m0)) p.Add2(pm1.Add(n0t), pm1) - if m0.Dot(n1) >= 0 { + if pDot(m0, n1) >= 0 { // n1 is between 90 and 135 degrees clockwise of n0. s = m1 } else { @@ -139,63 +144,66 @@ func addArc(p Adder, pivot, n0, n1 Point) { } } } else { - if n0.Dot(n1) >= 0 { - if m0.Dot(n1) >= 0 { + if pDot(n0, n1) >= 0 { + if pDot(m0, n1) >= 0 { // n1 is between 0 and 45 degrees counter-clockwise of n0. s = n0 } else { // n1 is between 45 and 90 degrees counter-clockwise of n0. p.Add2(pivot.Add(n0).Sub(m1.Mul(tpo8)), pivot.Sub(m2)) - s = m2.Neg() + s = pNeg(m2) } } else { pm1, n0t := pivot.Sub(m1), n0.Mul(tpo8) p.Add2(pivot.Add(n0).Sub(m1.Mul(tpo8)), pivot.Sub(m2)) p.Add2(pm1.Add(n0t), pm1) - if m2.Dot(n1) <= 0 { + if pDot(m2, n1) <= 0 { // n1 is between 90 and 135 degrees counter-clockwise of n0. - s = m1.Neg() + s = pNeg(m1) } else { // n1 is between 135 and 180 degrees counter-clockwise of n0. p.Add2(pm1.Sub(n0t), pivot.Sub(m0)) - s = m0.Neg() + s = pNeg(m0) } } } // The final quadratic segment has two endpoints s and n1 and the middle - // control point is a multiple of s.Add(n1), i.e. it is on the angle bisector - // of those two points. The multiple ranges between 128/256 and 150/256 as - // the angle between s and n1 ranges between 0 and 45 degrees. - // When the angle is 0 degrees (i.e. s and n1 are coincident) then s.Add(n1) - // is twice s and so the middle control point of the degenerate quadratic - // segment should be half s.Add(n1), and half = 128/256. + // control point is a multiple of s.Add(n1), i.e. it is on the angle + // bisector of those two points. The multiple ranges between 128/256 and + // 150/256 as the angle between s and n1 ranges between 0 and 45 degrees. + // + // When the angle is 0 degrees (i.e. s and n1 are coincident) then + // s.Add(n1) is twice s and so the middle control point of the degenerate + // quadratic segment should be half s.Add(n1), and half = 128/256. + // // When the angle is 45 degrees then 150/256 is the ratio of the lengths of // the two vectors {1, tan(π/8)} and {1 + 1/√2, 1/√2}. + // // d is the normalized dot product between s and n1. Since the angle ranges // between 0 and 45 degrees then d ranges between 256/256 and 181/256. 
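	// As a quick endpoint check with the numbers above: at d == 256 the
	// expression below gives (150-22)>>2 == 32, i.e. 32/64 == 128/256 of
	// s.Add(n1); at d == 181 it gives 150>>2 == 37, i.e. 37/64, which is
	// 148/256, the stated 150/256 ratio up to the >>2 truncation.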
- d := 256 * s.Dot(n1) / r2 - multiple := Fix32(150 - 22*(d-181)/(256-181)) + d := 256 * pDot(s, n1) / r2 + multiple := fixed.Int26_6(150-(150-128)*(d-181)/(256-181)) >> 2 p.Add2(pivot.Add(s.Add(n1).Mul(multiple)), pivot.Add(n1)) } // midpoint returns the midpoint of two Points. -func midpoint(a, b Point) Point { - return Point{(a.X + b.X) / 2, (a.Y + b.Y) / 2} +func midpoint(a, b fixed.Point26_6) fixed.Point26_6 { + return fixed.Point26_6{(a.X + b.X) / 2, (a.Y + b.Y) / 2} } // angleGreaterThan45 returns whether the angle between two vectors is more // than 45 degrees. -func angleGreaterThan45(v0, v1 Point) bool { - v := v0.Rot45CCW() - return v.Dot(v1) < 0 || v.Rot90CW().Dot(v1) < 0 +func angleGreaterThan45(v0, v1 fixed.Point26_6) bool { + v := pRot45CCW(v0) + return pDot(v, v1) < 0 || pDot(pRot90CW(v), v1) < 0 } // interpolate returns the point (1-t)*a + t*b. -func interpolate(a, b Point, t Fix64) Point { - s := 65536 - t - x := s*Fix64(a.X) + t*Fix64(b.X) - y := s*Fix64(a.Y) + t*Fix64(b.Y) - return Point{Fix32(x >> 16), Fix32(y >> 16)} +func interpolate(a, b fixed.Point26_6, t fixed.Int52_12) fixed.Point26_6 { + s := 1<<12 - t + x := s*fixed.Int52_12(a.X) + t*fixed.Int52_12(b.X) + y := s*fixed.Int52_12(a.Y) + t*fixed.Int52_12(b.Y) + return fixed.Point26_6{fixed.Int26_6(x >> 12), fixed.Int26_6(y >> 12)} } // curviest2 returns the value of t for which the quadratic parametric curve @@ -212,15 +220,15 @@ func interpolate(a, b Point, t Fix64) Point { // (x′²+y′²) is extreme. The first order condition is that // 2*x′*x″+2*y′*y″ = 0, or (dx+ex*t)*ex + (dy+ey*t)*ey = 0. // Solving for t gives t = -(dx*ex+dy*ey) / (ex*ex+ey*ey). -func curviest2(a, b, c Point) Fix64 { +func curviest2(a, b, c fixed.Point26_6) fixed.Int52_12 { dx := int64(b.X - a.X) dy := int64(b.Y - a.Y) ex := int64(c.X - 2*b.X + a.X) ey := int64(c.Y - 2*b.Y + a.Y) if ex == 0 && ey == 0 { - return 32768 + return 2048 } - return Fix64(-65536 * (dx*ex + dy*ey) / (ex*ex + ey*ey)) + return fixed.Int52_12(-4096 * (dx*ex + dy*ey) / (ex*ex + ey*ey)) } // A stroker holds state for stroking a path. @@ -228,7 +236,7 @@ type stroker struct { // p is the destination that records the stroked path. p Adder // u is the half-width of the stroke. - u Fix32 + u fixed.Int26_6 // cr and jr specify how to end and connect path segments. cr Capper jr Joiner @@ -238,19 +246,19 @@ type stroker struct { r Path // a is the most recent segment point. anorm is the segment normal of // length u at that point. - a, anorm Point + a, anorm fixed.Point26_6 } // addNonCurvy2 adds a quadratic segment to the stroker, where the segment // defined by (k.a, b, c) achieves maximum curvature at either k.a or c. -func (k *stroker) addNonCurvy2(b, c Point) { +func (k *stroker) addNonCurvy2(b, c fixed.Point26_6) { // We repeatedly divide the segment at its middle until it is straight // enough to approximate the stroke by just translating the control points. // ds and ps are stacks of depths and points. t is the top of the stack. const maxDepth = 5 var ( ds [maxDepth + 1]int - ps [2*maxDepth + 3]Point + ps [2*maxDepth + 3]fixed.Point26_6 t int ) // Initially the ps stack has one quadratic segment of depth zero. 
@@ -259,7 +267,7 @@ func (k *stroker) addNonCurvy2(b, c Point) { ps[1] = b ps[0] = c anorm := k.anorm - var cnorm Point + var cnorm fixed.Point26_6 for { depth := ds[t] @@ -268,14 +276,14 @@ func (k *stroker) addNonCurvy2(b, c Point) { c := ps[2*t+0] ab := b.Sub(a) bc := c.Sub(b) - abIsSmall := ab.Dot(ab) < Fix64(1<<16) - bcIsSmall := bc.Dot(bc) < Fix64(1<<16) + abIsSmall := pDot(ab, ab) < fixed.Int52_12(1<<12) + bcIsSmall := pDot(bc, bc) < fixed.Int52_12(1<<12) if abIsSmall && bcIsSmall { // Approximate the segment by a circular arc. - cnorm = bc.Norm(k.u).Rot90CCW() + cnorm = pRot90CCW(pNorm(bc, k.u)) mac := midpoint(a, c) addArc(k.p, mac, anorm, cnorm) - addArc(&k.r, mac, anorm.Neg(), cnorm.Neg()) + addArc(&k.r, mac, pNeg(anorm), pNeg(cnorm)) } else if depth < maxDepth && angleGreaterThan45(ab, bc) { // Divide the segment in two and push both halves on the stack. mab := midpoint(a, b) @@ -290,8 +298,8 @@ func (k *stroker) addNonCurvy2(b, c Point) { continue } else { // Translate the control points. - bnorm := c.Sub(a).Norm(k.u).Rot90CCW() - cnorm = bc.Norm(k.u).Rot90CCW() + bnorm := pRot90CCW(pNorm(c.Sub(a), k.u)) + cnorm = pRot90CCW(pNorm(bc, k.u)) k.p.Add2(b.Add(bnorm), c.Add(cnorm)) k.r.Add2(b.Sub(bnorm), c.Sub(cnorm)) } @@ -306,8 +314,8 @@ func (k *stroker) addNonCurvy2(b, c Point) { } // Add1 adds a linear segment to the stroker. -func (k *stroker) Add1(b Point) { - bnorm := b.Sub(k.a).Norm(k.u).Rot90CCW() +func (k *stroker) Add1(b fixed.Point26_6) { + bnorm := pRot90CCW(pNorm(b.Sub(k.a), k.u)) if len(k.r) == 0 { k.p.Start(k.a.Add(bnorm)) k.r.Start(k.a.Sub(bnorm)) @@ -320,10 +328,10 @@ func (k *stroker) Add1(b Point) { } // Add2 adds a quadratic segment to the stroker. -func (k *stroker) Add2(b, c Point) { +func (k *stroker) Add2(b, c fixed.Point26_6) { ab := b.Sub(k.a) bc := c.Sub(b) - abnorm := ab.Norm(k.u).Rot90CCW() + abnorm := pRot90CCW(pNorm(ab, k.u)) if len(k.r) == 0 { k.p.Start(k.a.Add(abnorm)) k.r.Start(k.a.Sub(abnorm)) @@ -332,10 +340,10 @@ func (k *stroker) Add2(b, c Point) { } // Approximate nearly-degenerate quadratics by linear segments. - abIsSmall := ab.Dot(ab) < epsilon - bcIsSmall := bc.Dot(bc) < epsilon + abIsSmall := pDot(ab, ab) < epsilon + bcIsSmall := pDot(bc, bc) < epsilon if abIsSmall || bcIsSmall { - acnorm := c.Sub(k.a).Norm(k.u).Rot90CCW() + acnorm := pRot90CCW(pNorm(c.Sub(k.a), k.u)) k.p.Add1(c.Add(acnorm)) k.r.Add1(c.Sub(acnorm)) k.a, k.anorm = c, acnorm @@ -345,7 +353,7 @@ func (k *stroker) Add2(b, c Point) { // The quadratic segment (k.a, b, c) has a point of maximum curvature. // If this occurs at an end point, we process the segment as a whole. t := curviest2(k.a, b, c) - if t <= 0 || t >= 65536 { + if t <= 0 || 4096 <= t { k.addNonCurvy2(b, c) return } @@ -359,13 +367,13 @@ func (k *stroker) Add2(b, c Point) { // If the vectors ab and bc are close to being in opposite directions, // then the decomposition can become unstable, so we approximate the // quadratic segment by two linear segments joined by an arc. 
- bcnorm := bc.Norm(k.u).Rot90CCW() - if abnorm.Dot(bcnorm) < -Fix64(k.u)*Fix64(k.u)*2047/2048 { - pArc := abnorm.Dot(bc) < 0 + bcnorm := pRot90CCW(pNorm(bc, k.u)) + if pDot(abnorm, bcnorm) < -fixed.Int52_12(k.u)*fixed.Int52_12(k.u)*2047/2048 { + pArc := pDot(abnorm, bc) < 0 k.p.Add1(mabc.Add(abnorm)) if pArc { - z := abnorm.Rot90CW() + z := pRot90CW(abnorm) addArc(k.p, mabc, abnorm, z) addArc(k.p, mabc, z, bcnorm) } @@ -374,9 +382,9 @@ func (k *stroker) Add2(b, c Point) { k.r.Add1(mabc.Sub(abnorm)) if !pArc { - z := abnorm.Rot90CW() - addArc(&k.r, mabc, abnorm.Neg(), z) - addArc(&k.r, mabc, z, bcnorm.Neg()) + z := pRot90CW(abnorm) + addArc(&k.r, mabc, pNeg(abnorm), z) + addArc(&k.r, mabc, z, pNeg(bcnorm)) } k.r.Add1(mabc.Sub(bcnorm)) k.r.Add1(c.Sub(bcnorm)) @@ -391,7 +399,7 @@ func (k *stroker) Add2(b, c Point) { } // Add3 adds a cubic segment to the stroker. -func (k *stroker) Add3(b, c, d Point) { +func (k *stroker) Add3(b, c, d fixed.Point26_6) { panic("freetype/raster: stroke unimplemented for cubic segments") } @@ -402,17 +410,26 @@ func (k *stroker) stroke(q Path) { // path is accumulated in k.r. Once we've finished adding the LHS to k.p, // we add the RHS in reverse order. k.r = make(Path, 0, len(q)) - k.a = Point{q[1], q[2]} + k.a = fixed.Point26_6{q[1], q[2]} for i := 4; i < len(q); { switch q[i] { case 1: - k.Add1(Point{q[i+1], q[i+2]}) + k.Add1( + fixed.Point26_6{q[i+1], q[i+2]}, + ) i += 4 case 2: - k.Add2(Point{q[i+1], q[i+2]}, Point{q[i+3], q[i+4]}) + k.Add2( + fixed.Point26_6{q[i+1], q[i+2]}, + fixed.Point26_6{q[i+3], q[i+4]}, + ) i += 6 case 3: - k.Add3(Point{q[i+1], q[i+2]}, Point{q[i+3], q[i+4]}, Point{q[i+5], q[i+6]}) + k.Add3( + fixed.Point26_6{q[i+1], q[i+2]}, + fixed.Point26_6{q[i+3], q[i+4]}, + fixed.Point26_6{q[i+5], q[i+6]}, + ) i += 8 default: panic("freetype/raster: bad path") @@ -423,16 +440,16 @@ func (k *stroker) stroke(q Path) { } // TODO(nigeltao): if q is a closed curve then we should join the first and // last segments instead of capping them. - k.cr.Cap(k.p, k.u, q.lastPoint(), k.anorm.Neg()) + k.cr.Cap(k.p, k.u, q.lastPoint(), pNeg(k.anorm)) addPathReversed(k.p, k.r) pivot := q.firstPoint() - k.cr.Cap(k.p, k.u, pivot, pivot.Sub(Point{k.r[1], k.r[2]})) + k.cr.Cap(k.p, k.u, pivot, pivot.Sub(fixed.Point26_6{k.r[1], k.r[2]})) } // Stroke adds q stroked with the given width to p. The result is typically // self-intersecting and should be rasterized with UseNonZeroWinding. // cr and jr may be nil, which defaults to a RoundCapper or RoundJoiner. -func Stroke(p Adder, q Path, width Fix32, cr Capper, jr Joiner) { +func Stroke(p Adder, q Path, width fixed.Int26_6, cr Capper, jr Joiner) { if len(q) == 0 { return } diff --git a/_third_party/github.com/golang/freetype/truetype/face.go b/_third_party/github.com/golang/freetype/truetype/face.go new file mode 100644 index 0000000000..0f056e6872 --- /dev/null +++ b/_third_party/github.com/golang/freetype/truetype/face.go @@ -0,0 +1,495 @@ +// Copyright 2015 The Freetype-Go Authors. All rights reserved. +// Use of this source code is governed by your choice of either the +// FreeType License or the GNU General Public License version 2 (or +// any later version), both of which can be found in the LICENSE file. + +package truetype + +import ( + "image" + + "bosun.org/_third_party/github.com/golang/freetype/raster" + "golang.org/x/image/font" + "golang.org/x/image/math/fixed" +) + +func powerOf2(i int) bool { + return i != 0 && (i&(i-1)) == 0 +} + +// Options are optional arguments to NewFace. 
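+//
+// A typical call site might look like this sketch (illustrative, assuming a
+// *Font f already obtained from Parse):
+//
+//	face := truetype.NewFace(f, &truetype.Options{
+//		Size: 12,
+//		DPI:  72,
+//	})
+//	defer face.Close()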
+type Options struct { + // Size is the font size in points, as in "a 10 point font size". + // + // A zero value means to use a 12 point font size. + Size float64 + + // DPI is the dots-per-inch resolution. + // + // A zero value means to use 72 DPI. + DPI float64 + + // Hinting is how to quantize the glyph nodes. + // + // A zero value means to use no hinting. + Hinting font.Hinting + + // GlyphCacheEntries is the number of entries in the glyph mask image + // cache. + // + // If non-zero, it must be a power of 2. + // + // A zero value means to use 512 entries. + GlyphCacheEntries int + + // SubPixelsX is the number of sub-pixel locations a glyph's dot is + // quantized to, in the horizontal direction. For example, a value of 8 + // means that the dot is quantized to 1/8th of a pixel. This quantization + // only affects the glyph mask image, not its bounding box or advance + // width. A higher value gives a more faithful glyph image, but reduces the + // effectiveness of the glyph cache. + // + // If non-zero, it must be a power of 2, and be between 1 and 64 inclusive. + // + // A zero value means to use 4 sub-pixel locations. + SubPixelsX int + + // SubPixelsY is the number of sub-pixel locations a glyph's dot is + // quantized to, in the vertical direction. For example, a value of 8 + // means that the dot is quantized to 1/8th of a pixel. This quantization + // only affects the glyph mask image, not its bounding box or advance + // width. A higher value gives a more faithful glyph image, but reduces the + // effectiveness of the glyph cache. + // + // If non-zero, it must be a power of 2, and be between 1 and 64 inclusive. + // + // A zero value means to use 1 sub-pixel location. + SubPixelsY int +} + +func (o *Options) size() float64 { + if o != nil && o.Size > 0 { + return o.Size + } + return 12 +} + +func (o *Options) dpi() float64 { + if o != nil && o.DPI > 0 { + return o.DPI + } + return 72 +} + +func (o *Options) hinting() font.Hinting { + if o != nil { + switch o.Hinting { + case font.HintingVertical, font.HintingFull: + // TODO: support vertical hinting. + return font.HintingFull + } + } + return font.HintingNone +} + +func (o *Options) glyphCacheEntries() int { + if o != nil && powerOf2(o.GlyphCacheEntries) { + return o.GlyphCacheEntries + } + // 512 is 128 * 4 * 1, which lets us cache 128 glyphs at 4 * 1 subpixel + // locations in the X and Y direction. + return 512 +} + +func (o *Options) subPixelsX() (value uint32, halfQuantum, mask fixed.Int26_6) { + if o != nil { + switch o.SubPixelsX { + case 1, 2, 4, 8, 16, 32, 64: + return subPixels(o.SubPixelsX) + } + } + // This default value of 4 isn't based on anything scientific, merely as + // small a number as possible that looks almost as good as no quantization, + // or returning subPixels(64). + return subPixels(4) +} + +func (o *Options) subPixelsY() (value uint32, halfQuantum, mask fixed.Int26_6) { + if o != nil { + switch o.SubPixelsY { + case 1, 2, 4, 8, 16, 32, 64: + return subPixels(o.SubPixelsY) + } + } + // This default value of 1 isn't based on anything scientific, merely that + // vertical sub-pixel glyph rendering is pretty rare. Baseline locations + // can usually afford to snap to the pixel grid, so the vertical direction + // doesn't have to deal with the horizontal's fractional advance widths. + return subPixels(1) +} + +// subPixels returns q and the bias and mask that lead to q quantized +// sub-pixel locations per full pixel.
+// +// For example, q == 4 leads to a bias of 8 and a mask of 0xfffffff0, or -16, +// because we want to round fractions of fixed.Int26_6 as: +// - 0 to 7 rounds to 0. +// - 8 to 23 rounds to 16. +// - 24 to 39 rounds to 32. +// - 40 to 55 rounds to 48. +// - 56 to 63 rounds to 64. +// which means to add 8 and then bitwise-and with -16, in two's complement +// representation. +// +// When q == 1, we want bias == 32 and mask == -64. +// When q == 2, we want bias == 16 and mask == -32. +// When q == 4, we want bias == 8 and mask == -16. +// ... +// When q == 64, we want bias == 0 and mask == -1. (The no-op case). +// The pattern is clear. +func subPixels(q int) (value uint32, bias, mask fixed.Int26_6) { + return uint32(q), 32 / fixed.Int26_6(q), -64 / fixed.Int26_6(q) +} + +// glyphCacheEntry caches the arguments and return values of rasterize. +type glyphCacheEntry struct { + key glyphCacheKey + val glyphCacheVal +} + +type glyphCacheKey struct { + index Index + fx, fy uint8 +} + +type glyphCacheVal struct { + advanceWidth fixed.Int26_6 + offset image.Point + gw int + gh int +} + +type indexCacheEntry struct { + rune rune + index Index +} + +// NewFace returns a new font.Face for the given Font. +func NewFace(f *Font, opts *Options) font.Face { + a := &face{ + f: f, + hinting: opts.hinting(), + scale: fixed.Int26_6(0.5 + (opts.size() * opts.dpi() * 64 / 72)), + glyphCache: make([]glyphCacheEntry, opts.glyphCacheEntries()), + } + a.subPixelX, a.subPixelBiasX, a.subPixelMaskX = opts.subPixelsX() + a.subPixelY, a.subPixelBiasY, a.subPixelMaskY = opts.subPixelsY() + + // Fill the cache with invalid entries. Valid glyph cache entries have fx + // and fy in the range [0, 64). Valid index cache entries have rune >= 0. + for i := range a.glyphCache { + a.glyphCache[i].key.fy = 0xff + } + for i := range a.indexCache { + a.indexCache[i].rune = -1 + } + + // Set the rasterizer's bounds to be big enough to handle the largest glyph. + b := f.Bounds(a.scale) + xmin := +int(b.Min.X) >> 6 + ymin := -int(b.Max.Y) >> 6 + xmax := +int(b.Max.X+63) >> 6 + ymax := -int(b.Min.Y-63) >> 6 + a.maxw = xmax - xmin + a.maxh = ymax - ymin + a.masks = image.NewAlpha(image.Rect(0, 0, a.maxw, a.maxh*len(a.glyphCache))) + a.r.SetBounds(a.maxw, a.maxh) + a.p = facePainter{a} + + return a +} + +type face struct { + f *Font + hinting font.Hinting + scale fixed.Int26_6 + subPixelX uint32 + subPixelBiasX fixed.Int26_6 + subPixelMaskX fixed.Int26_6 + subPixelY uint32 + subPixelBiasY fixed.Int26_6 + subPixelMaskY fixed.Int26_6 + masks *image.Alpha + glyphCache []glyphCacheEntry + r raster.Rasterizer + p raster.Painter + paintOffset int + maxw int + maxh int + glyphBuf GlyphBuf + indexCache [indexCacheLen]indexCacheEntry + + // TODO: clip rectangle? +} + +const indexCacheLen = 256 + +func (a *face) index(r rune) Index { + const mask = indexCacheLen - 1 + c := &a.indexCache[r&mask] + if c.rune == r { + return c.index + } + i := a.f.Index(r) + c.rune = r + c.index = i + return i +} + +// Close satisfies the font.Face interface. +func (a *face) Close() error { return nil } + +// Kern satisfies the font.Face interface. +func (a *face) Kern(r0, r1 rune) fixed.Int26_6 { + i0 := a.index(r0) + i1 := a.index(r1) + kern := a.f.Kern(a.scale, i0, i1) + if a.hinting != font.HintingNone { + kern = (kern + 32) &^ 63 + } + return kern +} + +// Glyph satisfies the font.Face interface. 
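+//
+// As a worked example of the quantization below (illustrative numbers): with
+// the default subPixels(4), the bias is 8 and the mask is -16, so a dot.X of
+// 659 (about 10.3 pixels in 26.6 units) becomes (659+8) &^ 15 == 656, giving
+// ix == 10 and fx == 16, i.e. a quarter of a pixel.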
+func (a *face) Glyph(dot fixed.Point26_6, r rune) ( + dr image.Rectangle, mask image.Image, maskp image.Point, advance fixed.Int26_6, ok bool) { + + // Quantize to the sub-pixel granularity. + dotX := (dot.X + a.subPixelBiasX) & a.subPixelMaskX + dotY := (dot.Y + a.subPixelBiasY) & a.subPixelMaskY + + // Split the coordinates into their integer and fractional parts. + ix, fx := int(dotX>>6), dotX&0x3f + iy, fy := int(dotY>>6), dotY&0x3f + + index := a.index(r) + cIndex := uint32(index) + cIndex = cIndex*a.subPixelX - uint32(fx/a.subPixelMaskX) + cIndex = cIndex*a.subPixelY - uint32(fy/a.subPixelMaskY) + cIndex &= uint32(len(a.glyphCache) - 1) + a.paintOffset = a.maxh * int(cIndex) + k := glyphCacheKey{ + index: index, + fx: uint8(fx), + fy: uint8(fy), + } + var v glyphCacheVal + if a.glyphCache[cIndex].key != k { + var ok bool + v, ok = a.rasterize(index, fx, fy) + if !ok { + return image.Rectangle{}, nil, image.Point{}, 0, false + } + a.glyphCache[cIndex] = glyphCacheEntry{k, v} + } else { + v = a.glyphCache[cIndex].val + } + + dr.Min = image.Point{ + X: ix + v.offset.X, + Y: iy + v.offset.Y, + } + dr.Max = image.Point{ + X: dr.Min.X + v.gw, + Y: dr.Min.Y + v.gh, + } + return dr, a.masks, image.Point{Y: a.paintOffset}, v.advanceWidth, true +} + +func (a *face) GlyphBounds(r rune) (bounds fixed.Rectangle26_6, advance fixed.Int26_6, ok bool) { + if err := a.glyphBuf.Load(a.f, a.scale, a.index(r), a.hinting); err != nil { + return fixed.Rectangle26_6{}, 0, false + } + xmin := +a.glyphBuf.Bounds.Min.X + ymin := -a.glyphBuf.Bounds.Max.Y + xmax := +a.glyphBuf.Bounds.Max.X + ymax := -a.glyphBuf.Bounds.Min.Y + if xmin > xmax || ymin > ymax { + return fixed.Rectangle26_6{}, 0, false + } + return fixed.Rectangle26_6{ + Min: fixed.Point26_6{ + X: xmin, + Y: ymin, + }, + Max: fixed.Point26_6{ + X: xmax, + Y: ymax, + }, + }, a.glyphBuf.AdvanceWidth, true +} + +func (a *face) GlyphAdvance(r rune) (advance fixed.Int26_6, ok bool) { + if err := a.glyphBuf.Load(a.f, a.scale, a.index(r), a.hinting); err != nil { + return 0, false + } + return a.glyphBuf.AdvanceWidth, true +} + +// rasterize returns the advance width, integer-pixel offset to render at, and +// the width and height of the given glyph at the given sub-pixel offsets. +// +// The 26.6 fixed point arguments fx and fy must be in the range [0, 1). +func (a *face) rasterize(index Index, fx, fy fixed.Int26_6) (v glyphCacheVal, ok bool) { + if err := a.glyphBuf.Load(a.f, a.scale, index, a.hinting); err != nil { + return glyphCacheVal{}, false + } + // Calculate the integer-pixel bounds for the glyph. + xmin := int(fx+a.glyphBuf.Bounds.Min.X) >> 6 + ymin := int(fy-a.glyphBuf.Bounds.Max.Y) >> 6 + xmax := int(fx+a.glyphBuf.Bounds.Max.X+0x3f) >> 6 + ymax := int(fy-a.glyphBuf.Bounds.Min.Y+0x3f) >> 6 + if xmin > xmax || ymin > ymax { + return glyphCacheVal{}, false + } + // A TrueType's glyph's nodes can have negative co-ordinates, but the + // rasterizer clips anything left of x=0 or above y=0. xmin and ymin are + // the pixel offsets, based on the font's FUnit metrics, that let a + // negative co-ordinate in TrueType space be non-negative in rasterizer + // space. xmin and ymin are typically <= 0. + fx -= fixed.Int26_6(xmin << 6) + fy -= fixed.Int26_6(ymin << 6) + // Rasterize the glyph's vectors. 
+ a.r.Clear() + pixOffset := a.paintOffset * a.maxw + clear(a.masks.Pix[pixOffset : pixOffset+a.maxw*a.maxh]) + e0 := 0 + for _, e1 := range a.glyphBuf.Ends { + a.drawContour(a.glyphBuf.Points[e0:e1], fx, fy) + e0 = e1 + } + a.r.Rasterize(a.p) + return glyphCacheVal{ + a.glyphBuf.AdvanceWidth, + image.Point{xmin, ymin}, + xmax - xmin, + ymax - ymin, + }, true +} + +func clear(pix []byte) { + for i := range pix { + pix[i] = 0 + } +} + +// drawContour draws the given closed contour with the given offset. +func (a *face) drawContour(ps []Point, dx, dy fixed.Int26_6) { + if len(ps) == 0 { + return + } + + // The low bit of each point's Flags value is whether the point is on the + // curve. Truetype fonts only have quadratic Bézier curves, not cubics. + // Thus, two consecutive off-curve points imply an on-curve point in the + // middle of those two. + // + // See http://chanae.walon.org/pub/ttf/ttf_glyphs.htm for more details. + + // ps[0] is a truetype.Point measured in FUnits and positive Y going + // upwards. start is the same thing measured in fixed point units and + // positive Y going downwards, and offset by (dx, dy). + start := fixed.Point26_6{ + X: dx + ps[0].X, + Y: dy - ps[0].Y, + } + var others []Point + if ps[0].Flags&0x01 != 0 { + others = ps[1:] + } else { + last := fixed.Point26_6{ + X: dx + ps[len(ps)-1].X, + Y: dy - ps[len(ps)-1].Y, + } + if ps[len(ps)-1].Flags&0x01 != 0 { + start = last + others = ps[:len(ps)-1] + } else { + start = fixed.Point26_6{ + X: (start.X + last.X) / 2, + Y: (start.Y + last.Y) / 2, + } + others = ps + } + } + a.r.Start(start) + q0, on0 := start, true + for _, p := range others { + q := fixed.Point26_6{ + X: dx + p.X, + Y: dy - p.Y, + } + on := p.Flags&0x01 != 0 + if on { + if on0 { + a.r.Add1(q) + } else { + a.r.Add2(q0, q) + } + } else { + if on0 { + // No-op. + } else { + mid := fixed.Point26_6{ + X: (q0.X + q.X) / 2, + Y: (q0.Y + q.Y) / 2, + } + a.r.Add2(q0, mid) + } + } + q0, on0 = q, on + } + // Close the curve. + if on0 { + a.r.Add1(start) + } else { + a.r.Add2(q0, start) + } +} + +// facePainter is like a raster.AlphaSrcPainter, with an additional Y offset +// (face.paintOffset) to the painted spans. +type facePainter struct { + a *face +} + +func (p facePainter) Paint(ss []raster.Span, done bool) { + m := p.a.masks + b := m.Bounds() + b.Min.Y = p.a.paintOffset + b.Max.Y = p.a.paintOffset + p.a.maxh + for _, s := range ss { + s.Y += p.a.paintOffset + if s.Y < b.Min.Y { + continue + } + if s.Y >= b.Max.Y { + return + } + if s.X0 < b.Min.X { + s.X0 = b.Min.X + } + if s.X1 > b.Max.X { + s.X1 = b.Max.X + } + if s.X0 >= s.X1 { + continue + } + base := (s.Y-m.Rect.Min.Y)*m.Stride - m.Rect.Min.X + p := m.Pix[base+s.X0 : base+s.X1] + color := uint8(s.Alpha >> 8) + for i := range p { + p[i] = color + } + } +} diff --git a/_third_party/github.com/golang/freetype/truetype/face_test.go b/_third_party/github.com/golang/freetype/truetype/face_test.go new file mode 100644 index 0000000000..856581dff4 --- /dev/null +++ b/_third_party/github.com/golang/freetype/truetype/face_test.go @@ -0,0 +1,48 @@ +// Copyright 2015 The Freetype-Go Authors. All rights reserved. +// Use of this source code is governed by your choice of either the +// FreeType License or the GNU General Public License version 2 (or +// any later version), both of which can be found in the LICENSE file. 
+ +package truetype + +import ( + "image" + "image/draw" + "io/ioutil" + "strings" + "testing" + + "golang.org/x/image/font" + "golang.org/x/image/math/fixed" +) + +func BenchmarkDrawString(b *testing.B) { + data, err := ioutil.ReadFile("../licenses/gpl.txt") + if err != nil { + b.Fatal(err) + } + lines := strings.Split(string(data), "\n") + data, err = ioutil.ReadFile("../testdata/luxisr.ttf") + if err != nil { + b.Fatal(err) + } + f, err := Parse(data) + if err != nil { + b.Fatal(err) + } + dst := image.NewRGBA(image.Rect(0, 0, 800, 600)) + draw.Draw(dst, dst.Bounds(), image.White, image.ZP, draw.Src) + d := &font.Drawer{ + Dst: dst, + Src: image.Black, + Face: NewFace(f, nil), + } + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j, line := range lines { + d.Dot = fixed.P(0, (j*16)%600) + d.DrawString(line) + } + } +} diff --git a/_third_party/code.google.com/p/freetype-go/freetype/truetype/glyph.go b/_third_party/github.com/golang/freetype/truetype/glyph.go similarity index 63% rename from _third_party/code.google.com/p/freetype-go/freetype/truetype/glyph.go rename to _third_party/github.com/golang/freetype/truetype/glyph.go index b5f3278510..c2935a58eb 100644 --- a/_third_party/code.google.com/p/freetype-go/freetype/truetype/glyph.go +++ b/_third_party/github.com/golang/freetype/truetype/glyph.go @@ -5,23 +5,18 @@ package truetype -// Hinting is the policy for snapping a glyph's contours to pixel boundaries. -type Hinting int32 - -const ( - // NoHinting means to not perform any hinting. - NoHinting Hinting = iota - // FullHinting means to use the font's hinting instructions. - FullHinting - - // TODO: implement VerticalHinting. +import ( + "golang.org/x/image/font" + "golang.org/x/image/math/fixed" ) -// A Point is a co-ordinate pair plus whether it is ``on'' a contour or an -// ``off'' control point. +// TODO: implement VerticalHinting. + +// A Point is a co-ordinate pair plus whether it is 'on' a contour or an 'off' +// control point. type Point struct { - X, Y int32 - // The Flags' LSB means whether or not this Point is ``on'' the contour. + X, Y fixed.Int26_6 + // The Flags' LSB means whether or not this Point is 'on' the contour. // Other bits are reserved for internal use. Flags uint32 } @@ -30,23 +25,23 @@ type Point struct { // series of glyphs from a Font. type GlyphBuf struct { // AdvanceWidth is the glyph's advance width. - AdvanceWidth int32 - // B is the glyph's bounding box. - B Bounds - // Point contains all Points from all contours of the glyph. If - // hinting was used to load a glyph then Unhinted contains those - // Points before they were hinted, and InFontUnits contains those - // Points before they were hinted and scaled. - Point, Unhinted, InFontUnits []Point - // End is the point indexes of the end point of each countour. The - // length of End is the number of contours in the glyph. The i'th - // contour consists of points Point[End[i-1]:End[i]], where End[-1] - // is interpreted to mean zero. - End []int + AdvanceWidth fixed.Int26_6 + // Bounds is the glyph's bounding box. + Bounds fixed.Rectangle26_6 + // Points contains all Points from all contours of the glyph. If hinting + // was used to load a glyph then Unhinted contains those Points before they + // were hinted, and InFontUnits contains those Points before they were + // hinted and scaled. + Points, Unhinted, InFontUnits []Point + // Ends is the point indexes of the end point of each contour. The length + // of Ends is the number of contours in the glyph. 
The i'th contour + // consists of points Points[Ends[i-1]:Ends[i]], where Ends[-1] is + // interpreted to mean zero. + Ends []int font *Font - scale int32 - hinting Hinting + scale fixed.Int26_6 + hinting font.Hinting hinter hinter // phantomPoints are the co-ordinates of the synthetic phantom points // used for hinting and bounding box calculations. @@ -54,7 +49,7 @@ type GlyphBuf struct { // pp1x is the X co-ordinate of the first phantom point. The '1' is // using 1-based indexing; pp1x is almost always phantomPoints[0].X. // TODO: eliminate this and consistently use phantomPoints[0].X. - pp1x int32 + pp1x fixed.Int26_6 // metricsSet is whether the glyph's metrics have been set yet. For a // compound glyph, a sub-glyph may override the outer glyph's metrics. metricsSet bool @@ -84,14 +79,14 @@ const ( flagThisYIsSame = flagPositiveYShortVector ) -// Load loads a glyph's contours from a Font, overwriting any previously -// loaded contours for this GlyphBuf. scale is the number of 26.6 fixed point -// units in 1 em, i is the glyph index, and h is the hinting policy. -func (g *GlyphBuf) Load(f *Font, scale int32, i Index, h Hinting) error { - g.Point = g.Point[:0] +// Load loads a glyph's contours from a Font, overwriting any previously loaded +// contours for this GlyphBuf. scale is the number of 26.6 fixed point units in +// 1 em, i is the glyph index, and h is the hinting policy. +func (g *GlyphBuf) Load(f *Font, scale fixed.Int26_6, i Index, h font.Hinting) error { + g.Points = g.Points[:0] g.Unhinted = g.Unhinted[:0] g.InFontUnits = g.InFontUnits[:0] - g.End = g.End[:0] + g.Ends = g.Ends[:0] g.font = f g.hinting = h g.scale = scale @@ -99,7 +94,7 @@ func (g *GlyphBuf) Load(f *Font, scale int32, i Index, h Hinting) error { g.phantomPoints = [4]Point{} g.metricsSet = false - if h != NoHinting { + if h != font.HintingNone { if err := g.hinter.init(f, scale); err != nil { return err } @@ -111,22 +106,22 @@ func (g *GlyphBuf) Load(f *Font, scale int32, i Index, h Hinting) error { // and should be cleaned up once we have all the testScaling tests passing, // plus additional tests for Freetype-Go's bounding boxes matching C Freetype's. pp1x := g.pp1x - if h != NoHinting { + if h != font.HintingNone { pp1x = g.phantomPoints[0].X } if pp1x != 0 { - for i := range g.Point { - g.Point[i].X -= pp1x + for i := range g.Points { + g.Points[i].X -= pp1x } } advanceWidth := g.phantomPoints[1].X - g.phantomPoints[0].X - if h != NoHinting { + if h != font.HintingNone { if len(f.hdmx) >= 8 { if n := u32(f.hdmx, 4); n > 3+uint32(i) { for hdmx := f.hdmx[8:]; uint32(len(hdmx)) >= n; hdmx = hdmx[n:] { - if int32(hdmx[0]) == scale>>6 { - advanceWidth = int32(hdmx[2+i]) << 6 + if fixed.Int26_6(hdmx[0]) == scale>>6 { + advanceWidth = fixed.Int26_6(hdmx[2+i]) << 6 break } } @@ -136,46 +131,46 @@ func (g *GlyphBuf) Load(f *Font, scale int32, i Index, h Hinting) error { } g.AdvanceWidth = advanceWidth - // Set g.B to the 'control box', which is the bounding box of the Bézier - // curves' control points. This is easier to calculate, no smaller than - // and often equal to the tightest possible bounding box of the curves + // Set g.Bounds to the 'control box', which is the bounding box of the + // Bézier curves' control points. This is easier to calculate, no smaller + // than and often equal to the tightest possible bounding box of the curves // themselves. This approach is what C Freetype does. 
We can't just scale // the nominal bounding box in the glyf data as the hinting process and // phantom point adjustment may move points outside of that box. - if len(g.Point) == 0 { - g.B = Bounds{} + if len(g.Points) == 0 { + g.Bounds = fixed.Rectangle26_6{} } else { - p := g.Point[0] - g.B.XMin = p.X - g.B.XMax = p.X - g.B.YMin = p.Y - g.B.YMax = p.Y - for _, p := range g.Point[1:] { - if g.B.XMin > p.X { - g.B.XMin = p.X - } else if g.B.XMax < p.X { - g.B.XMax = p.X + p := g.Points[0] + g.Bounds.Min.X = p.X + g.Bounds.Max.X = p.X + g.Bounds.Min.Y = p.Y + g.Bounds.Max.Y = p.Y + for _, p := range g.Points[1:] { + if g.Bounds.Min.X > p.X { + g.Bounds.Min.X = p.X + } else if g.Bounds.Max.X < p.X { + g.Bounds.Max.X = p.X } - if g.B.YMin > p.Y { - g.B.YMin = p.Y - } else if g.B.YMax < p.Y { - g.B.YMax = p.Y + if g.Bounds.Min.Y > p.Y { + g.Bounds.Min.Y = p.Y + } else if g.Bounds.Max.Y < p.Y { + g.Bounds.Max.Y = p.Y } } // Snap the box to the grid, if hinting is on. - if h != NoHinting { - g.B.XMin &^= 63 - g.B.YMin &^= 63 - g.B.XMax += 63 - g.B.XMax &^= 63 - g.B.YMax += 63 - g.B.YMax &^= 63 + if h != font.HintingNone { + g.Bounds.Min.X &^= 63 + g.Bounds.Min.Y &^= 63 + g.Bounds.Max.X += 63 + g.Bounds.Max.X &^= 63 + g.Bounds.Max.Y += 63 + g.Bounds.Max.Y &^= 63 } } return nil } -func (g *GlyphBuf) load(recursion int32, i Index, useMyMetrics bool) (err error) { +func (g *GlyphBuf) load(recursion uint32, i Index, useMyMetrics bool) (err error) { // The recursion limit here is arbitrary, but defends against malformed glyphs. if recursion >= 32 { return UnsupportedError("excessive compound glyph recursion") @@ -193,16 +188,16 @@ func (g *GlyphBuf) load(recursion int32, i Index, useMyMetrics bool) (err error) // Decode the contour count and nominal bounding box, from the first // 10 bytes of the glyf data. boundsYMin and boundsXMax, at offsets 4 // and 6, are unused. - glyf, ne, boundsXMin, boundsYMax := []byte(nil), 0, int32(0), int32(0) + glyf, ne, boundsXMin, boundsYMax := []byte(nil), 0, fixed.Int26_6(0), fixed.Int26_6(0) if g0+10 <= g1 { glyf = g.font.glyf[g0:g1] ne = int(int16(u16(glyf, 0))) - boundsXMin = int32(int16(u16(glyf, 2))) - boundsYMax = int32(int16(u16(glyf, 8))) + boundsXMin = fixed.Int26_6(int16(u16(glyf, 2))) + boundsYMax = fixed.Int26_6(int16(u16(glyf, 8))) } // Create the phantom points. 
- uhm, pp1x := g.font.unscaledHMetric(i), int32(0) + uhm, pp1x := g.font.unscaledHMetric(i), fixed.Int26_6(0) uvm := g.font.unscaledVMetric(i, boundsYMax) g.phantomPoints = [4]Point{ {X: boundsXMin - uhm.LeftSideBearing}, @@ -211,9 +206,9 @@ func (g *GlyphBuf) load(recursion int32, i Index, useMyMetrics bool) (err error) {X: uhm.AdvanceWidth / 2, Y: boundsYMax + uvm.TopSideBearing - uvm.AdvanceHeight}, } if len(glyf) == 0 { - g.addPhantomsAndScale(len(g.Point), len(g.Point), true, true) - copy(g.phantomPoints[:], g.Point[len(g.Point)-4:]) - g.Point = g.Point[:len(g.Point)-4] + g.addPhantomsAndScale(len(g.Points), len(g.Points), true, true) + copy(g.phantomPoints[:], g.Points[len(g.Points)-4:]) + g.Points = g.Points[:len(g.Points)-4] return nil } @@ -229,18 +224,18 @@ func (g *GlyphBuf) load(recursion int32, i Index, useMyMetrics bool) (err error) return err } } else { - np0, ne0 := len(g.Point), len(g.End) + np0, ne0 := len(g.Points), len(g.Ends) program := g.loadSimple(glyf, ne) g.addPhantomsAndScale(np0, np0, true, true) - pp1x = g.Point[len(g.Point)-4].X - if g.hinting != NoHinting { + pp1x = g.Points[len(g.Points)-4].X + if g.hinting != font.HintingNone { if len(program) != 0 { err := g.hinter.run( program, - g.Point[np0:], + g.Points[np0:], g.Unhinted[np0:], g.InFontUnits[np0:], - g.End[ne0:], + g.Ends[ne0:], ) if err != nil { return err @@ -251,15 +246,15 @@ func (g *GlyphBuf) load(recursion int32, i Index, useMyMetrics bool) (err error) g.Unhinted = g.Unhinted[:len(g.Unhinted)-4] } if useMyMetrics { - copy(g.phantomPoints[:], g.Point[len(g.Point)-4:]) + copy(g.phantomPoints[:], g.Points[len(g.Points)-4:]) } - g.Point = g.Point[:len(g.Point)-4] + g.Points = g.Points[:len(g.Points)-4] if np0 != 0 { - // The hinting program expects the []End values to be indexed relative - // to the inner glyph, not the outer glyph, so we delay adding np0 until - // after the hinting program (if any) has run. - for i := ne0; i < len(g.End); i++ { - g.End[i] += np0 + // The hinting program expects the []Ends values to be indexed + // relative to the inner glyph, not the outer glyph, so we delay + // adding np0 until after the hinting program (if any) has run. + for i := ne0; i < len(g.Ends); i++ { + g.Ends[i] += np0 } } } @@ -277,7 +272,7 @@ const loadOffset = 10 func (g *GlyphBuf) loadSimple(glyf []byte, ne int) (program []byte) { offset := loadOffset for i := 0; i < ne; i++ { - g.End = append(g.End, 1+int(u16(glyf, offset))) + g.Ends = append(g.Ends, 1+int(u16(glyf, offset))) offset += 2 } @@ -287,20 +282,20 @@ func (g *GlyphBuf) loadSimple(glyf []byte, ne int) (program []byte) { program = glyf[offset : offset+instrLen] offset += instrLen - np0 := len(g.Point) - np1 := np0 + int(g.End[len(g.End)-1]) + np0 := len(g.Points) + np1 := np0 + int(g.Ends[len(g.Ends)-1]) // Decode the flags. for i := np0; i < np1; { c := uint32(glyf[offset]) offset++ - g.Point = append(g.Point, Point{Flags: c}) + g.Points = append(g.Points, Point{Flags: c}) i++ if c&flagRepeat != 0 { count := glyf[offset] offset++ for ; count > 0; count-- { - g.Point = append(g.Point, Point{Flags: c}) + g.Points = append(g.Points, Point{Flags: c}) i++ } } @@ -309,7 +304,7 @@ func (g *GlyphBuf) loadSimple(glyf []byte, ne int) (program []byte) { // Decode the co-ordinates. 
var x int16 for i := np0; i < np1; i++ { - f := g.Point[i].Flags + f := g.Points[i].Flags if f&flagXShortVector != 0 { dx := int16(glyf[offset]) offset++ @@ -322,11 +317,11 @@ func (g *GlyphBuf) loadSimple(glyf []byte, ne int) (program []byte) { x += int16(u16(glyf, offset)) offset += 2 } - g.Point[i].X = int32(x) + g.Points[i].X = fixed.Int26_6(x) } var y int16 for i := np0; i < np1; i++ { - f := g.Point[i].Flags + f := g.Points[i].Flags if f&flagYShortVector != 0 { dy := int16(glyf[offset]) offset++ @@ -339,13 +334,13 @@ func (g *GlyphBuf) loadSimple(glyf []byte, ne int) (program []byte) { y += int16(u16(glyf, offset)) offset += 2 } - g.Point[i].Y = int32(y) + g.Points[i].Y = fixed.Int26_6(y) } return program } -func (g *GlyphBuf) loadCompound(recursion int32, uhm HMetric, i Index, +func (g *GlyphBuf) loadCompound(recursion uint32, uhm HMetric, i Index, glyf []byte, useMyMetrics bool) error { // Flags for decoding a compound glyph. These flags are documented at @@ -363,19 +358,19 @@ func (g *GlyphBuf) loadCompound(recursion int32, uhm HMetric, i Index, flagUseMyMetrics flagOverlapCompound ) - np0, ne0 := len(g.Point), len(g.End) + np0, ne0 := len(g.Points), len(g.Ends) offset := loadOffset for { flags := u16(glyf, offset) component := Index(u16(glyf, offset+2)) - dx, dy, transform, hasTransform := int32(0), int32(0), [4]int32{}, false + dx, dy, transform, hasTransform := fixed.Int26_6(0), fixed.Int26_6(0), [4]int16{}, false if flags&flagArg1And2AreWords != 0 { - dx = int32(int16(u16(glyf, offset+4))) - dy = int32(int16(u16(glyf, offset+6))) + dx = fixed.Int26_6(int16(u16(glyf, offset+4))) + dy = fixed.Int26_6(int16(u16(glyf, offset+6))) offset += 8 } else { - dx = int32(int16(int8(glyf[offset+4]))) - dy = int32(int16(int8(glyf[offset+5]))) + dx = fixed.Int26_6(int16(int8(glyf[offset+4]))) + dy = fixed.Int26_6(int16(int8(glyf[offset+5]))) offset += 6 } if flags&flagArgsAreXYValues == 0 { @@ -385,23 +380,23 @@ func (g *GlyphBuf) loadCompound(recursion int32, uhm HMetric, i Index, hasTransform = true switch { case flags&flagWeHaveAScale != 0: - transform[0] = int32(int16(u16(glyf, offset+0))) + transform[0] = int16(u16(glyf, offset+0)) transform[3] = transform[0] offset += 2 case flags&flagWeHaveAnXAndYScale != 0: - transform[0] = int32(int16(u16(glyf, offset+0))) - transform[3] = int32(int16(u16(glyf, offset+2))) + transform[0] = int16(u16(glyf, offset+0)) + transform[3] = int16(u16(glyf, offset+2)) offset += 4 case flags&flagWeHaveATwoByTwo != 0: - transform[0] = int32(int16(u16(glyf, offset+0))) - transform[1] = int32(int16(u16(glyf, offset+2))) - transform[2] = int32(int16(u16(glyf, offset+4))) - transform[3] = int32(int16(u16(glyf, offset+6))) + transform[0] = int16(u16(glyf, offset+0)) + transform[1] = int16(u16(glyf, offset+2)) + transform[2] = int16(u16(glyf, offset+4)) + transform[3] = int16(u16(glyf, offset+6)) offset += 8 } } savedPP := g.phantomPoints - np0 := len(g.Point) + np0 := len(g.Points) componentUMM := useMyMetrics && (flags&flagUseMyMetrics != 0) if err := g.load(recursion+1, component, componentUMM); err != nil { return err @@ -410,12 +405,14 @@ func (g *GlyphBuf) loadCompound(recursion int32, uhm HMetric, i Index, g.phantomPoints = savedPP } if hasTransform { - for j := np0; j < len(g.Point); j++ { - p := &g.Point[j] - newX := int32((int64(p.X)*int64(transform[0])+1<<13)>>14) + - int32((int64(p.Y)*int64(transform[2])+1<<13)>>14) - newY := int32((int64(p.X)*int64(transform[1])+1<<13)>>14) + - int32((int64(p.Y)*int64(transform[3])+1<<13)>>14) + for j := np0; j < 
len(g.Points); j++ { + p := &g.Points[j] + newX := 0 + + fixed.Int26_6((int64(p.X)*int64(transform[0])+1<<13)>>14) + + fixed.Int26_6((int64(p.Y)*int64(transform[2])+1<<13)>>14) + newY := 0 + + fixed.Int26_6((int64(p.X)*int64(transform[1])+1<<13)>>14) + + fixed.Int26_6((int64(p.Y)*int64(transform[3])+1<<13)>>14) p.X, p.Y = newX, newY } } @@ -425,8 +422,8 @@ func (g *GlyphBuf) loadCompound(recursion int32, uhm HMetric, i Index, dx = (dx + 32) &^ 63 dy = (dy + 32) &^ 63 } - for j := np0; j < len(g.Point); j++ { - p := &g.Point[j] + for j := np0; j < len(g.Points); j++ { + p := &g.Points[j] p.X += dx p.Y += dy } @@ -437,14 +434,14 @@ func (g *GlyphBuf) loadCompound(recursion int32, uhm HMetric, i Index, } instrLen := 0 - if g.hinting != NoHinting && offset+2 <= len(glyf) { + if g.hinting != font.HintingNone && offset+2 <= len(glyf) { instrLen = int(u16(glyf, offset)) offset += 2 } - g.addPhantomsAndScale(np0, len(g.Point), false, instrLen > 0) - points, ends := g.Point[np0:], g.End[ne0:] - g.Point = g.Point[:len(g.Point)-4] + g.addPhantomsAndScale(np0, len(g.Points), false, instrLen > 0) + points, ends := g.Points[np0:], g.Ends[ne0:] + g.Points = g.Points[:len(g.Points)-4] for j := range points { points[j].Flags &^= flagTouchedX | flagTouchedY } @@ -483,17 +480,17 @@ func (g *GlyphBuf) loadCompound(recursion int32, uhm HMetric, i Index, func (g *GlyphBuf) addPhantomsAndScale(np0, np1 int, simple, adjust bool) { // Add the four phantom points. - g.Point = append(g.Point, g.phantomPoints[:]...) + g.Points = append(g.Points, g.phantomPoints[:]...) // Scale the points. - if simple && g.hinting != NoHinting { - g.InFontUnits = append(g.InFontUnits, g.Point[np1:]...) + if simple && g.hinting != font.HintingNone { + g.InFontUnits = append(g.InFontUnits, g.Points[np1:]...) } - for i := np1; i < len(g.Point); i++ { - p := &g.Point[i] + for i := np1; i < len(g.Points); i++ { + p := &g.Points[i] p.X = g.font.scale(g.scale * p.X) p.Y = g.font.scale(g.scale * p.Y) } - if g.hinting == NoHinting { + if g.hinting == font.HintingNone { return } // Round the 1st phantom point to the grid, shifting all other points equally. @@ -502,29 +499,19 @@ func (g *GlyphBuf) addPhantomsAndScale(np0, np1 int, simple, adjust bool) { // we update the compatibility tests to C Freetype 2.5.3. // See http://git.savannah.gnu.org/cgit/freetype/freetype2.git/commit/?id=05c786d990390a7ca18e62962641dac740bacb06 if adjust { - pp1x := g.Point[len(g.Point)-4].X + pp1x := g.Points[len(g.Points)-4].X if dx := ((pp1x + 32) &^ 63) - pp1x; dx != 0 { - for i := np0; i < len(g.Point); i++ { - g.Point[i].X += dx + for i := np0; i < len(g.Points); i++ { + g.Points[i].X += dx } } } if simple { - g.Unhinted = append(g.Unhinted, g.Point[np1:]...) + g.Unhinted = append(g.Unhinted, g.Points[np1:]...) } // Round the 2nd and 4th phantom point to the grid. - p := &g.Point[len(g.Point)-3] + p := &g.Points[len(g.Points)-3] p.X = (p.X + 32) &^ 63 - p = &g.Point[len(g.Point)-1] + p = &g.Points[len(g.Points)-1] p.Y = (p.Y + 32) &^ 63 } - -// TODO: is this necessary? The zero-valued GlyphBuf is perfectly usable. - -// NewGlyphBuf returns a newly allocated GlyphBuf. 
-func NewGlyphBuf() *GlyphBuf { - return &GlyphBuf{ - Point: make([]Point, 0, 256), - End: make([]int, 0, 32), - } -} diff --git a/_third_party/code.google.com/p/freetype-go/freetype/truetype/hint.go b/_third_party/github.com/golang/freetype/truetype/hint.go similarity index 89% rename from _third_party/code.google.com/p/freetype-go/freetype/truetype/hint.go rename to _third_party/github.com/golang/freetype/truetype/hint.go index 26c631436d..0315de511c 100644 --- a/_third_party/code.google.com/p/freetype-go/freetype/truetype/hint.go +++ b/_third_party/github.com/golang/freetype/truetype/hint.go @@ -11,6 +11,8 @@ package truetype import ( "errors" "math" + + "golang.org/x/image/math/fixed" ) const ( @@ -47,7 +49,7 @@ type hinter struct { // Changing the font will require running the new font's fpgm bytecode. // Changing either will require running the font's prep bytecode. font *Font - scale int32 + scale fixed.Int26_6 // gs and defaultGS are the current and default graphics state. The // default graphics state is the global default graphics state after @@ -61,7 +63,7 @@ type hinter struct { // scaledCVT is the lazily initialized scaled Control Value Table. scaledCVTInitialized bool - scaledCVT []f26dot6 + scaledCVT []fixed.Int26_6 } // graphicsState is described at https://developer.apple.com/fonts/TTRefMan/RM04/Chap4.html @@ -71,15 +73,15 @@ type graphicsState struct { // Reference points and zone pointers. rp, zp [3]int32 // Control Value / Single Width Cut-In. - controlValueCutIn, singleWidthCutIn, singleWidth f26dot6 + controlValueCutIn, singleWidthCutIn, singleWidth fixed.Int26_6 // Delta base / shift. deltaBase, deltaShift int32 // Minimum distance. - minDist f26dot6 + minDist fixed.Int26_6 // Loop count. loop int32 // Rounding policy. - roundPeriod, roundPhase, roundThreshold f26dot6 + roundPeriod, roundPhase, roundThreshold fixed.Int26_6 roundSuper45 bool // Auto-flip. autoFlip bool @@ -90,13 +92,13 @@ var globalDefaultGS = graphicsState{ fv: [2]f2dot14{0x4000, 0}, dv: [2]f2dot14{0x4000, 0}, zp: [3]int32{1, 1, 1}, - controlValueCutIn: (17 << 6) / 16, // 17/16 as an f26dot6. + controlValueCutIn: (17 << 6) / 16, // 17/16 as a fixed.Int26_6. deltaBase: 9, deltaShift: 3, - minDist: 1 << 6, // 1 as an f26dot6. + minDist: 1 << 6, // 1 as a fixed.Int26_6. loop: 1, - roundPeriod: 1 << 6, // 1 as an f26dot6. - roundThreshold: 1 << 5, // 1/2 as an f26dot6. + roundPeriod: 1 << 6, // 1 as a fixed.Int26_6. + roundThreshold: 1 << 5, // 1/2 as a fixed.Int26_6. 
roundSuper45: false, autoFlip: true, } @@ -113,7 +115,7 @@ func resetTwilightPoints(f *Font, p []Point) []Point { return p } -func (h *hinter) init(f *Font, scale int32) error { +func (h *hinter) init(f *Font, scale fixed.Int26_6) error { h.points[twilightZone][0] = resetTwilightPoints(f, h.points[twilightZone][0]) h.points[twilightZone][1] = resetTwilightPoints(f, h.points[twilightZone][1]) h.points[twilightZone][2] = resetTwilightPoints(f, h.points[twilightZone][2]) @@ -315,8 +317,8 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, mulDiv(int64(dy), int64(dbx), 0x40) rx := mulDiv(val, int64(dax), discriminant) ry := mulDiv(val, int64(day), discriminant) - p.X = a0.X + int32(rx) - p.Y = a0.Y + int32(ry) + p.X = a0.X + fixed.Int26_6(rx) + p.Y = a0.Y + fixed.Int26_6(ry) } else { p.X = (a0.X + a1.X + b0.X + b1.X) / 4 p.Y = (a0.Y + a1.Y + b0.Y + b1.Y) / 4 @@ -358,7 +360,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, case opSMD: top-- - h.gs.minDist = f26dot6(h.stack[top]) + h.gs.minDist = fixed.Int26_6(h.stack[top]) case opELSE: opcode = 1 @@ -371,15 +373,15 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, case opSCVTCI: top-- - h.gs.controlValueCutIn = f26dot6(h.stack[top]) + h.gs.controlValueCutIn = fixed.Int26_6(h.stack[top]) case opSSWCI: top-- - h.gs.singleWidthCutIn = f26dot6(h.stack[top]) + h.gs.singleWidthCutIn = fixed.Int26_6(h.stack[top]) case opSSW: top-- - h.gs.singleWidth = f26dot6(h.font.scale(h.scale * h.stack[top])) + h.gs.singleWidth = h.font.scale(h.scale * fixed.Int26_6(h.stack[top])) case opDUP: if top >= len(h.stack) { @@ -422,7 +424,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, if p == nil || q == nil { return errors.New("truetype: hinting: point out of range") } - d := dotProduct(f26dot6(q.X-p.X), f26dot6(q.Y-p.Y), h.gs.pv) / 2 + d := dotProduct(fixed.Int26_6(q.X-p.X), fixed.Int26_6(q.Y-p.Y), h.gs.pv) / 2 h.move(p, +d, true) h.move(q, -d, true) @@ -500,9 +502,9 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, if p == nil { return errors.New("truetype: hinting: point out of range") } - distance := f26dot6(0) + distance := fixed.Int26_6(0) if opcode == opMDAP1 { - distance = dotProduct(f26dot6(p.X), f26dot6(p.Y), h.gs.pv) + distance = dotProduct(p.X, p.Y, h.gs.pv) // TODO: metrics compensation. 
distance = h.round(distance) - distance } @@ -608,7 +610,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, case opSHPIX: top-- - d := f26dot6(h.stack[top]) + d := fixed.Int26_6(h.stack[top]) if top < int(h.gs.loop) { return errors.New("truetype: hinting: stack underflow") } @@ -633,22 +635,22 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, } p := h.point(1, pointType, h.gs.rp[2]) oldP := h.point(0, pointType, h.gs.rp[1]) - oldRange := dotProduct(f26dot6(p.X-oldP.X), f26dot6(p.Y-oldP.Y), h.gs.dv) + oldRange := dotProduct(p.X-oldP.X, p.Y-oldP.Y, h.gs.dv) p = h.point(1, current, h.gs.rp[2]) curP := h.point(0, current, h.gs.rp[1]) - curRange := dotProduct(f26dot6(p.X-curP.X), f26dot6(p.Y-curP.Y), h.gs.pv) + curRange := dotProduct(p.X-curP.X, p.Y-curP.Y, h.gs.pv) for ; h.gs.loop != 0; h.gs.loop-- { top-- i := h.stack[top] p = h.point(2, pointType, i) - oldDist := dotProduct(f26dot6(p.X-oldP.X), f26dot6(p.Y-oldP.Y), h.gs.dv) + oldDist := dotProduct(p.X-oldP.X, p.Y-oldP.Y, h.gs.dv) p = h.point(2, current, i) - curDist := dotProduct(f26dot6(p.X-curP.X), f26dot6(p.Y-curP.Y), h.gs.pv) - newDist := f26dot6(0) + curDist := dotProduct(p.X-curP.X, p.Y-curP.Y, h.gs.pv) + newDist := fixed.Int26_6(0) if oldDist != 0 { if oldRange != 0 { - newDist = f26dot6(mulDiv(int64(oldDist), int64(curRange), int64(oldRange))) + newDist = fixed.Int26_6(mulDiv(int64(oldDist), int64(curRange), int64(oldRange))) } else { newDist = -oldDist } @@ -660,7 +662,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, case opMSIRP0, opMSIRP1: top -= 2 i := h.stack[top] - distance := f26dot6(h.stack[top+1]) + distance := fixed.Int26_6(h.stack[top+1]) // TODO: special case h.gs.zp[1] == 0 in C Freetype. ref := h.point(0, current, h.gs.rp[0]) @@ -668,7 +670,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, if ref == nil || p == nil { return errors.New("truetype: hinting: point out of range") } - curDist := dotProduct(f26dot6(p.X-ref.X), f26dot6(p.Y-ref.Y), h.gs.pv) + curDist := dotProduct(p.X-ref.X, p.Y-ref.Y, h.gs.pv) // Set-RP0 bit. if opcode == opMSIRP1 { @@ -694,7 +696,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, if p == nil { return errors.New("truetype: hinting: point out of range") } - h.move(p, -dotProduct(f26dot6(p.X-ref.X), f26dot6(p.Y-ref.Y), h.gs.pv), true) + h.move(p, -dotProduct(p.X-ref.X, p.Y-ref.Y, h.gs.pv), true) } h.gs.loop = 1 @@ -711,14 +713,14 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, if h.gs.zp[0] == 0 { p := h.point(0, unhinted, i) q := h.point(0, current, i) - p.X = int32((int64(distance) * int64(h.gs.fv[0])) >> 14) - p.Y = int32((int64(distance) * int64(h.gs.fv[1])) >> 14) + p.X = fixed.Int26_6((int64(distance) * int64(h.gs.fv[0])) >> 14) + p.Y = fixed.Int26_6((int64(distance) * int64(h.gs.fv[1])) >> 14) *q = *p } p := h.point(0, current, i) - oldDist := dotProduct(f26dot6(p.X), f26dot6(p.Y), h.gs.pv) + oldDist := dotProduct(p.X, p.Y, h.gs.pv) if opcode == opMIAP1 { - if (distance - oldDist).abs() > h.gs.controlValueCutIn { + if fabs(distance-oldDist) > h.gs.controlValueCutIn { distance = oldDist } // TODO: metrics compensation. 
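The grid arithmetic in these hunks leans on one property of 26.6 fixed point: a pixel is 64 units, so the low six bits are the fractional part. That is why x &^ 63 floors a value to the pixel grid and (x + 32) &^ 63 rounds it to the nearest pixel, as in the bounds and phantom-point snapping above. A minimal standalone Go sketch of the idiom (illustrative only, not part of the patch):

    package main

    import "fmt"

    func main() {
        x := int32(300)             // 300/64 = 4.6875 pixels in 26.6 fixed point
        fmt.Println(x &^ 63)        // 256, i.e. floor to 4.0 pixels
        fmt.Println((x + 32) &^ 63) // 320, i.e. round to 5.0 pixels
        fmt.Println((x + 63) &^ 63) // 320, i.e. ceil to 5.0 pixels
    }
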
@@ -753,7 +755,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, case opWCVTP: top -= 2 - h.setScaledCVT(h.stack[top], f26dot6(h.stack[top+1])) + h.setScaledCVT(h.stack[top], fixed.Int26_6(h.stack[top+1])) case opRCVT: h.stack[top-1] = int32(h.getScaledCVT(h.stack[top-1])) @@ -762,11 +764,11 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, i := h.stack[top-1] if opcode == opGC0 { p := h.point(2, current, i) - h.stack[top-1] = int32(dotProduct(f26dot6(p.X), f26dot6(p.Y), h.gs.pv)) + h.stack[top-1] = int32(dotProduct(p.X, p.Y, h.gs.pv)) } else { p := h.point(2, unhinted, i) // Using dv as per C Freetype. - h.stack[top-1] = int32(dotProduct(f26dot6(p.X), f26dot6(p.Y), h.gs.dv)) + h.stack[top-1] = int32(dotProduct(p.X, p.Y, h.gs.dv)) } case opSCFS: @@ -776,8 +778,8 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, if p == nil { return errors.New("truetype: hinting: point out of range") } - c := dotProduct(f26dot6(p.X), f26dot6(p.Y), h.gs.pv) - h.move(p, f26dot6(h.stack[top+1])-c, true) + c := dotProduct(p.X, p.Y, h.gs.pv) + h.move(p, fixed.Int26_6(h.stack[top+1])-c, true) if h.gs.zp[2] != 0 { break } @@ -807,9 +809,9 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, if p == nil || q == nil { return errors.New("truetype: hinting: point out of range") } - d := int32(dotProduct(f26dot6(p.X-q.X), f26dot6(p.Y-q.Y), v)) + d := int32(dotProduct(p.X-q.X, p.Y-q.Y, v)) if scale { - d = int32(int64(d*h.scale) / int64(h.font.fUnitsPerEm)) + d = int32(int64(d*int32(h.scale)) / int64(h.font.fUnitsPerEm)) } h.stack[top-1] = d @@ -818,7 +820,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, return errors.New("truetype: hinting: stack overflow") } // For MPS, point size should be irrelevant; we return the PPEM. - h.stack[top] = h.scale >> 6 + h.stack[top] = int32(h.scale) >> 6 top++ case opFLIPON, opFLIPOFF: @@ -852,7 +854,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, h.stack[top-1] = bool2int32(h.stack[top-1] != h.stack[top]) case opODD, opEVEN: - i := h.round(f26dot6(h.stack[top-1])) >> 6 + i := h.round(fixed.Int26_6(h.stack[top-1])) >> 6 h.stack[top-1] = int32(i&1) ^ int32(opcode-opODD) case opIF: @@ -900,11 +902,11 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, if h.stack[top] == 0 { return errors.New("truetype: hinting: division by zero") } - h.stack[top-1] = int32(f26dot6(h.stack[top-1]).div(f26dot6(h.stack[top]))) + h.stack[top-1] = int32(fdiv(fixed.Int26_6(h.stack[top-1]), fixed.Int26_6(h.stack[top]))) case opMUL: top-- - h.stack[top-1] = int32(f26dot6(h.stack[top-1]).mul(f26dot6(h.stack[top]))) + h.stack[top-1] = int32(fmul(fixed.Int26_6(h.stack[top-1]), fixed.Int26_6(h.stack[top]))) case opABS: if h.stack[top-1] < 0 { @@ -924,7 +926,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, case opROUND00, opROUND01, opROUND10, opROUND11: // The four flavors of opROUND are equivalent. See the comment below on // opNROUND for the rationale. - h.stack[top-1] = int32(h.round(f26dot6(h.stack[top-1]))) + h.stack[top-1] = int32(h.round(fixed.Int26_6(h.stack[top-1]))) case opNROUND00, opNROUND01, opNROUND10, opNROUND11: // No-op. 
The spec says to add one of four "compensations for the engine @@ -935,7 +937,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, case opWCVTF: top -= 2 - h.setScaledCVT(h.stack[top], f26dot6(h.font.scale(h.scale*h.stack[top+1]))) + h.setScaledCVT(h.stack[top], h.font.scale(h.scale*fixed.Int26_6(h.stack[top+1]))) case opDELTAP2, opDELTAP3, opDELTAC1, opDELTAC2, opDELTAC3: goto delta @@ -957,9 +959,9 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, h.gs.roundPeriod *= 46341 h.gs.roundPeriod /= 65536 } - h.gs.roundPhase = h.gs.roundPeriod * f26dot6((h.stack[top]>>4)&0x03) / 4 + h.gs.roundPhase = h.gs.roundPeriod * fixed.Int26_6((h.stack[top]>>4)&0x03) / 4 if x := h.stack[top] & 0x0f; x != 0 { - h.gs.roundThreshold = h.gs.roundPeriod * f26dot6(x-4) / 8 + h.gs.roundThreshold = h.gs.roundPeriod * fixed.Int26_6(x-4) / 8 } else { h.gs.roundThreshold = h.gs.roundPeriod - 1 } @@ -1135,20 +1137,20 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, return errors.New("truetype: hinting: point out of range") } - oldDist := f26dot6(0) + oldDist := fixed.Int26_6(0) if h.gs.zp[0] == 0 || h.gs.zp[1] == 0 { p0 := h.point(1, unhinted, i) p1 := h.point(0, unhinted, h.gs.rp[0]) - oldDist = dotProduct(f26dot6(p0.X-p1.X), f26dot6(p0.Y-p1.Y), h.gs.dv) + oldDist = dotProduct(p0.X-p1.X, p0.Y-p1.Y, h.gs.dv) } else { p0 := h.point(1, inFontUnits, i) p1 := h.point(0, inFontUnits, h.gs.rp[0]) - oldDist = dotProduct(f26dot6(p0.X-p1.X), f26dot6(p0.Y-p1.Y), h.gs.dv) - oldDist = f26dot6(h.font.scale(h.scale * int32(oldDist))) + oldDist = dotProduct(p0.X-p1.X, p0.Y-p1.Y, h.gs.dv) + oldDist = h.font.scale(h.scale * oldDist) } // Single-width cut-in test. - if x := (oldDist - h.gs.singleWidth).abs(); x < h.gs.singleWidthCutIn { + if x := fabs(oldDist - h.gs.singleWidth); x < h.gs.singleWidthCutIn { if oldDist >= 0 { oldDist = +h.gs.singleWidth } else { @@ -1184,7 +1186,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, } // Move the point. - oldDist = dotProduct(f26dot6(p.X-ref.X), f26dot6(p.Y-ref.Y), h.gs.pv) + oldDist = dotProduct(p.X-ref.X, p.Y-ref.Y, h.gs.pv) h.move(p, distance-oldDist, true) } else { @@ -1193,7 +1195,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, top -= 2 i := h.stack[top] cvtDist := h.getScaledCVT(h.stack[top+1]) - if (cvtDist - h.gs.singleWidth).abs() < h.gs.singleWidthCutIn { + if fabs(cvtDist-h.gs.singleWidth) < h.gs.singleWidthCutIn { if cvtDist >= 0 { cvtDist = +h.gs.singleWidth } else { @@ -1212,14 +1214,14 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, if ref == nil || p == nil { return errors.New("truetype: hinting: point out of range") } - oldDist := dotProduct(f26dot6(p.X-ref.X), f26dot6(p.Y-ref.Y), h.gs.dv) + oldDist := dotProduct(p.X-ref.X, p.Y-ref.Y, h.gs.dv) ref = h.point(0, current, h.gs.rp[0]) p = h.point(1, current, i) if ref == nil || p == nil { return errors.New("truetype: hinting: point out of range") } - curDist := dotProduct(f26dot6(p.X-ref.X), f26dot6(p.Y-ref.Y), h.gs.pv) + curDist := dotProduct(p.X-ref.X, p.Y-ref.Y, h.gs.pv) if h.gs.autoFlip && oldDist^cvtDist < 0 { cvtDist = -cvtDist @@ -1231,7 +1233,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, if opcode&0x04 != 0 { // The CVT value is only used if close enough to oldDist. 
if (h.gs.zp[0] == h.gs.zp[1]) && - ((cvtDist - oldDist).abs() > h.gs.controlValueCutIn) { + (fabs(cvtDist-oldDist) > h.gs.controlValueCutIn) { distance = oldDist } @@ -1358,7 +1360,7 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, c += 32 } c += h.gs.deltaBase - if ppem := (h.scale + 1<<5) >> 6; ppem != c { + if ppem := (int32(h.scale) + 1<<5) >> 6; ppem != c { continue } b = (b & 0x0f) - 8 @@ -1371,13 +1373,13 @@ func (h *hinter) run(program []byte, pCurrent, pUnhinted, pInFontUnits []Point, if a < 0 || len(h.scaledCVT) <= int(a) { return errors.New("truetype: hinting: index out of range") } - h.scaledCVT[a] += f26dot6(b) + h.scaledCVT[a] += fixed.Int26_6(b) } else { p := h.point(0, current, h.stack[top+1]) if p == nil { return errors.New("truetype: hinting: point out of range") } - h.move(p, f26dot6(b), true) + h.move(p, fixed.Int26_6(b), true) } } pc++ @@ -1395,16 +1397,16 @@ func (h *hinter) initializeScaledCVT() { if n < 32 { n = 32 } - h.scaledCVT = make([]f26dot6, len(h.font.cvt)/2, n) + h.scaledCVT = make([]fixed.Int26_6, len(h.font.cvt)/2, n) } for i := range h.scaledCVT { unscaled := uint16(h.font.cvt[2*i])<<8 | uint16(h.font.cvt[2*i+1]) - h.scaledCVT[i] = f26dot6(h.font.scale(h.scale * int32(int16(unscaled)))) + h.scaledCVT[i] = h.font.scale(h.scale * fixed.Int26_6(int16(unscaled))) } } // getScaledCVT returns the scaled value from the font's Control Value Table. -func (h *hinter) getScaledCVT(i int32) f26dot6 { +func (h *hinter) getScaledCVT(i int32) fixed.Int26_6 { if !h.scaledCVTInitialized { h.initializeScaledCVT() } @@ -1415,7 +1417,7 @@ func (h *hinter) getScaledCVT(i int32) f26dot6 { } // setScaledCVT overrides the scaled value from the font's Control Value Table. -func (h *hinter) setScaledCVT(i int32, v f26dot6) { +func (h *hinter) setScaledCVT(i int32, v fixed.Int26_6) { if !h.scaledCVTInitialized { h.initializeScaledCVT() } @@ -1433,11 +1435,11 @@ func (h *hinter) point(zonePointer uint32, pt pointType, i int32) *Point { return &points[i] } -func (h *hinter) move(p *Point, distance f26dot6, touch bool) { +func (h *hinter) move(p *Point, distance fixed.Int26_6, touch bool) { fvx := int64(h.gs.fv[0]) pvx := int64(h.gs.pv[0]) if fvx == 0x4000 && pvx == 0x4000 { - p.X += int32(distance) + p.X += fixed.Int26_6(distance) if touch { p.Flags |= flagTouchedX } @@ -1447,7 +1449,7 @@ func (h *hinter) move(p *Point, distance f26dot6, touch bool) { fvy := int64(h.gs.fv[1]) pvy := int64(h.gs.pv[1]) if fvy == 0x4000 && pvy == 0x4000 { - p.Y += int32(distance) + p.Y += fixed.Int26_6(distance) if touch { p.Flags |= flagTouchedY } @@ -1457,14 +1459,14 @@ func (h *hinter) move(p *Point, distance f26dot6, touch bool) { fvDotPv := (fvx*pvx + fvy*pvy) >> 14 if fvx != 0 { - p.X += int32(mulDiv(fvx, int64(distance), fvDotPv)) + p.X += fixed.Int26_6(mulDiv(fvx, int64(distance), fvDotPv)) if touch { p.Flags |= flagTouchedX } } if fvy != 0 { - p.Y += int32(mulDiv(fvy, int64(distance), fvDotPv)) + p.Y += fixed.Int26_6(mulDiv(fvy, int64(distance), fvDotPv)) if touch { p.Flags |= flagTouchedY } @@ -1480,7 +1482,7 @@ func (h *hinter) iupInterp(interpY bool, p1, p2, ref1, ref2 int) { return } - var ifu1, ifu2 int32 + var ifu1, ifu2 fixed.Int26_6 if interpY { ifu1 = h.points[glyphZone][inFontUnits][ref1].Y ifu2 = h.points[glyphZone][inFontUnits][ref2].Y @@ -1493,7 +1495,7 @@ func (h *hinter) iupInterp(interpY bool, p1, p2, ref1, ref2 int) { ref1, ref2 = ref2, ref1 } - var unh1, unh2, delta1, delta2 int32 + var unh1, unh2, delta1, delta2 fixed.Int26_6 if interpY { 
unh1 = h.points[glyphZone][unhinted][ref1].Y unh2 = h.points[glyphZone][unhinted][ref2].Y @@ -1506,7 +1508,7 @@ func (h *hinter) iupInterp(interpY bool, p1, p2, ref1, ref2 int) { delta2 = h.points[glyphZone][current][ref2].X - unh2 } - var xy, ifuXY int32 + var xy, ifuXY fixed.Int26_6 if ifu1 == ifu2 { for i := p1; i <= p2; i++ { if interpY { @@ -1555,7 +1557,7 @@ func (h *hinter) iupInterp(interpY bool, p1, p2, ref1, ref2 int) { } else { numer -= 0x8000 } - xy = unh1 + delta1 + int32(numer/0x10000) + xy = unh1 + delta1 + fixed.Int26_6(numer/0x10000) } if interpY { @@ -1567,7 +1569,7 @@ func (h *hinter) iupInterp(interpY bool, p1, p2, ref1, ref2 int) { } func (h *hinter) iupShift(interpY bool, p1, p2, p int) { - var delta int32 + var delta fixed.Int26_6 if interpY { delta = h.points[glyphZone][current][p].Y - h.points[glyphZone][unhinted][p].Y } else { @@ -1588,7 +1590,7 @@ func (h *hinter) iupShift(interpY bool, p1, p2, p int) { } } -func (h *hinter) displacement(useZP1 bool) (zonePointer uint32, i int32, d f26dot6, ok bool) { +func (h *hinter) displacement(useZP1 bool) (zonePointer uint32, i int32, d fixed.Int26_6, ok bool) { zonePointer, i = uint32(0), h.gs.rp[1] if useZP1 { zonePointer, i = 1, h.gs.rp[2] @@ -1598,7 +1600,7 @@ func (h *hinter) displacement(useZP1 bool) (zonePointer uint32, i int32, d f26do if p == nil || q == nil { return 0, 0, 0, false } - d = dotProduct(f26dot6(p.X-q.X), f26dot6(p.Y-q.Y), h.gs.pv) + d = dotProduct(p.X-q.X, p.Y-q.Y, h.gs.pv) return zonePointer, i, d, true } @@ -1649,25 +1651,22 @@ func normalize(x, y f2dot14) [2]f2dot14 { return [2]f2dot14{f2dot14(fx), f2dot14(fy)} } -// f26dot6 is a 26.6 fixed point number. -type f26dot6 int32 - -// abs returns abs(x) in 26.6 fixed point arithmetic. -func (x f26dot6) abs() f26dot6 { +// fabs returns abs(x) in 26.6 fixed point arithmetic. +func fabs(x fixed.Int26_6) fixed.Int26_6 { if x < 0 { return -x } return x } -// div returns x/y in 26.6 fixed point arithmetic. -func (x f26dot6) div(y f26dot6) f26dot6 { - return f26dot6((int64(x) << 6) / int64(y)) +// fdiv returns x/y in 26.6 fixed point arithmetic. +func fdiv(x, y fixed.Int26_6) fixed.Int26_6 { + return fixed.Int26_6((int64(x) << 6) / int64(y)) } -// mul returns x*y in 26.6 fixed point arithmetic. -func (x f26dot6) mul(y f26dot6) f26dot6 { - return f26dot6((int64(x)*int64(y) + 1<<5) >> 6) +// fmul returns x*y in 26.6 fixed point arithmetic. +func fmul(x, y fixed.Int26_6) fixed.Int26_6 { + return fixed.Int26_6((int64(x)*int64(y) + 1<<5) >> 6) } // dotProduct returns the dot product of [x, y] and q. It is almost the same as @@ -1675,10 +1674,10 @@ func (x f26dot6) mul(y f26dot6) f26dot6 { // py := int64(y) // qx := int64(q[0]) // qy := int64(q[1]) -// return f26dot6((px*qx + py*qy + 1<<13) >> 14) +// return fixed.Int26_6((px*qx + py*qy + 1<<13) >> 14) // except that the computation is done with 32-bit integers to produce exactly // the same rounding behavior as C Freetype. -func dotProduct(x, y f26dot6, q [2]f2dot14) f26dot6 { +func dotProduct(x, y fixed.Int26_6, q [2]f2dot14) fixed.Int26_6 { // Compute x*q[0] as 64-bit value. l := uint32((int32(x) & 0xFFFF) * int32(q[0])) m := (int32(x) >> 16) * int32(q[0]) @@ -1706,7 +1705,7 @@ func dotProduct(x, y f26dot6, q [2]f2dot14) f26dot6 { l = lo + 0x2000 hi += bool2int32(l < lo) - return f26dot6((uint32(hi) << 18) | (l >> 14)) + return fixed.Int26_6((uint32(hi) << 18) | (l >> 14)) } // mulDiv returns x*y/z, rounded to the nearest integer. 
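The fabs, fdiv and fmul helpers above replace the old f26dot6 methods with plain functions over fixed.Int26_6, keeping the same rounding behavior. A self-contained sketch of what the multiply and divide do (the helpers are re-declared locally here because the package versions are unexported):

    package main

    import "fmt"

    // 26.6 fixed point: value = raw / 64.
    func fmul(x, y int32) int32 { return int32((int64(x)*int64(y) + 1<<5) >> 6) }
    func fdiv(x, y int32) int32 { return int32((int64(x) << 6) / int64(y)) }

    func main() {
        three, half := int32(192), int32(32) // 3.0 and 0.5
        fmt.Println(fmul(three, half))       // 96, i.e. 1.5
        fmt.Println(fdiv(three, half))       // 384, i.e. 6.0
    }
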
@@ -1725,7 +1724,7 @@ func mulDiv(x, y, z int64) int64 { // round rounds the given number. The rounding algorithm is described at // https://developer.apple.com/fonts/TTRefMan/RM02/Chap2.html#rounding -func (h *hinter) round(x f26dot6) f26dot6 { +func (h *hinter) round(x fixed.Int26_6) fixed.Int26_6 { if h.gs.roundPeriod == 0 { // Rounding is off. return x diff --git a/_third_party/code.google.com/p/freetype-go/freetype/truetype/hint_test.go b/_third_party/github.com/golang/freetype/truetype/hint_test.go similarity index 99% rename from _third_party/code.google.com/p/freetype-go/freetype/truetype/hint_test.go rename to _third_party/github.com/golang/freetype/truetype/hint_test.go index c8b8d604df..7eb43dde07 100644 --- a/_third_party/code.google.com/p/freetype-go/freetype/truetype/hint_test.go +++ b/_third_party/github.com/golang/freetype/truetype/hint_test.go @@ -9,6 +9,8 @@ import ( "reflect" "strings" "testing" + + "golang.org/x/image/math/fixed" ) func TestBytecode(t *testing.T) { @@ -589,7 +591,7 @@ func TestMove(t *testing.T) { h, p := hinter{}, Point{} testCases := []struct { pvX, pvY, fvX, fvY f2dot14 - wantX, wantY int32 + wantX, wantY fixed.Int26_6 }{ {+0x4000, +0x0000, +0x4000, +0x0000, +1000, +0}, {+0x4000, +0x0000, -0x4000, +0x0000, +1000, +0}, diff --git a/_third_party/code.google.com/p/freetype-go/freetype/truetype/opcodes.go b/_third_party/github.com/golang/freetype/truetype/opcodes.go similarity index 100% rename from _third_party/code.google.com/p/freetype-go/freetype/truetype/opcodes.go rename to _third_party/github.com/golang/freetype/truetype/opcodes.go diff --git a/_third_party/code.google.com/p/freetype-go/freetype/truetype/truetype.go b/_third_party/github.com/golang/freetype/truetype/truetype.go similarity index 70% rename from _third_party/code.google.com/p/freetype-go/freetype/truetype/truetype.go rename to _third_party/github.com/golang/freetype/truetype/truetype.go index 96ceef5479..76b911782a 100644 --- a/_third_party/code.google.com/p/freetype-go/freetype/truetype/truetype.go +++ b/_third_party/github.com/golang/freetype/truetype/truetype.go @@ -9,36 +9,68 @@ // // Some of a font's methods provide lengths or co-ordinates, e.g. bounds, font // metrics and control points. All these methods take a scale parameter, which -// is the number of device units in 1 em. For example, if 1 em is 10 pixels and -// 1 pixel is 64 units, then scale is 640. If the device space involves pixels, -// 64 units per pixel is recommended, since that is what the bytecode hinter -// uses when snapping point co-ordinates to the pixel grid. +// is the number of pixels in 1 em, expressed as a 26.6 fixed point value. For +// example, if 1 em is 10 pixels then scale is fixed.I(10), which is equal to +// fixed.Int26_6(10 << 6). // // To measure a TrueType font in ideal FUnit space, use scale equal to // font.FUnitsPerEm(). -package truetype +package truetype // import "bosun.org/_third_party/github.com/golang/freetype/truetype" import ( "fmt" + + "golang.org/x/image/math/fixed" ) // An Index is a Font's index of a rune. type Index uint16 -// A Bounds holds the co-ordinate range of one or more glyphs. -// The endpoints are inclusive. -type Bounds struct { - XMin, YMin, XMax, YMax int32 -} +// A NameID identifies a name table entry. 
+// +// See https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html +type NameID uint16 + +const ( + NameIDCopyright NameID = 0 + NameIDFontFamily = 1 + NameIDFontSubfamily = 2 + NameIDUniqueSubfamilyID = 3 + NameIDFontFullName = 4 + NameIDNameTableVersion = 5 + NameIDPostscriptName = 6 + NameIDTrademarkNotice = 7 + NameIDManufacturerName = 8 + NameIDDesignerName = 9 + NameIDFontDescription = 10 + NameIDFontVendorURL = 11 + NameIDFontDesignerURL = 12 + NameIDFontLicense = 13 + NameIDFontLicenseURL = 14 + NameIDPreferredFamily = 16 + NameIDPreferredSubfamily = 17 + NameIDCompatibleName = 18 + NameIDSampleText = 19 +) + +const ( + // A 32-bit encoding consists of a most-significant 16-bit Platform ID and a + // least-significant 16-bit Platform Specific ID. The magic numbers are + // specified at https://www.microsoft.com/typography/otspec/name.htm + unicodeEncoding = 0x00000003 // PID = 0 (Unicode), PSID = 3 (Unicode 2.0) + microsoftSymbolEncoding = 0x00030000 // PID = 3 (Microsoft), PSID = 0 (Symbol) + microsoftUCS2Encoding = 0x00030001 // PID = 3 (Microsoft), PSID = 1 (UCS-2) + microsoftUCS4Encoding = 0x0003000a // PID = 3 (Microsoft), PSID = 10 (UCS-4) +) // An HMetric holds the horizontal metrics of a single glyph. type HMetric struct { - AdvanceWidth, LeftSideBearing int32 + AdvanceWidth, LeftSideBearing fixed.Int26_6 } // A VMetric holds the vertical metrics of a single glyph. type VMetric struct { - AdvanceHeight, TopSideBearing int32 + AdvanceHeight, TopSideBearing fixed.Int26_6 } // A FormatError reports that the input is not a valid TrueType font. @@ -83,6 +115,51 @@ func readTable(ttf []byte, offsetLength []byte) ([]byte, error) { return ttf[offset:end], nil } +// parseSubtables returns the offset and platformID of the best subtable in +// table, where best favors a Unicode cmap encoding, and failing that, a +// Microsoft cmap encoding. offset is the offset of the first subtable in +// table, and size is the size of each subtable. +// +// If pred is non-nil, then only subtables that satisfy that predicate will be +// considered. +func parseSubtables(table []byte, name string, offset, size int, pred func([]byte) bool) ( + bestOffset int, bestPID uint32, retErr error) { + + if len(table) < 4 { + return 0, 0, FormatError(name + " too short") + } + nSubtables := int(u16(table, 2)) + if len(table) < size*nSubtables+offset { + return 0, 0, FormatError(name + " too short") + } + ok := false + for i := 0; i < nSubtables; i, offset = i+1, offset+size { + if pred != nil && !pred(table[offset:]) { + continue + } + // We read the 16-bit Platform ID and 16-bit Platform Specific ID as a single uint32. + // All values are big-endian. + pidPsid := u32(table, offset) + // We prefer the Unicode cmap encoding. Failing to find that, we fall + // back onto the Microsoft cmap encoding. + if pidPsid == unicodeEncoding { + bestOffset, bestPID, ok = offset, pidPsid>>16, true + break + + } else if pidPsid == microsoftSymbolEncoding || + pidPsid == microsoftUCS2Encoding || + pidPsid == microsoftUCS4Encoding { + + bestOffset, bestPID, ok = offset, pidPsid>>16, true + // We don't break out of the for loop, so that Unicode can override Microsoft. + } + } + if !ok { + return 0, 0, UnsupportedError(name + " encoding") + } + return bestOffset, bestPID, nil +} + const ( locaOffsetFormatUnknown int = iota locaOffsetFormatShort @@ -98,7 +175,7 @@ type cm struct { type Font struct { // Tables sliced from the TTF data. 
The different tables are documented // at http://developer.apple.com/fonts/TTRefMan/RM06/Chap6.html - cmap, cvt, fpgm, glyf, hdmx, head, hhea, hmtx, kern, loca, maxp, os2, prep, vmtx []byte + cmap, cvt, fpgm, glyf, hdmx, head, hhea, hmtx, kern, loca, maxp, name, os2, prep, vmtx []byte cmapIndexes []byte @@ -107,7 +184,7 @@ type Font struct { locaOffsetFormat int nGlyph, nHMetric, nKern int fUnitsPerEm int32 - bounds Bounds + bounds fixed.Rectangle26_6 // Values from the maxp section. maxTwilightPoints, maxStorage, maxFunctionDefs, maxStackElements uint16 } @@ -117,46 +194,13 @@ func (f *Font) parseCmap() error { cmapFormat4 = 4 cmapFormat12 = 12 languageIndependent = 0 - - // A 32-bit encoding consists of a most-significant 16-bit Platform ID and a - // least-significant 16-bit Platform Specific ID. The magic numbers are - // specified at https://www.microsoft.com/typography/otspec/name.htm - unicodeEncoding = 0x00000003 // PID = 0 (Unicode), PSID = 3 (Unicode 2.0) - microsoftSymbolEncoding = 0x00030000 // PID = 3 (Microsoft), PSID = 0 (Symbol) - microsoftUCS2Encoding = 0x00030001 // PID = 3 (Microsoft), PSID = 1 (UCS-2) - microsoftUCS4Encoding = 0x0003000a // PID = 3 (Microsoft), PSID = 10 (UCS-4) ) - if len(f.cmap) < 4 { - return FormatError("cmap too short") - } - nsubtab := int(u16(f.cmap, 2)) - if len(f.cmap) < 8*nsubtab+4 { - return FormatError("cmap too short") - } - offset, found, x := 0, false, 4 - for i := 0; i < nsubtab; i++ { - // We read the 16-bit Platform ID and 16-bit Platform Specific ID as a single uint32. - // All values are big-endian. - pidPsid, o := u32(f.cmap, x), u32(f.cmap, x+4) - x += 8 - // We prefer the Unicode cmap encoding. Failing to find that, we fall - // back onto the Microsoft cmap encoding. - if pidPsid == unicodeEncoding { - offset, found = int(o), true - break - - } else if pidPsid == microsoftSymbolEncoding || - pidPsid == microsoftUCS2Encoding || - pidPsid == microsoftUCS4Encoding { - - offset, found = int(o), true - // We don't break out of the for loop, so that Unicode can override Microsoft. - } - } - if !found { - return UnsupportedError("cmap encoding") + offset, _, err := parseSubtables(f.cmap, "cmap", 4, 8, nil) + if err != nil { + return err } + offset = int(u32(f.cmap, offset+4)) if offset <= 0 || offset > len(f.cmap) { return FormatError("bad cmap offset") } @@ -226,10 +270,10 @@ func (f *Font) parseHead() error { return FormatError(fmt.Sprintf("bad head length: %d", len(f.head))) } f.fUnitsPerEm = int32(u16(f.head, 18)) - f.bounds.XMin = int32(int16(u16(f.head, 36))) - f.bounds.YMin = int32(int16(u16(f.head, 38))) - f.bounds.XMax = int32(int16(u16(f.head, 40))) - f.bounds.YMax = int32(int16(u16(f.head, 42))) + f.bounds.Min.X = fixed.Int26_6(int16(u16(f.head, 36))) + f.bounds.Min.Y = fixed.Int26_6(int16(u16(f.head, 38))) + f.bounds.Max.X = fixed.Int26_6(int16(u16(f.head, 40))) + f.bounds.Max.Y = fixed.Int26_6(int16(u16(f.head, 42))) switch i := u16(f.head, 50); i { case 0: f.locaOffsetFormat = locaOffsetFormatShort @@ -306,22 +350,22 @@ func (f *Font) parseMaxp() error { } // scale returns x divided by f.fUnitsPerEm, rounded to the nearest integer. -func (f *Font) scale(x int32) int32 { +func (f *Font) scale(x fixed.Int26_6) fixed.Int26_6 { if x >= 0 { - x += f.fUnitsPerEm / 2 + x += fixed.Int26_6(f.fUnitsPerEm) / 2 } else { - x -= f.fUnitsPerEm / 2 + x -= fixed.Int26_6(f.fUnitsPerEm) / 2 } - return x / f.fUnitsPerEm + return x / fixed.Int26_6(f.fUnitsPerEm) } // Bounds returns the union of a Font's glyphs' bounds. 
-func (f *Font) Bounds(scale int32) Bounds { +func (f *Font) Bounds(scale fixed.Int26_6) fixed.Rectangle26_6 { b := f.bounds - b.XMin = f.scale(scale * b.XMin) - b.YMin = f.scale(scale * b.YMin) - b.XMax = f.scale(scale * b.XMax) - b.YMax = f.scale(scale * b.YMax) + b.Min.X = f.scale(scale * b.Min.X) + b.Min.Y = f.scale(scale * b.Min.Y) + b.Max.X = f.scale(scale * b.Max.X) + b.Max.Y = f.scale(scale * b.Max.Y) return b } @@ -350,6 +394,44 @@ func (f *Font) Index(x rune) Index { return 0 } +// Name returns the Font's name value for the given NameID. It returns "" if +// there was an error, or if that name was not found. +func (f *Font) Name(id NameID) string { + x, platformID, err := parseSubtables(f.name, "name", 6, 12, func(b []byte) bool { + return NameID(u16(b, 6)) == id + }) + if err != nil { + return "" + } + offset, length := u16(f.name, 4)+u16(f.name, x+10), u16(f.name, x+8) + // Return the ASCII value of the encoded string. + // The string is encoded as UTF-16 on non-Apple platformIDs; Apple is platformID 1. + src := f.name[offset : offset+length] + var dst []byte + if platformID != 1 { // UTF-16. + if len(src)&1 != 0 { + return "" + } + dst = make([]byte, len(src)/2) + for i := range dst { + dst[i] = printable(u16(src, 2*i)) + } + } else { // ASCII. + dst = make([]byte, len(src)) + for i, c := range src { + dst[i] = printable(uint16(c)) + } + } + return string(dst) +} + +func printable(r uint16) byte { + if 0x20 <= r && r < 0x7f { + return byte(r) + } + return '?' +} + // unscaledHMetric returns the unscaled horizontal metrics for the glyph with // the given index. func (f *Font) unscaledHMetric(i Index) (h HMetric) { @@ -360,18 +442,18 @@ func (f *Font) unscaledHMetric(i Index) (h HMetric) { if j >= f.nHMetric { p := 4 * (f.nHMetric - 1) return HMetric{ - AdvanceWidth: int32(u16(f.hmtx, p)), - LeftSideBearing: int32(int16(u16(f.hmtx, p+2*(j-f.nHMetric)+4))), + AdvanceWidth: fixed.Int26_6(u16(f.hmtx, p)), + LeftSideBearing: fixed.Int26_6(int16(u16(f.hmtx, p+2*(j-f.nHMetric)+4))), } } return HMetric{ - AdvanceWidth: int32(u16(f.hmtx, 4*j)), - LeftSideBearing: int32(int16(u16(f.hmtx, 4*j+2))), + AdvanceWidth: fixed.Int26_6(u16(f.hmtx, 4*j)), + LeftSideBearing: fixed.Int26_6(int16(u16(f.hmtx, 4*j+2))), } } // HMetric returns the horizontal metrics for the glyph with the given index. -func (f *Font) HMetric(scale int32, i Index) HMetric { +func (f *Font) HMetric(scale fixed.Int26_6, i Index) HMetric { h := f.unscaledHMetric(i) h.AdvanceWidth = f.scale(scale * h.AdvanceWidth) h.LeftSideBearing = f.scale(scale * h.LeftSideBearing) @@ -380,15 +462,15 @@ func (f *Font) HMetric(scale int32, i Index) HMetric { // unscaledVMetric returns the unscaled vertical metrics for the glyph with // the given index. yMax is the top of the glyph's bounding box. -func (f *Font) unscaledVMetric(i Index, yMax int32) (v VMetric) { +func (f *Font) unscaledVMetric(i Index, yMax fixed.Int26_6) (v VMetric) { j := int(i) if j < 0 || f.nGlyph <= j { return VMetric{} } if 4*j+4 <= len(f.vmtx) { return VMetric{ - AdvanceHeight: int32(u16(f.vmtx, 4*j)), - TopSideBearing: int32(int16(u16(f.vmtx, 4*j+2))), + AdvanceHeight: fixed.Int26_6(u16(f.vmtx, 4*j)), + TopSideBearing: fixed.Int26_6(int16(u16(f.vmtx, 4*j+2))), } } // The OS/2 table has grown over time. 
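Taken together, these hunks change the public metric types from int32 to fixed.Int26_6, so callers now pass a 26.6 pixels-per-em scale rather than a raw unit count. A usage sketch against the new signatures (the font path and error handling are illustrative):

    package main

    import (
        "fmt"
        "io/ioutil"

        "bosun.org/_third_party/github.com/golang/freetype/truetype"
        "golang.org/x/image/math/fixed"
    )

    func main() {
        b, err := ioutil.ReadFile("luxisr.ttf") // hypothetical path
        if err != nil {
            panic(err)
        }
        f, err := truetype.Parse(b)
        if err != nil {
            panic(err)
        }
        scale := fixed.I(12) // 12 pixels per em, as a 26.6 value
        i := f.Index('A')
        fmt.Println(f.HMetric(scale, i)) // HMetric now holds fixed.Int26_6 fields
        fmt.Println(f.Bounds(scale))     // fixed.Rectangle26_6 instead of Bounds
    }
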
@@ -397,21 +479,21 @@ func (f *Font) unscaledVMetric(i Index, yMax int32) (v VMetric) { // the ascender and descender, are described at // http://www.microsoft.com/typography/otspec/os2.htm if len(f.os2) >= 72 { - sTypoAscender := int32(int16(u16(f.os2, 68))) - sTypoDescender := int32(int16(u16(f.os2, 70))) + sTypoAscender := fixed.Int26_6(int16(u16(f.os2, 68))) + sTypoDescender := fixed.Int26_6(int16(u16(f.os2, 70))) return VMetric{ AdvanceHeight: sTypoAscender - sTypoDescender, TopSideBearing: sTypoAscender - yMax, } } return VMetric{ - AdvanceHeight: f.fUnitsPerEm, + AdvanceHeight: fixed.Int26_6(f.fUnitsPerEm), TopSideBearing: 0, } } // VMetric returns the vertical metrics for the glyph with the given index. -func (f *Font) VMetric(scale int32, i Index) VMetric { +func (f *Font) VMetric(scale fixed.Int26_6, i Index) VMetric { // TODO: should 0 be bounds.YMax? v := f.unscaledVMetric(i, 0) v.AdvanceHeight = f.scale(scale * v.AdvanceHeight) @@ -419,8 +501,9 @@ func (f *Font) VMetric(scale int32, i Index) VMetric { return v } -// Kerning returns the kerning for the given glyph pair. -func (f *Font) Kerning(scale int32, i0, i1 Index) int32 { +// Kern returns the horizontal adjustment for the given glyph pair. A positive +// kern means to move the glyphs further apart. +func (f *Font) Kern(scale fixed.Int26_6, i0, i1 Index) fixed.Int26_6 { if f.nKern == 0 { return 0 } @@ -434,7 +517,7 @@ func (f *Font) Kerning(scale int32, i0, i1 Index) int32 { } else if ig > g { hi = i } else { - return f.scale(scale * int32(int16(u16(f.kern, 22+6*i)))) + return f.scale(scale * fixed.Int26_6(int16(u16(f.kern, 22+6*i)))) } } return 0 @@ -522,6 +605,8 @@ func parse(ttf []byte, offset int) (font *Font, err error) { f.loca, err = readTable(ttf, ttf[x+8:x+16]) case "maxp": f.maxp, err = readTable(ttf, ttf[x+8:x+16]) + case "name": + f.name, err = readTable(ttf, ttf[x+8:x+16]) case "OS/2": f.os2, err = readTable(ttf, ttf[x+8:x+16]) case "prep": diff --git a/_third_party/code.google.com/p/freetype-go/freetype/truetype/truetype_test.go b/_third_party/github.com/golang/freetype/truetype/truetype_test.go similarity index 72% rename from _third_party/code.google.com/p/freetype-go/freetype/truetype/truetype_test.go rename to _third_party/github.com/golang/freetype/truetype/truetype_test.go index 9ef6ec8d26..bd62d1da16 100644 --- a/_third_party/code.google.com/p/freetype-go/freetype/truetype/truetype_test.go +++ b/_third_party/github.com/golang/freetype/truetype/truetype_test.go @@ -14,65 +14,81 @@ import ( "strconv" "strings" "testing" + + "golang.org/x/image/font" + "golang.org/x/image/math/fixed" ) -func parseTestdataFont(name string) (font *Font, testdataIsOptional bool, err error) { - b, err := ioutil.ReadFile(fmt.Sprintf("../../testdata/%s.ttf", name)) +func parseTestdataFont(name string) (f *Font, testdataIsOptional bool, err error) { + b, err := ioutil.ReadFile(fmt.Sprintf("../testdata/%s.ttf", name)) if err != nil { // The "x-foo" fonts are optional tests, as they are not checked // in for copyright or file size reasons. 
return nil, strings.HasPrefix(name, "x-"), fmt.Errorf("%s: ReadFile: %v", name, err) } - font, err = Parse(b) + f, err = Parse(b) if err != nil { return nil, true, fmt.Errorf("%s: Parse: %v", name, err) } - return font, false, nil + return f, false, nil +} + +func mkBounds(minX, minY, maxX, maxY fixed.Int26_6) fixed.Rectangle26_6 { + return fixed.Rectangle26_6{ + Min: fixed.Point26_6{ + X: minX, + Y: minY, + }, + Max: fixed.Point26_6{ + X: maxX, + Y: maxY, + }, + } } // TestParse tests that the luxisr.ttf metrics and glyphs are parsed correctly. // The numerical values can be manually verified by examining luxisr.ttx. func TestParse(t *testing.T) { - font, _, err := parseTestdataFont("luxisr") + f, _, err := parseTestdataFont("luxisr") if err != nil { t.Fatal(err) } - if got, want := font.FUnitsPerEm(), int32(2048); got != want { + if got, want := f.FUnitsPerEm(), int32(2048); got != want { t.Errorf("FUnitsPerEm: got %v, want %v", got, want) } - fupe := font.FUnitsPerEm() - if got, want := font.Bounds(fupe), (Bounds{-441, -432, 2024, 2033}); got != want { + fupe := fixed.Int26_6(f.FUnitsPerEm()) + if got, want := f.Bounds(fupe), mkBounds(-441, -432, 2024, 2033); got != want { t.Errorf("Bounds: got %v, want %v", got, want) } - i0 := font.Index('A') - i1 := font.Index('V') + i0 := f.Index('A') + i1 := f.Index('V') if i0 != 36 || i1 != 57 { t.Fatalf("Index: i0, i1 = %d, %d, want 36, 57", i0, i1) } - if got, want := font.HMetric(fupe, i0), (HMetric{1366, 19}); got != want { + if got, want := f.HMetric(fupe, i0), (HMetric{1366, 19}); got != want { t.Errorf("HMetric: got %v, want %v", got, want) } - if got, want := font.VMetric(fupe, i0), (VMetric{2465, 553}); got != want { + if got, want := f.VMetric(fupe, i0), (VMetric{2465, 553}); got != want { t.Errorf("VMetric: got %v, want %v", got, want) } - if got, want := font.Kerning(fupe, i0, i1), int32(-144); got != want { - t.Errorf("Kerning: got %v, want %v", got, want) + if got, want := f.Kern(fupe, i0, i1), fixed.Int26_6(-144); got != want { + t.Errorf("Kern: got %v, want %v", got, want) } - g := NewGlyphBuf() - err = g.Load(font, fupe, i0, NoHinting) + g := &GlyphBuf{} + err = g.Load(f, fupe, i0, font.HintingNone) if err != nil { t.Fatalf("Load: %v", err) } g0 := &GlyphBuf{ - B: g.B, - Point: g.Point, - End: g.End, + Bounds: g.Bounds, + Points: g.Points, + Ends: g.Ends, } g1 := &GlyphBuf{ - B: Bounds{19, 0, 1342, 1480}, - Point: []Point{ + Bounds: mkBounds(19, 0, 1342, 1480), + Points: []Point{ {19, 0, 51}, {581, 1480, 1}, {789, 1480, 51}, @@ -85,7 +101,7 @@ func TestParse(t *testing.T) { {904, 566, 33}, {667, 1200, 3}, }, - End: []int{8, 11}, + Ends: []int{8, 11}, } if got, want := fmt.Sprint(g0), fmt.Sprint(g1); got != want { t.Errorf("GlyphBuf:\ngot %v\nwant %v", got, want) @@ -178,7 +194,7 @@ func TestIndex(t *testing.T) { }, } for name, wants := range testCases { - font, testdataIsOptional, err := parseTestdataFont(name) + f, testdataIsOptional, err := parseTestdataFont(name) if err != nil { if testdataIsOptional { t.Log(err) @@ -188,16 +204,39 @@ func TestIndex(t *testing.T) { continue } for r, want := range wants { - if got := font.Index(r); got != want { + if got := f.Index(r); got != want { t.Errorf("%s: Index of %q, aka %U: got %d, want %d", name, r, r, got, want) } } } } +func TestName(t *testing.T) { + testCases := map[string]string{ + "luximr": "Luxi Mono", + "luxirr": "Luxi Serif", + "luxisr": "Luxi Sans", + } + + for name, want := range testCases { + f, testdataIsOptional, err := parseTestdataFont(name) + if err != nil { + if 
testdataIsOptional { + t.Log(err) + } else { + t.Fatal(err) + } + continue + } + if got := f.Name(NameIDFontFamily); got != want { + t.Errorf("%s: got %q, want %q", name, got, want) + } + } +} + type scalingTestData struct { - advanceWidth int32 - bounds Bounds + advanceWidth fixed.Int26_6 + bounds fixed.Rectangle26_6 points []Point } @@ -205,23 +244,23 @@ type scalingTestData struct { // 213 -22 -111 236 555;-22 -111 1, 178 555 1, 236 555 1, 36 -111 1 // The line will not have a trailing "\n". func scalingTestParse(line string) (ret scalingTestData) { - next := func(s string) (string, int32) { + next := func(s string) (string, fixed.Int26_6) { t, i := "", strings.Index(s, " ") if i != -1 { s, t = s[:i], s[i+1:] } x, _ := strconv.Atoi(s) - return t, int32(x) + return t, fixed.Int26_6(x) } i := strings.Index(line, ";") prefix, line := line[:i], line[i+1:] prefix, ret.advanceWidth = next(prefix) - prefix, ret.bounds.XMin = next(prefix) - prefix, ret.bounds.YMin = next(prefix) - prefix, ret.bounds.XMax = next(prefix) - prefix, ret.bounds.YMax = next(prefix) + prefix, ret.bounds.Min.X = next(prefix) + prefix, ret.bounds.Min.Y = next(prefix) + prefix, ret.bounds.Max.X = next(prefix) + prefix, ret.bounds.Max.Y = next(prefix) ret.points = make([]Point, 0, 1+strings.Count(line, ",")) for len(line) > 0 { @@ -257,7 +296,7 @@ func scalingTestEquals(a, b []Point) (index int, equals bool) { var scalingTestCases = []struct { name string - size int32 + size int }{ {"luxisr", 12}, {"x-arial-bold", 11}, @@ -266,9 +305,9 @@ var scalingTestCases = []struct { {"x-times-new-roman", 13}, } -func testScaling(t *testing.T, h Hinting) { +func testScaling(t *testing.T, h font.Hinting) { for _, tc := range scalingTestCases { - font, testdataIsOptional, err := parseTestdataFont(tc.name) + f, testdataIsOptional, err := parseTestdataFont(tc.name) if err != nil { if testdataIsOptional { t.Log(err) @@ -278,19 +317,19 @@ func testScaling(t *testing.T, h Hinting) { continue } hintingStr := "sans" - if h != NoHinting { + if h != font.HintingNone { hintingStr = "with" } - f, err := os.Open(fmt.Sprintf( - "../../testdata/%s-%dpt-%s-hinting.txt", tc.name, tc.size, hintingStr)) + testFile, err := os.Open(fmt.Sprintf( + "../testdata/%s-%dpt-%s-hinting.txt", tc.name, tc.size, hintingStr)) if err != nil { t.Errorf("%s: Open: %v", tc.name, err) continue } - defer f.Close() + defer testFile.Close() wants := []scalingTestData{} - scanner := bufio.NewScanner(f) + scanner := bufio.NewScanner(testFile) if scanner.Scan() { major, minor, patch := 0, 0, 0 _, err := fmt.Sscanf(scanner.Text(), "freetype version %d.%d.%d", &major, &minor, &patch) @@ -316,16 +355,16 @@ func testScaling(t *testing.T, h Hinting) { continue } - glyphBuf := NewGlyphBuf() + glyphBuf := &GlyphBuf{} for i, want := range wants { - if err = glyphBuf.Load(font, tc.size*64, Index(i), h); err != nil { + if err = glyphBuf.Load(f, fixed.I(tc.size), Index(i), h); err != nil { t.Errorf("%s: glyph #%d: Load: %v", tc.name, i, err) continue } got := scalingTestData{ advanceWidth: glyphBuf.AdvanceWidth, - bounds: glyphBuf.B, - points: glyphBuf.Point, + bounds: glyphBuf.Bounds, + points: glyphBuf.Points, } if got.advanceWidth != want.advanceWidth { @@ -357,10 +396,5 @@ func testScaling(t *testing.T, h Hinting) { } } -func TestScalingSansHinting(t *testing.T) { - testScaling(t, NoHinting) -} - -func TestScalingWithHinting(t *testing.T) { - testScaling(t, FullHinting) -} +func TestScalingHintingNone(t *testing.T) { testScaling(t, font.HintingNone) } +func TestScalingHintingFull(t 
*testing.T) { testScaling(t, font.HintingFull) }
diff --git a/_third_party/github.com/gorilla/mux/README.md b/_third_party/github.com/gorilla/mux/README.md
index e60301b033..55dd4e59a5 100644
--- a/_third_party/github.com/gorilla/mux/README.md
+++ b/_third_party/github.com/gorilla/mux/README.md
@@ -1,7 +1,235 @@
 mux
 ===
+[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
 [![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux)
-gorilla/mux is a powerful URL router and dispatcher.
+Package gorilla/mux implements a request router and dispatcher.
-Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+ * Requests can be matched based on URL host, path, path prefix, schemes,
+   header and query values, HTTP methods or using custom matchers.
+ * URL hosts and paths can have variables with an optional regular
+   expression.
+ * Registered URLs can be built, or "reversed", which helps maintain
+   references to resources.
+ * Routes can be used as subrouters: nested routes are only tested if the
+   parent route matches. This is useful to define groups of routes that
+   share common conditions like a host, a path prefix or other repeated
+   attributes. As a bonus, this optimizes request matching.
+ * It implements the http.Handler interface so it is compatible with the
+   standard http.ServeMux.
+
+Let's start by registering a couple of URL paths and handlers:
+
+    func main() {
+        r := mux.NewRouter()
+        r.HandleFunc("/", HomeHandler)
+        r.HandleFunc("/products", ProductsHandler)
+        r.HandleFunc("/articles", ArticlesHandler)
+        http.Handle("/", r)
+    }
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called, passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+    r := mux.NewRouter()
+    r.HandleFunc("/products/{key}", ProductHandler)
+    r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+    r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The names are used to create a map of route variables which can be retrieved
+by calling mux.Vars():
+
+    vars := mux.Vars(request)
+    category := vars["category"]
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+    r := mux.NewRouter()
+    // Only matches if domain is "www.example.com".
+    r.Host("www.example.com")
+    // Matches a dynamic subdomain.
+    r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added.
To match path prefixes:
+
+    r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+    r.Methods("GET", "POST")
+
+...or URL schemes:
+
+    r.Schemes("https")
+
+...or header values:
+
+    r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+    r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+    r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+        return r.ProtoMajor == 0
+    })
+
+...and finally, it is possible to combine several matchers in a single route:
+
+    r.HandleFunc("/products", ProductsHandler).
+      Host("www.example.com").
+      Methods("GET").
+      Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is `www.example.com`. Create a route for that host and get a "subrouter"
+from it:
+
+    r := mux.NewRouter()
+    s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+    s.HandleFunc("/products/", ProductsHandler)
+    s.HandleFunc("/products/{key}", ProductHandler)
+    s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+`www.example.com`, because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as the base for their paths:
+
+    r := mux.NewRouter()
+    s := r.PathPrefix("/products").Subrouter()
+    // "/products/"
+    s.HandleFunc("/", ProductsHandler)
+    // "/products/{key}/"
+    s.HandleFunc("/{key}/", ProductHandler)
+    // "/products/{key}/details"
+    s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name by calling Name() on a route. For example:
+
+    r := mux.NewRouter()
+    r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+      Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+    url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+    "/articles/technology/42"
+
+This also works for host variables:
+
+    r := mux.NewRouter()
+    r.Host("{subdomain}.domain.com").
+      Path("/articles/{category}/{id:[0-9]+}").
+      HandlerFunc(ArticleHandler).
+      Name("article")
+
+    // url.String() will be "http://news.domain.com/articles/technology/42"
+    url, err := r.Get("article").URL("subdomain", "news",
+                                     "category", "technology",
+                                     "id", "42")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route.
For example, we could do:
+
+    r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match requests with a Content-Type of
+`application/json` as well as `application/text`.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+    // "http://news.domain.com/"
+    host, err := r.Get("article").URLHost("subdomain", "news")
+
+    // "/articles/technology/42"
+    path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, a host and a path defined separately can be built
+as well:
+
+    r := mux.NewRouter()
+    s := r.Host("{subdomain}.domain.com").Subrouter()
+    s.Path("/articles/{category}/{id:[0-9]+}").
+      HandlerFunc(ArticleHandler).
+      Name("article")
+
+    // "http://news.domain.com/articles/technology/42"
+    url, err := r.Get("article").URL("subdomain", "news",
+                                     "category", "technology",
+                                     "id", "42")
+
+## Full Example
+
+Here's a complete, runnable example of a small mux-based server:
+
+```go
+package main
+
+import (
+    "net/http"
+
+    "github.com/gorilla/mux"
+)
+
+func YourHandler(w http.ResponseWriter, r *http.Request) {
+    w.Write([]byte("Gorilla!\n"))
+}
+
+func main() {
+    r := mux.NewRouter()
+    // Routes consist of a path and a handler function.
+    r.HandleFunc("/", YourHandler)
+
+    // Bind to a port and pass our router in
+    http.ListenAndServe(":8000", r)
+}
+```
+
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/_third_party/github.com/gorilla/mux/doc.go b/_third_party/github.com/gorilla/mux/doc.go
index 442babab85..49798cb5cf 100644
--- a/_third_party/github.com/gorilla/mux/doc.go
+++ b/_third_party/github.com/gorilla/mux/doc.go
@@ -60,8 +60,8 @@ Routes can also be restricted to a domain or subdomain. Just define a host
 pattern to be matched. They can also have variables:
 
 	r := mux.NewRouter()
-	// Only matches if domain is "www.domain.com".
-	r.Host("www.domain.com")
+	// Only matches if domain is "www.example.com".
+	r.Host("www.example.com")
 	// Matches a dynamic subdomain.
 	r.Host("{subdomain:[a-z]+}.domain.com")
@@ -94,7 +94,7 @@ There are several other matchers that can be added. To match path prefixes:
 ...and finally, it is possible to combine several matchers in a single route:
 
 	r.HandleFunc("/products", ProductsHandler).
-	  Host("www.domain.com").
+	  Host("www.example.com").
 	  Methods("GET").
 	  Schemes("http")
@@ -103,11 +103,11 @@ a way to group several routes that share the same requirements.
 We call it "subrouting".
 
 For example, let's say we have several URLs that should only match when the
-host is "www.domain.com". Create a route for that host and get a "subrouter"
+host is "www.example.com". Create a route for that host and get a "subrouter"
 from it:
 
 	r := mux.NewRouter()
-	s := r.Host("www.domain.com").Subrouter()
+	s := r.Host("www.example.com").Subrouter()
 
 Then register routes in the subrouter:
@@ -116,7 +116,7 @@ Then register routes in the subrouter:
 	s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
 
 The three URL paths we registered above will only be tested if the domain is
-"www.domain.com", because the subrouter is tested first. This is not
+"www.example.com", because the subrouter is tested first. This is not
 only convenient, but also optimizes request matching. You can create
 subrouters combining any attribute matchers accepted by a route.
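To make the route-variable and URL-reversal APIs documented above concrete, here is a minimal sketch that ties them together into one runnable program. It only uses calls shown in the README (HandleFunc, Name, Vars, Get(...).URL(...)); the handler name, route name, and port are illustrative, and the canonical import path is used rather than the vendored `bosun.org/_third_party/...` path:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

// ArticleHandler echoes the route variables that mux extracted
// from the matched URL.
func ArticleHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r) // e.g. {"category": "technology", "id": "42"}
	fmt.Fprintf(w, "category=%s id=%s\n", vars["category"], vars["id"])
}

func main() {
	r := mux.NewRouter()

	// {id:[0-9]+} restricts the id variable to digits.
	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
		Name("article")

	// Reverse the named route back into a URL; every route variable
	// must be supplied and must match its pattern.
	u, err := r.Get("article").URL("category", "technology", "id", "42")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u.String()) // "/articles/technology/42"

	log.Fatal(http.ListenAndServe(":8000", r))
}
```

Because URL building reuses the same patterns that matching uses, a URL generated this way is guaranteed to match the route that produced it (build-only routes excepted, as noted above).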
diff --git a/_third_party/github.com/gorilla/mux/mux.go b/_third_party/github.com/gorilla/mux/mux.go
index 6f4a6ea591..8c1527f6c3 100644
--- a/_third_party/github.com/gorilla/mux/mux.go
+++ b/_third_party/github.com/gorilla/mux/mux.go
@@ -312,6 +312,10 @@ func Vars(r *http.Request) map[string]string {
 }
 
 // CurrentRoute returns the matched route for the current request, if any.
+// This only works when called inside the handler of the matched route
+// because the matched route is stored in the request context, which is
+// cleared after the handler returns, unless the KeepContext option is set
+// on the Router.
 func CurrentRoute(r *http.Request) *Route {
 	if rv := context.Get(r, routeKey); rv != nil {
 		return rv.(*Route)
diff --git a/_third_party/github.com/gorilla/mux/mux_test.go b/_third_party/github.com/gorilla/mux/mux_test.go
index b56e49ffb4..5a39aa9d11 100644
--- a/_third_party/github.com/gorilla/mux/mux_test.go
+++ b/_third_party/github.com/gorilla/mux/mux_test.go
@@ -7,11 +7,24 @@ package mux
 import (
 	"fmt"
 	"net/http"
+	"strings"
 	"testing"
 
 	"bosun.org/_third_party/github.com/gorilla/context"
 )
 
+func (r *Route) GoString() string {
+	matchers := make([]string, len(r.matchers))
+	for i, m := range r.matchers {
+		matchers[i] = fmt.Sprintf("%#v", m)
+	}
+	return fmt.Sprintf("&Route{matchers:[]matcher{%s}}", strings.Join(matchers, ", "))
+}
+
+func (r *routeRegexp) GoString() string {
+	return fmt.Sprintf("&routeRegexp{template: %q, matchHost: %t, matchQuery: %t, strictSlash: %t, regexp: regexp.MustCompile(%q), reverse: %q, varsN: %v, varsR: %v}", r.template, r.matchHost, r.matchQuery, r.strictSlash, r.regexp.String(), r.reverse, r.varsN, r.varsR)
+}
+
 type routeTest struct {
 	title       string // title of the test
 	route       *Route // the route being tested
@@ -108,6 +121,15 @@ func TestHost(t *testing.T) {
 		path:        "",
 		shouldMatch: true,
 	},
+	{
+		title:       "Host route with pattern, additional capturing group, match",
+		route:       new(Route).Host("aaa.{v1:[a-z]{2}(b|c)}.ccc"),
+		request:     newRequest("GET", "http://aaa.bbb.ccc/111/222/333"),
+		vars:        map[string]string{"v1": "bbb"},
+		host:        "aaa.bbb.ccc",
+		path:        "",
+		shouldMatch: true,
+	},
 	{
 		title:       "Host route with pattern, wrong host in request URL",
 		route:       new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"),
@@ -135,6 +157,33 @@ func TestHost(t *testing.T) {
 		path:        "",
 		shouldMatch: false,
 	},
+	{
+		title:       "Host route with hyphenated name and pattern, match",
+		route:       new(Route).Host("aaa.{v-1:[a-z]{3}}.ccc"),
+		request:     newRequest("GET", "http://aaa.bbb.ccc/111/222/333"),
+		vars:        map[string]string{"v-1": "bbb"},
+		host:        "aaa.bbb.ccc",
+		path:        "",
+		shouldMatch: true,
+	},
+	{
+		title:       "Host route with hyphenated name and pattern, additional capturing group, match",
+		route:       new(Route).Host("aaa.{v-1:[a-z]{2}(b|c)}.ccc"),
+		request:     newRequest("GET", "http://aaa.bbb.ccc/111/222/333"),
+		vars:        map[string]string{"v-1": "bbb"},
+		host:        "aaa.bbb.ccc",
+		path:        "",
+		shouldMatch: true,
+	},
+	{
+		title:       "Host route with multiple hyphenated names and patterns, match",
+		route:       new(Route).Host("{v-1:[a-z]{3}}.{v-2:[a-z]{3}}.{v-3:[a-z]{3}}"),
+		request:     newRequest("GET", "http://aaa.bbb.ccc/111/222/333"),
+		vars:        map[string]string{"v-1": "aaa", "v-2": "bbb", "v-3": "ccc"},
+		host:        "aaa.bbb.ccc",
+		path:        "",
+		shouldMatch: true,
+	},
 	{
 		title:       "Path route with single pattern with pipe, match",
 		route:       new(Route).Path("/{category:a|b/c}"),
@@ -260,6 +309,42 @@ func TestPath(t *testing.T) {
 		path:        "/111/222/333",
 		shouldMatch: false,
 	},
+	{
+		title:       "Path route with multiple patterns with
pipe, match", + route: new(Route).Path("/{category:a|(b/c)}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/a/product_name/1"), + vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, + host: "", + path: "/a/product_name/1", + shouldMatch: true, + }, + { + title: "Path route with hyphenated name and pattern, match", + route: new(Route).Path("/111/{v-1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v-1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with multiple hyphenated names and patterns, match", + route: new(Route).Path("/{v-1:[0-9]{3}}/{v-2:[0-9]{3}}/{v-3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v-1": "111", "v-2": "222", "v-3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with multiple hyphenated names and patterns with pipe, match", + route: new(Route).Path("/{product-category:a|(b/c)}/{product-name}/{product-id:[0-9]+}"), + request: newRequest("GET", "http://localhost/a/product_name/1"), + vars: map[string]string{"product-category": "a", "product-name": "product_name", "product-id": "1"}, + host: "", + path: "/a/product_name/1", + shouldMatch: true, + }, } for _, test := range tests { @@ -597,6 +682,15 @@ func TestQueries(t *testing.T) { path: "", shouldMatch: false, }, + { + title: "Queries route with regexp pattern with quantifier, additional capturing group", + route: new(Route).Queries("foo", "{v1:[0-9]{1}(a|b)}"), + request: newRequest("GET", "http://localhost?foo=1a"), + vars: map[string]string{"v1": "1a"}, + host: "", + path: "", + shouldMatch: true, + }, { title: "Queries route with regexp pattern with quantifier, additional variable in query string, regexp does not match", route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), @@ -606,6 +700,42 @@ func TestQueries(t *testing.T) { path: "", shouldMatch: false, }, + { + title: "Queries route with hyphenated name, match", + route: new(Route).Queries("foo", "{v-1}"), + request: newRequest("GET", "http://localhost?foo=bar"), + vars: map[string]string{"v-1": "bar"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with multiple hyphenated names, match", + route: new(Route).Queries("foo", "{v-1}", "baz", "{v-2}"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{"v-1": "bar", "v-2": "ding"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with hyphenate name and pattern, match", + route: new(Route).Queries("foo", "{v-1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=10"), + vars: map[string]string{"v-1": "10"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with hyphenated name and pattern with quantifier, additional capturing group", + route: new(Route).Queries("foo", "{v-1:[0-9]{1}(a|b)}"), + request: newRequest("GET", "http://localhost?foo=1a"), + vars: map[string]string{"v-1": "1a"}, + host: "", + path: "", + shouldMatch: true, + }, { title: "Queries route with empty value, should match", route: new(Route).Queries("foo", ""), @@ -660,6 +790,15 @@ func TestQueries(t *testing.T) { path: "", shouldMatch: true, }, + { + title: "Queries route, bad submatch", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?fffoo=bar&baz=dingggg"), + vars: map[string]string{}, + host: 
"", + path: "", + shouldMatch: false, + }, } for _, test := range tests { diff --git a/_third_party/github.com/gorilla/mux/old_test.go b/_third_party/github.com/gorilla/mux/old_test.go index 1f7c190c0f..755db483e8 100644 --- a/_third_party/github.com/gorilla/mux/old_test.go +++ b/_third_party/github.com/gorilla/mux/old_test.go @@ -545,7 +545,7 @@ func TestMatchedRouteName(t *testing.T) { router := NewRouter() route := router.NewRoute().Path("/products/").Name(routeName) - url := "http://www.domain.com/products/" + url := "http://www.example.com/products/" request, _ := http.NewRequest("GET", url, nil) var rv RouteMatch ok := router.Match(request, &rv) @@ -563,10 +563,10 @@ func TestMatchedRouteName(t *testing.T) { func TestSubRouting(t *testing.T) { // Example from docs. router := NewRouter() - subrouter := router.NewRoute().Host("www.domain.com").Subrouter() + subrouter := router.NewRoute().Host("www.example.com").Subrouter() route := subrouter.NewRoute().Path("/products/").Name("products") - url := "http://www.domain.com/products/" + url := "http://www.example.com/products/" request, _ := http.NewRequest("GET", url, nil) var rv RouteMatch ok := router.Match(request, &rv) diff --git a/_third_party/github.com/gorilla/mux/regexp.go b/_third_party/github.com/gorilla/mux/regexp.go index 7c636d0ef0..06728dd545 100644 --- a/_third_party/github.com/gorilla/mux/regexp.go +++ b/_third_party/github.com/gorilla/mux/regexp.go @@ -10,6 +10,7 @@ import ( "net/http" "net/url" "regexp" + "strconv" "strings" ) @@ -72,13 +73,14 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash tpl[idxs[i]:end]) } // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) + varIdx := i / 2 + fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(varIdx), patt) // Build the reverse template. fmt.Fprintf(reverse, "%s%%s", raw) // Append variable name and compiled pattern. - varsN[i/2] = name - varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + varsN[varIdx] = name + varsR[varIdx], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) if err != nil { return nil, err } @@ -224,6 +226,11 @@ func braceIndices(s string) ([]int, error) { return idxs, nil } +// varGroupName builds a capturing group name for the indexed variable. 
+func varGroupName(idx int) string { + return "v" + strconv.Itoa(idx) +} + // ---------------------------------------------------------------------------- // routeRegexpGroup // ---------------------------------------------------------------------------- @@ -241,8 +248,13 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) if v.host != nil { hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) if hostVars != nil { - for k, v := range v.host.varsN { - m.Vars[v] = hostVars[k+1] + subexpNames := v.host.regexp.SubexpNames() + varName := 0 + for i, name := range subexpNames[1:] { + if name != "" && name == varGroupName(varName) { + m.Vars[v.host.varsN[varName]] = hostVars[i+1] + varName++ + } } } } @@ -250,8 +262,13 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) if v.path != nil { pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) if pathVars != nil { - for k, v := range v.path.varsN { - m.Vars[v] = pathVars[k+1] + subexpNames := v.path.regexp.SubexpNames() + varName := 0 + for i, name := range subexpNames[1:] { + if name != "" && name == varGroupName(varName) { + m.Vars[v.path.varsN[varName]] = pathVars[i+1] + varName++ + } } // Check if we should redirect. if v.path.strictSlash { @@ -273,8 +290,13 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) for _, q := range v.queries { queryVars := q.regexp.FindStringSubmatch(q.getUrlQuery(req)) if queryVars != nil { - for k, v := range q.varsN { - m.Vars[v] = queryVars[k+1] + subexpNames := q.regexp.SubexpNames() + varName := 0 + for i, name := range subexpNames[1:] { + if name != "" && name == varGroupName(varName) { + m.Vars[q.varsN[varName]] = queryVars[i+1] + varName++ + } } } } diff --git a/_third_party/github.com/gorilla/mux/route.go b/_third_party/github.com/gorilla/mux/route.go index 75481b5797..913432c1c0 100644 --- a/_third_party/github.com/gorilla/mux/route.go +++ b/_third_party/github.com/gorilla/mux/route.go @@ -200,15 +200,7 @@ func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { // "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both request header values match. -// Alternatively, you can provide a regular expression and match the header as follows: -// -// r.Headers("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will the same as the previous example, with the addition of matching -// application/text as well. -// -// It the value is an empty string, it will match any value if the key is set. +// If the value is an empty string, it will match any value if the key is set. func (r *Route) Headers(pairs ...string) *Route { if r.err == nil { var headers map[string]string @@ -255,7 +247,7 @@ func (r *Route) HeadersRegexp(pairs ...string) *Route { // For example: // // r := mux.NewRouter() -// r.Host("www.domain.com") +// r.Host("www.example.com") // r.Host("{subdomain}.domain.com") // r.Host("{subdomain:[a-z]+}.domain.com") // @@ -414,7 +406,7 @@ func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { // It will test the inner routes only if the parent route matched. 
For example: // // r := mux.NewRouter() -// s := r.Host("www.domain.com").Subrouter() +// s := r.Host("www.example.com").Subrouter() // s.HandleFunc("/products/", ProductsHandler) // s.HandleFunc("/products/{key}", ProductHandler) // s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) diff --git a/_third_party/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go b/_third_party/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go index df2e49f851..bdf448d521 100644 --- a/_third_party/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go +++ b/_third_party/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go @@ -19,7 +19,7 @@ package codec import ( "testing" - vmsgpack "bosun.org/_third_party/gopkg.in/vmihailenco/msgpack.v2" + vmsgpack "gopkg.in/vmihailenco/msgpack.v2" "labix.org/v2/mgo/bson" ) diff --git a/_third_party/github.com/hashicorp/raft/config.go b/_third_party/github.com/hashicorp/raft/config.go index 047a88abe7..6b3c0b59f0 100644 --- a/_third_party/github.com/hashicorp/raft/config.go +++ b/_third_party/github.com/hashicorp/raft/config.go @@ -66,6 +66,15 @@ type Config struct { // step down as leader. LeaderLeaseTimeout time.Duration + // StartAsLeader forces Raft to start in the leader state. This should + // never be used except for testing purposes, as it can cause a split-brain. + StartAsLeader bool + + // NotifyCh is used to provide a channel that will be notified of leadership + // changes. Raft will block writing to this channel, so it should either be + // buffered or aggressively consumed. + NotifyCh chan<- bool + // LogOutput is used as a sink for logs, unless Logger is specified. // Defaults to os.Stderr. LogOutput io.Writer diff --git a/_third_party/github.com/hashicorp/raft/integ_test.go b/_third_party/github.com/hashicorp/raft/integ_test.go index 1d071e139a..03cd7e44e8 100644 --- a/_third_party/github.com/hashicorp/raft/integ_test.go +++ b/_third_party/github.com/hashicorp/raft/integ_test.go @@ -48,7 +48,7 @@ func MakeRaft(t *testing.T, conf *Config) *RaftEnv { // Set the config if conf == nil { - conf = inmemConfig() + conf = inmemConfig(t) } env.conf = conf diff --git a/_third_party/github.com/hashicorp/raft/raft.go b/_third_party/github.com/hashicorp/raft/raft.go index 40425a7e66..c7a281a768 100644 --- a/_third_party/github.com/hashicorp/raft/raft.go +++ b/_third_party/github.com/hashicorp/raft/raft.go @@ -226,6 +226,13 @@ func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps Sna // Initialize as a follower r.setState(Follower) + // Start as leader if specified. This should only be used + // for testing purposes. 
+	if conf.StartAsLeader {
+		r.setState(Leader)
+		r.setLeader(r.localAddr)
+	}
+
 	// Restore the current term and the last log
 	r.setCurrentTerm(currentTerm)
 	r.setLastLogIndex(lastLog.Index)
@@ -726,6 +733,14 @@ func (r *Raft) runLeader() {
 	// Notify that we are the leader
 	asyncNotifyBool(r.leaderCh, true)
 
+	// Push to the notify channel if given
+	if notify := r.conf.NotifyCh; notify != nil {
+		select {
+		case notify <- true:
+		case <-r.shutdownCh:
+		}
+	}
+
 	// Setup leader state
 	r.leaderState.commitCh = make(chan struct{}, 1)
 	r.leaderState.inflight = newInflight(r.leaderState.commitCh)
@@ -766,6 +781,14 @@ func (r *Raft) runLeader() {
 		// Notify that we are not the leader
 		asyncNotifyBool(r.leaderCh, false)
+
+		// Push to the notify channel if given
+		if notify := r.conf.NotifyCh; notify != nil {
+			select {
+			case notify <- false:
+			case <-r.shutdownCh:
+			}
+		}
 	}()
 
 	// Start a replication routine for each peer
diff --git a/_third_party/github.com/hashicorp/raft/raft_test.go b/_third_party/github.com/hashicorp/raft/raft_test.go
index 970f2cdbd3..a9102a27cf 100644
--- a/_third_party/github.com/hashicorp/raft/raft_test.go
+++ b/_third_party/github.com/hashicorp/raft/raft_test.go
@@ -67,15 +67,31 @@ func (m *MockSnapshot) Release() {
 }
 
 // Return configurations optimized for in-memory
-func inmemConfig() *Config {
+func inmemConfig(t *testing.T) *Config {
 	conf := DefaultConfig()
 	conf.HeartbeatTimeout = 50 * time.Millisecond
 	conf.ElectionTimeout = 50 * time.Millisecond
 	conf.LeaderLeaseTimeout = 50 * time.Millisecond
 	conf.CommitTimeout = time.Millisecond
+	conf.Logger = log.New(&testLoggerAdapter{t}, "", 0)
 	return conf
 }
 
+// testLoggerAdapter can be used as the destination for a logger; it maps
+// writes into calls to testing.T.Log, so that you only see
+// the logging for failed tests.
+type testLoggerAdapter struct {
+	t *testing.T
+}
+
+func (a *testLoggerAdapter) Write(d []byte) (int, error) {
+	if d[len(d)-1] == '\n' {
+		d = d[:len(d)-1]
+	}
+	a.t.Log(string(d))
+	return len(d), nil
+}
+
 type cluster struct {
 	dirs   []string
 	stores []*InmemStore
@@ -83,6 +99,7 @@ type cluster struct {
 	snaps []*FileSnapshotStore
 	trans []*InmemTransport
 	rafts []*Raft
+	t     *testing.T
 }
 
 func (c *cluster) Merge(other *cluster) {
@@ -143,8 +160,27 @@ func (c *cluster) Leader() *Raft {
 	return leaders[0]
 }
 
+// Followers waits for there to be clusterSize-1 followers and then returns
+// them. If you just wait for a leader, you can hit timing scenarios where a
+// second node starts an election just as the first leader was elected, so
+// even though you waited on the leader, it might soon lose leadership.
+// Waiting on the followers instead leaves the cluster in a more stable state.
+func (c *cluster) Followers() []*Raft {
+	expFollowers := len(c.rafts) - 1
+	followers := c.GetInState(Follower)
+	limit := time.Now().Add(200 * time.Millisecond)
+	for time.Now().Before(limit) && len(followers) != expFollowers {
+		time.Sleep(time.Millisecond)
+		followers = c.GetInState(Follower)
+	}
+	if len(followers) != expFollowers {
+		c.t.Fatalf("timeout waiting for %d followers (followers are %v)", expFollowers, followers)
+	}
+	return followers
+}
+
 func (c *cluster) FullyConnect() {
-	log.Printf("[WARN] Fully Connecting")
+	c.t.Logf("[WARN] Fully Connecting")
 	for i, t1 := range c.trans {
 		for j, t2 := range c.trans {
 			if i != j {
@@ -156,7 +192,7 @@ func (c *cluster) FullyConnect() {
 	}
 }
 
 func (c *cluster) Disconnect(a string) {
-	log.Printf("[WARN] Disconnecting %v", a)
+	c.t.Logf("[WARN] Disconnecting %v", a)
 	for _, t := range c.trans {
 		if t.localAddr == a {
 			t.DisconnectAll()
@@ -279,6 +315,7 @@ WAIT:
 func MakeCluster(n int, t *testing.T, conf *Config) *cluster {
 	c := &cluster{}
+	c.t = t
 	peers := make([]string, 0, n)
 
 	// Setup the stores and transports
@@ -307,7 +344,7 @@ func MakeCluster(n int, t *testing.T, conf *Config) *cluster {
 	// Create all the rafts
 	for i := 0; i < n; i++ {
 		if conf == nil {
-			conf = inmemConfig()
+			conf = inmemConfig(t)
 		}
 		if n == 1 {
 			conf.EnableSingleNode = true
@@ -331,7 +368,7 @@ func MakeClusterNoPeers(n int, t *testing.T, conf *Config) *cluster {
 	c := &cluster{}
-
+	c.t = t
 	// Setup the stores and transports
 	for i := 0; i < n; i++ {
 		dir, err := ioutil.TempDir("", "raft")
@@ -357,7 +394,7 @@ func MakeClusterNoPeers(n int, t *testing.T, conf *Config) *cluster {
 	// Create all the rafts
 	for i := 0; i < n; i++ {
 		if conf == nil {
-			conf = inmemConfig()
+			conf = inmemConfig(t)
 		}
 
 		logs := c.stores[i]
@@ -405,7 +442,7 @@ func TestRaft_AfterShutdown(t *testing.T) {
 }
 
 func TestRaft_SingleNode(t *testing.T) {
-	conf := inmemConfig()
+	conf := inmemConfig(t)
 	c := MakeCluster(1, t, conf)
 	defer c.Close()
 	raft := c.rafts[0]
@@ -453,6 +490,7 @@ func TestRaft_TripleNode(t *testing.T) {
 	defer c.Close()
 
 	// Should be one leader
+	c.Followers()
 	leader := c.Leader()
 	c.EnsureLeader(t, leader.localAddr)
@@ -482,6 +520,7 @@ func TestRaft_LeaderFail(t *testing.T) {
 	defer c.Close()
 
 	// Should be one leader
+	c.Followers()
 	leader := c.Leader()
 
 	// Should be able to apply
@@ -494,11 +533,12 @@ func TestRaft_LeaderFail(t *testing.T) {
 	time.Sleep(30 * time.Millisecond)
 
 	// Disconnect the leader now
-	log.Printf("[INFO] Disconnecting %v", leader)
+	t.Logf("[INFO] Disconnecting %v", leader)
+	leaderTerm := leader.getCurrentTerm()
 	c.Disconnect(leader.localAddr)
 
 	// Wait for
new leader
-	limit := time.Now().Add(200 * time.Millisecond)
+	limit := time.Now().Add(300 * time.Millisecond)
 	var newLead *Raft
 	for time.Now().Before(limit) && newLead == nil {
 		time.Sleep(10 * time.Millisecond)
@@ -512,8 +552,8 @@
 	}
 
 	// Ensure the term is greater
-	if newLead.getCurrentTerm() <= leader.getCurrentTerm() {
-		t.Fatalf("expected newer term! %d %d", newLead.getCurrentTerm(), leader.getCurrentTerm())
+	if newLead.getCurrentTerm() <= leaderTerm {
+		t.Fatalf("expected newer term! %d %d (%v, %v)", newLead.getCurrentTerm(), leaderTerm, newLead, leader)
 	}
 
 	// Apply should not work on the old leader
@@ -528,7 +568,7 @@
 	}
 
 	// Reconnect the networks
-	log.Printf("[INFO] Reconnecting %v", leader)
+	t.Logf("[INFO] Reconnecting %v", leader)
 	c.FullyConnect()
 
 	// Future1 should fail
@@ -562,7 +602,7 @@ func TestRaft_BehindFollower(t *testing.T) {
 
 	// Disconnect one follower
 	leader := c.Leader()
-	followers := c.GetInState(Follower)
+	followers := c.Followers()
 	behind := followers[0]
 	c.Disconnect(behind.localAddr)
 
@@ -576,7 +616,7 @@
 	if err := future.Error(); err != nil {
 		t.Fatalf("err: %v", err)
 	} else {
-		log.Printf("[INFO] Finished apply without behind follower")
+		t.Logf("[INFO] Finished apply without behind follower")
 	}
 
 	// Check that we have a non-zero last contact
@@ -626,7 +666,7 @@ func TestRaft_ApplyNonLeader(t *testing.T) {
 
 func TestRaft_ApplyConcurrent(t *testing.T) {
 	// Make the cluster
-	conf := inmemConfig()
+	conf := inmemConfig(t)
 	conf.HeartbeatTimeout = 80 * time.Millisecond
 	conf.ElectionTimeout = 80 * time.Millisecond
 	c := MakeCluster(3, t, conf)
@@ -670,7 +710,7 @@ func TestRaft_ApplyConcurrent(t *testing.T) {
 
 func TestRaft_ApplyConcurrent_Timeout(t *testing.T) {
 	// Make the cluster
-	conf := inmemConfig()
+	conf := inmemConfig(t)
 	conf.HeartbeatTimeout = 80 * time.Millisecond
 	conf.ElectionTimeout = 80 * time.Millisecond
 	c := MakeCluster(1, t, conf)
@@ -681,13 +721,17 @@
 
 	// Enough enqueues should cause at least one timeout...
var didTimeout int32 = 0
-	for i := 0; i < 200; i++ {
+	for i := 0; (i < 500) && (atomic.LoadInt32(&didTimeout) == 0); i++ {
 		go func(i int) {
 			future := leader.Apply([]byte(fmt.Sprintf("test%d", i)), time.Microsecond)
 			if future.Error() == ErrEnqueueTimeout {
 				atomic.StoreInt32(&didTimeout, 1)
 			}
 		}(i)
+		// give the leader loop some other things to do in order to increase the odds of a timeout
+		if i%5 == 0 {
+			leader.VerifyLeader()
+		}
 	}
 
 	// Wait
@@ -711,7 +755,7 @@ func TestRaft_JoinNode(t *testing.T) {
 	if err := future.Error(); err != nil {
 		t.Fatalf("err: %v", err)
 	} else {
-		log.Printf("[INFO] Applied log")
+		t.Logf("[INFO] Applied log")
 	}
 
 	// Make a new cluster of 1
@@ -769,7 +813,7 @@ func TestRaft_RemoveFollower(t *testing.T) {
 	leader := c.Leader()
 
 	// Wait until we have 2 followers
-	limit := time.Now().Add(200 * time.Millisecond)
+	limit := time.Now().Add(300 * time.Millisecond)
 	var followers []*Raft
 	for time.Now().Before(limit) && len(followers) != 2 {
 		time.Sleep(10 * time.Millisecond)
@@ -847,25 +891,15 @@ func TestRaft_RemoveLeader(t *testing.T) {
 
 func TestRaft_RemoveLeader_NoShutdown(t *testing.T) {
 	// Make a cluster
-	conf := inmemConfig()
+	conf := inmemConfig(t)
 	conf.ShutdownOnRemove = false
 	c := MakeCluster(3, t, conf)
 	defer c.Close()
 
 	// Get the leader
+	c.Followers()
 	leader := c.Leader()
 
-	// Wait until we have 2 followers
-	limit := time.Now().Add(200 * time.Millisecond)
-	var followers []*Raft
-	for time.Now().Before(limit) && len(followers) != 2 {
-		time.Sleep(10 * time.Millisecond)
-		followers = c.GetInState(Follower)
-	}
-	if len(followers) != 2 {
-		t.Fatalf("expected two followers: %v", followers)
-	}
-
 	// Remove the leader
 	leader.RemovePeer(leader.localAddr)
 
@@ -896,7 +930,7 @@ func TestRaft_RemoveLeader_NoShutdown(t *testing.T) {
 
 func TestRaft_RemoveLeader_SplitCluster(t *testing.T) {
 	// Enable operation after a remove
-	conf := inmemConfig()
+	conf := inmemConfig(t)
 	conf.EnableSingleNode = true
 	conf.ShutdownOnRemove = false
 	conf.DisableBootstrapAfterElect = false
@@ -906,13 +940,14 @@
 	defer c.Close()
 
 	// Get the leader
+	c.Followers()
 	leader := c.Leader()
 
 	// Remove the leader
 	leader.RemovePeer(leader.localAddr)
 
 	// Wait until we have 2 leaders
-	limit := time.Now().Add(200 * time.Millisecond)
+	limit := time.Now().Add(300 * time.Millisecond)
 	var leaders []*Raft
 	for time.Now().Before(limit) && len(leaders) != 2 {
 		time.Sleep(10 * time.Millisecond)
@@ -965,7 +1000,7 @@ func TestRaft_RemoveUnknownPeer(t *testing.T) {
 
 func TestRaft_SnapshotRestore(t *testing.T) {
 	// Make the cluster
-	conf := inmemConfig()
+	conf := inmemConfig(t)
 	conf.TrailingLogs = 10
 	c := MakeCluster(1, t, conf)
 	defer c.Close()
@@ -1021,7 +1056,7 @@ func TestRaft_SnapshotRestore_PeerChange(t *testing.T) {
 	// Make the cluster
-	conf := inmemConfig()
+	conf := inmemConfig(t)
 	conf.TrailingLogs = 10
 	c := MakeCluster(3, t, conf)
 	defer c.Close()
@@ -1096,7 +1131,7 @@ func TestRaft_AutoSnapshot(t *testing.T) {
 	// Make the cluster
-	conf := inmemConfig()
+	conf := inmemConfig(t)
 	conf.SnapshotInterval = 5 * time.Millisecond
 	conf.SnapshotThreshold = 50
 	conf.TrailingLogs = 10
@@ -1126,7 +1161,7 @@ func TestRaft_ManualSnapshot(t *testing.T) {
 	// Make the cluster
-	conf := inmemConfig()
+	conf := inmemConfig(t)
 	conf.SnapshotThreshold = 50
 	conf.TrailingLogs = 10
 	c := MakeCluster(1, t, conf)
@@ -1155,18 +1190,18 @@ func
TestRaft_ManualSnapshot(t *testing.T) { func TestRaft_SendSnapshotFollower(t *testing.T) { // Make the cluster - conf := inmemConfig() + conf := inmemConfig(t) conf.TrailingLogs = 10 c := MakeCluster(3, t, conf) defer c.Close() // Disconnect one follower - followers := c.GetInState(Follower) + followers := c.Followers() + leader := c.Leader() behind := followers[0] c.Disconnect(behind.localAddr) // Commit a lot of things - leader := c.Leader() var future Future for i := 0; i < 100; i++ { future = leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) @@ -1176,7 +1211,7 @@ func TestRaft_SendSnapshotFollower(t *testing.T) { if err := future.Error(); err != nil { t.Fatalf("err: %v", err) } else { - log.Printf("[INFO] Finished apply without behind follower") + t.Logf("[INFO] Finished apply without behind follower") } // Snapshot, this will truncate logs! @@ -1197,7 +1232,7 @@ func TestRaft_SendSnapshotFollower(t *testing.T) { func TestRaft_ReJoinFollower(t *testing.T) { // Enable operation after a remove - conf := inmemConfig() + conf := inmemConfig(t) conf.ShutdownOnRemove = false // Make a cluster @@ -1267,7 +1302,7 @@ func TestRaft_ReJoinFollower(t *testing.T) { func TestRaft_LeaderLeaseExpire(t *testing.T) { // Make a cluster - conf := inmemConfig() + conf := inmemConfig(t) c := MakeCluster(2, t, conf) defer c.Close() @@ -1307,7 +1342,7 @@ func TestRaft_LeaderLeaseExpire(t *testing.T) { // Verify no further contact last := follower.LastContact() - time.Sleep(50 * time.Millisecond) + time.Sleep(110 * time.Millisecond) // Check that last contact has not changed if last != follower.LastContact() { @@ -1387,7 +1422,7 @@ func TestRaft_VerifyLeader_Single(t *testing.T) { func TestRaft_VerifyLeader_Fail(t *testing.T) { // Make a cluster - conf := inmemConfig() + conf := inmemConfig(t) c := MakeCluster(2, t, conf) defer c.Close() @@ -1395,15 +1430,7 @@ func TestRaft_VerifyLeader_Fail(t *testing.T) { leader := c.Leader() // Wait until we have a followers - limit := time.Now().Add(200 * time.Millisecond) - var followers []*Raft - for time.Now().Before(limit) && len(followers) != 1 { - time.Sleep(10 * time.Millisecond) - followers = c.GetInState(Follower) - } - if len(followers) != 1 { - t.Fatalf("expected a followers: %v", followers) - } + followers := c.Followers() // Force follower to different term follower := followers[0] @@ -1425,7 +1452,7 @@ func TestRaft_VerifyLeader_Fail(t *testing.T) { func TestRaft_VerifyLeader_ParitalConnect(t *testing.T) { // Make a cluster - conf := inmemConfig() + conf := inmemConfig(t) c := MakeCluster(3, t, conf) defer c.Close() @@ -1482,3 +1509,78 @@ func TestRaft_SettingPeers(t *testing.T) { t.Fatalf("no leader?") } } + +func TestRaft_StartAsLeader(t *testing.T) { + conf := inmemConfig(t) + conf.StartAsLeader = true + c := MakeCluster(1, t, conf) + defer c.Close() + raft := c.rafts[0] + + // Watch leaderCh for change + select { + case v := <-raft.LeaderCh(): + if !v { + t.Fatalf("should become leader") + } + case <-time.After(5 * time.Millisecond): + t.Fatalf("timeout becoming leader") + } + + // Should be leader + if s := raft.State(); s != Leader { + t.Fatalf("expected leader: %v", s) + } + + // Should be able to apply + future := raft.Apply([]byte("test"), time.Millisecond) + if err := future.Error(); err != nil { + t.Fatalf("err: %v", err) + } + + // Check the response + if future.Response().(int) != 1 { + t.Fatalf("bad response: %v", future.Response()) + } + + // Check the index + if idx := future.Index(); idx == 0 { + t.Fatalf("bad index: %d", idx) + } + + // 
Check that it is applied to the FSM
+	if len(c.fsms[0].logs) != 1 {
+		t.Fatalf("did not apply to FSM!")
+	}
+}
+
+func TestRaft_NotifyCh(t *testing.T) {
+	ch := make(chan bool, 1)
+	conf := inmemConfig(t)
+	conf.NotifyCh = ch
+	c := MakeCluster(1, t, conf)
+	defer c.Close()
+
+	// Watch the notify channel for the leadership change
+	select {
+	case v := <-ch:
+		if !v {
+			t.Fatalf("should become leader")
+		}
+	case <-time.After(conf.HeartbeatTimeout * 3):
+		t.Fatalf("timeout becoming leader")
+	}
+
+	// Close the cluster
+	c.Close()
+
+	// Watch the notify channel for the step down
+	select {
+	case v := <-ch:
+		if v {
+			t.Fatalf("should step down as leader")
+		}
+	case <-time.After(conf.HeartbeatTimeout * 3):
+		t.Fatalf("timeout stepping down as leader")
+	}
+}
diff --git a/_third_party/github.com/jordan-wright/email/README.md b/_third_party/github.com/jordan-wright/email/README.md
index 53f65542fb..45d0ac6765 100644
--- a/_third_party/github.com/jordan-wright/email/README.md
+++ b/_third_party/github.com/jordan-wright/email/README.md
@@ -1,7 +1,7 @@
 email
 =====
-[![Build Status](https://travis-ci.org/jordan-wright/email.png?branch=master)](https://travis-ci.org/jordan-wright/email)
+[![Build Status](https://travis-ci.org/jordan-wright/email.png?branch=master)](https://travis-ci.org/jordan-wright/email) [![GoDoc](https://godoc.org/github.com/jordan-wright/email?status.svg)](https://godoc.org/github.com/jordan-wright/email)
 
 Robust and flexible email library for Go
@@ -20,7 +20,9 @@ The ```email``` package currently supports the following:
 ### Installation
 ```go get github.com/jordan-wright/email```
-*Note: Requires go version 1.1 and above*
+*Note: Version > 1 of this library requires Go v1.5 or above.*
+
+*If you need compatibility with previous Go versions, you can use the previous package at gopkg.in/jordan-wright/email.v1*
 
 ### Examples
 #### Sending email using Gmail
@@ -49,6 +51,9 @@ e := &email.Email {
 }
 ```
 
+#### Creating an Email From an io.Reader
+You can also create an email from any type that implements the ```io.Reader``` interface by using ```email.NewEmailFromReader```.
+
 #### Attaching a File
 ```
 e := NewEmail()
@@ -64,4 +69,4 @@ Sections inspired by the handy [gophermail](https://github.com/jpoehls/gophermail) package.
 
 ### Contributors
 I'd like to thank all the [contributors and maintainers](https://github.com/jordan-wright/email/graphs/contributors) of this package.
-A special thanks goes out to Jed Denlea [jeddenlea](https://github.com/jeddenlea) for his numerous contributions and optimizations.
\ No newline at end of file
+A special thanks goes out to Jed Denlea [jeddenlea](https://github.com/jeddenlea) for his numerous contributions and optimizations.
diff --git a/_third_party/github.com/jordan-wright/email/email_test.go b/_third_party/github.com/jordan-wright/email/email_test.go
index 7f61c53d1e..780e9fd134 100644
--- a/_third_party/github.com/jordan-wright/email/email_test.go
+++ b/_third_party/github.com/jordan-wright/email/email_test.go
@@ -9,6 +9,7 @@ import (
 	"io/ioutil"
 	"mime"
 	"mime/multipart"
+	"mime/quotedprintable"
 	"net/mail"
 	"net/smtp"
 )
@@ -103,6 +104,54 @@ func TestEmailTextHtmlAttachment(t *testing.T) {
 
 }
 
+func TestEmailFromReader(t *testing.T) {
+	ex := &Email{
+		Subject: "Test Subject",
+		To:      []string{"Jordan Wright "},
+		From:    "Jordan Wright ",
+		Text:    []byte("This is a test email with HTML Formatting. It also has very long lines so\nthat the content must be wrapped if using quoted-printable decoding.\n"),
+		HTML:    []byte("
This is a test email with HTML Formatting.\u00a0It also has very long lines so that the content must be wrapped if using quoted-printable decoding.
\n"), + } + raw := []byte(`MIME-Version: 1.0 +Subject: Test Subject +From: Jordan Wright +To: Jordan Wright +Content-Type: multipart/alternative; boundary=001a114fb3fc42fd6b051f834280 + +--001a114fb3fc42fd6b051f834280 +Content-Type: text/plain; charset=UTF-8 + +This is a test email with HTML Formatting. It also has very long lines so +that the content must be wrapped if using quoted-printable decoding. + +--001a114fb3fc42fd6b051f834280 +Content-Type: text/html; charset=UTF-8 +Content-Transfer-Encoding: quoted-printable + +
This is a test email with HTML Formatting.=C2=A0It = +also has very long lines so that the content must be wrapped if using quote= +d-printable decoding.
+ +--001a114fb3fc42fd6b051f834280--`) + e, err := NewEmailFromReader(bytes.NewReader(raw)) + if err != nil { + t.Fatalf("Error creating email %s", err.Error()) + } + if e.Subject != ex.Subject { + t.Fatalf("Incorrect subject. %#q != %#q", e.Subject, ex.Subject) + } + if !bytes.Equal(e.Text, ex.Text) { + t.Fatalf("Incorrect text: %#q != %#q", e.Text, ex.Text) + } + if !bytes.Equal(e.HTML, ex.HTML) { + t.Fatalf("Incorrect HTML: %#q != %#q", e.HTML, ex.HTML) + } + if e.From != ex.From { + t.Fatalf("Incorrect \"From\": %#q != %#q", e.From, ex.From) + } + +} + func ExampleGmail() { e := NewEmail() e.From = "Jordan Wright " @@ -135,6 +184,7 @@ func Test_base64Wrap(t *testing.T) { } } +// *Since the mime library in use by ```email``` is now in the stdlib, this test is deprecated func Test_quotedPrintEncode(t *testing.T) { var buf bytes.Buffer text := []byte("Dear reader!\n\n" + @@ -147,30 +197,44 @@ func Test_quotedPrintEncode(t *testing.T) { " within\r\n" + "the quoted-printable encoding.\r\n" + "There are some wacky parts like =3D, and this input assumes UNIX line break=\r\n" + - "s so=0D\r\n" + + "s so\r\n" + "it can come out a little weird. Also, we need to support unicode so here's=\r\n" + " a fish: =F0=9F=90=9F\r\n") - - if err := quotePrintEncode(&buf, text); err != nil { + qp := quotedprintable.NewWriter(&buf) + if _, err := qp.Write(text); err != nil { t.Fatal("quotePrintEncode: ", err) } - + if err := qp.Close(); err != nil { + t.Fatal("Error closing writer", err) + } if b := buf.Bytes(); !bytes.Equal(b, expected) { t.Errorf("quotedPrintEncode generated incorrect results: %#q != %#q", b, expected) } } -func Benchmark_quotedPrintEncode(b *testing.B) { - text := []byte("Dear reader!\n\n" + - "This is a test email to try and capture some of the corner cases that exist within\n" + - "the quoted-printable encoding.\n" + +// *Since the mime library in use by ```email``` is now in the stdlib, this test is deprecated +func Test_quotedPrintDecode(t *testing.T) { + text := []byte("Dear reader!\r\n\r\n" + + "This is a test email to try and capture some of the corner cases that exist=\r\n" + + " within\r\n" + + "the quoted-printable encoding.\r\n" + + "There are some wacky parts like =3D, and this input assumes UNIX line break=\r\n" + + "s so\r\n" + + "it can come out a little weird. Also, we need to support unicode so here's=\r\n" + + " a fish: =F0=9F=90=9F\r\n") + expected := []byte("Dear reader!\r\n\r\n" + + "This is a test email to try and capture some of the corner cases that exist within\r\n" + + "the quoted-printable encoding.\r\n" + "There are some wacky parts like =, and this input assumes UNIX line breaks so\r\n" + - "it can come out a little weird. Also, we need to support unicode so here's a fish: 🐟\n") + "it can come out a little weird. 
Also, we need to support unicode so here's a fish: 🐟\r\n") + qp := quotedprintable.NewReader(bytes.NewReader(text)) + got, err := ioutil.ReadAll(qp) + if err != nil { + t.Fatal("quotePrintDecode: ", err) + } - for i := 0; i <= b.N; i++ { - if err := quotePrintEncode(ioutil.Discard, text); err != nil { - panic(err) - } + if !bytes.Equal(got, expected) { + t.Errorf("quotedPrintDecode generated incorrect results: %#q != %#q", got, expected) } } diff --git a/_third_party/github.com/llgcode/draw2d/AUTHORS b/_third_party/github.com/llgcode/draw2d/AUTHORS new file mode 100644 index 0000000000..6490085a4e --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/AUTHORS @@ -0,0 +1,2 @@ +Laurent Le Goff +Stani Michiels, gmail:stani.be \ No newline at end of file diff --git a/_third_party/github.com/llgcode/draw2d/LICENSE b/_third_party/github.com/llgcode/draw2d/LICENSE new file mode 100644 index 0000000000..c495dbc5d0 --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2010, Laurent Le Goff +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, +OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/_third_party/github.com/llgcode/draw2d/README.md b/_third_party/github.com/llgcode/draw2d/README.md new file mode 100644 index 0000000000..6444ff62ae --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/README.md @@ -0,0 +1,124 @@ +draw2d +====== +[![Coverage](http://gocover.io/_badge/github.com/llgcode/draw2d?0)](http://gocover.io/github.com/llgcode/draw2d) +[![GoDoc](https://godoc.org/github.com/llgcode/draw2d?status.svg)](https://godoc.org/github.com/llgcode/draw2d) + +Package draw2d is a pure [go](http://golang.org) 2D vector graphics library with support for multiple output devices such as [images](http://golang.org/pkg/image) (draw2d), pdf documents (draw2dpdf) and opengl (draw2dgl), which can also be used on the google app engine. It can be used as a pure go [Cairo](http://www.cairographics.org/) alternative. draw2d is released under the BSD license. See the [documentation](http://godoc.org/github.com/llgcode/draw2d) for more details. 
+
+[![geometry](https://raw.githubusercontent.com/llgcode/draw2d/master/output/samples/geometry.png)](https://raw.githubusercontent.com/llgcode/draw2d/master/resource/image/geometry.pdf)[![postscript](https://raw.githubusercontent.com/llgcode/draw2d/master/output/samples/postscript.png)](https://raw.githubusercontent.com/llgcode/draw2d/master/resource/image/postscript.pdf)
+
+Click on an image above to get the pdf, generated with exactly the same draw2d code. The first image is the output of `samples/geometry`. The second image is the result of `samples/postscript`, which demonstrates that draw2d can draw postscript files into images or pdf documents with the [ps](https://github.com/llgcode/ps) package.
+
+Features
+--------
+
+Operations in draw2d include stroking and filling polygons, arcs, Bézier curves, drawing images and text rendering with truetype fonts. All drawing operations can be transformed by affine transformations (scale, rotation, translation).
+
+Package draw2d follows the conventions of the [HTML Canvas 2D Context](http://www.w3.org/TR/2dcontext/) for coordinate system, angles, etc...
+
+Installation
+------------
+
+Install [golang](http://golang.org/doc/install). To install or update the package draw2d on your system, run:
+
+Stable release
+```
+go get -u gopkg.in/llgcode/draw2d.v1
+```
+
+or the current release
+```
+go get -u github.com/llgcode/draw2d
+```
+
+
+Quick Start
+-----------
+
+The following Go code generates a simple drawing and saves it to an image file with package draw2d:
+
+```go
+// Initialize the graphic context on an RGBA image
+dest := image.NewRGBA(image.Rect(0, 0, 297, 210.0))
+gc := draw2dimg.NewGraphicContext(dest)
+
+// Set some properties
+gc.SetFillColor(color.RGBA{0x44, 0xff, 0x44, 0xff})
+gc.SetStrokeColor(color.RGBA{0x44, 0x44, 0x44, 0xff})
+gc.SetLineWidth(5)
+
+// Draw a closed shape
+gc.MoveTo(10, 10) // should always be called first for a new path
+gc.LineTo(100, 50)
+gc.QuadCurveTo(100, 10, 10, 10)
+gc.Close()
+gc.FillStroke()
+
+// Save to file
+draw2dimg.SaveToPngFile("hello.png", dest)
+```
+
+The same Go code can also generate a pdf document with package draw2dpdf:
+
+```go
+// Initialize the graphic context on a pdf document
+dest := draw2dpdf.NewPdf("L", "mm", "A4")
+gc := draw2dpdf.NewGraphicContext(dest)
+
+// Set some properties
+gc.SetFillColor(color.RGBA{0x44, 0xff, 0x44, 0xff})
+gc.SetStrokeColor(color.RGBA{0x44, 0x44, 0x44, 0xff})
+gc.SetLineWidth(5)
+
+// Draw a closed shape
+gc.MoveTo(10, 10) // should always be called first for a new path
+gc.LineTo(100, 50)
+gc.QuadCurveTo(100, 10, 10, 10)
+gc.Close()
+gc.FillStroke()
+
+// Save to file
+draw2dpdf.SaveToPdfFile("hello.pdf", dest)
+```
+
+There are more examples here: https://github.com/llgcode/draw2d/tree/master/samples
+
+Drawing on opengl is provided by the draw2dgl package.
+
+Testing
+-------
+
+The samples are run as tests from the root package folder `draw2d` by:
+```
+go test ./...
+```
+Or if you want to run with test coverage:
+```
+go test -cover ./... | grep -v "no test"
+```
+This will generate output from the different backends in the output folder.
+
+Acknowledgments
+---------------
+
+[Laurent Le Goff](https://github.com/llgcode) wrote this library, inspired by [Postscript](http://www.tailrecursive.org/postscript) and [HTML5 canvas](http://www.w3.org/TR/2dcontext/). He implemented the image and opengl backend with the [freetype-go](https://code.google.com/p/freetype-go/) package.
Also he created a pure go [Postscript interpreter](https://github.com/llgcode/ps), which can read postscript images and draw to a draw2d graphic context. [Stani Michiels](https://github.com/stanim) implemented the pdf backend with the [gofpdf](https://github.com/jung-kurt/gofpdf) package. + + + +Packages using draw2d +--------------------- + + - [ps](https://github.com/llgcode/ps): Postscript interpreter written in Go + - [gonum/plot](https://github.com/gonum/plot): drawing plots in Go + - [go.uik](https://github.com/skelterjohn/go.uik): a concurrent UI kit written in pure go. + - [smartcrop](https://github.com/muesli/smartcrop): content aware image cropping + - [karta](https://github.com/peterhellberg/karta): drawing Voronoi diagrams + - [chart](https://github.com/vdobler/chart): basic charts in Go + - [hilbert](https://github.com/google/hilbert): package for drawing Hilbert curves + +References +--------- + + - [antigrain.com](http://www.antigrain.com) + - [freetype-go](http://code.google.com/p/freetype-go) + - diff --git a/_third_party/github.com/llgcode/draw2d/draw2d.go b/_third_party/github.com/llgcode/draw2d/draw2d.go new file mode 100644 index 0000000000..54a7e3f071 --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/draw2d.go @@ -0,0 +1,228 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 13/12/2010 by Laurent Le Goff + +// Package draw2d is a pure go 2D vector graphics library with support +// for multiple output devices such as images (draw2d), pdf documents +// (draw2dpdf) and opengl (draw2dgl), which can also be used on the +// google app engine. It can be used as a pure go Cairo alternative. +// draw2d is released under the BSD license. +// +// Features +// +// Operations in draw2d include stroking and filling polygons, arcs, +// Bézier curves, drawing images and text rendering with truetype fonts. +// All drawing operations can be transformed by affine transformations +// (scale, rotation, translation). +// +// Package draw2d follows the conventions of http://www.w3.org/TR/2dcontext for coordinate system, angles, etc... +// +// Installation +// +// To install or update the package draw2d on your system, run: +// go get -u github.com/llgcode/draw2d +// +// Quick Start +// +// Package draw2d itself provides a graphic context that can draw vector +// graphics and text on an image canvas. The following Go code +// generates a simple drawing and saves it to an image file: +// // Initialize the graphic context on an RGBA image +// dest := image.NewRGBA(image.Rect(0, 0, 297, 210.0)) +// gc := draw2dimg.NewGraphicContext(dest) +// +// // Set some properties +// gc.SetFillColor(color.RGBA{0x44, 0xff, 0x44, 0xff}) +// gc.SetStrokeColor(color.RGBA{0x44, 0x44, 0x44, 0xff}) +// gc.SetLineWidth(5) +// +// // Draw a closed shape +// gc.MoveTo(10, 10) // should always be called first for a new path +// gc.LineTo(100, 50) +// gc.QuadCurveTo(100, 10, 10, 10) +// gc.Close() +// gc.FillStroke() +// +// // Save to file +// draw2d.SaveToPngFile("hello.png", dest) +// +// There are more examples here: +// https://github.com/llgcode/draw2d/tree/master/samples +// +// Drawing on pdf documents is provided by the draw2dpdf package. +// Drawing on opengl is provided by the draw2dgl package. +// See subdirectories at the bottom of this page. +// +// Testing +// +// The samples are run as tests from the root package folder `draw2d` by: +// go test ./... +// +// Or if you want to run with test coverage: +// go test -cover ./... 
| grep -v "no test" +// +// This will generate output by the different backends in the output folder. +// +// Acknowledgments +// +// Laurent Le Goff wrote this library, inspired by Postscript and +// HTML5 canvas. He implemented the image and opengl backend with the +// freetype-go package. Also he created a pure go Postscript +// interpreter, which can read postscript images and draw to a draw2d +// graphic context (https://github.com/llgcode/ps). Stani Michiels +// implemented the pdf backend with the gofpdf package. +// +// Packages using draw2d +// +// - https://github.com/llgcode/ps: Postscript interpreter written in Go +// +// - https://github.com/gonum/plot: drawing plots in Go +// +// - https://github.com/muesli/smartcrop: content aware image cropping +// +// - https://github.com/peterhellberg/karta: drawing Voronoi diagrams +// +// - https://github.com/vdobler/chart: basic charts in Go +package draw2d + +import "image/color" + +// FillRule defines the type for fill rules +type FillRule int + +const ( + // FillRuleEvenOdd determines the "insideness" of a point in the shape + // by drawing a ray from that point to infinity in any direction + // and counting the number of path segments from the given shape that the ray crosses. + // If this number is odd, the point is inside; if even, the point is outside. + FillRuleEvenOdd FillRule = iota + // FillRuleWinding determines the "insideness" of a point in the shape + // by drawing a ray from that point to infinity in any direction + // and then examining the places where a segment of the shape crosses the ray. + // Starting with a count of zero, add one each time a path segment crosses + // the ray from left to right and subtract one each time + // a path segment crosses the ray from right to left. After counting the crossings, + // if the result is zero then the point is outside the path. Otherwise, it is inside. 
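+	//
+	// Illustrative example (an added note, not from the upstream comment): a
+	// five-pointed star drawn as one self-intersecting path has winding count 2
+	// in its central pentagon, so FillRuleWinding fills the center, while
+	// FillRuleEvenOdd sees two crossings (an even count) and leaves it hollow.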
+	FillRuleWinding
+)
+
+// LineCap is the style of line extremities
+type LineCap int
+
+const (
+	// RoundCap defines a rounded shape at the end of the line
+	RoundCap LineCap = iota
+	// ButtCap defines a squared shape exactly at the end of the line
+	ButtCap
+	// SquareCap defines a squared shape at the end of the line
+	SquareCap
+)
+
+// LineJoin is the style used to join two segments
+type LineJoin int
+
+const (
+	// BevelJoin represents a cut-off (beveled) segment join
+	BevelJoin LineJoin = iota
+	// RoundJoin represents a rounded segment join
+	RoundJoin
+	// MiterJoin represents a pointed (mitered) segment join
+	MiterJoin
+)
+
+// StrokeStyle keeps stroke style attributes
+// that are used by the Stroke method of a Drawer
+type StrokeStyle struct {
+	// Color defines the color of the stroke
+	Color color.Color
+	// Width is the line width
+	Width float64
+	// LineCap is the line cap style: round, butt or square
+	LineCap LineCap
+	// LineJoin is the line join style: bevel, round or miter
+	LineJoin LineJoin
+	// DashOffset is the offset of the first dash
+	DashOffset float64
+	// Dash is an array of dash lengths: values at even indices are dash
+	// lengths, values at odd indices are gaps between dashes.
+	// If empty, a solid line is drawn.
+	Dash []float64
+}
+
+// SolidFillStyle defines style attributes for a solid fill style
+type SolidFillStyle struct {
+	// Color defines the fill color
+	Color color.Color
+	// FillRule defines the fill rule to use
+	FillRule FillRule
+}
+
+// Valign is the vertical alignment of the text
+type Valign int
+
+const (
+	// ValignTop aligns the text to the top
+	ValignTop Valign = iota
+	// ValignCenter centers the text
+	ValignCenter
+	// ValignBottom aligns the text to the bottom
+	ValignBottom
+	// ValignBaseline aligns the text with the baseline of the font
+	ValignBaseline
+)
+
+// Halign is the horizontal alignment of the text
+type Halign int
+
+const (
+	// HalignLeft aligns the text horizontally to the left
+	HalignLeft Halign = iota
+	// HalignCenter aligns the text horizontally to the center
+	HalignCenter
+	// HalignRight aligns the text horizontally to the right
+	HalignRight
+)
+
+// TextStyle describes text properties
+type TextStyle struct {
+	// Color defines the color of the text
+	Color color.Color
+	// Size is the font size
+	Size float64
+	// Font is the font to use
+	Font FontData
+	// Halign is the horizontal alignment of the text
+	Halign Halign
+	// Valign is the vertical alignment of the text
+	Valign Valign
+}
+
+// ScalingPolicy is a constant that defines how to scale an image
+type ScalingPolicy int
+
+const (
+	// ScalingNone applies no scaling
+	ScalingNone ScalingPolicy = iota
+	// ScalingStretch stretches the image so that its width and height are exactly the given width and height
+	ScalingStretch
+	// ScalingWidth scales the image so that its width is exactly the given width
+	ScalingWidth
+	// ScalingHeight scales the image so that its height is exactly the given height
+	ScalingHeight
+	// ScalingFit scales the image to the largest scale that allows the image to fit within a rectangle width x height
+	ScalingFit
+	// ScalingSameArea scales the image so that its area is exactly the area of the given rectangle width x height
+	ScalingSameArea
+	// ScalingFill scales the image to the smallest scale that allows the image to fully cover a rectangle width x height
+	ScalingFill
+)
+
+// ImageScaling describes the style attributes used to display an image
+type ImageScaling struct {
+	// Halign is the horizontal alignment of the image
+	Halign Halign
+	// Valign is the vertical alignment of the image
+	Valign Valign
+	// Width and Height are used by the scaling policy
+	Width, Height float64
+	// ScalingPolicy defines the scaling policy to apply to the image
+	ScalingPolicy ScalingPolicy
+}
diff --git
a/_third_party/github.com/llgcode/draw2d/draw2dbase/README.md b/_third_party/github.com/llgcode/draw2d/draw2dbase/README.md
new file mode 100644
index 0000000000..3296f5ec5a
--- /dev/null
+++ b/_third_party/github.com/llgcode/draw2d/draw2dbase/README.md
@@ -0,0 +1,7 @@
+draw2d/draw2dbase
+=================
+
+[![Coverage](http://gocover.io/_badge/github.com/llgcode/draw2d/draw2dbase?0)](http://gocover.io/github.com/llgcode/draw2d/draw2dbase)
+[![GoDoc](https://godoc.org/github.com/llgcode/draw2d/draw2dbase?status.svg)](https://godoc.org/github.com/llgcode/draw2d/draw2dbase)
+
+Base implementation package used by the pdf, svg, img and gl implementations.
diff --git a/_third_party/github.com/llgcode/draw2d/draw2dbase/curve.go b/_third_party/github.com/llgcode/draw2d/draw2dbase/curve.go
new file mode 100644
index 0000000000..211e107f15
--- /dev/null
+++ b/_third_party/github.com/llgcode/draw2d/draw2dbase/curve.go
@@ -0,0 +1,161 @@
+// Copyright 2010 The draw2d Authors. All rights reserved.
+// created: 17/05/2011 by Laurent Le Goff
+
+package draw2dbase
+
+import (
+	"math"
+)
+
+const (
+	// CurveRecursionLimit is the maximum recursion depth needed to subdivide a curve into straight lines
+	CurveRecursionLimit = 32
+)
+
+// Cubic
+// x1, y1, cpx1, cpy1, cpx2, cpy2, x2, y2 float64
+
+// SubdivideCubic subdivides a cubic Bézier curve into 2 equivalent cubic Bézier curves.
+// c1 and c2 parameters are the resulting curves
+func SubdivideCubic(c, c1, c2 []float64) {
+	// First point of c is the first point of c1
+	c1[0], c1[1] = c[0], c[1]
+	// Last point of c is the last point of c2
+	c2[6], c2[7] = c[6], c[7]
+
+	// Subdivide segment using midpoints
+	c1[2] = (c[0] + c[2]) / 2
+	c1[3] = (c[1] + c[3]) / 2
+
+	midX := (c[2] + c[4]) / 2
+	midY := (c[3] + c[5]) / 2
+
+	c2[4] = (c[4] + c[6]) / 2
+	c2[5] = (c[5] + c[7]) / 2
+
+	c1[4] = (c1[2] + midX) / 2
+	c1[5] = (c1[3] + midY) / 2
+
+	c2[2] = (midX + c2[4]) / 2
+	c2[3] = (midY + c2[5]) / 2
+
+	c1[6] = (c1[4] + c2[2]) / 2
+	c1[7] = (c1[5] + c2[3]) / 2
+
+	// Last point of c1 is equal to the first point of c2
+	c2[0], c2[1] = c1[6], c1[7]
+}
+
+// TraceCubic generates lines subdividing the cubic curve using a Liner.
+// flatteningThreshold determines how closely the generated segments must follow the curve.
+func TraceCubic(t Liner, cubic []float64, flatteningThreshold float64) {
+	// Allocates curves stack
+	var curves [CurveRecursionLimit * 8]float64
+	copy(curves[0:8], cubic[0:8])
+	i := 0
+
+	// current curve
+	var c []float64
+
+	var dx, dy, d2, d3 float64
+
+	for i >= 0 {
+		c = curves[i*8:]
+		dx = c[6] - c[0]
+		dy = c[7] - c[1]
+
+		d2 = math.Abs((c[2]-c[6])*dy - (c[3]-c[7])*dx)
+		d3 = math.Abs((c[4]-c[6])*dy - (c[5]-c[7])*dx)
+
+		// if it's flat then trace a line
+		if (d2+d3)*(d2+d3) < flatteningThreshold*(dx*dx+dy*dy) || i == len(curves)-1 {
+			t.LineTo(c[6], c[7])
+			i--
+		} else {
+			// the second half of the Bézier goes lower onto the stack
+			SubdivideCubic(c, curves[(i+1)*8:], curves[i*8:])
+			i++
+		}
+	}
+}
+
+// Quad
+// x1, y1, cpx1, cpy1, x2, y2 float64
+
+// SubdivideQuad subdivides a quadratic Bézier curve into 2 equivalent quadratic Bézier curves.
+// c1 and c2 parameters are the resulting curves
+func SubdivideQuad(c, c1, c2 []float64) {
+	// First point of c is the first point of c1
+	c1[0], c1[1] = c[0], c[1]
+	// Last point of c is the last point of c2
+	c2[4], c2[5] = c[4], c[5]
+
+	// Subdivide segment using midpoints
+	c1[2] = (c[0] + c[2]) / 2
+	c1[3] = (c[1] + c[3]) / 2
+	c2[2] = (c[2] + c[4]) / 2
+	c2[3] = (c[3] + c[5]) / 2
+	c1[4] = (c1[2] + c2[2]) / 2
+	c1[5] = (c1[3] + c2[3]) / 2
+	c2[0], c2[1] = c1[4], c1[5]
+}
+
+// TraceQuad generates lines subdividing the quadratic curve using a Liner.
+// flatteningThreshold determines how closely the generated segments must follow the curve.
+func TraceQuad(t Liner, quad []float64, flatteningThreshold float64) {
+	// Allocates curves stack
+	var curves [CurveRecursionLimit * 6]float64
+	copy(curves[0:6], quad[0:6])
+	i := 0
+	// current curve
+	var c []float64
+	var dx, dy, d float64
+
+	for i >= 0 {
+		c = curves[i*6:]
+		dx = c[4] - c[0]
+		dy = c[5] - c[1]
+
+		d = math.Abs(((c[2]-c[4])*dy - (c[3]-c[5])*dx))
+
+		// if it's flat then trace a line
+		if (d*d) < flatteningThreshold*(dx*dx+dy*dy) || i == len(curves)-1 {
+			t.LineTo(c[4], c[5])
+			i--
+		} else {
+			// the second half of the Bézier goes lower onto the stack
+			SubdivideQuad(c, curves[(i+1)*6:], curves[i*6:])
+			i++
+		}
+	}
+}
+
+// TraceArc traces an arc using a Liner
+func TraceArc(t Liner, x, y, rx, ry, start, angle, scale float64) (lastX, lastY float64) {
+	end := start + angle
+	clockWise := true
+	if angle < 0 {
+		clockWise = false
+	}
+	ra := (math.Abs(rx) + math.Abs(ry)) / 2
+	da := math.Acos(ra/(ra+0.125/scale)) * 2
+	// normalize
+	if !clockWise {
+		da = -da
+	}
+	angle = start + da
+	var curX, curY float64
+	for {
+		if (angle < end-da/4) != clockWise {
+			curX = x + math.Cos(end)*rx
+			curY = y + math.Sin(end)*ry
+			return curX, curY
+		}
+		curX = x + math.Cos(angle)*rx
+		curY = y + math.Sin(angle)*ry
+
+		angle += da
+		t.LineTo(curX, curY)
+	}
+}
diff --git a/_third_party/github.com/llgcode/draw2d/draw2dbase/curve_test.go b/_third_party/github.com/llgcode/draw2d/draw2dbase/curve_test.go
new file mode 100644
index 0000000000..fa954dfbff
--- /dev/null
+++ b/_third_party/github.com/llgcode/draw2d/draw2dbase/curve_test.go
@@ -0,0 +1,134 @@
+package draw2dbase
+
+import (
+	"bufio"
+	"fmt"
+	"image"
+	"image/color"
+	"image/draw"
+	"image/png"
+	"log"
+	"os"
+	"testing"
+)
+
+var (
+	flatteningThreshold = 0.5
+	testsCubicFloat64   = []float64{
+		100, 100, 200, 100, 100, 200, 200, 200,
+		100, 100, 300, 200, 200, 200, 300, 100,
+		100, 100, 0, 300, 200, 0, 300, 300,
+		150, 290, 10, 10, 290, 10, 150, 290,
+		10, 290, 10, 10, 290, 10, 290, 290,
+		100, 290, 290, 10, 10, 10, 200, 290,
+	}
+	testsQuadFloat64 = []float64{
+		100, 100, 200, 100, 200, 200,
+		100, 100, 290, 200, 290, 100,
+		100, 100, 0, 290, 200, 290,
+		150, 290, 10, 10, 290, 290,
+		10, 290, 10, 10, 290, 290,
+		100, 290, 290, 10, 120, 290,
+	}
+)
+
+func init() {
+	os.Mkdir("test_results", 0666)
+	f, err := os.Create("../output/curve/_test.html")
+	if err != nil {
+		log.Println(err)
+		os.Exit(1)
+	}
+	defer f.Close()
+	log.Printf("Create html viewer")
+	// write a minimal html viewer listing the generated images
+	f.Write([]byte("<html><body>"))
+	for i := 0; i < len(testsCubicFloat64)/8; i++ {
+		f.Write([]byte(fmt.Sprintf("<div><img src='_test%d.png'/></div>\n", i)))
+	}
+	for i := 0; i < len(testsQuadFloat64); i++ {
+		f.Write([]byte(fmt.Sprintf("<div><img src='_testQuad%d.png'/></div>\n", i)))
+	}
+	f.Write([]byte("</body></html>"))
+}
+
+func drawPoints(img draw.Image, c color.Color, s ...float64) image.Image {
+	for i := 0; i < len(s); i += 2 {
+		x, y := int(s[i]+0.5), int(s[i+1]+0.5)
+		img.Set(x, y, c)
+		img.Set(x, y+1, c)
+		img.Set(x, y-1, c)
+		img.Set(x+1, y, c)
+		img.Set(x+1, y+1, c)
+		img.Set(x+1, y-1, c)
+		img.Set(x-1, y, c)
+		img.Set(x-1, y+1, c)
+		img.Set(x-1, y-1, c)
+	}
+	return img
+}
+
+func TestCubicCurve(t *testing.T) {
+	for i := 0; i < len(testsCubicFloat64); i += 8 {
+		var p SegmentedPath
+		p.MoveTo(testsCubicFloat64[i], testsCubicFloat64[i+1])
+		TraceCubic(&p, testsCubicFloat64[i:], flatteningThreshold)
+		img := image.NewNRGBA(image.Rect(0, 0, 300, 300))
+		PolylineBresenham(img, color.NRGBA{0xff, 0, 0, 0xff}, testsCubicFloat64[i:i+8]...)
+		PolylineBresenham(img, image.Black, p.Points...)
+		//drawPoints(img, image.NRGBAColor{0, 0, 0, 0xff}, curve[:]...)
+		drawPoints(img, color.NRGBA{0, 0, 0, 0xff}, p.Points...)
+		SaveToPngFile(fmt.Sprintf("../output/curve/_test%d.png", i/8), img)
+		log.Printf("Num of points: %d\n", len(p.Points))
+	}
+	fmt.Println()
+}
+
+func TestQuadCurve(t *testing.T) {
+	for i := 0; i < len(testsQuadFloat64); i += 6 {
+		var p SegmentedPath
+		p.MoveTo(testsQuadFloat64[i], testsQuadFloat64[i+1])
+		TraceQuad(&p, testsQuadFloat64[i:], flatteningThreshold)
+		img := image.NewNRGBA(image.Rect(0, 0, 300, 300))
+		PolylineBresenham(img, color.NRGBA{0xff, 0, 0, 0xff}, testsQuadFloat64[i:i+6]...)
+		PolylineBresenham(img, image.Black, p.Points...)
+		//drawPoints(img, image.NRGBAColor{0, 0, 0, 0xff}, curve[:]...)
+		drawPoints(img, color.NRGBA{0, 0, 0, 0xff}, p.Points...)
+		SaveToPngFile(fmt.Sprintf("../output/curve/_testQuad%d.png", i), img)
+		log.Printf("Num of points: %d\n", len(p.Points))
+	}
+	fmt.Println()
+}
+
+func BenchmarkCubicCurve(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		for i := 0; i < len(testsCubicFloat64); i += 8 {
+			var p SegmentedPath
+			p.MoveTo(testsCubicFloat64[i], testsCubicFloat64[i+1])
+			TraceCubic(&p, testsCubicFloat64[i:], flatteningThreshold)
+		}
+	}
+}
+
+// SaveToPngFile creates and saves an image to a file using PNG format
+func SaveToPngFile(filePath string, m image.Image) error {
+	// Create the file
+	f, err := os.Create(filePath)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	// Create Writer from file
+	b := bufio.NewWriter(f)
+	// Write the image into the buffer
+	err = png.Encode(b, m)
+	if err != nil {
+		return err
+	}
+	err = b.Flush()
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/_third_party/code.google.com/p/draw2d/draw2d/dasher.go b/_third_party/github.com/llgcode/draw2d/draw2dbase/dasher.go
similarity index 59%
rename from _third_party/code.google.com/p/draw2d/draw2d/dasher.go
rename to _third_party/github.com/llgcode/draw2d/draw2dbase/dasher.go
index 5210299922..6f8260c72a 100644
--- a/_third_party/code.google.com/p/draw2d/draw2d/dasher.go
+++ b/_third_party/github.com/llgcode/draw2d/draw2dbase/dasher.go
@@ -1,51 +1,48 @@
 // Copyright 2010 The draw2d Authors. All rights reserved.
// created: 13/12/2010 by Laurent Le Goff -package draw2d +package draw2dbase type DashVertexConverter struct { - command VertexCommand - next VertexConverter + next Flattener x, y, distance float64 dash []float64 currentDash int dashOffset float64 } -func NewDashConverter(dash []float64, dashOffset float64, converter VertexConverter) *DashVertexConverter { +func NewDashConverter(dash []float64, dashOffset float64, flattener Flattener) *DashVertexConverter { var dasher DashVertexConverter dasher.dash = dash dasher.currentDash = 0 dasher.dashOffset = dashOffset - dasher.next = converter + dasher.next = flattener return &dasher } -func (dasher *DashVertexConverter) NextCommand(cmd VertexCommand) { - dasher.command = cmd - if dasher.command == VertexStopCommand { - dasher.next.NextCommand(VertexStopCommand) - } -} - -func (dasher *DashVertexConverter) Vertex(x, y float64) { - switch dasher.command { - case VertexStartCommand: - dasher.start(x, y) - default: - dasher.lineTo(x, y) - } - dasher.command = VertexNoCommand +func (dasher *DashVertexConverter) LineTo(x, y float64) { + dasher.lineTo(x, y) } -func (dasher *DashVertexConverter) start(x, y float64) { - dasher.next.NextCommand(VertexStartCommand) - dasher.next.Vertex(x, y) +func (dasher *DashVertexConverter) MoveTo(x, y float64) { + dasher.next.MoveTo(x, y) dasher.x, dasher.y = x, y dasher.distance = dasher.dashOffset dasher.currentDash = 0 } +func (dasher *DashVertexConverter) LineJoin() { + dasher.next.LineJoin() +} + +func (dasher *DashVertexConverter) Close() { + dasher.next.Close() +} + +func (dasher *DashVertexConverter) End() { + dasher.next.End() +} + func (dasher *DashVertexConverter) lineTo(x, y float64) { rest := dasher.dash[dasher.currentDash] - dasher.distance for rest < 0 { @@ -60,12 +57,11 @@ func (dasher *DashVertexConverter) lineTo(x, y float64) { ly := dasher.y + k*(y-dasher.y) if dasher.currentDash%2 == 0 { // line - dasher.next.Vertex(lx, ly) + dasher.next.LineTo(lx, ly) } else { // gap - dasher.next.NextCommand(VertexStopCommand) - dasher.next.NextCommand(VertexStartCommand) - dasher.next.Vertex(lx, ly) + dasher.next.End() + dasher.next.MoveTo(lx, ly) } d = d - rest dasher.x, dasher.y = lx, ly @@ -75,12 +71,11 @@ func (dasher *DashVertexConverter) lineTo(x, y float64) { dasher.distance = d if dasher.currentDash%2 == 0 { // line - dasher.next.Vertex(x, y) + dasher.next.LineTo(x, y) } else { // gap - dasher.next.NextCommand(VertexStopCommand) - dasher.next.NextCommand(VertexStartCommand) - dasher.next.Vertex(x, y) + dasher.next.End() + dasher.next.MoveTo(x, y) } if dasher.distance >= dasher.dash[dasher.currentDash] { dasher.distance = dasher.distance - dasher.dash[dasher.currentDash] @@ -88,3 +83,7 @@ func (dasher *DashVertexConverter) lineTo(x, y float64) { } dasher.x, dasher.y = x, y } + +func distance(x1, y1, x2, y2 float64) float64 { + return vectorDistance(x2-x1, y2-y1) +} diff --git a/_third_party/github.com/llgcode/draw2d/draw2dbase/demux_flattener.go b/_third_party/github.com/llgcode/draw2d/draw2dbase/demux_flattener.go new file mode 100644 index 0000000000..13b6c40838 --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/draw2dbase/demux_flattener.go @@ -0,0 +1,35 @@ +package draw2dbase + +type DemuxFlattener struct { + Flatteners []Flattener +} + +func (dc DemuxFlattener) MoveTo(x, y float64) { + for _, flattener := range dc.Flatteners { + flattener.MoveTo(x, y) + } +} + +func (dc DemuxFlattener) LineTo(x, y float64) { + for _, flattener := range dc.Flatteners { + flattener.LineTo(x, y) + } +} + +func (dc 
DemuxFlattener) LineJoin() {
+	for _, flattener := range dc.Flatteners {
+		flattener.LineJoin()
+	}
+}
+
+func (dc DemuxFlattener) Close() {
+	for _, flattener := range dc.Flatteners {
+		flattener.Close()
+	}
+}
+
+func (dc DemuxFlattener) End() {
+	for _, flattener := range dc.Flatteners {
+		flattener.End()
+	}
+}
diff --git a/_third_party/github.com/llgcode/draw2d/draw2dbase/flattener.go b/_third_party/github.com/llgcode/draw2d/draw2dbase/flattener.go
new file mode 100644
index 0000000000..7b12dc2d21
--- /dev/null
+++ b/_third_party/github.com/llgcode/draw2d/draw2dbase/flattener.go
@@ -0,0 +1,127 @@
+// Copyright 2010 The draw2d Authors. All rights reserved.
+// created: 06/12/2010 by Laurent Le Goff
+
+package draw2dbase
+
+import (
+	"bosun.org/_third_party/github.com/llgcode/draw2d"
+)
+
+// Liner receives segment definitions
+type Liner interface {
+	// LineTo draws a line from the current position to the point (x, y)
+	LineTo(x, y float64)
+}
+
+// Flattener receives segment definitions
+type Flattener interface {
+	// MoveTo starts a new line from the point (x, y)
+	MoveTo(x, y float64)
+	// LineTo draws a line from the current position to the point (x, y)
+	LineTo(x, y float64)
+	// LineJoin marks a join between two consecutive segments of the path
+	LineJoin()
+	// Close draws a line back to the most recent starting point to close the path into a polygon
+	Close()
+	// End marks the current line as finished so that caps can be drawn
+	End()
+}
+
+// Flatten converts curves into straight segments, keeping the segment-join information
+func Flatten(path *draw2d.Path, flattener Flattener, scale float64) {
+	// First Point
+	var startX, startY float64 = 0, 0
+	// Current Point
+	var x, y float64 = 0, 0
+	i := 0
+	for _, cmp := range path.Components {
+		switch cmp {
+		case draw2d.MoveToCmp:
+			x, y = path.Points[i], path.Points[i+1]
+			startX, startY = x, y
+			if i != 0 {
+				flattener.End()
+			}
+			flattener.MoveTo(x, y)
+			i += 2
+		case draw2d.LineToCmp:
+			x, y = path.Points[i], path.Points[i+1]
+			flattener.LineTo(x, y)
+			flattener.LineJoin()
+			i += 2
+		case draw2d.QuadCurveToCmp:
+			TraceQuad(flattener, path.Points[i-2:], 0.5)
+			x, y = path.Points[i+2], path.Points[i+3]
+			flattener.LineTo(x, y)
+			i += 4
+		case draw2d.CubicCurveToCmp:
+			TraceCubic(flattener, path.Points[i-2:], 0.5)
+			x, y = path.Points[i+4], path.Points[i+5]
+			flattener.LineTo(x, y)
+			i += 6
+		case draw2d.ArcToCmp:
+			x, y = TraceArc(flattener, path.Points[i], path.Points[i+1], path.Points[i+2], path.Points[i+3], path.Points[i+4], path.Points[i+5], scale)
+			flattener.LineTo(x, y)
+			i += 6
+		case draw2d.CloseCmp:
+			flattener.LineTo(startX, startY)
+			flattener.Close()
+		}
+	}
+	flattener.End()
+}
+
+// Transformer applies the affine transformation matrix Tr to each point
+// before forwarding it to the wrapped Flattener
+type Transformer struct {
+	Tr        draw2d.Matrix
+	Flattener Flattener
+}
+
+func (t Transformer) MoveTo(x, y float64) {
+	u := x*t.Tr[0] + y*t.Tr[2] + t.Tr[4]
+	v := x*t.Tr[1] + y*t.Tr[3] + t.Tr[5]
+	t.Flattener.MoveTo(u, v)
+}
+
+func (t Transformer) LineTo(x, y float64) {
+	u := x*t.Tr[0] + y*t.Tr[2] + t.Tr[4]
+	v := x*t.Tr[1] + y*t.Tr[3] + t.Tr[5]
+	t.Flattener.LineTo(u, v)
+}
+
+func (t Transformer) LineJoin() {
+	t.Flattener.LineJoin()
+}
+
+func (t Transformer) Close() {
+	t.Flattener.Close()
+}
+
+func (t Transformer) End() {
+	t.Flattener.End()
+}
+
+type SegmentedPath struct {
+	Points []float64
+}
+
+func (p *SegmentedPath) MoveTo(x, y float64) {
+	p.Points = append(p.Points, x, y)
+	// TODO need to mark this point as moveto
+}
+
+func (p *SegmentedPath) LineTo(x, y float64) {
+	p.Points =
append(p.Points, x, y) +} + +func (p *SegmentedPath) LineJoin() { + // TODO need to mark the current point as linejoin +} + +func (p *SegmentedPath) Close() { + // TODO Close +} + +func (p *SegmentedPath) End() { + // Nothing to do +} diff --git a/_third_party/github.com/llgcode/draw2d/draw2dbase/line.go b/_third_party/github.com/llgcode/draw2d/draw2dbase/line.go new file mode 100644 index 0000000000..a83bbc3f80 --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/draw2dbase/line.go @@ -0,0 +1,58 @@ +// Copyright 2011 The draw2d Authors. All rights reserved. +// created: 27/05/2011 by Laurent Le Goff + +package draw2dbase + +import ( + "image/color" + "image/draw" +) + +func abs(i int) int { + if i < 0 { + return -i + } + return i +} + +// PolylineBresenham draws a polyline to an image +func PolylineBresenham(img draw.Image, c color.Color, s ...float64) { + for i := 2; i < len(s); i += 2 { + Bresenham(img, c, int(s[i-2]+0.5), int(s[i-1]+0.5), int(s[i]+0.5), int(s[i+1]+0.5)) + } +} + +// Bresenham draws a line between (x0, y0) and (x1, y1) +func Bresenham(img draw.Image, color color.Color, x0, y0, x1, y1 int) { + dx := abs(x1 - x0) + dy := abs(y1 - y0) + var sx, sy int + if x0 < x1 { + sx = 1 + } else { + sx = -1 + } + if y0 < y1 { + sy = 1 + } else { + sy = -1 + } + err := dx - dy + + var e2 int + for { + img.Set(x0, y0, color) + if x0 == x1 && y0 == y1 { + return + } + e2 = 2 * err + if e2 > -dy { + err = err - dy + x0 = x0 + sx + } + if e2 < dx { + err = err + dx + y0 = y0 + sy + } + } +} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/stack_gc.go b/_third_party/github.com/llgcode/draw2d/draw2dbase/stack_gc.go similarity index 54% rename from _third_party/code.google.com/p/draw2d/draw2d/stack_gc.go rename to _third_party/github.com/llgcode/draw2d/draw2dbase/stack_gc.go index 3ee6f8e0a7..5e747a45e7 100644 --- a/_third_party/code.google.com/p/draw2d/draw2d/stack_gc.go +++ b/_third_party/github.com/llgcode/draw2d/draw2dbase/stack_gc.go @@ -1,38 +1,43 @@ // Copyright 2010 The draw2d Authors. All rights reserved. // created: 21/11/2010 by Laurent Le Goff -package draw2d +package draw2dbase import ( - "bosun.org/_third_party/code.google.com/p/freetype-go/freetype/truetype" "image" "image/color" + + "bosun.org/_third_party/github.com/llgcode/draw2d" + + "bosun.org/_third_party/github.com/golang/freetype/truetype" ) +var DefaultFontData = draw2d.FontData{Name: "luxi", Family: draw2d.FontFamilySans, Style: draw2d.FontStyleNormal} + type StackGraphicContext struct { Current *ContextStack } type ContextStack struct { - Tr MatrixTransform - Path *PathStorage + Tr draw2d.Matrix + Path *draw2d.Path LineWidth float64 Dash []float64 DashOffset float64 StrokeColor color.Color FillColor color.Color - FillRule FillRule - Cap Cap - Join Join + FillRule draw2d.FillRule + Cap draw2d.LineCap + Join draw2d.LineJoin FontSize float64 - FontData FontData + FontData draw2d.FontData - font *truetype.Font + Font *truetype.Font // fontSize and dpi are used to calculate scale. scale is the number of // 26.6 fixed point units in 1 em. 
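+	// (26.6 fixed point means 6 fractional bits: one unit is 1/64 of a pixel.)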
- scale int32 + Scale float64 - previous *ContextStack + Previous *ContextStack } /** @@ -41,41 +46,41 @@ type ContextStack struct { func NewStackGraphicContext() *StackGraphicContext { gc := &StackGraphicContext{} gc.Current = new(ContextStack) - gc.Current.Tr = NewIdentityMatrix() - gc.Current.Path = NewPathStorage() + gc.Current.Tr = draw2d.NewIdentityMatrix() + gc.Current.Path = new(draw2d.Path) gc.Current.LineWidth = 1.0 gc.Current.StrokeColor = image.Black gc.Current.FillColor = image.White - gc.Current.Cap = RoundCap - gc.Current.FillRule = FillRuleEvenOdd - gc.Current.Join = RoundJoin + gc.Current.Cap = draw2d.RoundCap + gc.Current.FillRule = draw2d.FillRuleEvenOdd + gc.Current.Join = draw2d.RoundJoin gc.Current.FontSize = 10 - gc.Current.FontData = defaultFontData + gc.Current.FontData = DefaultFontData return gc } -func (gc *StackGraphicContext) GetMatrixTransform() MatrixTransform { +func (gc *StackGraphicContext) GetMatrixTransform() draw2d.Matrix { return gc.Current.Tr } -func (gc *StackGraphicContext) SetMatrixTransform(Tr MatrixTransform) { +func (gc *StackGraphicContext) SetMatrixTransform(Tr draw2d.Matrix) { gc.Current.Tr = Tr } -func (gc *StackGraphicContext) ComposeMatrixTransform(Tr MatrixTransform) { - gc.Current.Tr = Tr.Multiply(gc.Current.Tr) +func (gc *StackGraphicContext) ComposeMatrixTransform(Tr draw2d.Matrix) { + gc.Current.Tr.Compose(Tr) } func (gc *StackGraphicContext) Rotate(angle float64) { - gc.Current.Tr = NewRotationMatrix(angle).Multiply(gc.Current.Tr) + gc.Current.Tr.Rotate(angle) } func (gc *StackGraphicContext) Translate(tx, ty float64) { - gc.Current.Tr = NewTranslationMatrix(tx, ty).Multiply(gc.Current.Tr) + gc.Current.Tr.Translate(tx, ty) } func (gc *StackGraphicContext) Scale(sx, sy float64) { - gc.Current.Tr = NewScaleMatrix(sx, sy).Multiply(gc.Current.Tr) + gc.Current.Tr.Scale(sx, sy) } func (gc *StackGraphicContext) SetStrokeColor(c color.Color) { @@ -86,40 +91,40 @@ func (gc *StackGraphicContext) SetFillColor(c color.Color) { gc.Current.FillColor = c } -func (gc *StackGraphicContext) SetFillRule(f FillRule) { +func (gc *StackGraphicContext) SetFillRule(f draw2d.FillRule) { gc.Current.FillRule = f } -func (gc *StackGraphicContext) SetLineWidth(LineWidth float64) { - gc.Current.LineWidth = LineWidth +func (gc *StackGraphicContext) SetLineWidth(lineWidth float64) { + gc.Current.LineWidth = lineWidth } -func (gc *StackGraphicContext) SetLineCap(Cap Cap) { - gc.Current.Cap = Cap +func (gc *StackGraphicContext) SetLineCap(cap draw2d.LineCap) { + gc.Current.Cap = cap } -func (gc *StackGraphicContext) SetLineJoin(Join Join) { - gc.Current.Join = Join +func (gc *StackGraphicContext) SetLineJoin(join draw2d.LineJoin) { + gc.Current.Join = join } -func (gc *StackGraphicContext) SetLineDash(Dash []float64, DashOffset float64) { - gc.Current.Dash = Dash - gc.Current.DashOffset = DashOffset +func (gc *StackGraphicContext) SetLineDash(dash []float64, dashOffset float64) { + gc.Current.Dash = dash + gc.Current.DashOffset = dashOffset } -func (gc *StackGraphicContext) SetFontSize(FontSize float64) { - gc.Current.FontSize = FontSize +func (gc *StackGraphicContext) SetFontSize(fontSize float64) { + gc.Current.FontSize = fontSize } func (gc *StackGraphicContext) GetFontSize() float64 { return gc.Current.FontSize } -func (gc *StackGraphicContext) SetFontData(FontData FontData) { - gc.Current.FontData = FontData +func (gc *StackGraphicContext) SetFontData(fontData draw2d.FontData) { + gc.Current.FontData = fontData } -func (gc *StackGraphicContext) GetFontData() 
FontData { +func (gc *StackGraphicContext) GetFontData() draw2d.FontData { return gc.Current.FontData } @@ -139,42 +144,22 @@ func (gc *StackGraphicContext) MoveTo(x, y float64) { gc.Current.Path.MoveTo(x, y) } -func (gc *StackGraphicContext) RMoveTo(dx, dy float64) { - gc.Current.Path.RMoveTo(dx, dy) -} - func (gc *StackGraphicContext) LineTo(x, y float64) { gc.Current.Path.LineTo(x, y) } -func (gc *StackGraphicContext) RLineTo(dx, dy float64) { - gc.Current.Path.RLineTo(dx, dy) -} - func (gc *StackGraphicContext) QuadCurveTo(cx, cy, x, y float64) { gc.Current.Path.QuadCurveTo(cx, cy, x, y) } -func (gc *StackGraphicContext) RQuadCurveTo(dcx, dcy, dx, dy float64) { - gc.Current.Path.RQuadCurveTo(dcx, dcy, dx, dy) -} - func (gc *StackGraphicContext) CubicCurveTo(cx1, cy1, cx2, cy2, x, y float64) { gc.Current.Path.CubicCurveTo(cx1, cy1, cx2, cy2, x, y) } -func (gc *StackGraphicContext) RCubicCurveTo(dcx1, dcy1, dcx2, dcy2, dx, dy float64) { - gc.Current.Path.RCubicCurveTo(dcx1, dcy1, dcx2, dcy2, dx, dy) -} - func (gc *StackGraphicContext) ArcTo(cx, cy, rx, ry, startAngle, angle float64) { gc.Current.Path.ArcTo(cx, cy, rx, ry, startAngle, angle) } -func (gc *StackGraphicContext) RArcTo(dcx, dcy, rx, ry, startAngle, angle float64) { - gc.Current.Path.RArcTo(dcx, dcy, rx, ry, startAngle, angle) -} - func (gc *StackGraphicContext) Close() { gc.Current.Path.Close() } @@ -192,17 +177,17 @@ func (gc *StackGraphicContext) Save() { context.Cap = gc.Current.Cap context.Join = gc.Current.Join context.Path = gc.Current.Path.Copy() - context.font = gc.Current.font - context.scale = gc.Current.scale + context.Font = gc.Current.Font + context.Scale = gc.Current.Scale copy(context.Tr[:], gc.Current.Tr[:]) - context.previous = gc.Current + context.Previous = gc.Current gc.Current = context } func (gc *StackGraphicContext) Restore() { - if gc.Current.previous != nil { + if gc.Current.Previous != nil { oldContext := gc.Current - gc.Current = gc.Current.previous - oldContext.previous = nil + gc.Current = gc.Current.Previous + oldContext.Previous = nil } } diff --git a/_third_party/github.com/llgcode/draw2d/draw2dbase/stroker.go b/_third_party/github.com/llgcode/draw2d/draw2dbase/stroker.go new file mode 100644 index 0000000000..22c0d47c5b --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/draw2dbase/stroker.go @@ -0,0 +1,90 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. 
+// created: 13/12/2010 by Laurent Le Goff + +package draw2dbase + +import ( + "math" + + "bosun.org/_third_party/github.com/llgcode/draw2d" +) + +type LineStroker struct { + Flattener Flattener + HalfLineWidth float64 + Cap draw2d.LineCap + Join draw2d.LineJoin + vertices []float64 + rewind []float64 + x, y, nx, ny float64 +} + +func NewLineStroker(c draw2d.LineCap, j draw2d.LineJoin, flattener Flattener) *LineStroker { + l := new(LineStroker) + l.Flattener = flattener + l.HalfLineWidth = 0.5 + l.Cap = c + l.Join = j + return l +} + +func (l *LineStroker) MoveTo(x, y float64) { + l.x, l.y = x, y +} + +func (l *LineStroker) LineTo(x, y float64) { + l.line(l.x, l.y, x, y) +} + +func (l *LineStroker) LineJoin() { + +} + +func (l *LineStroker) line(x1, y1, x2, y2 float64) { + dx := (x2 - x1) + dy := (y2 - y1) + d := vectorDistance(dx, dy) + if d != 0 { + nx := dy * l.HalfLineWidth / d + ny := -(dx * l.HalfLineWidth / d) + l.appendVertex(x1+nx, y1+ny, x2+nx, y2+ny, x1-nx, y1-ny, x2-nx, y2-ny) + l.x, l.y, l.nx, l.ny = x2, y2, nx, ny + } +} + +func (l *LineStroker) Close() { + if len(l.vertices) > 1 { + l.appendVertex(l.vertices[0], l.vertices[1], l.rewind[0], l.rewind[1]) + } +} + +func (l *LineStroker) End() { + if len(l.vertices) > 1 { + l.Flattener.MoveTo(l.vertices[0], l.vertices[1]) + for i, j := 2, 3; j < len(l.vertices); i, j = i+2, j+2 { + l.Flattener.LineTo(l.vertices[i], l.vertices[j]) + } + } + for i, j := len(l.rewind)-2, len(l.rewind)-1; j > 0; i, j = i-2, j-2 { + l.Flattener.LineTo(l.rewind[i], l.rewind[j]) + } + if len(l.vertices) > 1 { + l.Flattener.LineTo(l.vertices[0], l.vertices[1]) + } + l.Flattener.End() + // reinit vertices + l.vertices = l.vertices[0:0] + l.rewind = l.rewind[0:0] + l.x, l.y, l.nx, l.ny = 0, 0, 0, 0 + +} + +func (l *LineStroker) appendVertex(vertices ...float64) { + s := len(vertices) / 2 + l.vertices = append(l.vertices, vertices[:s]...) + l.rewind = append(l.rewind, vertices[s:]...) +} + +func vectorDistance(dx, dy float64) float64 { + return float64(math.Sqrt(dx*dx + dy*dy)) +} diff --git a/_third_party/github.com/llgcode/draw2d/draw2dimg/README.md b/_third_party/github.com/llgcode/draw2d/draw2dimg/README.md new file mode 100644 index 0000000000..5798d5360e --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/draw2dimg/README.md @@ -0,0 +1,8 @@ +draw2d/draw2dimg +================= + +[![Coverage](http://gocover.io/_badge/github.com/llgcode/draw2d/draw2dimg?0)](http://gocover.io/github.com/llgcode/draw2d/draw2dimg) +[![GoDoc](https://godoc.org/github.com/llgcode/draw2d/draw2dimg?status.svg)](https://godoc.org/github.com/llgcode/draw2d/draw2dimg) + + +draw2d implementation that generates raster images using https://github.com/golang/freetype package. 
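+
+A minimal usage sketch (not part of the upstream README; it mirrors the top-level Quick Start and uses this repository's vendored import path):
+
+```go
+package main
+
+import (
+	"image"
+	"image/color"
+
+	"bosun.org/_third_party/github.com/llgcode/draw2d/draw2dimg"
+)
+
+func main() {
+	// Draw on an RGBA image; NewGraphicContext panics for other image types.
+	dest := image.NewRGBA(image.Rect(0, 0, 297, 210))
+	gc := draw2dimg.NewGraphicContext(dest)
+
+	gc.SetFillColor(color.RGBA{0x44, 0xff, 0x44, 0xff})
+	gc.SetStrokeColor(color.RGBA{0x44, 0x44, 0x44, 0xff})
+	gc.SetLineWidth(5)
+
+	gc.MoveTo(10, 10) // a new path should always start with MoveTo
+	gc.LineTo(100, 50)
+	gc.QuadCurveTo(100, 10, 10, 10)
+	gc.Close()
+	gc.FillStroke()
+
+	// SaveToPngFile returns an error; it is ignored here for brevity.
+	draw2dimg.SaveToPngFile("hello.png", dest)
+}
+```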
diff --git a/_third_party/github.com/llgcode/draw2d/draw2dimg/fileutil.go b/_third_party/github.com/llgcode/draw2d/draw2dimg/fileutil.go new file mode 100644 index 0000000000..b6dccb9264 --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/draw2dimg/fileutil.go @@ -0,0 +1,46 @@ +package draw2dimg + +import ( + "bufio" + "image" + "image/png" + "os" +) + +// SaveToPngFile create and save an image to a file using PNG format +func SaveToPngFile(filePath string, m image.Image) error { + // Create the file + f, err := os.Create(filePath) + if err != nil { + return err + } + defer f.Close() + // Create Writer from file + b := bufio.NewWriter(f) + // Write the image into the buffer + err = png.Encode(b, m) + if err != nil { + return err + } + err = b.Flush() + if err != nil { + return err + } + return nil +} + +// LoadFromPngFile Open a png file +func LoadFromPngFile(filePath string) (image.Image, error) { + // Open file + f, err := os.OpenFile(filePath, 0, 0) + if err != nil { + return nil, err + } + defer f.Close() + b := bufio.NewReader(f) + img, err := png.Decode(b) + if err != nil { + return nil, err + } + return img, nil +} diff --git a/_third_party/github.com/llgcode/draw2d/draw2dimg/ftgc.go b/_third_party/github.com/llgcode/draw2d/draw2dimg/ftgc.go new file mode 100644 index 0000000000..ba3b9bb4ef --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/draw2dimg/ftgc.go @@ -0,0 +1,328 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 21/11/2010 by Laurent Le Goff + +package draw2dimg + +import ( + "errors" + "image" + "image/color" + "image/draw" + "log" + "math" + + "bosun.org/_third_party/github.com/llgcode/draw2d" + "bosun.org/_third_party/github.com/llgcode/draw2d/draw2dbase" + + "bosun.org/_third_party/github.com/golang/freetype/raster" + "bosun.org/_third_party/github.com/golang/freetype/truetype" + + "golang.org/x/image/font" + "golang.org/x/image/math/fixed" +) + +// Painter implements the freetype raster.Painter and has a SetColor method like the RGBAPainter +type Painter interface { + raster.Painter + SetColor(color color.Color) +} + +// GraphicContext is the implementation of draw2d.GraphicContext for a raster image +type GraphicContext struct { + *draw2dbase.StackGraphicContext + img draw.Image + painter Painter + fillRasterizer *raster.Rasterizer + strokeRasterizer *raster.Rasterizer + glyphBuf *truetype.GlyphBuf + DPI int +} + +// NewGraphicContext creates a new Graphic context from an image. 
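+// Only *image.RGBA destinations are handled by the type switch below; any
+// other draw.Image panics. NewGraphicContextWithPainter accepts a custom
+// Painter for other image types.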
+func NewGraphicContext(img draw.Image) *GraphicContext { + + var painter Painter + switch selectImage := img.(type) { + case *image.RGBA: + painter = raster.NewRGBAPainter(selectImage) + default: + panic("Image type not supported") + } + return NewGraphicContextWithPainter(img, painter) +} + +// NewGraphicContextWithPainter creates a new Graphic context from an image and a Painter (see Freetype-go) +func NewGraphicContextWithPainter(img draw.Image, painter Painter) *GraphicContext { + width, height := img.Bounds().Dx(), img.Bounds().Dy() + dpi := 92 + gc := &GraphicContext{ + draw2dbase.NewStackGraphicContext(), + img, + painter, + raster.NewRasterizer(width, height), + raster.NewRasterizer(width, height), + &truetype.GlyphBuf{}, + dpi, + } + return gc +} + +// GetDPI returns the resolution of the Image GraphicContext +func (gc *GraphicContext) GetDPI() int { + return gc.DPI +} + +// Clear fills the current canvas with a default transparent color +func (gc *GraphicContext) Clear() { + width, height := gc.img.Bounds().Dx(), gc.img.Bounds().Dy() + gc.ClearRect(0, 0, width, height) +} + +// ClearRect fills the current canvas with a default transparent color at the specified rectangle +func (gc *GraphicContext) ClearRect(x1, y1, x2, y2 int) { + imageColor := image.NewUniform(gc.Current.FillColor) + draw.Draw(gc.img, image.Rect(x1, y1, x2, y2), imageColor, image.ZP, draw.Over) +} + +// DrawImage draws the raster image in the current canvas +func (gc *GraphicContext) DrawImage(img image.Image) { + DrawImage(img, gc.img, gc.Current.Tr, draw.Over, BilinearFilter) +} + +// FillString draws the text at point (0, 0) +func (gc *GraphicContext) FillString(text string) (cursor float64) { + return gc.FillStringAt(text, 0, 0) +} + +// FillStringAt draws the text at the specified point (x, y) +func (gc *GraphicContext) FillStringAt(text string, x, y float64) (cursor float64) { + width := gc.CreateStringPath(text, x, y) + gc.Fill() + return width +} + +// StrokeString draws the contour of the text at point (0, 0) +func (gc *GraphicContext) StrokeString(text string) (cursor float64) { + return gc.StrokeStringAt(text, 0, 0) +} + +// StrokeStringAt draws the contour of the text at point (x, y) +func (gc *GraphicContext) StrokeStringAt(text string, x, y float64) (cursor float64) { + width := gc.CreateStringPath(text, x, y) + gc.Stroke() + return width +} + +func (gc *GraphicContext) loadCurrentFont() (*truetype.Font, error) { + font := draw2d.GetFont(gc.Current.FontData) + if font == nil { + font = draw2d.GetFont(draw2dbase.DefaultFontData) + } + if font == nil { + return nil, errors.New("No font set, and no default font available.") + } + gc.SetFont(font) + gc.SetFontSize(gc.Current.FontSize) + return font, nil +} + +// p is a truetype.Point measured in FUnits and positive Y going upwards. +// The returned value is the same thing measured in floating point and positive Y +// going downwards. + +func (gc *GraphicContext) drawGlyph(glyph truetype.Index, dx, dy float64) error { + if err := gc.glyphBuf.Load(gc.Current.Font, fixed.Int26_6(gc.Current.Scale), glyph, font.HintingNone); err != nil { + return err + } + e0 := 0 + for _, e1 := range gc.glyphBuf.Ends { + DrawContour(gc, gc.glyphBuf.Points[e0:e1], dx, dy) + e0 = e1 + } + return nil +} + +// CreateStringPath creates a path from the string s at x, y, and returns the string width. +// The text is placed so that the left edge of the em square of the first character of s +// and the baseline intersect at x, y. 
The majority of the affected pixels will be
+// above and to the right of the point, but some may be below or to the left.
+// For example, drawing a string that starts with a 'J' in an italic font may
+// affect pixels below and left of the point.
+func (gc *GraphicContext) CreateStringPath(s string, x, y float64) float64 {
+	f, err := gc.loadCurrentFont()
+	if err != nil {
+		log.Println(err)
+		return 0.0
+	}
+	startx := x
+	prev, hasPrev := truetype.Index(0), false
+	for _, rune := range s {
+		index := f.Index(rune)
+		if hasPrev {
+			x += fUnitsToFloat64(f.Kern(fixed.Int26_6(gc.Current.Scale), prev, index))
+		}
+		err := gc.drawGlyph(index, x, y)
+		if err != nil {
+			log.Println(err)
+			return startx - x
+		}
+		x += fUnitsToFloat64(f.HMetric(fixed.Int26_6(gc.Current.Scale), index).AdvanceWidth)
+		prev, hasPrev = index, true
+	}
+	return x - startx
+}
+
+// GetStringBounds returns the approximate pixel bounds of the string s.
+// The left edge of the em square of the first character of s
+// and the baseline intersect at 0, 0 in the returned coordinates.
+// Therefore the top and left coordinates may well be negative.
+func (gc *GraphicContext) GetStringBounds(s string) (left, top, right, bottom float64) {
+	f, err := gc.loadCurrentFont()
+	if err != nil {
+		log.Println(err)
+		return 0, 0, 0, 0
+	}
+	top, left, bottom, right = 10e6, 10e6, -10e6, -10e6
+	cursor := 0.0
+	prev, hasPrev := truetype.Index(0), false
+	for _, rune := range s {
+		index := f.Index(rune)
+		if hasPrev {
+			cursor += fUnitsToFloat64(f.Kern(fixed.Int26_6(gc.Current.Scale), prev, index))
+		}
+		if err := gc.glyphBuf.Load(gc.Current.Font, fixed.Int26_6(gc.Current.Scale), index, font.HintingNone); err != nil {
+			log.Println(err)
+			return 0, 0, 0, 0
+		}
+		e0 := 0
+		for _, e1 := range gc.glyphBuf.Ends {
+			ps := gc.glyphBuf.Points[e0:e1]
+			for _, p := range ps {
+				x, y := pointToF64Point(p)
+				top = math.Min(top, y)
+				bottom = math.Max(bottom, y)
+				left = math.Min(left, x+cursor)
+				right = math.Max(right, x+cursor)
+			}
+			e0 = e1 // advance to the start of the next contour
+		}
+		cursor += fUnitsToFloat64(f.HMetric(fixed.Int26_6(gc.Current.Scale), index).AdvanceWidth)
+		prev, hasPrev = index, true
+	}
+	return left, top, right, bottom
+}
+
+// recalc recalculates Scale from the current font size and screen resolution.
+func (gc *GraphicContext) recalc() {
+	gc.Current.Scale = gc.Current.FontSize * float64(gc.DPI) * (64.0 / 72.0)
+}
+
+// SetDPI sets the screen resolution in dots per inch.
+func (gc *GraphicContext) SetDPI(dpi int) {
+	gc.DPI = dpi
+	gc.recalc()
+}
+
+// SetFont sets the font used to draw text.
+func (gc *GraphicContext) SetFont(font *truetype.Font) {
+	gc.Current.Font = font
+}
+
+// SetFontSize sets the font size in points (as in ``a 12 point font'').
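+// Setting the size triggers recalc, so the cached Current.Scale stays
+// consistent: Scale = FontSize * DPI * (64.0 / 72.0), the em size in 26.6
+// fixed-point units.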
+func (gc *GraphicContext) SetFontSize(fontSize float64) { + gc.Current.FontSize = fontSize + gc.recalc() +} + +func (gc *GraphicContext) paint(rasterizer *raster.Rasterizer, color color.Color) { + gc.painter.SetColor(color) + rasterizer.Rasterize(gc.painter) + rasterizer.Clear() + gc.Current.Path.Clear() +} + +// Stroke strokes the paths with the color specified by SetStrokeColor +func (gc *GraphicContext) Stroke(paths ...*draw2d.Path) { + paths = append(paths, gc.Current.Path) + gc.strokeRasterizer.UseNonZeroWinding = true + + stroker := draw2dbase.NewLineStroker(gc.Current.Cap, gc.Current.Join, draw2dbase.Transformer{Tr: gc.Current.Tr, Flattener: FtLineBuilder{Adder: gc.strokeRasterizer}}) + stroker.HalfLineWidth = gc.Current.LineWidth / 2 + + var liner draw2dbase.Flattener + if gc.Current.Dash != nil && len(gc.Current.Dash) > 0 { + liner = draw2dbase.NewDashConverter(gc.Current.Dash, gc.Current.DashOffset, stroker) + } else { + liner = stroker + } + for _, p := range paths { + draw2dbase.Flatten(p, liner, gc.Current.Tr.GetScale()) + } + + gc.paint(gc.strokeRasterizer, gc.Current.StrokeColor) +} + +// Fill fills the paths with the color specified by SetFillColor +func (gc *GraphicContext) Fill(paths ...*draw2d.Path) { + paths = append(paths, gc.Current.Path) + gc.fillRasterizer.UseNonZeroWinding = gc.Current.FillRule == draw2d.FillRuleWinding + + /**** first method ****/ + flattener := draw2dbase.Transformer{Tr: gc.Current.Tr, Flattener: FtLineBuilder{Adder: gc.fillRasterizer}} + for _, p := range paths { + draw2dbase.Flatten(p, flattener, gc.Current.Tr.GetScale()) + } + + gc.paint(gc.fillRasterizer, gc.Current.FillColor) +} + +// FillStroke first fills the paths and than strokes them +func (gc *GraphicContext) FillStroke(paths ...*draw2d.Path) { + paths = append(paths, gc.Current.Path) + gc.fillRasterizer.UseNonZeroWinding = gc.Current.FillRule == draw2d.FillRuleWinding + gc.strokeRasterizer.UseNonZeroWinding = true + + flattener := draw2dbase.Transformer{Tr: gc.Current.Tr, Flattener: FtLineBuilder{Adder: gc.fillRasterizer}} + + stroker := draw2dbase.NewLineStroker(gc.Current.Cap, gc.Current.Join, draw2dbase.Transformer{Tr: gc.Current.Tr, Flattener: FtLineBuilder{Adder: gc.strokeRasterizer}}) + stroker.HalfLineWidth = gc.Current.LineWidth / 2 + + var liner draw2dbase.Flattener + if gc.Current.Dash != nil && len(gc.Current.Dash) > 0 { + liner = draw2dbase.NewDashConverter(gc.Current.Dash, gc.Current.DashOffset, stroker) + } else { + liner = stroker + } + + demux := draw2dbase.DemuxFlattener{Flatteners: []draw2dbase.Flattener{flattener, liner}} + for _, p := range paths { + draw2dbase.Flatten(p, demux, gc.Current.Tr.GetScale()) + } + + // Fill + gc.paint(gc.fillRasterizer, gc.Current.FillColor) + // Stroke + gc.paint(gc.strokeRasterizer, gc.Current.StrokeColor) +} + +func toFtCap(c draw2d.LineCap) raster.Capper { + switch c { + case draw2d.RoundCap: + return raster.RoundCapper + case draw2d.ButtCap: + return raster.ButtCapper + case draw2d.SquareCap: + return raster.SquareCapper + } + return raster.RoundCapper +} + +func toFtJoin(j draw2d.LineJoin) raster.Joiner { + switch j { + case draw2d.RoundJoin: + return raster.RoundJoiner + case draw2d.BevelJoin: + return raster.BevelJoiner + } + return raster.RoundJoiner +} diff --git a/_third_party/github.com/llgcode/draw2d/draw2dimg/ftpath.go b/_third_party/github.com/llgcode/draw2d/draw2dimg/ftpath.go new file mode 100644 index 0000000000..1e9ff92e29 --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/draw2dimg/ftpath.go @@ -0,0 +1,30 
@@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 13/12/2010 by Laurent Le Goff + +package draw2dimg + +import ( + "bosun.org/_third_party/github.com/golang/freetype/raster" + "golang.org/x/image/math/fixed" +) + +type FtLineBuilder struct { + Adder raster.Adder +} + +func (liner FtLineBuilder) MoveTo(x, y float64) { + liner.Adder.Start(fixed.Point26_6{X: fixed.Int26_6(x * 64), Y: fixed.Int26_6(y * 64)}) +} + +func (liner FtLineBuilder) LineTo(x, y float64) { + liner.Adder.Add1(fixed.Point26_6{X: fixed.Int26_6(x * 64), Y: fixed.Int26_6(y * 64)}) +} + +func (liner FtLineBuilder) LineJoin() { +} + +func (liner FtLineBuilder) Close() { +} + +func (liner FtLineBuilder) End() { +} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/rgba_interpolation.go b/_third_party/github.com/llgcode/draw2d/draw2dimg/rgba_interpolation.go similarity index 86% rename from _third_party/code.google.com/p/draw2d/draw2d/rgba_interpolation.go rename to _third_party/github.com/llgcode/draw2d/draw2dimg/rgba_interpolation.go index 92534e7ebf..5c6a639699 100644 --- a/_third_party/code.google.com/p/draw2d/draw2d/rgba_interpolation.go +++ b/_third_party/github.com/llgcode/draw2d/draw2dimg/rgba_interpolation.go @@ -2,21 +2,29 @@ // created: 21/11/2010 by Laurent Le Goff // see http://pippin.gimp.org/image_processing/chap_resampling.html -package draw2d +package draw2dimg import ( "image" "image/color" "image/draw" "math" + + "bosun.org/_third_party/github.com/llgcode/draw2d" ) +// ImageFilter defines the type of filter to use type ImageFilter int const ( + // LinearFilter defines a linear filter LinearFilter ImageFilter = iota + // BilinearFilter defines a bilinear filter BilinearFilter + // BicubicFilter defines a bicubic filter BicubicFilter + // M is the maximum value for a rgb component + M = 1<<16 - 1 ) //see http://pippin.gimp.org/image_processing/chap_resampling.html @@ -46,14 +54,7 @@ func getColorBilinear(img image.Image, x, y float64) color.Color { return color.RGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)} } -/** --- LERP --- /lerp/, vi.,n. --- --- Quasi-acronym for Linear Interpolation, used as a verb or noun for --- the operation. "Bresenham's algorithm lerps incrementally between the --- two endpoints of the line." 
(From Jargon File (4.4.4, 14 Aug 2003) -*/ +// lerp is a linear interpolation bertween 2 points func lerp(v1, v2, ratio float64) float64 { return v1*(1-ratio) + v2*ratio } @@ -103,10 +104,10 @@ func cubic(offset, v0, v1, v2, v3 float64) uint32 { (-9*v0+9*v2))*offset + (v0 + 16*v1 + v2)) / 18.0) } -func DrawImage(src image.Image, dest draw.Image, tr MatrixTransform, op draw.Op, filter ImageFilter) { +// DrawImage draws an image into dest using an affine transformation matrix, an op and a filter +func DrawImage(src image.Image, dest draw.Image, tr draw2d.Matrix, op draw.Op, filter ImageFilter) { bounds := src.Bounds() - x0, y0, x1, y1 := float64(bounds.Min.X), float64(bounds.Min.Y), float64(bounds.Max.X), float64(bounds.Max.Y) - tr.TransformRectangle(&x0, &y0, &x1, &y1) + x0, y0, x1, y1 := tr.TransformRectangle(float64(bounds.Min.X), float64(bounds.Min.Y), float64(bounds.Max.X), float64(bounds.Max.Y)) var x, y, u, v float64 var c1, c2, cr color.Color var r, g, b, a, ia, r1, g1, b1, a1, r2, g2, b2, a2 uint32 @@ -115,7 +116,7 @@ func DrawImage(src image.Image, dest draw.Image, tr MatrixTransform, op draw.Op, for y = y0; y < y1; y++ { u = x v = y - tr.InverseTransform(&u, &v) + u, v = tr.InverseTransformPoint(u, v) if bounds.Min.X <= int(u) && bounds.Max.X > int(u) && bounds.Min.Y <= int(v) && bounds.Max.Y > int(v) { c1 = dest.At(int(x), int(y)) switch filter { diff --git a/_third_party/github.com/llgcode/draw2d/draw2dimg/text.go b/_third_party/github.com/llgcode/draw2d/draw2dimg/text.go new file mode 100644 index 0000000000..153b433aeb --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/draw2dimg/text.go @@ -0,0 +1,82 @@ +package draw2dimg + +import ( + "bosun.org/_third_party/github.com/golang/freetype/truetype" + "bosun.org/_third_party/github.com/llgcode/draw2d" + + "golang.org/x/image/math/fixed" +) + +// DrawContour draws the given closed contour at the given sub-pixel offset. +func DrawContour(path draw2d.PathBuilder, ps []truetype.Point, dx, dy float64) { + if len(ps) == 0 { + return + } + startX, startY := pointToF64Point(ps[0]) + path.MoveTo(startX+dx, startY+dy) + q0X, q0Y, on0 := startX, startY, true + for _, p := range ps[1:] { + qX, qY := pointToF64Point(p) + on := p.Flags&0x01 != 0 + if on { + if on0 { + path.LineTo(qX+dx, qY+dy) + } else { + path.QuadCurveTo(q0X+dx, q0Y+dy, qX+dx, qY+dy) + } + } else { + if on0 { + // No-op. + } else { + midX := (q0X + qX) / 2 + midY := (q0Y + qY) / 2 + path.QuadCurveTo(q0X+dx, q0Y+dy, midX+dx, midY+dy) + } + } + q0X, q0Y, on0 = qX, qY, on + } + // Close the curve. + if on0 { + path.LineTo(startX+dx, startY+dy) + } else { + path.QuadCurveTo(q0X+dx, q0Y+dy, startX+dx, startY+dy) + } +} + +func pointToF64Point(p truetype.Point) (x, y float64) { + return fUnitsToFloat64(p.X), -fUnitsToFloat64(p.Y) +} + +func fUnitsToFloat64(x fixed.Int26_6) float64 { + scaled := x << 2 + return float64(scaled/256) + float64(scaled%256)/256.0 +} + +// FontExtents contains font metric information. +type FontExtents struct { + // Ascent is the distance that the text + // extends above the baseline. + Ascent float64 + + // Descent is the distance that the text + // extends below the baseline. The descent + // is given as a negative value. + Descent float64 + + // Height is the distance from the lowest + // descending point to the highest ascending + // point. + Height float64 +} + +// Extents returns the FontExtents for a font. 
+// TODO needs to read this https://developer.apple.com/fonts/TrueType-Reference-Manual/RM02/Chap2.html#intro +func Extents(font *truetype.Font, size float64) FontExtents { + bounds := font.Bounds(fixed.Int26_6(font.FUnitsPerEm())) + scale := size / float64(font.FUnitsPerEm()) + return FontExtents{ + Ascent: float64(bounds.Max.Y) * scale, + Descent: float64(bounds.Min.Y) * scale, + Height: float64(bounds.Max.Y-bounds.Min.Y) * scale, + } +} diff --git a/_third_party/code.google.com/p/draw2d/draw2d/font.go b/_third_party/github.com/llgcode/draw2d/font.go similarity index 75% rename from _third_party/code.google.com/p/draw2d/draw2d/font.go rename to _third_party/github.com/llgcode/draw2d/font.go index 4a286c8396..b16afe8637 100644 --- a/_third_party/code.google.com/p/draw2d/draw2d/font.go +++ b/_third_party/github.com/llgcode/draw2d/font.go @@ -4,15 +4,18 @@ package draw2d import ( - "bosun.org/_third_party/code.google.com/p/freetype-go/freetype/truetype" "io/ioutil" "log" "path" + "path/filepath" + + "bosun.org/_third_party/github.com/golang/freetype/truetype" ) var ( - fontFolder = "../resource/font/" - fonts = make(map[string]*truetype.Font) + fontFolder = "../resource/font/" + fonts = make(map[string]*truetype.Font) + fontNamer FontFileNamer = FontFileName ) type FontStyle byte @@ -37,7 +40,9 @@ type FontData struct { Style FontStyle } -func fontFileName(fontData FontData) string { +type FontFileNamer func(fontData FontData) string + +func FontFileName(fontData FontData) string { fontFileName := fontData.Name switch fontData.Family { case FontFamilySans: @@ -61,11 +66,11 @@ func fontFileName(fontData FontData) string { } func RegisterFont(fontData FontData, font *truetype.Font) { - fonts[fontFileName(fontData)] = font + fonts[fontNamer(fontData)] = font } func GetFont(fontData FontData) *truetype.Font { - fontFileName := fontFileName(fontData) + fontFileName := fontNamer(fontData) font := fonts[fontFileName] if font != nil { return font @@ -79,7 +84,11 @@ func GetFontFolder() string { } func SetFontFolder(folder string) { - fontFolder = folder + fontFolder = filepath.Clean(folder) +} + +func SetFontNamer(fn FontFileNamer) { + fontNamer = fn } func loadFont(fontFileName string) *truetype.Font { diff --git a/_third_party/github.com/llgcode/draw2d/gc.go b/_third_party/github.com/llgcode/draw2d/gc.go new file mode 100644 index 0000000000..1784789d6b --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/gc.go @@ -0,0 +1,63 @@ +// Copyright 2010 The draw2d Authors. All rights reserved. +// created: 21/11/2010 by Laurent Le Goff + +package draw2d + +import ( + "image" + "image/color" +) + +// GraphicContext describes the interface for the various backends (images, pdf, opengl, ...) +type GraphicContext interface { + PathBuilder + // BeginPath creates a new path + BeginPath() + // GetMatrixTransform returns the current transformation matrix + GetMatrixTransform() Matrix + // SetMatrixTransform sets the current transformation matrix + SetMatrixTransform(tr Matrix) + // ComposeMatrixTransform composes the current transformation matrix with tr + ComposeMatrixTransform(tr Matrix) + // Rotate applies a rotation to the current transformation matrix. angle is in radian. + Rotate(angle float64) + // Translate applies a translation to the current transformation matrix. + Translate(tx, ty float64) + // Scale applies a scale to the current transformation matrix. 
+	Scale(sx, sy float64)
+	// SetStrokeColor sets the current stroke color
+	SetStrokeColor(c color.Color)
+	// SetFillColor sets the current fill color
+	SetFillColor(c color.Color)
+	// SetFillRule sets the current fill rule
+	SetFillRule(f FillRule)
+	// SetLineWidth sets the current line width
+	SetLineWidth(lineWidth float64)
+	// SetLineCap sets the current line cap
+	SetLineCap(cap LineCap)
+	// SetLineJoin sets the current line join
+	SetLineJoin(join LineJoin)
+	// SetLineDash sets the current dash
+	SetLineDash(dash []float64, dashOffset float64)
+	// SetFontSize sets the current font size
+	SetFontSize(fontSize float64)
+	GetFontSize() float64
+	SetFontData(fontData FontData)
+	GetFontData() FontData
+	DrawImage(image image.Image)
+	Save()
+	Restore()
+	Clear()
+	ClearRect(x1, y1, x2, y2 int)
+	SetDPI(dpi int)
+	GetDPI() int
+	GetStringBounds(s string) (left, top, right, bottom float64)
+	CreateStringPath(text string, x, y float64) (cursor float64)
+	FillString(text string) (cursor float64)
+	FillStringAt(text string, x, y float64) (cursor float64)
+	StrokeString(text string) (cursor float64)
+	StrokeStringAt(text string, x, y float64) (cursor float64)
+	Stroke(paths ...*Path)
+	Fill(paths ...*Path)
+	FillStroke(paths ...*Path)
+}
diff --git a/_third_party/github.com/llgcode/draw2d/matrix.go b/_third_party/github.com/llgcode/draw2d/matrix.go
new file mode 100644
index 0000000000..55f5e387bc
--- /dev/null
+++ b/_third_party/github.com/llgcode/draw2d/matrix.go
@@ -0,0 +1,222 @@
+// Copyright 2010 The draw2d Authors. All rights reserved.
+// created: 21/11/2010 by Laurent Le Goff
+
+package draw2d
+
+import (
+	"math"
+)
+
+// Matrix represents an affine transformation
+type Matrix [6]float64
+
+const (
+	epsilon = 1e-6
+)
+
+// Determinant computes the determinant of the matrix
+func (tr Matrix) Determinant() float64 {
+	return tr[0]*tr[3] - tr[1]*tr[2]
+}
+
+// Transform applies the transformation matrix to points. It modifies the points in place.
+func (tr Matrix) Transform(points []float64) {
+	for i, j := 0, 1; j < len(points); i, j = i+2, j+2 {
+		x := points[i]
+		y := points[j]
+		points[i] = x*tr[0] + y*tr[2] + tr[4]
+		points[j] = x*tr[1] + y*tr[3] + tr[5]
+	}
+}
+
+// TransformPoint applies the transformation matrix to a point. It returns the transformed point.
+func (tr Matrix) TransformPoint(x, y float64) (xres, yres float64) {
+	xres = x*tr[0] + y*tr[2] + tr[4]
+	yres = x*tr[1] + y*tr[3] + tr[5]
+	return xres, yres
+}
+
+func minMax(x, y float64) (min, max float64) {
+	if x > y {
+		return y, x
+	}
+	return x, y
+}
+
+// TransformRectangle applies the transformation matrix to the rectangle represented by the min and the max point of the rectangle
+func (tr Matrix) TransformRectangle(x0, y0, x2, y2 float64) (nx0, ny0, nx2, ny2 float64) {
+	points := []float64{x0, y0, x2, y0, x2, y2, x0, y2}
+	tr.Transform(points)
+	points[0], points[2] = minMax(points[0], points[2])
+	points[4], points[6] = minMax(points[4], points[6])
+	points[1], points[3] = minMax(points[1], points[3])
+	points[5], points[7] = minMax(points[5], points[7])
+
+	nx0 = math.Min(points[0], points[4])
+	ny0 = math.Min(points[1], points[5])
+	nx2 = math.Max(points[2], points[6])
+	ny2 = math.Max(points[3], points[7])
+	return nx0, ny0, nx2, ny2
+}
+
+// InverseTransform applies the inverse of the transformation matrix to points. It modifies the points in place.
+func (tr Matrix) InverseTransform(points []float64) {
+	d := tr.Determinant() // matrix determinant
+	for i, j := 0, 1; j < len(points); i, j = i+2, j+2 {
+		x := points[i]
+		y := points[j]
+		points[i] = ((x-tr[4])*tr[3] - (y-tr[5])*tr[2]) / d
+		points[j] = ((y-tr[5])*tr[0] - (x-tr[4])*tr[1]) / d
+	}
+}
+
+// InverseTransformPoint applies the inverse of the transformation matrix to a point. It returns the transformed point.
+func (tr Matrix) InverseTransformPoint(x, y float64) (xres, yres float64) {
+	d := tr.Determinant() // matrix determinant
+	xres = ((x-tr[4])*tr[3] - (y-tr[5])*tr[2]) / d
+	yres = ((y-tr[5])*tr[0] - (x-tr[4])*tr[1]) / d
+	return xres, yres
+}
+
+// VectorTransform applies the transformation matrix to points without using the translation part of the affine matrix.
+// It modifies the points in place.
+func (tr Matrix) VectorTransform(points []float64) {
+	for i, j := 0, 1; j < len(points); i, j = i+2, j+2 {
+		x := points[i]
+		y := points[j]
+		points[i] = x*tr[0] + y*tr[2]
+		points[j] = x*tr[1] + y*tr[3]
+	}
+}
+
+// NewIdentityMatrix creates an identity transformation matrix.
+func NewIdentityMatrix() Matrix {
+	return Matrix{1, 0, 0, 1, 0, 0}
+}
+
+// NewTranslationMatrix creates a transformation matrix with translation components tx and ty
+func NewTranslationMatrix(tx, ty float64) Matrix {
+	return Matrix{1, 0, 0, 1, tx, ty}
+}
+
+// NewScaleMatrix creates a transformation matrix with scale factors sx and sy
+func NewScaleMatrix(sx, sy float64) Matrix {
+	return Matrix{sx, 0, 0, sy, 0, 0}
+}
+
+// NewRotationMatrix creates a rotation transformation matrix. angle is in radians
+func NewRotationMatrix(angle float64) Matrix {
+	c := math.Cos(angle)
+	s := math.Sin(angle)
+	return Matrix{c, s, -s, c, 0, 0}
+}
+
+// NewMatrixFromRects creates a transformation matrix, combining a scale and a translation, that transforms rectangle1 into rectangle2.
+func NewMatrixFromRects(rectangle1, rectangle2 [4]float64) Matrix {
+	xScale := (rectangle2[2] - rectangle2[0]) / (rectangle1[2] - rectangle1[0])
+	yScale := (rectangle2[3] - rectangle2[1]) / (rectangle1[3] - rectangle1[1])
+	xOffset := rectangle2[0] - (rectangle1[0] * xScale)
+	yOffset := rectangle2[1] - (rectangle1[1] * yScale)
+	return Matrix{xScale, 0, 0, yScale, xOffset, yOffset}
+}
+
+// Inverse computes the inverse matrix in place
+func (tr *Matrix) Inverse() {
+	d := tr.Determinant() // matrix determinant
+	tr0, tr1, tr2, tr3, tr4, tr5 := tr[0], tr[1], tr[2], tr[3], tr[4], tr[5]
+	tr[0] = tr3 / d
+	tr[1] = -tr1 / d
+	tr[2] = -tr2 / d
+	tr[3] = tr0 / d
+	tr[4] = (tr2*tr5 - tr3*tr4) / d
+	tr[5] = (tr1*tr4 - tr0*tr5) / d
+}
+
+func (tr Matrix) Copy() Matrix {
+	var result Matrix
+	copy(result[:], tr[:])
+	return result
+}
+
+// Compose multiplies trToCompose x tr
+func (tr *Matrix) Compose(trToCompose Matrix) {
+	tr0, tr1, tr2, tr3, tr4, tr5 := tr[0], tr[1], tr[2], tr[3], tr[4], tr[5]
+	tr[0] = trToCompose[0]*tr0 + trToCompose[1]*tr2
+	tr[1] = trToCompose[1]*tr3 + trToCompose[0]*tr1
+	tr[2] = trToCompose[2]*tr0 + trToCompose[3]*tr2
+	tr[3] = trToCompose[3]*tr3 + trToCompose[2]*tr1
+	tr[4] = trToCompose[4]*tr0 + trToCompose[5]*tr2 + tr4
+	tr[5] = trToCompose[5]*tr3 + trToCompose[4]*tr1 + tr5
+}
+
+// Scale adds a scale to the matrix
+func (tr *Matrix) Scale(sx, sy float64) {
+	tr[0] = sx * tr[0]
+	tr[1] = sx * tr[1]
+	tr[2] = sy * tr[2]
+	tr[3] = sy * tr[3]
+}
+
+// Translate adds a translation to the matrix
+func (tr *Matrix) Translate(tx, ty float64) {
+	tr[4] = tx*tr[0] + ty*tr[2] + tr[4]
+	tr[5] = ty*tr[3] + tx*tr[1] + tr[5]
+}
+
+// Rotate adds a rotation to the matrix. angle is in radians
+func (tr *Matrix) Rotate(angle float64) {
+	c := math.Cos(angle)
+	s := math.Sin(angle)
+	t0 := c*tr[0] + s*tr[2]
+	t1 := s*tr[3] + c*tr[1]
+	t2 := c*tr[2] - s*tr[0]
+	t3 := c*tr[3] - s*tr[1]
+	tr[0] = t0
+	tr[1] = t1
+	tr[2] = t2
+	tr[3] = t3
+}
+
+// GetTranslation returns the translation part of the matrix
+func (tr Matrix) GetTranslation() (x, y float64) {
+	return tr[4], tr[5]
+}
+
+// GetScaling returns the scaling part of the matrix
+func (tr Matrix) GetScaling() (x, y float64) {
+	return tr[0], tr[3]
+}
+
+// GetScale computes a scale for the matrix
+func (tr Matrix) GetScale() float64 {
+	x := 0.707106781*tr[0] + 0.707106781*tr[1]
+	y := 0.707106781*tr[2] + 0.707106781*tr[3]
+	return math.Sqrt(x*x + y*y)
+}
+
+// ******************** Testing ********************
+
+// Equals tests if two transformations are equal. A tolerance is applied when comparing matrix elements.
+func (tr1 Matrix) Equals(tr2 Matrix) bool {
+	for i := 0; i < 6; i = i + 1 {
+		if !fequals(tr1[i], tr2[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// IsIdentity tests if a transformation is the identity transformation. A tolerance is applied when comparing matrix elements.
+func (tr Matrix) IsIdentity() bool {
+	return fequals(tr[4], 0) && fequals(tr[5], 0) && tr.IsTranslation()
+}
+
+// IsTranslation tests if a transformation is a pure translation. A tolerance is applied when comparing matrix elements.
+func (tr Matrix) IsTranslation() bool {
+	return fequals(tr[0], 1) && fequals(tr[1], 0) && fequals(tr[2], 0) && fequals(tr[3], 1)
+}
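// [Editor's aside — illustrative sketch, not part of this patch.] The new
// value-based Matrix replaces the old pointer-style MatrixTransform API; in
// the DrawImage hunk above, tr.InverseTransform(&u, &v) became
// u, v = tr.InverseTransformPoint(u, v). A minimal usage sketch, assuming
// only the constructors and methods defined in this file:
//
//	tr := draw2d.NewTranslationMatrix(10, 20)
//	tr.Compose(draw2d.NewRotationMatrix(math.Pi / 2)) // rotate first, then translate
//	x, y := tr.TransformPoint(1, 0)        // (1,0) rotates to (0,1), translates to (10, 21)
//	u, v := tr.InverseTransformPoint(x, y) // recovers (1, 0), up to epsilon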
+// fequals compares two floats. It returns true if the distance between the
+// two floats is less than epsilon, false otherwise.
+func fequals(float1, float2 float64) bool {
+	return math.Abs(float1-float2) <= epsilon
+}
diff --git a/_third_party/github.com/llgcode/draw2d/path.go b/_third_party/github.com/llgcode/draw2d/path.go
new file mode 100644
index 0000000000..57ba3ffb33
--- /dev/null
+++ b/_third_party/github.com/llgcode/draw2d/path.go
@@ -0,0 +1,189 @@
+// Copyright 2010 The draw2d Authors. All rights reserved.
+// created: 21/11/2010 by Laurent Le Goff
+
+package draw2d
+
+import (
+	"fmt"
+	"math"
+)
+
+// PathBuilder describes the interface for path drawing.
+type PathBuilder interface {
+	// LastPoint returns the current point of the current sub path
+	LastPoint() (x, y float64)
+	// MoveTo creates a new subpath that starts at the specified point
+	MoveTo(x, y float64)
+	// LineTo adds a line to the current subpath
+	LineTo(x, y float64)
+	// QuadCurveTo adds a quadratic Bézier curve to the current subpath
+	QuadCurveTo(cx, cy, x, y float64)
+	// CubicCurveTo adds a cubic Bézier curve to the current subpath
+	CubicCurveTo(cx1, cy1, cx2, cy2, x, y float64)
+	// ArcTo adds an arc to the current subpath
+	ArcTo(cx, cy, rx, ry, startAngle, angle float64)
+	// Close creates a line from the current point to the last MoveTo
+	// point (if not the same) and marks the path as closed so the
+	// first and last lines join nicely.
+	Close()
+}
+
+// PathCmp represents a component of a path
+type PathCmp int
+
+const (
+	// MoveToCmp is a MoveTo component in a Path
+	MoveToCmp PathCmp = iota
+	// LineToCmp is a LineTo component in a Path
+	LineToCmp
+	// QuadCurveToCmp is a QuadCurveTo component in a Path
+	QuadCurveToCmp
+	// CubicCurveToCmp is a CubicCurveTo component in a Path
+	CubicCurveToCmp
+	// ArcToCmp is an ArcTo component in a Path
+	ArcToCmp
+	// CloseCmp is a Close component in a Path
+	CloseCmp
+)
+
+// Path stores points and the components that give them meaning
+type Path struct {
+	// Components is a slice of PathCmp marking the role of each point in the Path
+	Components []PathCmp
+	// Points are combined with Components to have a specific role in the path
+	Points []float64
+	// Last Point of the Path
+	x, y float64
+}
+
+func (p *Path) appendToPath(cmd PathCmp, points ...float64) {
+	p.Components = append(p.Components, cmd)
+	p.Points = append(p.Points, points...)
+}
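// [Editor's aside — illustrative sketch, not part of this patch.] Building a
// Path by hand with the methods defined below; fmt.Print uses the String()
// debug view at the end of this file:
//
//	p := new(draw2d.Path)
//	p.MoveTo(10, 10)
//	p.LineTo(100, 10)
//	p.QuadCurveTo(150, 10, 150, 60) // control point, then end point
//	p.Close()
//	fmt.Print(p) // one component per line, e.g. "MoveTo: 10.000000, 10.000000"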
+
+// LastPoint returns the current point of the current path
+func (p *Path) LastPoint() (x, y float64) {
+	return p.x, p.y
+}
+
+// MoveTo starts a new path at (x, y) position
+func (p *Path) MoveTo(x, y float64) {
+	p.appendToPath(MoveToCmp, x, y)
+	p.x = x
+	p.y = y
+}
+
+// LineTo adds a line to the current path
+func (p *Path) LineTo(x, y float64) {
+	if len(p.Components) == 0 { // special case when no move has been done
+		p.MoveTo(0, 0)
+	}
+	p.appendToPath(LineToCmp, x, y)
+	p.x = x
+	p.y = y
+}
+
+// QuadCurveTo adds a quadratic Bézier curve to the current path
+func (p *Path) QuadCurveTo(cx, cy, x, y float64) {
+	if len(p.Components) == 0 { // special case when no move has been done
+		p.MoveTo(0, 0)
+	}
+	p.appendToPath(QuadCurveToCmp, cx, cy, x, y)
+	p.x = x
+	p.y = y
+}
+
+// CubicCurveTo adds a cubic Bézier curve to the current path
+func (p *Path) CubicCurveTo(cx1, cy1, cx2, cy2, x, y float64) {
+	if len(p.Components) == 0 { // special case when no move has been done
+		p.MoveTo(0, 0)
+	}
+	p.appendToPath(CubicCurveToCmp, cx1, cy1, cx2, cy2, x, y)
+	p.x = x
+	p.y = y
+}
+
+// ArcTo adds an arc to the path
+func (p *Path) ArcTo(cx, cy, rx, ry, startAngle, angle float64) {
+	endAngle := startAngle + angle
+	clockWise := true
+	if angle < 0 {
+		clockWise = false
+	}
+	// normalize
+	if clockWise {
+		for endAngle < startAngle {
+			endAngle += math.Pi * 2.0
+		}
+	} else {
+		for startAngle < endAngle {
+			startAngle += math.Pi * 2.0
+		}
+	}
+	startX := cx + math.Cos(startAngle)*rx
+	startY := cy + math.Sin(startAngle)*ry
+	if len(p.Components) > 0 {
+		p.LineTo(startX, startY)
+	} else {
+		p.MoveTo(startX, startY)
+	}
+	p.appendToPath(ArcToCmp, cx, cy, rx, ry, startAngle, angle)
+	p.x = cx + math.Cos(endAngle)*rx
+	p.y = cy + math.Sin(endAngle)*ry
+}
+
+// Close closes the current path
+func (p *Path) Close() {
+	p.appendToPath(CloseCmp)
+}
+
+// Copy makes a clone of the current path and returns it
+func (p *Path) Copy() (dest *Path) {
+	dest = new(Path)
+	dest.Components = make([]PathCmp, len(p.Components))
+	copy(dest.Components, p.Components)
+	dest.Points = make([]float64, len(p.Points))
+	copy(dest.Points, p.Points)
+	dest.x, dest.y = p.x, p.y
+	return dest
+}
+
+// Clear resets the path
+func (p *Path) Clear() {
+	p.Components = p.Components[0:0]
+	p.Points = p.Points[0:0]
+	return
+}
+
+// IsEmpty returns true if the path is empty
+func (p *Path) IsEmpty() bool {
+	return len(p.Components) == 0
+}
+
+// String returns a debug text view of the path
+func (p *Path) String() string {
+	s := ""
+	j := 0
+	for _, cmd := range p.Components {
+		switch cmd {
+		case MoveToCmp:
+			s += fmt.Sprintf("MoveTo: %f, %f\n", p.Points[j], p.Points[j+1])
+			j = j + 2
+		case LineToCmp:
+			s += fmt.Sprintf("LineTo: %f, %f\n", p.Points[j], p.Points[j+1])
+			j = j + 2
+		case QuadCurveToCmp:
+			s += fmt.Sprintf("QuadCurveTo: %f, %f, %f, %f\n", p.Points[j], p.Points[j+1], p.Points[j+2], p.Points[j+3])
+			j = j + 4
+		case CubicCurveToCmp:
+			s += fmt.Sprintf("CubicCurveTo: %f, %f, %f, %f, %f, %f\n", p.Points[j], p.Points[j+1], p.Points[j+2], p.Points[j+3], p.Points[j+4], p.Points[j+5])
+			j = j + 6
+		case ArcToCmp:
+			s += fmt.Sprintf("ArcTo: %f, %f, %f, %f, %f, %f\n", p.Points[j], p.Points[j+1], p.Points[j+2], p.Points[j+3], p.Points[j+4], p.Points[j+5])
+			j = j + 6
+		case CloseCmp:
+			s += "Close\n"
+		}
+	}
+	return s
+}
diff --git a/_third_party/github.com/llgcode/draw2d/samples_test.go b/_third_party/github.com/llgcode/draw2d/samples_test.go
new file mode 100644
index 0000000000..903ccfcea1
--- /dev/null
+++ 
b/_third_party/github.com/llgcode/draw2d/samples_test.go @@ -0,0 +1,60 @@ +// See also test_test.go + +package draw2d_test + +import ( + "testing" + + "bosun.org/_third_party/github.com/llgcode/draw2d" + "bosun.org/_third_party/github.com/llgcode/draw2d/samples/android" + "bosun.org/_third_party/github.com/llgcode/draw2d/samples/frameimage" + "bosun.org/_third_party/github.com/llgcode/draw2d/samples/geometry" + "bosun.org/_third_party/github.com/llgcode/draw2d/samples/gopher" + "bosun.org/_third_party/github.com/llgcode/draw2d/samples/gopher2" + "bosun.org/_third_party/github.com/llgcode/draw2d/samples/helloworld" + "bosun.org/_third_party/github.com/llgcode/draw2d/samples/line" + "bosun.org/_third_party/github.com/llgcode/draw2d/samples/linecapjoin" + "bosun.org/_third_party/github.com/llgcode/draw2d/samples/postscript" +) + +func TestSampleAndroid(t *testing.T) { + test(t, android.Main) +} + +func TestSampleGeometry(t *testing.T) { + // Set the global folder for searching fonts + // The pdf backend needs for every ttf file its corresponding + // json/.z file which is generated by gofpdf/makefont. + draw2d.SetFontFolder("resource/font") + test(t, geometry.Main) +} + +func TestSampleGopher(t *testing.T) { + test(t, gopher.Main) +} + +func TestSampleGopher2(t *testing.T) { + test(t, gopher2.Main) +} + +func TestSampleHelloWorld(t *testing.T) { + // Set the global folder for searching fonts + draw2d.SetFontFolder("resource/font") + test(t, helloworld.Main) +} + +func TestSampleFrameImage(t *testing.T) { + test(t, frameimage.Main) +} + +func TestSampleLine(t *testing.T) { + test(t, line.Main) +} + +func TestSampleLineCapJoin(t *testing.T) { + test(t, linecapjoin.Main) +} + +func TestSamplePostscript(t *testing.T) { + test(t, postscript.Main) +} diff --git a/_third_party/github.com/llgcode/draw2d/test b/_third_party/github.com/llgcode/draw2d/test new file mode 100755 index 0000000000..2a387e6804 --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/test @@ -0,0 +1,8 @@ +echo golint +golint ./... | grep "draw2dpdf\|samples\|^advanced_path\|^arc\|draw2d[.]\|fileutil\|^gc\|math\|^path[.]\|rgba_interpolation\|test\|vertex2d" +echo +echo go vet +go vet ./... +echo +echo go test +go test -cover ./... | grep -v "no test" \ No newline at end of file diff --git a/_third_party/github.com/llgcode/draw2d/test_test.go b/_third_party/github.com/llgcode/draw2d/test_test.go new file mode 100644 index 0000000000..654b5760e6 --- /dev/null +++ b/_third_party/github.com/llgcode/draw2d/test_test.go @@ -0,0 +1,30 @@ +// Package draw2d_test gives test coverage with the command: +// go test -cover ./... 
| grep -v "no test"
+package draw2d_test
+
+import (
+	"image"
+	"testing"
+
+	"bosun.org/_third_party/github.com/llgcode/draw2d"
+	"bosun.org/_third_party/github.com/llgcode/draw2d/draw2dimg"
+)
+
+type sample func(gc draw2d.GraphicContext, ext string) (string, error)
+
+func test(t *testing.T, draw sample) {
+	// Initialize the graphic context on an RGBA image
+	dest := image.NewRGBA(image.Rect(0, 0, 297, 210.0))
+	gc := draw2dimg.NewGraphicContext(dest)
+	// Run the sample
+	output, err := draw(gc, "png")
+	if err != nil {
+		t.Errorf("Drawing %q failed: %v", output, err)
+		return
+	}
+	// Save to png
+	err = draw2dimg.SaveToPngFile(output, dest)
+	if err != nil {
+		t.Errorf("Saving %q failed: %v", output, err)
+	}
+}
diff --git a/_third_party/github.com/olivere/elastic/CONTRIBUTORS b/_third_party/github.com/olivere/elastic/CONTRIBUTORS
index aa4caad6bc..268b4ac3b3 100644
--- a/_third_party/github.com/olivere/elastic/CONTRIBUTORS
+++ b/_third_party/github.com/olivere/elastic/CONTRIBUTORS
@@ -9,11 +9,15 @@ Alexey Sharov [@nizsheanez](https://github.com/nizsheanez)
 Conrad Pankoff [@deoxxa](https://github.com/deoxxa)
 Corey Scott [@corsc](https://github.com/corsc)
+Gerhard Häring [@ghaering](https://github.com/ghaering)
+Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos)
 Jack Lindamood [@cep21](https://github.com/cep21)
 Junpei Tsuji [@jun06t](https://github.com/jun06t)
 Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh)
 Mara Kim [@autochthe](https://github.com/autochthe)
 Medhi Bechina [@mdzor](https://github.com/mdzor)
 Nicholas Wolff [@nwolff](https://github.com/nwolff)
+Orne Brocaar [@brocaar](https://github.com/brocaar)
 Sacheendra talluri [@sacheendra](https://github.com/sacheendra)
 Sean DuBois [@Sean-Der](https://github.com/Sean-Der)
+zakthomas [@zakthomas](https://github.com/zakthomas)
diff --git a/_third_party/github.com/olivere/elastic/README.md b/_third_party/github.com/olivere/elastic/README.md
index d3afead3f3..2365b8ca9e 100644
--- a/_third_party/github.com/olivere/elastic/README.md
+++ b/_third_party/github.com/olivere/elastic/README.md
@@ -16,15 +16,15 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for additional informati
 Here's the version matrix:
-Elasticsearch version | Elastic version | Package URL
-----------------------|-----------------|------------
-2.x | 3.0 | *not published yet* ([source](https://github.com/olivere/elastic/tree/release-branch.v3))
-1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2))
-0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1))
+Elasticsearch version | Elastic version  | Package URL
+----------------------|------------------|------------
+2.x | 3.0 **beta** | [`gopkg.in/olivere/elastic.v3-unstable`](https://gopkg.in/olivere/elastic.v3-unstable) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3-unstable))
+1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2))
+0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1)
([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1)) **Example:** -You have Elasticsearch 1.6.0 installed and want to use Elastic. As listed above, you should use Elastic 2.0. So you first install Elastic 2.0. +You have Elasticsearch 1.7.1 installed and want to use Elastic. As listed above, you should use Elastic 2.0. So you first install Elastic 2.0. ```sh $ go get gopkg.in/olivere/elastic.v2 @@ -311,7 +311,7 @@ on the command line. - [x] `terms` - [ ] `top_children` - [x] `wildcard` -- [ ] `minimum_should_match` +- [x] `minimum_should_match` - [ ] `multi_term_query_rewrite` - [x] `template_query` diff --git a/_third_party/github.com/olivere/elastic/client.go b/_third_party/github.com/olivere/elastic/client.go index e81dff3247..d8bde4cdee 100644 --- a/_third_party/github.com/olivere/elastic/client.go +++ b/_third_party/github.com/olivere/elastic/client.go @@ -22,7 +22,7 @@ import ( const ( // Version is the current version of Elastic. - Version = "2.0.0" + Version = "2.0.11" // DefaultUrl is the default endpoint of Elasticsearch on the local machine. // It is used e.g. when initializing a new Client without a specific URL. @@ -196,6 +196,11 @@ func NewClient(options ...ClientOptionFunc) (*Client, error) { } c.urls = canonicalize(c.urls...) + // Check if we can make a request to any of the specified URLs + if err := c.startupHealthcheck(c.healthcheckTimeoutStartup); err != nil { + return nil, err + } + if c.snifferEnabled { // Sniff the cluster initially if err := c.sniff(c.snifferTimeoutStartup); err != nil { @@ -694,7 +699,7 @@ func (c *Client) healthchecker() { // healthcheck does a health check on all nodes in the cluster. Depending on // the node state, it marks connections as dead, sets them alive etc. -// If healthchecks are disabled and force is false, this is a no-op. +// If healthchecks are disabled this is a no-op. // The timeout specifies how long to wait for a response from Elasticsearch. func (c *Client) healthcheck(timeout time.Duration, force bool) { c.mu.RLock() @@ -737,6 +742,31 @@ func (c *Client) healthcheck(timeout time.Duration, force bool) { } } +// startupHealthcheck is used at startup to check if the server is available +// at all. +func (c *Client) startupHealthcheck(timeout time.Duration) error { + c.mu.Lock() + urls := c.urls + c.mu.Unlock() + + // If we don't get a connection after "timeout", we bail. + start := time.Now() + for { + cl := &http.Client{Timeout: timeout} + for _, url := range urls { + res, err := cl.Head(url) + if err == nil && res != nil && res.StatusCode >= 200 && res.StatusCode < 300 { + return nil + } + } + time.Sleep(1 * time.Second) + if time.Now().Sub(start) > timeout { + break + } + } + return ErrNoClient +} + // next returns the next available connection, or ErrNoClient. func (c *Client) next() (*conn, error) { // We do round-robin here. diff --git a/_third_party/github.com/olivere/elastic/client_test.go b/_third_party/github.com/olivere/elastic/client_test.go index 05d464a944..705a48223a 100644 --- a/_third_party/github.com/olivere/elastic/client_test.go +++ b/_third_party/github.com/olivere/elastic/client_test.go @@ -138,10 +138,7 @@ func TestClientSniffDisabled(t *testing.T) { } // Make two requests, so that both connections are being used for i := 0; i < len(client.conns); i++ { - _, err = client.Flush().Do() - if err != nil { - t.Fatal(err) - } + client.Flush().Do() } // The first connection (localhost:9200) should now be okay. 
if i, found := findConn("http://localhost:9200", client.conns...); !found {
@@ -161,6 +158,18 @@ func TestClientSniffDisabled(t *testing.T) {
 	}
 }
+func TestClientHealthcheckStartupTimeout(t *testing.T) {
+	start := time.Now()
+	_, err := NewClient(SetURL("http://localhost:9299"), SetHealthcheckTimeoutStartup(5*time.Second))
+	duration := time.Now().Sub(start)
+	if err != ErrNoClient {
+		t.Fatal(err)
+	}
+	if duration < 5*time.Second {
+		t.Fatalf("expected a timeout in more than 5 seconds; got: %v", duration)
+	}
+}
+
 // -- Start and stop --
 func TestClientStartAndStop(t *testing.T) {
diff --git a/_third_party/github.com/olivere/elastic/errors.go b/_third_party/github.com/olivere/elastic/errors.go
index ce2347ab93..abbb09c6cd 100644
--- a/_third_party/github.com/olivere/elastic/errors.go
+++ b/_third_party/github.com/olivere/elastic/errors.go
@@ -37,13 +37,16 @@ func checkResponse(res *http.Response) error {
 	}
 	errReply := new(Error)
 	err = json.Unmarshal(slurp, errReply)
-	if err == nil && errReply != nil {
+	if err != nil {
+		return fmt.Errorf("elastic: Error %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+	}
+	if errReply != nil {
 		if errReply.Status == 0 {
 			errReply.Status = res.StatusCode
 		}
 		return errReply
 	}
-	return nil
+	return fmt.Errorf("elastic: Error %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
 }
 type Error struct {
diff --git a/_third_party/github.com/olivere/elastic/errors_test.go b/_third_party/github.com/olivere/elastic/errors_test.go
index ed12cb9f2b..553288d56c 100644
--- a/_third_party/github.com/olivere/elastic/errors_test.go
+++ b/_third_party/github.com/olivere/elastic/errors_test.go
@@ -43,3 +43,32 @@ func TestResponseError(t *testing.T) {
 		t.Fatalf("expected error message %q; got: %q", message, e.Message)
 	}
 }
+
+func TestResponseErrorHTML(t *testing.T) {
+	raw := "HTTP/1.1 413 Request Entity Too Large\r\n" +
+		"\r\n" +
+		`<html>
+<head><title>413 Request Entity Too Large</title></head>
+<body bgcolor="white">
+<center><h1>413 Request Entity Too Large</h1></center>
+<hr><center>nginx/1.6.2</center>
+</body>
+</html>
+` + "\r\n"
+	r := bufio.NewReader(strings.NewReader(raw))
+
+	resp, err := http.ReadResponse(r, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = checkResponse(resp)
+	if err == nil {
+		t.Fatalf("expected error; got: %v", err)
+	}
+
+	// Check for correct error message
+	expected := fmt.Sprintf("elastic: Error %d (%s)", http.StatusRequestEntityTooLarge, http.StatusText(http.StatusRequestEntityTooLarge))
+	got := err.Error()
+	if got != expected {
+		t.Fatalf("expected %q; got: %q", expected, got)
+	}
+}
diff --git a/_third_party/github.com/olivere/elastic/exists.go b/_third_party/github.com/olivere/elastic/exists.go
index efd852a69a..c9cd213b08 100644
--- a/_third_party/github.com/olivere/elastic/exists.go
+++ b/_third_party/github.com/olivere/elastic/exists.go
@@ -6,66 +6,171 @@ package elastic
 import (
 	"fmt"
+	"net/http"
+	"net/url"
 	"bosun.org/_third_party/github.com/olivere/elastic/uritemplates"
 )
+// ExistsService checks if a document exists.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
+// for details.
 type ExistsService struct {
-	client *Client
-	index  string
-	_type  string
-	id     string
+	client     *Client
+	pretty     bool
+	id         string
+	index      string
+	typ        string
+	parent     string
+	preference string
+	realtime   *bool
+	refresh    *bool
+	routing    string
 }
+// NewExistsService creates a new ExistsService.
 func NewExistsService(client *Client) *ExistsService {
-	builder := &ExistsService{
+	return &ExistsService{
 		client: client,
 	}
-	return builder
 }
-func (s *ExistsService) String() string {
-	return fmt.Sprintf("exists([%v][%v][%v])",
-		s.index,
-		s._type,
-		s.id)
+// Id is the document ID.
+func (s *ExistsService) Id(id string) *ExistsService {
+	s.id = id
+	return s
 }
+// Index is the name of the index.
 func (s *ExistsService) Index(index string) *ExistsService {
 	s.index = index
 	return s
 }
-func (s *ExistsService) Type(_type string) *ExistsService {
-	s._type = _type
+// Type is the type of the document (use `_all` to fetch the first
+// document matching the ID across all types).
+func (s *ExistsService) Type(typ string) *ExistsService {
+	s.typ = typ
 	return s
 }
-func (s *ExistsService) Id(id string) *ExistsService {
-	s.id = id
+// Parent is the ID of the parent document.
+func (s *ExistsService) Parent(parent string) *ExistsService {
+	s.parent = parent
 	return s
 }
-func (s *ExistsService) Do() (bool, error) {
-	// Build url
+// Preference specifies the node or shard the operation should be
+// performed on (default: random).
+func (s *ExistsService) Preference(preference string) *ExistsService {
+	s.preference = preference
+	return s
+}
+
+// Realtime specifies whether to perform the operation in realtime or search mode.
+func (s *ExistsService) Realtime(realtime bool) *ExistsService {
+	s.realtime = &realtime
+	return s
+}
+
+// Refresh the shard containing the document before performing the operation.
+func (s *ExistsService) Refresh(refresh bool) *ExistsService {
+	s.refresh = &refresh
+	return s
+}
+
+// Routing is the specific routing value.
+func (s *ExistsService) Routing(routing string) *ExistsService {
+	s.routing = routing
+	return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *ExistsService) Pretty(pretty bool) *ExistsService {
+	s.pretty = pretty
+	return s
+}
+
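// [Editor's aside — illustrative sketch, not part of this patch.] The
// reworked ExistsService is used fluently; the index/type/id values here are
// hypothetical, and the client is assumed to expose the service as Exists(),
// as in upstream elastic.v2:
//
//	found, err := client.Exists().Index("twitter").Type("tweet").Id("1").Do()
//	if err != nil {
//		// handle error
//	}
//	if !found {
//		// document does not exist
//	}

+// buildURL builds the URL for the operation.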
+func (s *ExistsService) buildURL() (string, url.Values, error) { + // Build URL path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ - "index": s.index, - "type": s._type, "id": s.id, + "index": s.index, + "type": s.typ, }) if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ExistsService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *ExistsService) Do() (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { return false, err } - // Get response - res, err := s.client.PerformRequest("HEAD", path, nil, nil) + // Get URL for request + path, params, err := s.buildURL() if err != nil { return false, err } - if res.StatusCode == 200 { + + // Get HTTP response + res, err := s.client.PerformRequest("HEAD", path, params, nil) + if err != nil { + return false, err + } + + // Evaluate operation response + switch res.StatusCode { + case http.StatusOK: return true, nil - } else if res.StatusCode == 404 { + case http.StatusNotFound: return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) } - return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) } diff --git a/_third_party/github.com/olivere/elastic/index_test.go b/_third_party/github.com/olivere/elastic/index_test.go index 40a5038728..187eab1ac0 100644 --- a/_third_party/github.com/olivere/elastic/index_test.go +++ b/_third_party/github.com/olivere/elastic/index_test.go @@ -7,6 +7,7 @@ package elastic import ( "encoding/json" "fmt" + "log" "os" "testing" "time" @@ -138,6 +139,10 @@ func setupTestClientAndCreateIndex(t logger, options ...ClientOptionFunc) *Clien return client } +func setupTestClientAndCreateIndexAndLog(t logger, options ...ClientOptionFunc) *Client { + return setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0))) +} + func setupTestClientAndCreateIndexAndAddDocs(t logger, options ...ClientOptionFunc) *Client { client := setupTestClientAndCreateIndex(t, options...) diff --git a/_third_party/github.com/olivere/elastic/reindexer.go b/_third_party/github.com/olivere/elastic/reindexer.go index e36c65633b..5810f19996 100644 --- a/_third_party/github.com/olivere/elastic/reindexer.go +++ b/_third_party/github.com/olivere/elastic/reindexer.go @@ -36,6 +36,7 @@ type Reindexer struct { query Query scanFields []string bulkSize int + size int scroll string reindexerFunc ReindexerFunc progress ReindexerProgressFunc @@ -116,8 +117,16 @@ func (ix *Reindexer) ScanFields(scanFields ...string) *Reindexer { // BulkSize returns the number of documents to send to Elasticsearch per chunk. 
// The default is 500. -func (ix *Reindexer) BulkSize(size int) *Reindexer { - ix.bulkSize = size +func (ix *Reindexer) BulkSize(bulkSize int) *Reindexer { + ix.bulkSize = bulkSize + return ix +} + +// Size is the number of results to return per shard, not per request. +// So a size of 10 which hits 5 shards will return a maximum of 50 results +// per scan request. +func (ix *Reindexer) Size(size int) *Reindexer { + ix.size = size return ix } @@ -179,6 +188,9 @@ func (ix *Reindexer) Do() (*ReindexerResponse, error) { if ix.query != nil { scanner = scanner.Query(ix.query) } + if ix.size > 0 { + scanner = scanner.Size(ix.size) + } cursor, err := scanner.Do() bulk := ix.targetClient.Bulk() diff --git a/_third_party/github.com/olivere/elastic/reindexer_test.go b/_third_party/github.com/olivere/elastic/reindexer_test.go index 26efe487d7..82f839e63b 100644 --- a/_third_party/github.com/olivere/elastic/reindexer_test.go +++ b/_third_party/github.com/olivere/elastic/reindexer_test.go @@ -3,6 +3,7 @@ package elastic import ( "encoding/json" "testing" + "time" ) func TestReindexer(t *testing.T) { @@ -169,6 +170,9 @@ func TestReindexerWithTargetClient(t *testing.T) { t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) } + // Timing issue with the target client + time.Sleep(2 * time.Second) + targetCount, err := targetClient.Count(testIndexName2).Do() if err != nil { t.Fatal(err) diff --git a/_third_party/github.com/olivere/elastic/scan.go b/_third_party/github.com/olivere/elastic/scan.go index e1a20e755c..fb417ff34d 100644 --- a/_third_party/github.com/olivere/elastic/scan.go +++ b/_third_party/github.com/olivere/elastic/scan.go @@ -28,25 +28,28 @@ var ( // ScanService manages a cursor through documents in Elasticsearch. type ScanService struct { - client *Client - indices []string - types []string - keepAlive string - fields []string - query Query - sorts []SortInfo - size *int - pretty bool + client *Client + indices []string + types []string + keepAlive string + searchSource *SearchSource + pretty bool + routing string + preference string + size *int } +// NewScanService creates a new service to iterate through the results +// of a query. func NewScanService(client *Client) *ScanService { builder := &ScanService{ - client: client, - query: NewMatchAllQuery(), + client: client, + searchSource: NewSearchSource().Query(NewMatchAllQuery()), } return builder } +// Index sets the name of the index to use for scan. func (s *ScanService) Index(index string) *ScanService { if s.indices == nil { s.indices = make([]string, 0) @@ -55,6 +58,7 @@ func (s *ScanService) Index(index string) *ScanService { return s } +// Indices sets the names of the indices to use for scan. func (s *ScanService) Indices(indices ...string) *ScanService { if s.indices == nil { s.indices = make([]string, 0) @@ -63,6 +67,7 @@ func (s *ScanService) Indices(indices ...string) *ScanService { return s } +// Type restricts the scan to the given type. func (s *ScanService) Type(typ string) *ScanService { if s.types == nil { s.types = make([]string, 0) @@ -71,6 +76,7 @@ func (s *ScanService) Type(typ string) *ScanService { return s } +// Types allows to restrict the scan to a list of types. func (s *ScanService) Types(types ...string) *ScanService { if s.types == nil { s.types = make([]string, 0) @@ -93,15 +99,67 @@ func (s *ScanService) KeepAlive(keepAlive string) *ScanService { return s } -// Fields specifies the fields the scan query should load. 
-// By default fields is nil so _source is loaded +// Fields tells Elasticsearch to only load specific fields from a search hit. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-fields.html. func (s *ScanService) Fields(fields ...string) *ScanService { - s.fields = fields + s.searchSource = s.searchSource.Fields(fields...) return s } +// SearchSource sets the search source builder to use with this service. +func (s *ScanService) SearchSource(searchSource *SearchSource) *ScanService { + s.searchSource = searchSource + if s.searchSource == nil { + s.searchSource = NewSearchSource().Query(NewMatchAllQuery()) + } + return s +} + +// Routing allows for (a comma-separated) list of specific routing values. +func (s *ScanService) Routing(routings ...string) *ScanService { + s.routing = strings.Join(routings, ",") + return s +} + +// Preference specifies the node or shard the operation should be +// performed on (default: "random"). +func (s *ScanService) Preference(preference string) *ScanService { + s.preference = preference + return s +} + +// Query sets the query to perform, e.g. MatchAllQuery. func (s *ScanService) Query(query Query) *ScanService { - s.query = query + s.searchSource = s.searchSource.Query(query) + return s +} + +// PostFilter is executed as the last filter. It only affects the +// search hits but not facets. See +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-post-filter.html +// for details. +func (s *ScanService) PostFilter(postFilter Filter) *ScanService { + s.searchSource = s.searchSource.PostFilter(postFilter) + return s +} + +// FetchSource indicates whether the response should contain the stored +// _source for every hit. +func (s *ScanService) FetchSource(fetchSource bool) *ScanService { + s.searchSource = s.searchSource.FetchSource(fetchSource) + return s +} + +// FetchSourceContext indicates how the _source should be fetched. +func (s *ScanService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *ScanService { + s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext) + return s +} + +// Version can be set to true to return a version for each search hit. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-version.html. +func (s *ScanService) Version(version bool) *ScanService { + s.searchSource = s.searchSource.Version(version) return s } @@ -110,7 +168,7 @@ func (s *ScanService) Query(query Query) *ScanService { // See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html // for detailed documentation of sorting. func (s *ScanService) Sort(field string, ascending bool) *ScanService { - s.sorts = append(s.sorts, SortInfo{Field: field, Ascending: ascending}) + s.searchSource = s.searchSource.Sort(field, ascending) return s } @@ -119,20 +177,34 @@ func (s *ScanService) Sort(field string, ascending bool) *ScanService { // See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html // for detailed documentation of sorting. func (s *ScanService) SortWithInfo(info SortInfo) *ScanService { - s.sorts = append(s.sorts, info) + s.searchSource = s.searchSource.SortWithInfo(info) + return s +} + +// SortBy defines how to sort results. +// Use the Sort func for a shortcut. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html +// for detailed documentation of sorting. 
+func (s *ScanService) SortBy(sorter ...Sorter) *ScanService { + s.searchSource = s.searchSource.SortBy(sorter...) return s } +// Pretty enables the caller to indent the JSON output. func (s *ScanService) Pretty(pretty bool) *ScanService { s.pretty = pretty return s } +// Size is the number of results to return per shard, not per request. +// So a size of 10 which hits 5 shards will return a maximum of 50 results +// per scan request. func (s *ScanService) Size(size int) *ScanService { s.size = &size return s } +// Do executes the query and returns a "server-side cursor". func (s *ScanService) Do() (*ScanCursor, error) { // Build url path := "/" @@ -172,7 +244,7 @@ func (s *ScanService) Do() (*ScanCursor, error) { // Parameters params := make(url.Values) - if len(s.sorts) == 0 { + if !s.searchSource.hasSort() { params.Set("search_type", "scan") } if s.pretty { @@ -186,24 +258,12 @@ func (s *ScanService) Do() (*ScanCursor, error) { if s.size != nil && *s.size > 0 { params.Set("size", fmt.Sprintf("%d", *s.size)) } - if s.fields != nil { - params.Set("fields", strings.Join(s.fields, ",")) - } - - // Set body - body := make(map[string]interface{}) - if s.query != nil { - body["query"] = s.query.Source() - } - if len(s.sorts) > 0 { - sortarr := make([]interface{}, 0) - for _, sort := range s.sorts { - sortarr = append(sortarr, sort.Source()) - } - body["sort"] = sortarr + if s.routing != "" { + params.Set("routing", s.routing) } // Get response + body := s.searchSource.Source() res, err := s.client.PerformRequest("POST", path, params, body) if err != nil { return nil, err @@ -302,6 +362,7 @@ func (c *ScanCursor) Next() (*SearchResult, error) { } // Return result + c.Results = &SearchResult{ScrollId: body} if err := json.Unmarshal(res.Body, c.Results); err != nil { return nil, err } diff --git a/_third_party/github.com/olivere/elastic/scan_test.go b/_third_party/github.com/olivere/elastic/scan_test.go index 76737d7510..7b1bca7407 100644 --- a/_third_party/github.com/olivere/elastic/scan_test.go +++ b/_third_party/github.com/olivere/elastic/scan_test.go @@ -45,16 +45,16 @@ func TestScan(t *testing.T) { } if cursor.Results == nil { - t.Errorf("expected results != nil; got nil") + t.Fatalf("expected results != nil; got nil") } if cursor.Results.Hits == nil { - t.Errorf("expected results.Hits != nil; got nil") + t.Fatalf("expected results.Hits != nil; got nil") } if cursor.Results.Hits.TotalHits != 3 { - t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, cursor.Results.Hits.TotalHits) + t.Fatalf("expected results.Hits.TotalHits = %d; got %d", 3, cursor.Results.Hits.TotalHits) } if len(cursor.Results.Hits.Hits) != 0 { - t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(cursor.Results.Hits.Hits)) + t.Fatalf("expected len(results.Hits.Hits) = %d; got %d", 0, len(cursor.Results.Hits.Hits)) } pages := 0 @@ -73,7 +73,7 @@ func TestScan(t *testing.T) { for _, hit := range searchResult.Hits.Hits { if hit.Index != testIndexName { - t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) } item := make(map[string]interface{}) err := json.Unmarshal(*hit.Source, &item) @@ -129,20 +129,20 @@ func TestScanWithSort(t *testing.T) { } if cursor.Results == nil { - t.Errorf("expected results != nil; got nil") + t.Fatalf("expected results != nil; got nil") } if cursor.Results.Hits == nil { - t.Errorf("expected results.Hits != nil; got nil") + t.Fatalf("expected results.Hits != 
nil; got nil") } if cursor.Results.Hits.TotalHits != 3 { - t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, cursor.Results.Hits.TotalHits) + t.Fatalf("expected results.Hits.TotalHits = %d; got %d", 3, cursor.Results.Hits.TotalHits) } if len(cursor.Results.Hits.Hits) != 1 { - t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 1, len(cursor.Results.Hits.Hits)) + t.Fatalf("expected len(results.Hits.Hits) = %d; got %d", 1, len(cursor.Results.Hits.Hits)) } if cursor.Results.Hits.Hits[0].Id != "3" { - t.Errorf("expected hitID = %d; got %d", "3", cursor.Results.Hits.Hits[0].Id) + t.Fatalf("expected hitID = %v; got %v", "3", cursor.Results.Hits.Hits[0].Id) } @@ -162,7 +162,7 @@ func TestScanWithSort(t *testing.T) { for _, hit := range searchResult.Hits.Hits { if hit.Index != testIndexName { - t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) } item := make(map[string]interface{}) err := json.Unmarshal(*hit.Source, &item) @@ -182,6 +182,92 @@ func TestScanWithSort(t *testing.T) { } } +func TestScanWithSearchSource(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t) + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch.", Retweets: 4} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic.", Retweets: 10} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + src := NewSearchSource(). + Query(NewTermQuery("user", "olivere")). 
+ FetchSourceContext(NewFetchSourceContext(true).Include("retweets")) + cursor, err := client.Scan(testIndexName).SearchSource(src).Size(1).Do() + if err != nil { + t.Fatal(err) + } + + if cursor.Results == nil { + t.Fatalf("expected results != nil; got nil") + } + if cursor.Results.Hits == nil { + t.Fatalf("expected results.Hits != nil; got nil") + } + if cursor.Results.Hits.TotalHits != 2 { + t.Fatalf("expected results.Hits.TotalHits = %d; got %d", 2, cursor.Results.Hits.TotalHits) + } + + numDocs := 0 + pages := 0 + + for { + searchResult, err := cursor.Next() + if err == EOS { + break + } + if err != nil { + t.Fatal(err) + } + + pages += 1 + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + if _, found := item["message"]; found { + t.Fatalf("expected to not see field %q; got: %#v", "message", item) + } + numDocs += 1 + } + } + + if pages != 3 { + t.Errorf("expected to retrieve %d pages; got %d", 2, pages) + } + + if numDocs != 2 { + t.Errorf("expected to retrieve %d hits; got %d", 2, numDocs) + } +} + func TestScanWithQuery(t *testing.T) { client := setupTestClientAndCreateIndex(t) @@ -221,16 +307,16 @@ func TestScanWithQuery(t *testing.T) { } if cursor.Results == nil { - t.Errorf("expected results != nil; got nil") + t.Fatalf("expected results != nil; got nil") } if cursor.Results.Hits == nil { - t.Errorf("expected results.Hits != nil; got nil") + t.Fatalf("expected results.Hits != nil; got nil") } if cursor.Results.Hits.TotalHits != 2 { - t.Errorf("expected results.Hits.TotalHits = %d; got %d", 2, cursor.Results.Hits.TotalHits) + t.Fatalf("expected results.Hits.TotalHits = %d; got %d", 2, cursor.Results.Hits.TotalHits) } if len(cursor.Results.Hits.Hits) != 0 { - t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(cursor.Results.Hits.Hits)) + t.Fatalf("expected len(results.Hits.Hits) = %d; got %d", 0, len(cursor.Results.Hits.Hits)) } pages := 0 @@ -249,7 +335,7 @@ func TestScanWithQuery(t *testing.T) { for _, hit := range searchResult.Hits.Hits { if hit.Index != testIndexName { - t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) } item := make(map[string]interface{}) err := json.Unmarshal(*hit.Source, &item) @@ -320,22 +406,22 @@ func TestScanAndScrollWithEmptyIndex(t *testing.T) { t.Fatalf("expected results != nil; got: nil") } if res.ScrollId == "" { - t.Errorf("expected scrollId in results; got: %q", res.ScrollId) + t.Fatalf("expected scrollId in results; got: %q", res.ScrollId) } if res.TotalHits() != 0 { - t.Errorf("expected TotalHits() = %d; got %d", 0, res.TotalHits()) + t.Fatalf("expected TotalHits() = %d; got %d", 0, res.TotalHits()) } if res.Hits == nil { - t.Errorf("expected results.Hits != nil; got: nil") + t.Fatalf("expected results.Hits != nil; got: nil") } if res.Hits.TotalHits != 0 { - t.Errorf("expected results.Hits.TotalHits = %d; got %d", 0, res.Hits.TotalHits) + t.Fatalf("expected results.Hits.TotalHits = %d; got %d", 0, res.Hits.TotalHits) } if res.Hits.Hits == nil { - t.Errorf("expected results.Hits.Hits != nil; got: %v", res.Hits.Hits) + t.Fatalf("expected results.Hits.Hits != nil; got: %v", res.Hits.Hits) } if len(res.Hits.Hits) != 0 { - t.Errorf("expected len(results.Hits.Hits) == 
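// [Editor's aside — illustrative sketch, not part of this patch.] The new
// FetchSource/FetchSourceContext setters on SearchService mirror the
// ScanService change above; the index, field, and query values are
// hypothetical:
//
//	res, err := client.Search().
//		Index("twitter").
//		Query(NewTermQuery("user", "olivere")).
//		FetchSourceContext(NewFetchSourceContext(true).Include("retweets")).
//		Do()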
%d; got: %d", 0, len(res.Hits.Hits)) + t.Fatalf("expected len(results.Hits.Hits) == %d; got: %d", 0, len(res.Hits.Hits)) } // Subsequent requests return EOS @@ -355,3 +441,61 @@ func TestScanAndScrollWithEmptyIndex(t *testing.T) { t.Fatalf("expected results == %v; got: %v", nil, res) } } + +func TestIssue119(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + comment1 := comment{User: "nico", Comment: "You bet."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("1").BodyJson(&comment1).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + cursor, err := client.Scan(testIndexName).Fields("_source", "_parent").Size(1).Do() + if err != nil { + t.Fatal(err) + } + + for { + searchResult, err := cursor.Next() + if err == EOS { + break + } + if err != nil { + t.Fatal(err) + } + + for _, hit := range searchResult.Hits.Hits { + if hit.Type == "tweet" { + if _, ok := hit.Fields["_parent"].(string); ok { + t.Errorf("Type `tweet` cannot have any parent...") + + toPrint, _ := json.MarshalIndent(hit, "", " ") + t.Fatal(string(toPrint)) + } + } + + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } + } +} diff --git a/_third_party/github.com/olivere/elastic/search.go b/_third_party/github.com/olivere/elastic/search.go index e4eea932a6..773b4f582c 100644 --- a/_third_party/github.com/olivere/elastic/search.go +++ b/_third_party/github.com/olivere/elastic/search.go @@ -121,8 +121,8 @@ func (s *SearchService) SearchType(searchType string) *SearchService { } // Routing allows for (a comma-separated) list of specific routing values. -func (s *SearchService) Routing(routing string) *SearchService { - s.routing = routing +func (s *SearchService) Routing(routings ...string) *SearchService { + s.routing = strings.Join(routings, ",") return s } @@ -153,6 +153,19 @@ func (s *SearchService) PostFilter(postFilter Filter) *SearchService { return s } +// FetchSource indicates whether the response should contain the stored +// _source for every hit. +func (s *SearchService) FetchSource(fetchSource bool) *SearchService { + s.searchSource = s.searchSource.FetchSource(fetchSource) + return s +} + +// FetchSourceContext indicates how the _source should be fetched. +func (s *SearchService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchService { + s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext) + return s +} + // Highlight sets the highlighting. See // http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html // for details. 
@@ -313,6 +326,9 @@ func (s *SearchService) Do() (*SearchResult, error) { if s.searchType != "" { params.Set("search_type", s.searchType) } + if s.routing != "" { + params.Set("routing", s.routing) + } // Perform request var body interface{} @@ -454,11 +470,21 @@ type SearchFacet struct { Entries []searchFacetEntry `json:"entries"` } -// searchFacetTerm is the result of a terms facet. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-terms-facet.html. +// searchFacetTerm is the result of a terms/terms_stats facet. +// See https://www.elastic.co/guide/en/elasticsearch/reference/1.7/search-facets-terms-facet.html +// and https://www.elastic.co/guide/en/elasticsearch/reference/1.7/search-facets-terms-stats-facet.html. type searchFacetTerm struct { Term interface{} `json:"term"` Count int `json:"count"` + + // The following fields are returned for terms_stats facets. + // See https://www.elastic.co/guide/en/elasticsearch/reference/1.7/search-facets-terms-stats-facet.html. + + TotalCount int `json:"total_count"` + Min float64 `json:"min"` + Max float64 `json:"max"` + Total float64 `json:"total"` + Mean float64 `json:"mean"` } // searchFacetRange is the result of a range facet. diff --git a/_third_party/github.com/olivere/elastic/search_facets_test.go b/_third_party/github.com/olivere/elastic/search_facets_test.go index 6ae75abc34..f102158430 100644 --- a/_third_party/github.com/olivere/elastic/search_facets_test.go +++ b/_third_party/github.com/olivere/elastic/search_facets_test.go @@ -12,7 +12,7 @@ import ( ) func TestSearchFacets(t *testing.T) { - client := setupTestClientAndCreateIndex(t) + client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) tweet1 := tweet{ User: "olivere", @@ -95,6 +95,9 @@ func TestSearchFacets(t *testing.T) { Between(d20120101, d20130101). Gt(d20130101) + // Terms Stats Facet + termsStatsFacet := NewTermsStatsFacet().KeyField("user").ValueField("retweets") + // Run query searchResult, err := client.Search().Index(testIndexName). Query(&all). @@ -108,6 +111,8 @@ func TestSearchFacets(t *testing.T) { Facet("queryFacet", queryFacet). Facet("dateRangeFacet", dateRangeFacet). Facet("dateRangeWithTimeFacet", dateRangeWithTimeFacet). + Facet("termsStatsFacet", termsStatsFacet). + Pretty(true). 
Do() if err != nil { t.Fatal(err) @@ -473,4 +478,56 @@ func TestSearchFacets(t *testing.T) { t.Errorf("expected searchResult.Facets[\"dateRangeWithTimeFacet\"].Ranges[2].FromStr = %v; got %v", "2013-01-01T00:00:00Z", *facet.Ranges[2].FromStr) } + // Search for terms_stats facet + facet, found = searchResult.Facets["termsStatsFacet"] + if !found { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"] = %v; got %v", true, found) + } + if facet == nil { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"] != nil; got nil") + } + + // Check facet details + if got, want := facet.Type, "terms_stats"; got != want { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Type = %v; got %v", want, got) + } + if got, want := len(facet.Terms), 2; got != want { + t.Errorf("expected len(searchResult.Facets[\"termsStatsFacet\"].Terms) = %v; got %v", want, got) + } + if got, want := facet.Terms[0].Term, "olivere"; got != want { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[0].Term = %v; got %v", want, got) + } + if got, want := facet.Terms[0].Count, 2; got != want { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[0].Count = %v; got %v", want, got) + } + if got, want := facet.Terms[0].TotalCount, 2; got != want { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[0].TotalCount = %v; got %v", want, got) + } + if got, want := facet.Terms[0].Min, 0.0; got != want { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[0].Min = %v; got %v", want, got) + } + if got, want := facet.Terms[0].Max, 108.0; got != want { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[0].Max = %v; got %v", want, got) + } + if got, want := facet.Terms[0].Mean, 54.0; got != want { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[0].Mean = %v; got %v", want, got) + } + if got, want := facet.Terms[1].Term, "sandrae"; got != want { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[1].Term = %v; got %v", want, got) + } + if got, want := facet.Terms[1].Count, 1; got != want { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[1].Count = %v; got %v", want, got) + } + if got, want := facet.Terms[1].TotalCount, 1; got != want { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[1].TotalCount = %v; got %v", want, got) + } + if got, want := facet.Terms[1].Min, 12.0; got != want { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[1].Min = %v; got %v", want, got) + } + if got, want := facet.Terms[1].Max, 12.0; got != want { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[1].Max = %v; got %v", want, got) + } + if got, want := facet.Terms[1].Mean, 12.0; got != want { + t.Errorf("expected searchResult.Facets[\"termsStatsFacet\"].Terms[1].Mean = %v; got %v", want, got) + } } diff --git a/_third_party/github.com/olivere/elastic/search_filters_range.go b/_third_party/github.com/olivere/elastic/search_filters_range.go index 6c727b9033..4fc13494e3 100644 --- a/_third_party/github.com/olivere/elastic/search_filters_range.go +++ b/_third_party/github.com/olivere/elastic/search_filters_range.go @@ -13,6 +13,7 @@ type RangeFilter struct { from *interface{} to *interface{} timeZone string + format string includeLower bool includeUpper bool cache *bool @@ -26,11 +27,19 @@ func NewRangeFilter(name string) RangeFilter { return f } +// TimeZone allows for adjusting the from/to fields using a time zone. +// Only valid for date fields. 
func (f RangeFilter) TimeZone(timeZone string) RangeFilter { f.timeZone = timeZone return f } +// Format is a valid option for date fields in a Range filter. +func (f RangeFilter) Format(format string) RangeFilter { + f.format = format + return f +} + func (f RangeFilter) From(from interface{}) RangeFilter { f.from = &from return f @@ -117,6 +126,9 @@ func (f RangeFilter) Source() interface{} { if f.timeZone != "" { params["time_zone"] = f.timeZone } + if f.format != "" { + params["format"] = f.format + } params["include_lower"] = f.includeLower params["include_upper"] = f.includeUpper diff --git a/_third_party/github.com/olivere/elastic/search_filters_range_test.go b/_third_party/github.com/olivere/elastic/search_filters_range_test.go index 23e456d94d..70aea53f1e 100644 --- a/_third_party/github.com/olivere/elastic/search_filters_range_test.go +++ b/_third_party/github.com/olivere/elastic/search_filters_range_test.go @@ -56,3 +56,19 @@ func TestRangeFilterWithTimeZone(t *testing.T) { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } } + +func TestRangeFilterWithFormat(t *testing.T) { + f := NewRangeFilter("born"). + Gte("2012/01/01"). + Lte("now"). + Format("yyyy/MM/dd") + data, err := json.Marshal(f.Source()) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"born":{"format":"yyyy/MM/dd","from":"2012/01/01","include_lower":true,"include_upper":true,"to":"now"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/_third_party/github.com/olivere/elastic/search_queries_more_like_this.go b/_third_party/github.com/olivere/elastic/search_queries_more_like_this.go index 2a47dafc6f..df12026b58 100644 --- a/_third_party/github.com/olivere/elastic/search_queries_more_like_this.go +++ b/_third_party/github.com/olivere/elastic/search_queries_more_like_this.go @@ -4,15 +4,21 @@ package elastic +import ( + "fmt" + "math" +) + // More like this query find documents that are “like” provided text // by running it against one or more fields. For more details, see // http://www.elasticsearch.org/guide/reference/query-dsl/mlt-query/ type MoreLikeThisQuery struct { - Query - fields []string likeText string - percentTermsToMatch *float32 + ids []string + docs []*MoreLikeThisQueryItem + include *bool + minimumShouldMatch string minTermFreq *int maxQueryTerms *int stopWords []string @@ -20,165 +26,374 @@ type MoreLikeThisQuery struct { maxDocFreq *int minWordLen *int maxWordLen *int - boostTerms *float32 - boost *float32 + boostTerms *float64 + boost *float64 analyzer string failOnUnsupportedField *bool + queryName string } -// Creates a new mlt query. +// NewMoreLikeThisQuery creates a new more-like-this query. func NewMoreLikeThisQuery(likeText string) MoreLikeThisQuery { - q := MoreLikeThisQuery{ + return MoreLikeThisQuery{ likeText: likeText, fields: make([]string, 0), + ids: make([]string, 0), + docs: make([]*MoreLikeThisQueryItem, 0), stopWords: make([]string, 0), } - return q } -func (q MoreLikeThisQuery) Field(field string) MoreLikeThisQuery { - q.fields = append(q.fields, field) +// Field adds one or more field names to the query. +func (q MoreLikeThisQuery) Field(fields ...string) MoreLikeThisQuery { + q.fields = append(q.fields, fields...) return q } +// Fields adds one or more field names to the query. +// Deprecated: Use Field for compatibility with elastic.v3. func (q MoreLikeThisQuery) Fields(fields ...string) MoreLikeThisQuery { q.fields = append(q.fields, fields...) 
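Field is now variadic, and Fields survives only as a deprecated alias for elastic.v3 compatibility; a minimal sketch of the updated builder (field names are illustrative):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"bosun.org/_third_party/github.com/olivere/elastic"
    )

    func main() {
    	q := elastic.NewMoreLikeThisQuery("Golang topic").
    		Field("message", "title"). // variadic; Fields(...) is the deprecated spelling
    		MinTermFreq(1).
    		MaxQueryTerms(25)
    	data, err := json.Marshal(q.Source())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(data)) // {"mlt":{"fields":["message","title"],"like_text":"Golang topic",...}}
    }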
return q } -func (q MoreLikeThisQuery) StopWord(stopWord string) MoreLikeThisQuery { - q.stopWords = append(q.stopWords, stopWord) +// StopWord sets the stopwords. Any word in this set is considered +// "uninteresting" and ignored. Even if your Analyzer allows stopwords, +// you might want to tell the MoreLikeThis code to ignore them, as for +// the purposes of document similarity it seems reasonable to assume that +// "a stop word is never interesting". +func (q MoreLikeThisQuery) StopWord(stopWords ...string) MoreLikeThisQuery { + q.stopWords = append(q.stopWords, stopWords...) return q } +// StopWords is an alias for StopWord. +// Deprecated: Use StopWord for compatibility with elastic.v3. func (q MoreLikeThisQuery) StopWords(stopWords ...string) MoreLikeThisQuery { q.stopWords = append(q.stopWords, stopWords...) return q } +// LikeText sets the text to use in order to find documents that are "like" this. func (q MoreLikeThisQuery) LikeText(likeText string) MoreLikeThisQuery { q.likeText = likeText return q } -func (q MoreLikeThisQuery) PercentTermsToMatch(percentTermsToMatch float32) MoreLikeThisQuery { - q.percentTermsToMatch = &percentTermsToMatch +// Docs sets the documents to use in order to find documents that are "like" this. +func (q MoreLikeThisQuery) Docs(docs ...*MoreLikeThisQueryItem) MoreLikeThisQuery { + q.docs = append(q.docs, docs...) + return q +} + +// Ids sets the document ids to use in order to find documents that are "like" this. +func (q MoreLikeThisQuery) Ids(ids ...string) MoreLikeThisQuery { + q.ids = append(q.ids, ids...) + return q +} + +// Include specifies whether the input documents should also be included +// in the results returned. Defaults to false. +func (q MoreLikeThisQuery) Include(include bool) MoreLikeThisQuery { + q.include = &include return q } +// PercentTermsToMatch will be changed to MinimumShouldMatch. +func (q MoreLikeThisQuery) PercentTermsToMatch(percentTermsToMatch float64) MoreLikeThisQuery { + q.minimumShouldMatch = fmt.Sprintf("%d%%", int(math.Floor(percentTermsToMatch*100))) + return q +} + +// MinimumShouldMatch sets the number of terms that must match the generated +// query expressed in the common syntax for minimum should match. +// The default value is "30%". +// +// This used to be "PercentTermsToMatch". +func (q MoreLikeThisQuery) MinimumShouldMatch(minimumShouldMatch string) MoreLikeThisQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +// MinTermFreq is the frequency below which terms will be ignored in the +// source doc. The default frequency is 2. func (q MoreLikeThisQuery) MinTermFreq(minTermFreq int) MoreLikeThisQuery { q.minTermFreq = &minTermFreq return q } +// MaxQueryTerms sets the maximum number of query terms that will be included +// in any generated query. It defaults to 25. func (q MoreLikeThisQuery) MaxQueryTerms(maxQueryTerms int) MoreLikeThisQuery { q.maxQueryTerms = &maxQueryTerms return q } +// MinDocFreq sets the frequency at which words will be ignored which do +// not occur in at least this many docs. The default is 5. func (q MoreLikeThisQuery) MinDocFreq(minDocFreq int) MoreLikeThisQuery { q.minDocFreq = &minDocFreq return q } +// MaxDocFreq sets the maximum frequency for which words may still appear. +// Words that appear in more than this many docs will be ignored. +// It defaults to unbounded. 
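PercentTermsToMatch no longer has its own wire field; it is rewritten onto minimum_should_match via fmt.Sprintf("%d%%", int(math.Floor(p*100))). The two builders in this sketch therefore serialize identically (0.25 is chosen so the floor is exact):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"bosun.org/_third_party/github.com/olivere/elastic"
    )

    func main() {
    	// Deprecated spelling: a fraction, converted internally to "25%".
    	q1 := elastic.NewMoreLikeThisQuery("Golang topic").
    		Field("message").
    		PercentTermsToMatch(0.25)

    	// Preferred spelling going forward.
    	q2 := elastic.NewMoreLikeThisQuery("Golang topic").
    		Field("message").
    		MinimumShouldMatch("25%")

    	a, _ := json.Marshal(q1.Source())
    	b, _ := json.Marshal(q2.Source())
    	fmt.Println(string(a) == string(b)) // true
    }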
func (q MoreLikeThisQuery) MaxDocFreq(maxDocFreq int) MoreLikeThisQuery { q.maxDocFreq = &maxDocFreq return q } +// MinWordLen sets the minimum word length below which words will be +// ignored. It defaults to 0. func (q MoreLikeThisQuery) MinWordLen(minWordLen int) MoreLikeThisQuery { q.minWordLen = &minWordLen return q } +// MaxWordLen sets the maximum word length above which words will be ignored. +// Defaults to unbounded (0). func (q MoreLikeThisQuery) MaxWordLen(maxWordLen int) MoreLikeThisQuery { q.maxWordLen = &maxWordLen return q } -func (q MoreLikeThisQuery) BoostTerms(boostTerms float32) MoreLikeThisQuery { +// BoostTerms sets the boost factor to use when boosting terms. +// It defaults to 1. +func (q MoreLikeThisQuery) BoostTerms(boostTerms float64) MoreLikeThisQuery { q.boostTerms = &boostTerms return q } +// Analyzer specifies the analyzer that will be used to analyze the text. +// Defaults to the analyzer associated with the field. func (q MoreLikeThisQuery) Analyzer(analyzer string) MoreLikeThisQuery { q.analyzer = analyzer return q } -func (q MoreLikeThisQuery) Boost(boost float32) MoreLikeThisQuery { +// Boost sets the boost for this query. +func (q MoreLikeThisQuery) Boost(boost float64) MoreLikeThisQuery { q.boost = &boost return q } +// FailOnUnsupportedField indicates whether to fail or return no result +// when this query is run against a field which is not supported such as +// a binary/numeric field. func (q MoreLikeThisQuery) FailOnUnsupportedField(fail bool) MoreLikeThisQuery { q.failOnUnsupportedField = &fail return q } +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q MoreLikeThisQuery) QueryName(queryName string) MoreLikeThisQuery { + q.queryName = queryName + return q +} + // Creates the query source for the mlt query. func (q MoreLikeThisQuery) Source() interface{} { // { // "match_all" : { ... } // } - - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["more_like_this"] = params + source := make(map[string]interface{}) + source["mlt"] = params + + if q.likeText == "" && len(q.docs) == 0 && len(q.ids) == 0 { + // We have no form of returning errors for invalid queries as of Elastic v2. + // We also don't have access to the client here, so we can't log anything. + // All we can do is to return an empty query, I suppose. + // TODO Is there a better approach here?
+ //return nil, errors.New(`more_like_this requires some documents to be "liked"`) + return source + } if len(q.fields) > 0 { params["fields"] = q.fields } - - params["like_text"] = q.likeText - - if q.percentTermsToMatch != nil { - params["percent_terms_to_match"] = *q.percentTermsToMatch + if q.likeText != "" { + params["like_text"] = q.likeText + } + if q.minimumShouldMatch != "" { + params["minimum_should_match"] = q.minimumShouldMatch } - if q.minTermFreq != nil { params["min_term_freq"] = *q.minTermFreq } - if q.maxQueryTerms != nil { params["max_query_terms"] = *q.maxQueryTerms } - if len(q.stopWords) > 0 { params["stop_words"] = q.stopWords } - if q.minDocFreq != nil { params["min_doc_freq"] = *q.minDocFreq } - if q.maxDocFreq != nil { params["max_doc_freq"] = *q.maxDocFreq } - if q.minWordLen != nil { params["min_word_len"] = *q.minWordLen } - if q.maxWordLen != nil { params["max_word_len"] = *q.maxWordLen } - if q.boostTerms != nil { params["boost_terms"] = *q.boostTerms } - if q.boost != nil { params["boost"] = *q.boost } - if q.analyzer != "" { params["analyzer"] = q.analyzer } - if q.failOnUnsupportedField != nil { params["fail_on_unsupported_field"] = *q.failOnUnsupportedField } + if q.queryName != "" { + params["_name"] = q.queryName + } + if len(q.ids) > 0 { + params["ids"] = q.ids + } + if len(q.docs) > 0 { + docs := make([]interface{}, 0) + for _, doc := range q.docs { + docs = append(docs, doc.Source()) + } + params["docs"] = docs + } + if q.include != nil { + params["exclude"] = !(*q.include) // ES 1.x only has exclude + } + + return source +} + +// -- MoreLikeThisQueryItem -- + +// MoreLikeThisQueryItem represents a single item of a MoreLikeThisQuery +// to be "liked" or "unliked". +type MoreLikeThisQueryItem struct { + likeText string + + index string + typ string + id string + doc interface{} + fields []string + routing string + fsc *FetchSourceContext + version int64 + versionType string +} + +// NewMoreLikeThisQueryItem creates and initializes a MoreLikeThisQueryItem. +func NewMoreLikeThisQueryItem() *MoreLikeThisQueryItem { + return &MoreLikeThisQueryItem{ + version: -1, + } +} + +// LikeText represents a text to be "liked". +func (item *MoreLikeThisQueryItem) LikeText(likeText string) *MoreLikeThisQueryItem { + item.likeText = likeText + return item +} + +// Index represents the index of the item. +func (item *MoreLikeThisQueryItem) Index(index string) *MoreLikeThisQueryItem { + item.index = index + return item +} + +// Type represents the document type of the item. +func (item *MoreLikeThisQueryItem) Type(typ string) *MoreLikeThisQueryItem { + item.typ = typ + return item +} + +// Id represents the document id of the item. +func (item *MoreLikeThisQueryItem) Id(id string) *MoreLikeThisQueryItem { + item.id = id + return item +} + +// Doc represents a raw document template for the item. +func (item *MoreLikeThisQueryItem) Doc(doc interface{}) *MoreLikeThisQueryItem { + item.doc = doc + return item +} + +// Fields represents the list of fields of the item. +func (item *MoreLikeThisQueryItem) Fields(fields ...string) *MoreLikeThisQueryItem { + item.fields = append(item.fields, fields...) + return item +} + +// Routing sets the routing associated with the item. +func (item *MoreLikeThisQueryItem) Routing(routing string) *MoreLikeThisQueryItem { + item.routing = routing + return item +} + +// FetchSourceContext represents the fetch source of the item which controls +// if and how _source should be returned. 
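The docs/ids forms and the Include-to-exclude mapping (ES 1.x only understands exclude) are exercised by TestMoreLikeThisQuerySourceWithDocs further down; a condensed sketch with an illustrative index name:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"bosun.org/_third_party/github.com/olivere/elastic"
    )

    func main() {
    	q := elastic.NewMoreLikeThisQuery(""). // no like_text; documents are "liked" instead
    		Docs(
    			elastic.NewMoreLikeThisQueryItem().Id("1"),
    			elastic.NewMoreLikeThisQueryItem().
    				Index("comments").
    				Type("comment").
    				Id("2").
    				Routing("routing_id"),
    		).
    		Include(false) // serialized as "exclude":true on ES 1.x
    	data, err := json.Marshal(q.Source())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(data))
    	// {"mlt":{"docs":[{"_id":"1"},{"_id":"2","_index":"comments","_routing":"routing_id","_type":"comment"}],"exclude":true}}
    }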
+func (item *MoreLikeThisQueryItem) FetchSourceContext(fsc *FetchSourceContext) *MoreLikeThisQueryItem { + item.fsc = fsc + return item +} + +// Version specifies the version of the item. +func (item *MoreLikeThisQueryItem) Version(version int64) *MoreLikeThisQueryItem { + item.version = version + return item +} + +// VersionType represents the version type of the item. +func (item *MoreLikeThisQueryItem) VersionType(versionType string) *MoreLikeThisQueryItem { + item.versionType = versionType + return item +} + +// Source returns the JSON-serializable fragment of the entity. +func (item *MoreLikeThisQueryItem) Source() interface{} { + if item.likeText != "" { + return item.likeText + } + + source := make(map[string]interface{}) + + if item.index != "" { + source["_index"] = item.index + } + if item.typ != "" { + source["_type"] = item.typ + } + if item.id != "" { + source["_id"] = item.id + } + if item.doc != nil { + source["doc"] = item.doc + } + if len(item.fields) > 0 { + source["fields"] = item.fields + } + if item.routing != "" { + source["_routing"] = item.routing + } + if item.fsc != nil { + source["_source"] = item.fsc.Source() + } + if item.version >= 0 { + source["_version"] = item.version + } + if item.versionType != "" { + source["_version_type"] = item.versionType + } + return source } diff --git a/_third_party/github.com/olivere/elastic/search_queries_more_like_this_test.go b/_third_party/github.com/olivere/elastic/search_queries_more_like_this_test.go index 0143f8ca66..074474d87a 100644 --- a/_third_party/github.com/olivere/elastic/search_queries_more_like_this_test.go +++ b/_third_party/github.com/olivere/elastic/search_queries_more_like_this_test.go @@ -5,10 +5,56 @@ package elastic import ( + "encoding/json" "testing" ) -func TestMoreLikeThis(t *testing.T) { +func TestMoreLikeThisQuerySourceWithLikeText(t *testing.T) { + q := NewMoreLikeThisQuery("Golang topic").Field("message") + data, err := json.Marshal(q.Source()) + if err != nil { + t.Fatal(err) + } + got := string(data) + expected := `{"mlt":{"fields":["message"],"like_text":"Golang topic"}}` + if got != expected { + t.Fatalf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMoreLikeThisQuerySourceWithIds(t *testing.T) { + q := NewMoreLikeThisQuery("") + q = q.Ids("1", "2") + data, err := json.Marshal(q.Source()) + if err != nil { + t.Fatal(err) + } + got := string(data) + expected := `{"mlt":{"ids":["1","2"]}}` + if got != expected { + t.Fatalf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMoreLikeThisQuerySourceWithDocs(t *testing.T) { + q := NewMoreLikeThisQuery("") + q = q.Docs( + NewMoreLikeThisQueryItem().Id("1"), + NewMoreLikeThisQueryItem().Index(testIndexName2).Type("comment").Id("2").Routing("routing_id"), + ) + q = q.Include(false) + data, err := json.Marshal(q.Source()) + if err != nil { + t.Fatal(err) + } + got := string(data) + expected := `{"mlt":{"docs":[{"_id":"1"},{"_id":"2","_index":"elastic-test2","_routing":"routing_id","_type":"comment"}],"exclude":true}}` + if got != expected { + t.Fatalf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMoreLikeThisQuery(t *testing.T) { client := setupTestClientAndCreateIndex(t) tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} diff --git a/_third_party/github.com/olivere/elastic/search_queries_range.go b/_third_party/github.com/olivere/elastic/search_queries_range.go index 9d10fc3297..85121bcd34 100644 --- a/_third_party/github.com/olivere/elastic/search_queries_range.go +++ 
b/_third_party/github.com/olivere/elastic/search_queries_range.go @@ -13,6 +13,7 @@ type RangeQuery struct { from *interface{} to *interface{} timeZone string + format string includeLower bool includeUpper bool boost *float64 @@ -24,9 +25,17 @@ func NewRangeQuery(name string) RangeQuery { return q } -func (f RangeQuery) TimeZone(timeZone string) RangeQuery { - f.timeZone = timeZone - return f +// TimeZone allows for adjusting the from/to fields using a time zone. +// Only valid for date fields. +func (q RangeQuery) TimeZone(timeZone string) RangeQuery { + q.timeZone = timeZone + return q +} + +// Format is a valid option for date fields in a Range query. +func (q RangeQuery) Format(format string) RangeQuery { + q.format = format + return q } func (q RangeQuery) From(from interface{}) RangeQuery { @@ -105,6 +114,9 @@ func (q RangeQuery) Source() interface{} { if q.timeZone != "" { params["time_zone"] = q.timeZone } + if q.format != "" { + params["format"] = q.format + } params["include_lower"] = q.includeLower params["include_upper"] = q.includeUpper diff --git a/_third_party/github.com/olivere/elastic/search_queries_range_test.go b/_third_party/github.com/olivere/elastic/search_queries_range_test.go index f3f9aef0da..b1da0b69e9 100644 --- a/_third_party/github.com/olivere/elastic/search_queries_range_test.go +++ b/_third_party/github.com/olivere/elastic/search_queries_range_test.go @@ -53,3 +53,19 @@ func TestRangeQueryWithTimeZone(t *testing.T) { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } } + +func TestRangeQueryWithFormat(t *testing.T) { + q := NewRangeQuery("born"). + Gte("2012/01/01"). + Lte("now"). + Format("yyyy/MM/dd") + data, err := json.Marshal(q.Source()) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"born":{"format":"yyyy/MM/dd","from":"2012/01/01","include_lower":true,"include_upper":true,"to":"now"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/_third_party/github.com/olivere/elastic/search_source.go b/_third_party/github.com/olivere/elastic/search_source.go index 6f740ed89d..4429540f42 100644 --- a/_third_party/github.com/olivere/elastic/search_source.go +++ b/_third_party/github.com/olivere/elastic/search_source.go @@ -39,6 +39,7 @@ type SearchSource struct { innerHits map[string]*InnerHit } +// NewSearchSource initializes a new SearchSource. func NewSearchSource() *SearchSource { return &SearchSource{ from: -1, @@ -64,88 +65,114 @@ func (s *SearchSource) Query(query Query) *SearchSource { return s } -// PostFilter is executed as the last filter. It only affects the -// search hits but not facets. +// PostFilter will be executed after the query has been executed and +// only affects the search hits, not the aggregations. +// This filter is always executed as the last filtering mechanism. func (s *SearchSource) PostFilter(postFilter Filter) *SearchSource { s.postFilter = postFilter return s } +// From index to start the search from. Defaults to 0. func (s *SearchSource) From(from int) *SearchSource { s.from = from return s } +// Size is the number of search hits to return. Defaults to 10. func (s *SearchSource) Size(size int) *SearchSource { s.size = size return s } +// MinScore sets the minimum score below which docs will be filtered out. 
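RangeQuery gains the same Format option on the query side, combinable with TimeZone for date fields; a sketch mirroring TestRangeQueryWithFormat (the time-zone value is an illustrative assumption):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"bosun.org/_third_party/github.com/olivere/elastic"
    )

    func main() {
    	q := elastic.NewRangeQuery("born").
    		Gte("2012/01/01").
    		Lte("now").
    		TimeZone("+01:00").  // shifts from/to; date fields only
    		Format("yyyy/MM/dd") // how to parse the date strings
    	data, err := json.Marshal(q.Source())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(data))
    }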
func (s *SearchSource) MinScore(minScore float64) *SearchSource { s.minScore = &minScore return s } +// Explain indicates whether each search hit should be returned with +// an explanation of the hit (ranking). func (s *SearchSource) Explain(explain bool) *SearchSource { s.explain = &explain return s } +// Version indicates whether each search hit should be returned with +// a version associated to it. func (s *SearchSource) Version(version bool) *SearchSource { s.version = &version return s } +// Timeout controls how long a search is allowed to take, e.g. "1s" or "500ms". func (s *SearchSource) Timeout(timeout string) *SearchSource { s.timeout = timeout return s } +// TimeoutInMillis controls how many milliseconds a search is allowed +// to take before it is canceled. func (s *SearchSource) TimeoutInMillis(timeoutInMillis int) *SearchSource { s.timeout = fmt.Sprintf("%dms", timeoutInMillis) return s } +// Sort adds a sort order. func (s *SearchSource) Sort(field string, ascending bool) *SearchSource { s.sorts = append(s.sorts, SortInfo{Field: field, Ascending: ascending}) return s } +// SortWithInfo adds a sort order. func (s *SearchSource) SortWithInfo(info SortInfo) *SearchSource { s.sorts = append(s.sorts, info) return s } +// SortBy adds a sort order. func (s *SearchSource) SortBy(sorter ...Sorter) *SearchSource { s.sorters = append(s.sorters, sorter...) return s } +func (s *SearchSource) hasSort() bool { + return len(s.sorts) > 0 || len(s.sorters) > 0 +} + +// TrackScores is applied when sorting and controls if scores will be +// tracked as well. Defaults to false. func (s *SearchSource) TrackScores(trackScores bool) *SearchSource { s.trackScores = trackScores return s } +// Facet adds a facet to perform as part of the search. func (s *SearchSource) Facet(name string, facet Facet) *SearchSource { s.facets[name] = facet return s } +// Aggregation adds an aggregation to perform as part of the search. func (s *SearchSource) Aggregation(name string, aggregation Aggregation) *SearchSource { s.aggregations[name] = aggregation return s } +// DefaultRescoreWindowSize sets the rescore window size for rescores +// that don't specify their window. func (s *SearchSource) DefaultRescoreWindowSize(defaultRescoreWindowSize int) *SearchSource { s.defaultRescoreWindowSize = &defaultRescoreWindowSize return s } +// Highlight adds highlighting to the search. func (s *SearchSource) Highlight(highlight *Highlight) *SearchSource { s.highlight = highlight return s } +// Highlighter returns the highlighter. func (s *SearchSource) Highlighter() *Highlight { if s.highlight == nil { s.highlight = NewHighlight() @@ -153,26 +180,33 @@ func (s *SearchSource) Highlighter() *Highlight { return s.highlight } +// GlobalSuggestText defines the global text to use with all suggesters. +// This avoids repetition. func (s *SearchSource) GlobalSuggestText(text string) *SearchSource { s.globalSuggestText = text return s } +// Suggester adds a suggester to the search. func (s *SearchSource) Suggester(suggester Suggester) *SearchSource { s.suggesters = append(s.suggesters, suggester) return s } +// AddRescore adds a rescorer to the search. func (s *SearchSource) AddRescore(rescore *Rescore) *SearchSource { s.rescores = append(s.rescores, rescore) return s } +// ClearRescores removes all rescorers from the search. func (s *SearchSource) ClearRescores() *SearchSource { s.rescores = make([]*Rescore, 0) return s } +// FetchSource indicates whether the response should contain the stored +// _source for every hit.
func (s *SearchSource) FetchSource(fetchSource bool) *SearchSource { if s.fetchSourceContext == nil { s.fetchSourceContext = NewFetchSourceContext(fetchSource) @@ -182,11 +216,14 @@ func (s *SearchSource) FetchSource(fetchSource bool) *SearchSource { return s } +// FetchSourceContext indicates how the _source should be fetched. func (s *SearchSource) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchSource { s.fetchSourceContext = fetchSourceContext return s } +// Fields sets the fields to load and return as part of the search request. +// If none are specified, the source of the document will be returned. func (s *SearchSource) Fields(fieldNames ...string) *SearchSource { if s.fieldNames == nil { s.fieldNames = make([]string, 0) @@ -195,6 +232,9 @@ func (s *SearchSource) Fields(fieldNames ...string) *SearchSource { return s } +// Field adds a single field to load and return (note, must be stored) as +// part of the search request. If none are specified, the source of the +// document will be returned. func (s *SearchSource) Field(fieldName string) *SearchSource { if s.fieldNames == nil { s.fieldNames = make([]string, 0) @@ -203,56 +243,71 @@ func (s *SearchSource) Field(fieldName string) *SearchSource { return s } +// NoFields indicates that no fields should be loaded, resulting in only +// id and type to be returned per field. func (s *SearchSource) NoFields() *SearchSource { s.fieldNames = make([]string, 0) return s } +// FieldDataFields adds one or more fields to load from the field data cache +// and return as part of the search request. func (s *SearchSource) FieldDataFields(fieldDataFields ...string) *SearchSource { s.fieldDataFields = append(s.fieldDataFields, fieldDataFields...) return s } +// FieldDataField adds a single field to load from the field data cache +// and return as part of the search request. func (s *SearchSource) FieldDataField(fieldDataField string) *SearchSource { s.fieldDataFields = append(s.fieldDataFields, fieldDataField) return s } +// ScriptFields adds one or more script fields with the provided scripts. func (s *SearchSource) ScriptFields(scriptFields ...*ScriptField) *SearchSource { s.scriptFields = append(s.scriptFields, scriptFields...) return s } +// ScriptField adds a single script field with the provided script. func (s *SearchSource) ScriptField(scriptField *ScriptField) *SearchSource { s.scriptFields = append(s.scriptFields, scriptField) return s } +// PartialFields adds partial fields. func (s *SearchSource) PartialFields(partialFields ...*PartialField) *SearchSource { s.partialFields = append(s.partialFields, partialFields...) return s } +// PartialField adds a partial field. func (s *SearchSource) PartialField(partialField *PartialField) *SearchSource { s.partialFields = append(s.partialFields, partialField) return s } +// IndexBoost sets the boost that a specific index will receive when the +// query is executed against it. func (s *SearchSource) IndexBoost(index string, boost float64) *SearchSource { s.indexBoosts[index] = boost return s } +// Stats group this request will be aggregated under. func (s *SearchSource) Stats(statsGroup ...string) *SearchSource { s.stats = append(s.stats, statsGroup...) return s } +// InnerHit adds an inner hit to return with the result. func (s *SearchSource) InnerHit(name string, innerHit *InnerHit) *SearchSource { s.innerHits[name] = innerHit return s } +// Source returns the serializable JSON for the source builder. 
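Taken together, the SearchSource setters documented above compose as in this sketch; index and field names are illustrative assumptions:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"bosun.org/_third_party/github.com/olivere/elastic"
    )

    func main() {
    	src := elastic.NewSearchSource().
    		Query(elastic.NewMatchAllQuery()).
    		From(0).  // first hit to return, defaults to 0
    		Size(10). // number of hits, defaults to 10
    		Sort("user", true).
    		TrackScores(true). // keep scores even though we sort
    		FetchSourceContext(elastic.NewFetchSourceContext(true)).
    		Fields("user", "message").  // stored fields to return
    		FieldDataField("retweets"). // loaded from the field data cache
    		IndexBoost("tweets", 2.0).  // hypothetical index
    		Timeout("1s")
    	data, err := json.Marshal(src.Source())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(data))
    }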
func (s *SearchSource) Source() interface{} { source := make(map[string]interface{}) diff --git a/_third_party/github.com/olivere/elastic/suggester_completion_fuzzy.go b/_third_party/github.com/olivere/elastic/suggester_completion_fuzzy.go index f694e1d702..3539381b00 100644 --- a/_third_party/github.com/olivere/elastic/suggester_completion_fuzzy.go +++ b/_third_party/github.com/olivere/elastic/suggester_completion_fuzzy.go @@ -27,6 +27,10 @@ type FuzzyCompletionSuggester struct { unicodeAware *bool } +// Fuzziness defines the fuzziness which is used in FuzzyCompletionSuggester. +type Fuzziness struct { +} + // Creates a new completion suggester. func NewFuzzyCompletionSuggester(name string) FuzzyCompletionSuggester { return FuzzyCompletionSuggester{ @@ -139,20 +143,22 @@ func (q FuzzyCompletionSuggester) Source(includeName bool) interface{} { } // Fuzzy Completion Suggester fields + fuzzy := make(map[string]interface{}) + suggester["fuzzy"] = fuzzy if q.fuzziness != nil { - suggester["fuzziness"] = q.fuzziness + fuzzy["fuzziness"] = q.fuzziness } if q.fuzzyTranspositions != nil { - suggester["transpositions"] = *q.fuzzyTranspositions + fuzzy["transpositions"] = *q.fuzzyTranspositions } if q.fuzzyMinLength != nil { - suggester["min_length"] = *q.fuzzyMinLength + fuzzy["min_length"] = *q.fuzzyMinLength } if q.fuzzyPrefixLength != nil { - suggester["prefix_length"] = *q.fuzzyPrefixLength + fuzzy["prefix_length"] = *q.fuzzyPrefixLength } if q.unicodeAware != nil { - suggester["unicode_aware"] = *q.unicodeAware + fuzzy["unicode_aware"] = *q.unicodeAware } if !includeName { diff --git a/_third_party/github.com/olivere/elastic/suggester_completion_fuzzy_test.go b/_third_party/github.com/olivere/elastic/suggester_completion_fuzzy_test.go index a3104a70d0..a7d9afc844 100644 --- a/_third_party/github.com/olivere/elastic/suggester_completion_fuzzy_test.go +++ b/_third_party/github.com/olivere/elastic/suggester_completion_fuzzy_test.go @@ -19,7 +19,7 @@ func TestFuzzyCompletionSuggesterSource(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzziness":2}}}` + expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":2}}}}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } @@ -35,7 +35,7 @@ func TestFuzzyCompletionSuggesterWithStringFuzzinessSource(t *testing.T) { t.Fatalf("marshaling to JSON failed: %v", err) } got := string(data) - expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzziness":"1..4"}}}` + expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":"1..4"}}}}` if got != expected { t.Errorf("expected\n%s\n,got:\n%s", expected, got) } diff --git a/_third_party/github.com/siddontang/go/bson/bson_test.go b/_third_party/github.com/siddontang/go/bson/bson_test.go index 8ead8216ca..3d97998503 100644 --- a/_third_party/github.com/siddontang/go/bson/bson_test.go +++ b/_third_party/github.com/siddontang/go/bson/bson_test.go @@ -36,8 +36,8 @@ import ( "testing" "time" - . "bosun.org/_third_party/gopkg.in/check.v1" - "bosun.org/_third_party/gopkg.in/mgo.v2/bson" + . 
"gopkg.in/check.v1" + "gopkg.in/mgo.v2/bson" ) func TestAll(t *testing.T) { diff --git a/_third_party/github.com/tatsushid/go-fastping/fastping.go b/_third_party/github.com/tatsushid/go-fastping/fastping.go index ecc17f2418..96950ca012 100644 --- a/_third_party/github.com/tatsushid/go-fastping/fastping.go +++ b/_third_party/github.com/tatsushid/go-fastping/fastping.go @@ -47,9 +47,9 @@ import ( "syscall" "time" - "bosun.org/_third_party/golang.org/x/net/icmp" - "bosun.org/_third_party/golang.org/x/net/ipv4" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/icmp" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" ) const ( @@ -130,6 +130,8 @@ type Pinger struct { // key string is IPAddr.String() addrs map[string]*net.IPAddr network string + source string + source6 string hasIPv4 bool hasIPv6 bool ctx *context @@ -158,6 +160,8 @@ func NewPinger() *Pinger { seq: rand.Intn(0xffff), addrs: make(map[string]*net.IPAddr), network: "ip", + source: "", + source6: "", hasIPv4: false, hasIPv6: false, Size: TimeSliceLength, @@ -185,6 +189,40 @@ func (p *Pinger) Network(network string) (string, error) { return origNet, nil } +// Source sets ipv4/ipv6 source IP for sending ICMP packets and returns the previous +// setting. Empty value indicates to use system default one (for both ipv4 and ipv6). +func (p *Pinger) Source(source string) (string, error) { + // using ipv4 previous value for new empty one + origSource := p.source + if "" == source { + p.mu.Lock() + p.source = "" + p.source6 = "" + p.mu.Unlock() + return origSource, nil + } + + addr := net.ParseIP(source) + if addr == nil { + return origSource, errors.New(source + " is not a valid textual representation of an IPv4/IPv6 address") + } + + if isIPv4(addr) { + p.mu.Lock() + p.source = source + p.mu.Unlock() + } else if isIPv6(addr) { + origSource = p.source6 + p.mu.Lock() + p.source6 = source + p.mu.Unlock() + } else { + return origSource, errors.New(source + " is not a valid textual representation of an IPv4/IPv6 address") + } + + return origSource, nil +} + // AddIP adds an IP address to Pinger. ipaddr arg should be a string like // "192.0.2.1". 
func (p *Pinger) AddIP(ipaddr string) error { @@ -347,8 +385,8 @@ func (p *Pinger) Err() error { return p.ctx.err } -func (p *Pinger) listen(netProto string) *icmp.PacketConn { - conn, err := icmp.ListenPacket(netProto, "") +func (p *Pinger) listen(netProto string, source string) *icmp.PacketConn { + conn, err := icmp.ListenPacket(netProto, source) if err != nil { p.mu.Lock() p.ctx.err = err @@ -364,14 +402,14 @@ func (p *Pinger) run(once bool) { p.debugln("Run(): Start") var conn, conn6 *icmp.PacketConn if p.hasIPv4 { - if conn = p.listen(ipv4Proto[p.network]); conn == nil { + if conn = p.listen(ipv4Proto[p.network], p.source); conn == nil { return } defer conn.Close() } if p.hasIPv6 { - if conn6 = p.listen(ipv6Proto[p.network]); conn6 == nil { + if conn6 = p.listen(ipv6Proto[p.network], p.source6); conn6 == nil { return } defer conn6.Close() diff --git a/_third_party/github.com/tatsushid/go-fastping/fastping_test.go b/_third_party/github.com/tatsushid/go-fastping/fastping_test.go index 12dd2f4c46..c708b82ef6 100644 --- a/_third_party/github.com/tatsushid/go-fastping/fastping_test.go +++ b/_third_party/github.com/tatsushid/go-fastping/fastping_test.go @@ -7,28 +7,82 @@ import ( "time" ) -type addHostTest struct { - host string - addr *net.IPAddr - expect bool -} +func TestSource(t *testing.T) { + for i, tt := range []struct { + firstAddr string + secondAddr string + invalid bool + }{ + {firstAddr: "192.0.2.10", secondAddr: "192.0.2.20", invalid: false}, + {firstAddr: "2001:0DB8::10", secondAddr: "2001:0DB8::20", invalid: false}, + {firstAddr: "192.0.2", invalid: true}, + } { + p := NewPinger() + + origSource, err := p.Source(tt.firstAddr) + if tt.invalid { + if err == nil { + t.Errorf("[%d] Source should return an error but got nothing", i) + } + continue + } + if err != nil { + t.Errorf("[%d] Source address failed: %v", i, err) + } + if origSource != "" { + t.Errorf("[%d] Source returned an unexpected value: got %q, expected %q", i, origSource, "") + } + + origSource, err = p.Source(tt.secondAddr) + if err != nil { + t.Errorf("[%d] Source address failed: %v", i, err) + } + if origSource != tt.firstAddr { + t.Errorf("[%d] Source returned an unexpected value: got %q, expected %q", i, origSource, tt.firstAddr) + } + } -var addHostTests = []addHostTest{ - {host: "127.0.0.1", addr: &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}, expect: true}, - {host: "localhost", addr: &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}, expect: false}, + v4Addr := "192.0.2.10" + v6Addr := "2001:0DB8::10" + + p := NewPinger() + _, err := p.Source(v4Addr) + if err != nil { + t.Errorf("Source address failed: %v", err) + } + _, err = p.Source(v6Addr) + if err != nil { + t.Errorf("Source address failed: %v", err) + } + origSource, err := p.Source("") + if err != nil { + t.Errorf("Source address failed: %v", err) + } + if origSource != v4Addr { + t.Errorf("Source returned an unexpected value: got %q, expected %q", origSource, v4Addr) + } } func TestAddIP(t *testing.T) { + addIPTests := []struct { + host string + addr *net.IPAddr + expect bool + }{ + {host: "127.0.0.1", addr: &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}, expect: true}, + {host: "localhost", addr: &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}, expect: false}, + } + p := NewPinger() - for _, tt := range addHostTests { + for _, tt := range addIPTests { if ok := p.AddIP(tt.host); ok != nil { if tt.expect != false { t.Errorf("AddIP failed: got %v, expected %v", ok, tt.expect) } } } - for _, tt := range addHostTests { + for _, tt := range addIPTests { if tt.expect { if
!p.addrs[tt.host].IP.Equal(tt.addr.IP) { t.Errorf("AddIP didn't save IPAddr: %v", tt.host) @@ -37,6 +91,33 @@ func TestAddIP(t *testing.T) { } } +func TestAddIPAddr(t *testing.T) { + addIPAddrTests := []*net.IPAddr{ + {IP: net.IPv4(192, 0, 2, 10)}, + {IP: net.IP{0x20, 0x01, 0x0D, 0xB8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x10}}, + } + + p := NewPinger() + + for i, tt := range addIPAddrTests { + p.AddIPAddr(tt) + if !p.addrs[tt.String()].IP.Equal(tt.IP) { + t.Errorf("[%d] AddIPAddr didn't save IPAddr: %v", i, tt.IP) + } + if len(tt.IP.To4()) == net.IPv4len { + if p.hasIPv4 != true { + t.Errorf("[%d] AddIPAddr didn't save IPAddr type: got %v, expected %v", i, p.hasIPv4, true) + } + } else if len(tt.IP) == net.IPv6len { + if p.hasIPv6 != true { + t.Errorf("[%d] AddIPAddr didn't save IPAddr type: got %v, expected %v", i, p.hasIPv6, true) + } + } else { + t.Errorf("[%d] AddIPAddr encountered an unexpected error", i) + } + } +} + func TestRemoveIP(t *testing.T) { p := NewPinger() @@ -260,6 +341,39 @@ func TestRunLoop(t *testing.T) { } } +func TestErr(t *testing.T) { + invalidSource := "192.0.2" + + p := NewPinger() + p.ctx = newContext() + + _ = p.listen("ip4:icmp", invalidSource) + if p.Err() == nil { + t.Errorf("Err should return an error but nothing") + } +} + +func TestListen(t *testing.T) { + noSource := "" + invalidSource := "192.0.2" + + p := NewPinger() + p.ctx = newContext() + + conn := p.listen("ip4:icmp", noSource) + if conn == nil { + t.Errorf("listen failed: %v", p.Err()) + } else { + conn.Close() + } + + conn = p.listen("ip4:icmp", invalidSource) + if conn != nil { + t.Errorf("listen should return nothing but something: %v", conn) + conn.Close() + } +} + func TestTimeToBytes(t *testing.T) { // 2009-11-10 23:00:00 +0000 UTC = 1257894000000000000 expect := []byte{0x11, 0x74, 0xef, 0xed, 0xab, 0x18, 0x60, 0x00} diff --git a/_third_party/github.com/ugorji/go/codec/binc.go b/_third_party/github.com/ugorji/go/codec/binc.go index 251609e654..645376479a 100644 --- a/_third_party/github.com/ugorji/go/codec/binc.go +++ b/_third_party/github.com/ugorji/go/codec/binc.go @@ -5,6 +5,7 @@ package codec import ( "math" + "reflect" "time" ) @@ -905,5 +906,9 @@ func (h *BincHandle) newDecDriver(d *Decoder) decDriver { return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes} } +func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + var _ decDriver = (*bincDecDriver)(nil) var _ encDriver = (*bincEncDriver)(nil) diff --git a/_third_party/github.com/ugorji/go/codec/cbor.go b/_third_party/github.com/ugorji/go/codec/cbor.go index c3b88da202..8b6e13a89e 100644 --- a/_third_party/github.com/ugorji/go/codec/cbor.go +++ b/_third_party/github.com/ugorji/go/codec/cbor.go @@ -3,7 +3,10 @@ package codec -import "math" +import ( + "math" + "reflect" +) const ( cborMajorUint byte = iota @@ -158,7 +161,11 @@ func (e *cborEncDriver) EncodeSymbol(v string) { } func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - e.encLen(cborBaseBytes, len(v)) + if c == c_RAW { + e.encLen(cborBaseBytes, len(v)) + } else { + e.encLen(cborBaseString, len(v)) + } e.w.writeb(v) } @@ -562,5 +569,9 @@ func (h *CborHandle) newDecDriver(d *Decoder) decDriver { return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes} } +func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{i: ext}) +} + var _ decDriver = (*cborDecDriver)(nil) var _ encDriver =
(*cborEncDriver)(nil) diff --git a/_third_party/github.com/ugorji/go/codec/gen.go b/_third_party/github.com/ugorji/go/codec/gen.go index 64acfa3eba..6c94f9c846 100644 --- a/_third_party/github.com/ugorji/go/codec/gen.go +++ b/_third_party/github.com/ugorji/go/codec/gen.go @@ -12,8 +12,10 @@ import ( "io" "io/ioutil" "math/rand" + "os" "reflect" "regexp" + "sort" "strconv" "strings" "sync" @@ -164,7 +166,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, typ ...ref is: make(map[reflect.Type]struct{}), tm: make(map[reflect.Type]struct{}), ts: []reflect.Type{}, - bp: typ[0].PkgPath(), + bp: genImportPath(typ[0]), xs: uid, } if x.xs == "" { @@ -173,11 +175,11 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, typ ...ref } // gather imports first: - x.cp = reflect.TypeOf(x).PkgPath() + x.cp = genImportPath(reflect.TypeOf(x)) x.imn[x.cp] = genCodecPkg for _, t := range typ { - // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", t.PkgPath(), t.Name()) - if t.PkgPath() != x.bp { + // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + if genImportPath(t) != x.bp { panic(genAllTypesSamePkgErr) } x.genRefPkgs(t) @@ -201,7 +203,13 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, typ ...ref x.cpfx = genCodecPkg + "." x.linef("%s \"%s\"", genCodecPkg, x.cp) } + // use a sorted set of im keys, so that we can get consistent output + imKeys := make([]string, 0, len(x.im)) for k, _ := range x.im { + imKeys = append(imKeys, k) + } + sort.Strings(imKeys) + for _, k := range imKeys { // for k, _ := range x.im { x.linef("%s \"%s\"", x.imn[k], k) } // add required packages @@ -219,8 +227,8 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, typ ...ref x.line("const (") x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8)) x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW)) - x.linef("codecSelverValueTypeArray%s = %v", x.xs, int64(valueTypeArray)) - x.linef("codecSelverValueTypeMap%s = %v", x.xs, int64(valueTypeMap)) + x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray)) + x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap)) x.line(")") x.line("var (") x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())") @@ -247,7 +255,9 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, typ ...ref x.linef("}") x.line("if false { // reference the types, but skip this branch at build/run time") var n int - for k, t := range x.im { + // for k, t := range x.im { + for _, k := range imKeys { + t := x.im[k] x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) n++ } @@ -337,9 +347,9 @@ func (x *genRunner) genRefPkgs(t reflect.Type) { if _, ok := x.is[t]; ok { return } - // fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", t.PkgPath(), t.Name()) + // fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) x.is[t] = struct{}{} - tpkg, tname := t.PkgPath(), t.Name() + tpkg, tname := genImportPath(t), t.Name() if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { if _, ok := x.im[tpkg]; !ok { x.im[tpkg] = t @@ -429,10 +439,10 @@ func (x *genRunner) genTypeName(t reflect.Type) (n string) { func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { if t.Name() == "" { return t.String() - } else if t.PkgPath() == "" || t.PkgPath() == x.tc.PkgPath() { + } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { return t.Name() } else { - 
return x.imn[t.PkgPath()] + "." + t.Name() + return x.imn[genImportPath(t)] + "." + t.Name() // return t.String() // best way to get the package name inclusive } } @@ -644,7 +654,7 @@ func (x *genRunner) enc(varname string, t reflect.Type) { x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname) } // only check for extensions if the type is named, and has a packagePath. - if t.PkgPath() != "" && t.Name() != "" { + if genImportPath(t) != "" && t.Name() != "" { // first check if extensions are configued, before doing the interface conversion x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname) } @@ -1056,7 +1066,7 @@ func (x *genRunner) dec(varname string, t reflect.Type) { x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname) } // only check for extensions if the type is named, and has a packagePath. - if t.PkgPath() != "" && t.Name() != "" { + if genImportPath(t) != "" && t.Name() != "" { // first check if extensions are configued, before doing the interface conversion x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname) } @@ -1453,7 +1463,7 @@ func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { // if container is map // x.line("if z.DecContainerIsMap() { ") i := x.varsfx() - x.line("if r.IsContainerType(codecSelverValueTypeMap" + x.xs + ") {") + x.line("if r.IsContainerType(codecSelferValueTypeMap" + x.xs + ") {") x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") x.linef("if %sl%s == 0 {", genTempVarPfx, i) x.line("r.ReadEnd()") @@ -1470,7 +1480,7 @@ func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { // else if container is array // x.line("} else if z.DecContainerIsArray() { ") - x.line("} else if r.IsContainerType(codecSelverValueTypeArray" + x.xs + ") {") + x.line("} else if r.IsContainerType(codecSelferValueTypeArray" + x.xs + ") {") x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") x.linef("if %sl%s == 0 {", genTempVarPfx, i) x.line("r.ReadEnd()") @@ -1514,6 +1524,29 @@ func (x *genV) MethodNamePfx(prefix string, prim bool) string { } +var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" + +// genImportPath returns import path of a non-predeclared named typed, or an empty string otherwise. +// +// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled, +// where PkgPath returns the full path, including the vendoring pre-fix that should have been stripped. +// We strip it here. +func genImportPath(t reflect.Type) (s string) { + s = t.PkgPath() + if genCheckVendor { + // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. + // if s contains /vendor/ OR startsWith vendor/, then return everything after it. 
+ const vendorStart = "vendor/" + const vendorInline = "/vendor/" + if i := strings.LastIndex(s, vendorInline); i >= 0 { + s = s[i+len(vendorInline):] + } else if strings.HasPrefix(s, vendorStart) { + s = s[len(vendorStart):] + } + } + return +} + func genNonPtr(t reflect.Type) reflect.Type { for t.Kind() == reflect.Ptr { t = t.Elem() @@ -1538,7 +1571,7 @@ func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) { } tstr := t.String() if tn := t.Name(); tn != "" { - if tRef != nil && t.PkgPath() == tRef.PkgPath() { + if tRef != nil && genImportPath(t) == genImportPath(tRef) { return ptrPfx + tn } else { if genQNameRegex.MatchString(tstr) { @@ -1570,7 +1603,7 @@ func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) { if t == intfTyp { return ptrPfx + "Interface" } else { - if tRef != nil && t.PkgPath() == tRef.PkgPath() { + if tRef != nil && genImportPath(t) == genImportPath(tRef) { if t.Name() != "" { return ptrPfx + t.Name() } else { diff --git a/_third_party/github.com/ugorji/go/codec/helper.go b/_third_party/github.com/ugorji/go/codec/helper.go index 8b76e8e48f..d04c01cddb 100644 --- a/_third_party/github.com/ugorji/go/codec/helper.go +++ b/_third_party/github.com/ugorji/go/codec/helper.go @@ -309,32 +309,41 @@ type RawExt struct { Value interface{} } -// Ext handles custom (de)serialization of custom types / extensions. -type Ext interface { +// BytesExt handles custom (de)serialization of types to/from []byte. +// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. +type BytesExt interface { // WriteExt converts a value to a []byte. - // It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. WriteExt(v interface{}) []byte // ReadExt updates a value from a []byte. - // It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. ReadExt(dst interface{}, src []byte) +} +// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. +// The Encoder or Decoder will then handle the further (de)serialization of that known type. +// +// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. +type InterfaceExt interface { // ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64. - // It is used by codecs (e.g. cbor) which use the format to do custom serialization of the types. ConvertExt(v interface{}) interface{} // UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time. - // It is used by codecs (e.g. cbor) which use the format to do custom serialization of the types. UpdateExt(dst interface{}, src interface{}) } -// bytesExt is a wrapper implementation to support former AddExt exported method. -type bytesExt struct { +// Ext handles custom (de)serialization of custom types / extensions. +type Ext interface { + BytesExt + InterfaceExt +} + +// addExtWrapper is a wrapper implementation to support former AddExt exported method. 
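With the former monolithic Ext split into BytesExt (binc, msgpack, simple) and InterfaceExt (cbor, json), registration now goes through the per-handle SetBytesExt/SetInterfaceExt methods added in this patch. A sketch of an InterfaceExt mapping time.Time to int64 nanoseconds; timeExt is hypothetical, and the int64 assertion in UpdateExt assumes the decoder hands back the same shape ConvertExt produced:

    package main

    import (
    	"reflect"
    	"time"

    	"bosun.org/_third_party/github.com/ugorji/go/codec"
    )

    // timeExt is a hypothetical InterfaceExt mapping time.Time to int64 nanoseconds.
    type timeExt struct{}

    func (timeExt) ConvertExt(v interface{}) interface{} {
    	return v.(time.Time).UnixNano()
    }

    func (timeExt) UpdateExt(dst interface{}, src interface{}) {
    	// Assumption: the decoder returns the int64 written by ConvertExt.
    	*(dst.(*time.Time)) = time.Unix(0, src.(int64)).UTC()
    }

    func main() {
    	var h codec.CborHandle
    	// Tag 1 is illustrative; SetInterfaceExt is the method added in this patch.
    	if err := h.SetInterfaceExt(reflect.TypeOf(time.Time{}), 1, timeExt{}); err != nil {
    		panic(err)
    	}
    }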
+type addExtWrapper struct { encFn func(reflect.Value) ([]byte, error) decFn func(reflect.Value, []byte) error } -func (x bytesExt) WriteExt(v interface{}) []byte { +func (x addExtWrapper) WriteExt(v interface{}) []byte { // fmt.Printf(">>>>>>>>>> WriteExt: %T, %v\n", v, v) bs, err := x.encFn(reflect.ValueOf(v)) if err != nil { @@ -343,21 +352,57 @@ return bs } -func (x bytesExt) ReadExt(v interface{}, bs []byte) { +func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { // fmt.Printf(">>>>>>>>>> ReadExt: %T, %v\n", v, v) if err := x.decFn(reflect.ValueOf(v), bs); err != nil { panic(err) } } -func (x bytesExt) ConvertExt(v interface{}) interface{} { +func (x addExtWrapper) ConvertExt(v interface{}) interface{} { return x.WriteExt(v) } -func (x bytesExt) UpdateExt(dest interface{}, v interface{}) { +func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { x.ReadExt(dest, v.([]byte)) } +type setExtWrapper struct { + b BytesExt + i InterfaceExt +} + +func (x *setExtWrapper) WriteExt(v interface{}) []byte { + if x.b == nil { + panic("BytesExt.WriteExt is not supported") + } + return x.b.WriteExt(v) + } + +func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) { + if x.b == nil { + panic("BytesExt.ReadExt is not supported") + + } + x.b.ReadExt(v, bs) +} + +func (x *setExtWrapper) ConvertExt(v interface{}) interface{} { + if x.i == nil { + panic("InterfaceExt.ConvertExt is not supported") + + } + return x.i.ConvertExt(v) +} + +func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) { + if x.i == nil { + panic("InterfaceExt.UpdateExt is not supported") + + } + x.i.UpdateExt(dest, v) +} + // type errorString string // func (x errorString) Error() string { return string(x) } @@ -412,7 +457,7 @@ type extTypeTagFn struct { type extHandle []*extTypeTagFn -// DEPRECATED: AddExt is deprecated in favor of SetExt. It exists for compatibility only. +// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. // // AddExt registes an encode and decode function for a reflect.Type. // AddExt internally calls SetExt. @@ -424,10 +469,10 @@ func (o *extHandle) AddExt( if encfn == nil || decfn == nil { return o.SetExt(rt, uint64(tag), nil) } - return o.SetExt(rt, uint64(tag), bytesExt{encfn, decfn}) + return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn}) } -// SetExt registers a tag and Ext for a reflect.Type. +// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. // // Note that the type must be a named type, and specifically not // a pointer or Interface. An error is returned if that is not honored. diff --git a/_third_party/github.com/ugorji/go/codec/json.go b/_third_party/github.com/ugorji/go/codec/json.go index f2f7bceb84..ee134ac667 100644 --- a/_third_party/github.com/ugorji/go/codec/json.go +++ b/_third_party/github.com/ugorji/go/codec/json.go @@ -3,8 +3,9 @@ package codec -// This json support uses base64 encoding for bytes, because you cannot +// By default, this json support uses base64 encoding for bytes, because you cannot // store and read any arbitrary string in json (only unicode). +// However, the user can configure how to encode/decode bytes. // // This library specifically supports UTF-8 for encoding and decoding only.
// @@ -36,6 +37,7 @@ import ( "bytes" "encoding/base64" "fmt" + "reflect" "strconv" "unicode/utf16" "unicode/utf8" @@ -167,6 +169,7 @@ type jsonEncDriver struct { h *JsonHandle b [64]byte // scratch bs []byte // scratch + se setExtWrapper s jsonStack noBuiltInTypes } @@ -282,6 +285,11 @@ func (e *jsonEncDriver) EncodeSymbol(v string) { } func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + // if encoding raw bytes and RawBytesExt is configured, use it to encode + if c == c_RAW && e.se.i != nil { + e.EncodeExt(v, 0, &e.se, e.e) + return + } if c := e.s.sc.sep(); c != 0 { e.w.writen1(c) } @@ -476,6 +484,8 @@ type jsonDecDriver struct { wsSkipped bool // whitespace skipped + se setExtWrapper + s jsonStack n jsonNum @@ -876,6 +886,12 @@ func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxta } func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. + if !isstring && d.se.i != nil { + bsOut = bs + d.DecodeExt(&bsOut, 0, &d.se) + return + } if c := d.s.sc.sep(); c != 0 { d.expectChar(c) } @@ -1053,7 +1069,8 @@ func (d *jsonDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurthe // // Json is comprehensively supported: // - decodes numbers into interface{} as int, uint or float64 -// - encodes and decodes []byte using base64 Std Encoding +// - configurable way to encode/decode []byte . +// by default, encodes and decodes []byte using base64 Std Encoding // - UTF-8 support for encoding and decoding // // It has better performance than the json library in the standard library, @@ -1067,19 +1084,29 @@ func (d *jsonDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurthe type JsonHandle struct { BasicHandle textEncodingType + // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. + // If not configured, raw bytes are encoded to/from base64 text. 
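A sketch of RawBytesExt in action, routing []byte through hex text instead of the default base64; hexBytesExt is hypothetical, and the string assertion in UpdateExt assumes the JSON decoder yields the decoded text value:

    package main

    import (
    	"bytes"
    	"encoding/hex"
    	"fmt"

    	"bosun.org/_third_party/github.com/ugorji/go/codec"
    )

    // hexBytesExt is a hypothetical InterfaceExt encoding []byte as hex text.
    type hexBytesExt struct{}

    func (hexBytesExt) ConvertExt(v interface{}) interface{} {
    	return hex.EncodeToString(v.([]byte))
    }

    func (hexBytesExt) UpdateExt(dst interface{}, src interface{}) {
    	// Assumption: the JSON decoder hands the hex text back as a string.
    	b, err := hex.DecodeString(src.(string))
    	if err != nil {
    		panic(err)
    	}
    	*(dst.(*[]byte)) = b
    }

    func main() {
    	h := &codec.JsonHandle{}
    	h.RawBytesExt = hexBytesExt{}
    	var buf bytes.Buffer
    	if err := codec.NewEncoder(&buf, h).Encode([]byte{0xde, 0xad}); err != nil {
    		panic(err)
    	}
    	fmt.Println(buf.String()) // expected: "dead" rather than base64 "3q0="
    }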
+ RawBytesExt InterfaceExt } func (h *JsonHandle) newEncDriver(e *Encoder) encDriver { - return &jsonEncDriver{e: e, w: e.w, h: h} + hd := jsonEncDriver{e: e, w: e.w, h: h} + hd.se.i = h.RawBytesExt + return &hd } func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} hd := jsonDecDriver{d: d, r: d.r, h: h} + hd.se.i = h.RawBytesExt hd.n.bytes = d.b[:] return &hd } +func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{i: ext}) +} + var jsonEncodeTerminate = []byte{' '} func (h *JsonHandle) rpcEncodeTerminate() []byte { diff --git a/_third_party/github.com/ugorji/go/codec/msgpack.go b/_third_party/github.com/ugorji/go/codec/msgpack.go index 7c9f8e6782..fd5f3895d8 100644 --- a/_third_party/github.com/ugorji/go/codec/msgpack.go +++ b/_third_party/github.com/ugorji/go/codec/msgpack.go @@ -24,6 +24,7 @@ import ( "io" "math" "net/rpc" + "reflect" ) const ( @@ -726,6 +727,10 @@ func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { return &msgpackDecDriver{d: d, r: d.r, h: h, br: d.bytes} } +func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + //-------------------------------------------------- type msgpackSpecRpcCodec struct { diff --git a/_third_party/github.com/ugorji/go/codec/simple.go b/_third_party/github.com/ugorji/go/codec/simple.go index e73beee72c..dfc04eab0d 100644 --- a/_third_party/github.com/ugorji/go/codec/simple.go +++ b/_third_party/github.com/ugorji/go/codec/simple.go @@ -3,7 +3,10 @@ package codec -import "math" +import ( + "math" + "reflect" +) const ( _ uint8 = iota @@ -501,5 +504,9 @@ func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver { return &simpleDecDriver{d: d, r: d.r, h: h, br: d.bytes} } +func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + var _ decDriver = (*simpleDecDriver)(nil) var _ encDriver = (*simpleEncDriver)(nil) diff --git a/_third_party/github.com/ugorji/go/codec/tests.sh b/_third_party/github.com/ugorji/go/codec/tests.sh index f14a4be96d..54b89f9a49 100755 --- a/_third_party/github.com/ugorji/go/codec/tests.sh +++ b/_third_party/github.com/ugorji/go/codec/tests.sh @@ -24,17 +24,18 @@ _run() { esac done # shift $((OPTIND-1)) - echo ">>>>>>> tags: $ztags" + printf '............. 
TAGS: %s .............\n' "$ztags" + # echo ">>>>>>> TAGS: $ztags" OPTIND=1 while getopts "xurtcinsvg" flag do case "x$flag" in - 'xt') echo ">>>>>>> REGULAR "; go test "-tags=$ztags" "$zverbose" ; sleep 2 ;; - 'xc') echo ">>>>>>> CANONICAL "; go test "-tags=$ztags" "$zverbose" -tc; sleep 2 ;; - 'xi') echo ">>>>>>> I/O "; go test "-tags=$ztags" "$zverbose" -ti; sleep 2 ;; - 'xn') echo ">>>>>>> NO_SYMBOLS "; go test "-tags=$ztags" "$zverbose" -tn; sleep 2 ;; - 'xs') echo ">>>>>>> TO_ARRAY "; go test "-tags=$ztags" "$zverbose" -ts; sleep 2 ;; + 'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" "$zverbose" ; sleep 2 ;; + 'xc') printf ">>>>>>> CANONICAL : "; go test "-tags=$ztags" "$zverbose" -tc; sleep 2 ;; + 'xi') printf ">>>>>>> I/O : "; go test "-tags=$ztags" "$zverbose" -ti; sleep 2 ;; + 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" "$zverbose" -tn; sleep 2 ;; + 'xs') printf ">>>>>>> TO_ARRAY : "; go test "-tags=$ztags" "$zverbose" -ts; sleep 2 ;; *) ;; esac done @@ -43,7 +44,7 @@ _run() { OPTIND=1 } -echo ">>>>>>> RUNNING VARIATIONS OF TESTS" +# echo ">>>>>>> RUNNING VARIATIONS OF TESTS" if [[ "x$@" = x ]]; then # r, x, g, gu _run "-rtcins" diff --git a/_third_party/github.com/vdobler/chart/imgg/image.go b/_third_party/github.com/vdobler/chart/imgg/image.go index fb80031d7a..a47a768e27 100644 --- a/_third_party/github.com/vdobler/chart/imgg/image.go +++ b/_third_party/github.com/vdobler/chart/imgg/image.go @@ -1,17 +1,19 @@ package imgg import ( - "bosun.org/_third_party/code.google.com/p/draw2d/draw2d" - "bosun.org/_third_party/code.google.com/p/freetype-go/freetype" - "bosun.org/_third_party/code.google.com/p/freetype-go/freetype/raster" - "bosun.org/_third_party/code.google.com/p/freetype-go/freetype/truetype" - "bosun.org/_third_party/code.google.com/p/graphics-go/graphics" - "bosun.org/_third_party/github.com/vdobler/chart" "image" "image/color" - "image/draw" "log" "math" + + "bosun.org/_third_party/github.com/golang/freetype" + "bosun.org/_third_party/github.com/golang/freetype/truetype" + "bosun.org/_third_party/github.com/llgcode/draw2d" + "bosun.org/_third_party/github.com/llgcode/draw2d/draw2dimg" + "bosun.org/_third_party/github.com/vdobler/chart" + "golang.org/x/image/draw" + "golang.org/x/image/math/f64" + "golang.org/x/image/math/fixed" ) var ( @@ -43,7 +45,7 @@ type ImageGraphics struct { // If fontsize is empty useful default are used. func New(width, height int, bgcol color.RGBA, font *truetype.Font, fontsize map[chart.FontSize]float64) *ImageGraphics { img := image.NewRGBA(image.Rect(0, 0, width, height)) - gc := draw2d.NewGraphicContext(img) + gc := draw2dimg.NewGraphicContext(img) gc.SetLineJoin(draw2d.BevelJoin) gc.SetLineCap(draw2d.SquareCap) gc.SetStrokeColor(image.Black) @@ -64,7 +66,7 @@ func New(width, height int, bgcol color.RGBA, font *truetype.Font, fontsize map[ // area starting at (x,y) on the provided image img. The rest of the parameters // are the same as in New(). 
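The chart image backend now goes through llgcode/draw2d's draw2dimg subpackage rather than the retired code.google.com draw2d entry point; the graphic-context setup used by New and AddTo above reduces to this sketch:

    package main

    import (
    	"image"

    	"bosun.org/_third_party/github.com/llgcode/draw2d"
    	"bosun.org/_third_party/github.com/llgcode/draw2d/draw2dimg"
    )

    func main() {
    	img := image.NewRGBA(image.Rect(0, 0, 400, 300))
    	gc := draw2dimg.NewGraphicContext(img) // replaces draw2d.NewGraphicContext(img)
    	gc.SetLineJoin(draw2d.BevelJoin)
    	gc.SetLineCap(draw2d.SquareCap)
    	gc.SetStrokeColor(image.Black)
    	gc.SetFillColor(image.White)
    }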
func AddTo(img *image.RGBA, x, y, width, height int, bgcol color.RGBA, font *truetype.Font, fontsize map[chart.FontSize]float64) *ImageGraphics { - gc := draw2d.NewGraphicContext(img) + gc := draw2dimg.NewGraphicContext(img) gc.SetLineJoin(draw2d.BevelJoin) gc.SetLineCap(draw2d.SquareCap) gc.SetStrokeColor(image.Black) @@ -102,18 +104,13 @@ func (ig *ImageGraphics) TextLen(s string, font chart.Font) int { c.SetFont(ig.font) fontsize := ig.relFontsizeToPixel(font.Size) c.SetFontSize(fontsize) - scale := int32(fontsize * dpi * (64.0 / 72.0)) - var p raster.Point - prev, hasPrev := truetype.Index(0), false - for _, rune := range s { - index := ig.font.Index(rune) - if hasPrev { - p.X += raster.Fix32(ig.font.Kerning(scale, prev, index)) << 2 - } - p.X += raster.Fix32(ig.font.HMetric(scale, index).AdvanceWidth) << 2 - prev, hasPrev = index, true + + // really draw it + width, err := c.DrawString(s, freetype.Pt(0, 0)) + if err != nil { + return 10 * len(s) // BUG } - return int((p.X + 127) / 256) + return int(width.X+32)>>6 + 1 } func (ig *ImageGraphics) setStyle(style chart.Style) { @@ -176,7 +173,14 @@ func (ig *ImageGraphics) Text(x, y int, t string, align string, rot int, f chart align = "c" + align } - textImage := ig.textBox(t, f) + var col color.Color + if f.Color != nil { + col = f.Color + } else { + col = color.RGBA{0, 0, 0, 0xff} + } + + textImage := ig.textBox(t, f, col) bounds := textImage.Bounds() w, h := bounds.Dx(), bounds.Dy() var centerX, centerY int @@ -185,33 +189,31 @@ func (ig *ImageGraphics) Text(x, y int, t string, align string, rot int, f chart alpha := float64(rot) / 180 * math.Pi cos := math.Cos(alpha) sin := math.Sin(alpha) - hs, hc := float64(h)*sin, float64(h)*cos - ws, wc := float64(w)*sin, float64(w)*cos - W := int(math.Ceil(hs + wc)) - H := int(math.Ceil(hc + ws)) - rotated := image.NewAlpha(image.Rect(0, 0, W, H)) - graphics.Rotate(rotated, textImage, &graphics.RotateOptions{-alpha}) - textImage = rotated - centerX, centerY = W/2, H/2 - - switch align { - case "bl": - centerX, centerY = int(hs), H - case "bc": - centerX, centerY = W-int(wc/2), int(ws/2) - case "br": - centerX, centerY = W, int(hc) - case "tl": - centerX, centerY = 0, H-int(hc) - case "tc": - centerX, centerY = int(ws/2), H-int(ws/2) - case "tr": - centerX, centerY = W-int(hs), 0 - case "cl": - centerX, centerY = int(hs/2), H-int(hc/2) - case "cr": - centerX, centerY = W-int(hs/2), int(hc/2) + + ax, ay := float64(w), float64(h) // anchor point + switch align[0] { + case 'b': + case 'c': + ay /= 2 + case 't': + ay = 0 + } + switch align[1] { + case 'l': + ax = 0 + case 'c': + ax /= 2 + case 'r': } + dx := float64(ax)*cos + float64(ay)*sin + dy := -float64(ax)*sin + float64(ay)*cos + trans := f64.Aff3{ + +cos, +sin, float64(x+ig.x0) - dx, + -sin, +cos, float64(y+ig.y0) - dy, + } + draw.BiLinear.Transform(ig.Image, trans, + textImage, textImage.Bounds(), draw.Over, nil) + return } else { centerX, centerY = w/2, h/2 switch align[0] { @@ -235,61 +237,47 @@ func (ig *ImageGraphics) Text(x, y int, t string, align string, rot int, f chart x += ig.x0 y += ig.y0 - var col color.Color - if f.Color != nil { - col = f.Color - } else { - col = color.NRGBA{0, 0, 0, 0xff} - } tcol := image.NewUniform(col) - draw.DrawMask(ig.Image, image.Rect(x, y, x+w, y+h), tcol, image.ZP, textImage, textImage.Bounds().Min, draw.Over) } // textBox renders t into a tight fitting image -func (ig *ImageGraphics) textBox(t string, font chart.Font) image.Image { +func (ig *ImageGraphics) textBox(t string, font chart.Font, textCol 
color.Color) image.Image { // Initialize the context. - fg := image.NewUniform(color.Alpha{0xff}) - bg := image.NewUniform(color.Alpha{0x00}) + bg := image.NewUniform(color.Alpha{0}) + fg := image.NewUniform(textCol) width := ig.TextLen(t, font) size := ig.relFontsizeToPixel(font.Size) - bb := ig.font.Bounds(int32(size)) - // TODO: Ugly, manual, heuristic hack to get "nicer" text for common latin characters - bb.YMin++ - if size >= 15 { - bb.YMin++ - bb.YMax-- - } - if size >= 20 { - bb.YMax-- - } - if size >= 25 { - bb.YMin++ - bb.YMax-- - } - - dy := int(bb.YMax - bb.YMin) - canvas := image.NewAlpha(image.Rect(0, 0, width, dy)) - draw.Draw(canvas, canvas.Bounds(), bg, image.ZP, draw.Src) c := freetype.NewContext() c.SetDPI(dpi) c.SetFont(ig.font) c.SetFontSize(size) - c.SetClip(canvas.Bounds()) + bb := ig.font.Bounds(c.PointToFixed(float64(size))) + bbDelta := bb.Max.Sub(bb.Min) + + height := int(bbDelta.Y+32) >> 6 + canvas := image.NewRGBA(image.Rect(0, 0, width, height)) + draw.Draw(canvas, canvas.Bounds(), bg, image.ZP, draw.Src) c.SetDst(canvas) c.SetSrc(fg) - + c.SetClip(canvas.Bounds()) // Draw the text. - pt := freetype.Pt(0, dy+int(bb.YMin)-1) - extent, err := c.DrawString(t, pt) + extent, err := c.DrawString(t, fixed.Point26_6{X: 0, Y: bb.Max.Y}) if err != nil { log.Println(err) return nil } - // log.Printf("text %q, extent: %v", t, extent) - return canvas.SubImage(image.Rect(0, 0, int((extent.X+127)/256), dy)) + + // Ugly heuristic hack: the font bounds are quite high, resulting in a white top border: trim it. + topskip := 1 + if size > 20 { + topskip = 3 + } else if size > 15 { + topskip = 2 + } + return canvas.SubImage(image.Rect(0, topskip, int(extent.X)>>6, height)) } func (ig *ImageGraphics) paint(x, y int, R, G, B uint32, alpha uint32) { diff --git a/_third_party/github.com/vdobler/chart/style.go b/_third_party/github.com/vdobler/chart/style.go index 191e290ed0..2da2b7f094 100644 --- a/_third_party/github.com/vdobler/chart/style.go +++ b/_third_party/github.com/vdobler/chart/style.go @@ -74,7 +74,7 @@ func init() { type Style struct { Symbol int // 0: no symbol; any codepoint: this symbol SymbolColor color.Color // color of symbol - SymbolSize float64 // ccaling factor of symbol + SymbolSize float64 // scaling factor of symbol LineStyle LineStyle // SolidLine, DashedLine, DottedLine, .... see below LineColor color.Color // color of line LineWidth int // 0: no line, >=1 width of line in pixel @@ -100,7 +100,7 @@ func (ps PlotStyle) undefined() bool { // LineStyle describes the different types of lines. type LineStyle int -// The supported line styles +// The supported line styles. const ( SolidLine LineStyle = iota // ---------------------- DashedLine // ---- ---- ---- ---- @@ -110,14 +110,14 @@ const ( LongDotLine ) -// Font describes a font +// Font describes a font. type Font struct { Name string // "": default Size FontSize // relative size of font to default in output graphics Color color.Color // "": default, other: use this } -// FontSize is the reletive font size used in chart. Five sizes seem enough. +// FontSize is the relative font size used in chart. Five sizes seem enough.
type FontSize int const ( diff --git a/_third_party/golang.org/x/crypto/bcrypt/bcrypt.go b/_third_party/golang.org/x/crypto/bcrypt/bcrypt.go index 235585a0dc..2bb0445e31 100644 --- a/_third_party/golang.org/x/crypto/bcrypt/bcrypt.go +++ b/_third_party/golang.org/x/crypto/bcrypt/bcrypt.go @@ -8,11 +8,11 @@ package bcrypt // import "bosun.org/_third_party/golang.org/x/crypto/bcrypt" // The code is a port of Provos and Mazières's C implementation. import ( - "bosun.org/_third_party/golang.org/x/crypto/blowfish" "crypto/rand" "crypto/subtle" "errors" "fmt" + "golang.org/x/crypto/blowfish" "io" "strconv" ) diff --git a/_third_party/golang.org/x/net/html/example_test.go b/_third_party/golang.org/x/net/html/example_test.go index e1a87d4dcf..0b06ed7730 100644 --- a/_third_party/golang.org/x/net/html/example_test.go +++ b/_third_party/golang.org/x/net/html/example_test.go @@ -10,7 +10,7 @@ import ( "log" "strings" - "bosun.org/_third_party/golang.org/x/net/html" + "golang.org/x/net/html" ) func ExampleParse() { diff --git a/_third_party/golang.org/x/net/html/node.go b/_third_party/golang.org/x/net/html/node.go index e6d2de2a98..26b657aec8 100644 --- a/_third_party/golang.org/x/net/html/node.go +++ b/_third_party/golang.org/x/net/html/node.go @@ -5,7 +5,7 @@ package html import ( - "bosun.org/_third_party/golang.org/x/net/html/atom" + "golang.org/x/net/html/atom" ) // A NodeType is the type of a Node. diff --git a/_third_party/golang.org/x/net/html/parse.go b/_third_party/golang.org/x/net/html/parse.go index 8ff072b3d7..be4b2bf5aa 100644 --- a/_third_party/golang.org/x/net/html/parse.go +++ b/_third_party/golang.org/x/net/html/parse.go @@ -10,7 +10,7 @@ import ( "io" "strings" - a "bosun.org/_third_party/golang.org/x/net/html/atom" + a "golang.org/x/net/html/atom" ) // A parser implements the HTML5 parsing algorithm: diff --git a/_third_party/golang.org/x/net/html/parse_test.go b/_third_party/golang.org/x/net/html/parse_test.go index 7626583c55..7e47d11be8 100644 --- a/_third_party/golang.org/x/net/html/parse_test.go +++ b/_third_party/golang.org/x/net/html/parse_test.go @@ -18,7 +18,7 @@ import ( "strings" "testing" - "bosun.org/_third_party/golang.org/x/net/html/atom" + "golang.org/x/net/html/atom" ) // readParseTest reads a single test case from r. diff --git a/_third_party/golang.org/x/net/html/token.go b/_third_party/golang.org/x/net/html/token.go index b0fce5f69c..893e272a9e 100644 --- a/_third_party/golang.org/x/net/html/token.go +++ b/_third_party/golang.org/x/net/html/token.go @@ -11,7 +11,7 @@ import ( "strconv" "strings" - "bosun.org/_third_party/golang.org/x/net/html/atom" + "golang.org/x/net/html/atom" ) // A TokenType is the type of a Token. diff --git a/_third_party/golang.org/x/net/icmp/endpoint.go b/_third_party/golang.org/x/net/icmp/endpoint.go index e51c6fb4e9..0213d1a134 100644 --- a/_third_party/golang.org/x/net/icmp/endpoint.go +++ b/_third_party/golang.org/x/net/icmp/endpoint.go @@ -10,19 +10,18 @@ import ( "syscall" "time" - "bosun.org/_third_party/golang.org/x/net/ipv4" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" ) var _ net.PacketConn = &PacketConn{} -type ipc interface{} - // A PacketConn represents a packet network endpoint that uses either // ICMPv4 or ICMPv6. 
type PacketConn struct { - c net.PacketConn - ipc // either ipv4.PacketConn or ipv6.PacketConn + c net.PacketConn + p4 *ipv4.PacketConn + p6 *ipv6.PacketConn } func (c *PacketConn) ok() bool { return c != nil && c.c != nil } @@ -33,8 +32,7 @@ func (c *PacketConn) IPv4PacketConn() *ipv4.PacketConn { if !c.ok() { return nil } - p, _ := c.ipc.(*ipv4.PacketConn) - return p + return c.p4 } // IPv6PacketConn returns the ipv6.PacketConn of c. @@ -43,8 +41,7 @@ func (c *PacketConn) IPv6PacketConn() *ipv6.PacketConn { if !c.ok() { return nil } - p, _ := c.ipc.(*ipv6.PacketConn) - return p + return c.p6 } // ReadFrom reads an ICMP message from the connection. @@ -55,11 +52,9 @@ func (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) { // Please be informed that ipv4.NewPacketConn enables // IP_STRIPHDR option by default on Darwin. // See golang.org/issue/9395 for further information. - if runtime.GOOS == "darwin" { - if p, _ := c.ipc.(*ipv4.PacketConn); p != nil { - n, _, peer, err := p.ReadFrom(b) - return n, peer, err - } + if runtime.GOOS == "darwin" && c.p4 != nil { + n, _, peer, err := c.p4.ReadFrom(b) + return n, peer, err } return c.c.ReadFrom(b) } diff --git a/_third_party/golang.org/x/net/icmp/example_test.go b/_third_party/golang.org/x/net/icmp/example_test.go index ea068994b3..1df4ceccdd 100644 --- a/_third_party/golang.org/x/net/icmp/example_test.go +++ b/_third_party/golang.org/x/net/icmp/example_test.go @@ -8,13 +8,22 @@ import ( "log" "net" "os" + "runtime" - "bosun.org/_third_party/golang.org/x/net/icmp" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/icmp" + "golang.org/x/net/ipv6" ) func ExamplePacketConn_nonPrivilegedPing() { + switch runtime.GOOS { + case "darwin": + case "linux": + log.Println("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + log.Println("not supported on", runtime.GOOS) + return + } + c, err := icmp.ListenPacket("udp6", "fe80::1%en0") if err != nil { log.Fatal(err) } @@ -41,7 +50,7 @@ func ExamplePacketConn_nonPrivilegedPing() { if err != nil { log.Fatal(err) } - rm, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]) + rm, err := icmp.ParseMessage(58, rb[:n]) if err != nil { log.Fatal(err) } diff --git a/_third_party/golang.org/x/net/icmp/extension_test.go b/_third_party/golang.org/x/net/icmp/extension_test.go index f8546fb893..0b3f7b9e15 100644 --- a/_third_party/golang.org/x/net/icmp/extension_test.go +++ b/_third_party/golang.org/x/net/icmp/extension_test.go @@ -9,7 +9,7 @@ import ( "reflect" "testing" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) var marshalAndParseExtensionTests = []struct { diff --git a/_third_party/golang.org/x/net/icmp/interface.go b/_third_party/golang.org/x/net/icmp/interface.go index c691a3f147..c7bf8dd1a6 100644 --- a/_third_party/golang.org/x/net/icmp/interface.go +++ b/_third_party/golang.org/x/net/icmp/interface.go @@ -8,7 +8,7 @@ import ( "net" "strings" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) const ( diff --git a/_third_party/golang.org/x/net/icmp/ipv4.go b/_third_party/golang.org/x/net/icmp/ipv4.go index 72309ca41b..a252d730ed 100644 --- a/_third_party/golang.org/x/net/icmp/ipv4.go +++ b/_third_party/golang.org/x/net/icmp/ipv4.go @@ -9,7 +9,7 @@ import ( "runtime" "unsafe" - "bosun.org/_third_party/golang.org/x/net/ipv4" + "golang.org/x/net/ipv4" ) // See
http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. diff --git a/_third_party/golang.org/x/net/icmp/ipv4_test.go b/_third_party/golang.org/x/net/icmp/ipv4_test.go index e0271a7ae7..b05c697394 100644 --- a/_third_party/golang.org/x/net/icmp/ipv4_test.go +++ b/_third_party/golang.org/x/net/icmp/ipv4_test.go @@ -10,7 +10,7 @@ import ( "runtime" "testing" - "bosun.org/_third_party/golang.org/x/net/ipv4" + "golang.org/x/net/ipv4" ) var ( diff --git a/_third_party/golang.org/x/net/icmp/ipv6.go b/_third_party/golang.org/x/net/icmp/ipv6.go index 839f39a419..fe4031a2f2 100644 --- a/_third_party/golang.org/x/net/icmp/ipv6.go +++ b/_third_party/golang.org/x/net/icmp/ipv6.go @@ -7,7 +7,7 @@ package icmp import ( "net" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) const ipv6PseudoHeaderLen = 2*net.IPv6len + 8 diff --git a/_third_party/golang.org/x/net/icmp/listen_posix.go b/_third_party/golang.org/x/net/icmp/listen_posix.go index 710579c4a6..b9f260796e 100644 --- a/_third_party/golang.org/x/net/icmp/listen_posix.go +++ b/_third_party/golang.org/x/net/icmp/listen_posix.go @@ -12,9 +12,9 @@ import ( "runtime" "syscall" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/ipv4" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" ) const sysIP_STRIPHDR = 0x17 // for now only darwin supports this option @@ -89,9 +89,9 @@ func ListenPacket(network, address string) (*PacketConn, error) { } switch proto { case iana.ProtocolICMP: - return &PacketConn{c: c, ipc: ipv4.NewPacketConn(c)}, nil + return &PacketConn{c: c, p4: ipv4.NewPacketConn(c)}, nil case iana.ProtocolIPv6ICMP: - return &PacketConn{c: c, ipc: ipv6.NewPacketConn(c)}, nil + return &PacketConn{c: c, p6: ipv6.NewPacketConn(c)}, nil default: return &PacketConn{c: c}, nil } diff --git a/_third_party/golang.org/x/net/icmp/message.go b/_third_party/golang.org/x/net/icmp/message.go index b6d019aa73..6fd68ab070 100644 --- a/_third_party/golang.org/x/net/icmp/message.go +++ b/_third_party/golang.org/x/net/icmp/message.go @@ -18,9 +18,9 @@ import ( "net" "syscall" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/ipv4" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" ) var ( diff --git a/_third_party/golang.org/x/net/icmp/message_test.go b/_third_party/golang.org/x/net/icmp/message_test.go index 71a6efad60..5d2605f8d1 100644 --- a/_third_party/golang.org/x/net/icmp/message_test.go +++ b/_third_party/golang.org/x/net/icmp/message_test.go @@ -9,10 +9,10 @@ import ( "reflect" "testing" - "bosun.org/_third_party/golang.org/x/net/icmp" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/ipv4" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" ) var marshalAndParseMessageForIPv4Tests = []icmp.Message{ diff --git a/_third_party/golang.org/x/net/icmp/multipart.go b/_third_party/golang.org/x/net/icmp/multipart.go index eb1eb8cdeb..54ac8bc12a 100644 --- a/_third_party/golang.org/x/net/icmp/multipart.go +++ b/_third_party/golang.org/x/net/icmp/multipart.go @@ -4,7 +4,7 @@ package icmp -import "bosun.org/_third_party/golang.org/x/net/internal/iana" +import 
"golang.org/x/net/internal/iana" // multipartMessageBodyDataLen takes b as an original datagram and // exts as extensions, and returns a required length for message body diff --git a/_third_party/golang.org/x/net/icmp/multipart_test.go b/_third_party/golang.org/x/net/icmp/multipart_test.go index 597a676942..9248e475fa 100644 --- a/_third_party/golang.org/x/net/icmp/multipart_test.go +++ b/_third_party/golang.org/x/net/icmp/multipart_test.go @@ -10,10 +10,10 @@ import ( "reflect" "testing" - "bosun.org/_third_party/golang.org/x/net/icmp" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/ipv4" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" ) var marshalAndParseMultipartMessageForIPv4Tests = []icmp.Message{ diff --git a/_third_party/golang.org/x/net/icmp/paramprob.go b/_third_party/golang.org/x/net/icmp/paramprob.go index bac8c0ce5e..f200a7c29c 100644 --- a/_third_party/golang.org/x/net/icmp/paramprob.go +++ b/_third_party/golang.org/x/net/icmp/paramprob.go @@ -4,7 +4,7 @@ package icmp -import "bosun.org/_third_party/golang.org/x/net/internal/iana" +import "golang.org/x/net/internal/iana" // A ParamProb represents an ICMP parameter problem message body. type ParamProb struct { diff --git a/_third_party/golang.org/x/net/icmp/ping_test.go b/_third_party/golang.org/x/net/icmp/ping_test.go index beac8aecde..4ec269284f 100644 --- a/_third_party/golang.org/x/net/icmp/ping_test.go +++ b/_third_party/golang.org/x/net/icmp/ping_test.go @@ -13,11 +13,11 @@ import ( "testing" "time" - "bosun.org/_third_party/golang.org/x/net/icmp" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv4" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" ) func googleAddr(c *icmp.PacketConn, protocol int) (net.Addr, error) { @@ -115,7 +115,7 @@ func doPing(tt pingTest, seq int) error { return err } - if tt.protocol == iana.ProtocolIPv6ICMP { + if tt.network != "udp6" && tt.protocol == iana.ProtocolIPv6ICMP { var f ipv6.ICMPFilter f.SetAll(true) f.Accept(ipv6.ICMPTypeDestinationUnreachable) diff --git a/_third_party/golang.org/x/net/ipv4/control_bsd.go b/_third_party/golang.org/x/net/ipv4/control_bsd.go index a4340647c1..33d8bc8b38 100644 --- a/_third_party/golang.org/x/net/ipv4/control_bsd.go +++ b/_third_party/golang.org/x/net/ipv4/control_bsd.go @@ -11,7 +11,7 @@ import ( "syscall" "unsafe" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) func marshalDst(b []byte, cm *ControlMessage) []byte { diff --git a/_third_party/golang.org/x/net/ipv4/control_pktinfo.go b/_third_party/golang.org/x/net/ipv4/control_pktinfo.go index 525f14210e..444782f397 100644 --- a/_third_party/golang.org/x/net/ipv4/control_pktinfo.go +++ b/_third_party/golang.org/x/net/ipv4/control_pktinfo.go @@ -10,7 +10,7 @@ import ( "syscall" "unsafe" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { diff --git a/_third_party/golang.org/x/net/ipv4/control_unix.go b/_third_party/golang.org/x/net/ipv4/control_unix.go index bbd7a1d4be..3000c52e40 100644 --- 
a/_third_party/golang.org/x/net/ipv4/control_unix.go +++ b/_third_party/golang.org/x/net/ipv4/control_unix.go @@ -11,7 +11,7 @@ import ( "syscall" "unsafe" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { diff --git a/_third_party/golang.org/x/net/ipv4/doc.go b/_third_party/golang.org/x/net/ipv4/doc.go index 73dbafaa57..5e7647cb81 100644 --- a/_third_party/golang.org/x/net/ipv4/doc.go +++ b/_third_party/golang.org/x/net/ipv4/doc.go @@ -42,7 +42,7 @@ // The outgoing packets will be labeled DiffServ assured forwarding // class 1 low drop precedence, known as AF11 packets. // -// if err := ipv4.NewConn(c).SetTOS(DiffServAF11); err != nil { +// if err := ipv4.NewConn(c).SetTOS(0x28); err != nil { // // error handling // } // if _, err := c.Write(data); err != nil { @@ -124,7 +124,7 @@ // // The application can also send both unicast and multicast packets. // -// p.SetTOS(DiffServCS0) +// p.SetTOS(0x0) // p.SetTTL(16) // if _, err := p.WriteTo(data, nil, src); err != nil { // // error handling diff --git a/_third_party/golang.org/x/net/ipv4/example_test.go b/_third_party/golang.org/x/net/ipv4/example_test.go index 0840904220..2fdc6c6042 100644 --- a/_third_party/golang.org/x/net/ipv4/example_test.go +++ b/_third_party/golang.org/x/net/ipv4/example_test.go @@ -12,9 +12,8 @@ import ( "runtime" "time" - "bosun.org/_third_party/golang.org/x/net/icmp" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/ipv4" + "golang.org/x/net/icmp" + "golang.org/x/net/ipv4" ) func ExampleConn_markingTCP() { @@ -32,7 +31,7 @@ func ExampleConn_markingTCP() { go func(c net.Conn) { defer c.Close() p := ipv4.NewConn(c) - if err := p.SetTOS(iana.DiffServAF11); err != nil { + if err := p.SetTOS(0x28); err != nil { // DSCP AF11 log.Fatal(err) } if err := p.SetTTL(128); err != nil { @@ -102,7 +101,7 @@ func ExamplePacketConn_tracingIPPacketRoute() { log.Fatal("no A record found") } - c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolICMP), "0.0.0.0") // ICMP for IPv4 + c, err := net.ListenPacket("ip4:1", "0.0.0.0") // ICMP for IPv4 if err != nil { log.Fatal(err) } @@ -149,7 +148,7 @@ func ExamplePacketConn_tracingIPPacketRoute() { } log.Fatal(err) } - rm, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) + rm, err := icmp.ParseMessage(1, rb[:n]) if err != nil { log.Fatal(err) } @@ -173,7 +172,7 @@ func ExamplePacketConn_tracingIPPacketRoute() { } func ExampleRawConn_advertisingOSPFHello() { - c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolOSPFIGP), "0.0.0.0") // OSPF for IPv4 + c, err := net.ListenPacket("ip4:89", "0.0.0.0") // OSPF for IPv4 if err != nil { log.Fatal(err) } @@ -201,10 +200,10 @@ func ExampleRawConn_advertisingOSPFHello() { iph := &ipv4.Header{ Version: ipv4.Version, Len: ipv4.HeaderLen, - TOS: iana.DiffServCS6, + TOS: 0xc0, // DSCP CS6 TotalLen: ipv4.HeaderLen + len(ospf), TTL: 1, - Protocol: iana.ProtocolOSPFIGP, + Protocol: 89, Dst: allSPFRouters.IP.To4(), } diff --git a/_third_party/golang.org/x/net/ipv4/icmp.go b/_third_party/golang.org/x/net/ipv4/icmp.go index 919fa9b9b9..dbd05cff2c 100644 --- a/_third_party/golang.org/x/net/ipv4/icmp.go +++ b/_third_party/golang.org/x/net/ipv4/icmp.go @@ -4,7 +4,7 @@ package ipv4 -import "bosun.org/_third_party/golang.org/x/net/internal/iana" +import "golang.org/x/net/internal/iana" // An ICMPType represents a type of ICMP message. 
type ICMPType int diff --git a/_third_party/golang.org/x/net/ipv4/icmp_test.go b/_third_party/golang.org/x/net/ipv4/icmp_test.go index b917572add..3324b54df6 100644 --- a/_third_party/golang.org/x/net/ipv4/icmp_test.go +++ b/_third_party/golang.org/x/net/ipv4/icmp_test.go @@ -10,8 +10,8 @@ import ( "runtime" "testing" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv4" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" ) var icmpStringTests = []struct { diff --git a/_third_party/golang.org/x/net/ipv4/multicast_test.go b/_third_party/golang.org/x/net/ipv4/multicast_test.go index 8b89a41860..3f0304833d 100644 --- a/_third_party/golang.org/x/net/ipv4/multicast_test.go +++ b/_third_party/golang.org/x/net/ipv4/multicast_test.go @@ -12,10 +12,10 @@ import ( "testing" "time" - "bosun.org/_third_party/golang.org/x/net/icmp" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv4" + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" ) var packetConnReadWriteMulticastUDPTests = []struct { diff --git a/_third_party/golang.org/x/net/ipv4/multicastlistener_test.go b/_third_party/golang.org/x/net/ipv4/multicastlistener_test.go index 83e2e9d877..e342bf1d90 100644 --- a/_third_party/golang.org/x/net/ipv4/multicastlistener_test.go +++ b/_third_party/golang.org/x/net/ipv4/multicastlistener_test.go @@ -9,8 +9,8 @@ import ( "runtime" "testing" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv4" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" ) var udpMultipleGroupListenerTests = []net.Addr{ diff --git a/_third_party/golang.org/x/net/ipv4/multicastsockopt_test.go b/_third_party/golang.org/x/net/ipv4/multicastsockopt_test.go index 89fe46edee..c76dbe4def 100644 --- a/_third_party/golang.org/x/net/ipv4/multicastsockopt_test.go +++ b/_third_party/golang.org/x/net/ipv4/multicastsockopt_test.go @@ -9,8 +9,8 @@ import ( "runtime" "testing" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv4" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" ) var packetConnMulticastSocketOptionTests = []struct { diff --git a/_third_party/golang.org/x/net/ipv4/readwrite_test.go b/_third_party/golang.org/x/net/ipv4/readwrite_test.go index 80cb700fbf..5e6533ef8b 100644 --- a/_third_party/golang.org/x/net/ipv4/readwrite_test.go +++ b/_third_party/golang.org/x/net/ipv4/readwrite_test.go @@ -11,8 +11,8 @@ import ( "sync" "testing" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv4" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" ) func benchmarkUDPListener() (net.PacketConn, net.Addr, error) { diff --git a/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_unix.go b/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_unix.go index f3a790264d..fefa901e6d 100644 --- a/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_unix.go +++ b/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_unix.go @@ -11,7 +11,7 @@ import ( "os" "unsafe" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) func setsockoptIPMreq(fd, name int, ifi *net.Interface, grp net.IP) error { diff --git a/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_windows.go 
b/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_windows.go index 16c3ea2e7e..431930df75 100644 --- a/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_windows.go +++ b/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_windows.go @@ -10,7 +10,7 @@ import ( "syscall" "unsafe" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) func setsockoptIPMreq(fd syscall.Handle, name int, ifi *net.Interface, grp net.IP) error { diff --git a/_third_party/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go b/_third_party/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go index 6005522d2a..92c8e34cfa 100644 --- a/_third_party/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go +++ b/_third_party/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go @@ -11,7 +11,7 @@ import ( "os" "unsafe" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) func getsockoptIPMreqn(fd, name int) (*net.Interface, error) { diff --git a/_third_party/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go b/_third_party/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go index 16127d079f..6f647bc58a 100644 --- a/_third_party/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go +++ b/_third_party/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go @@ -11,7 +11,7 @@ import ( "os" "unsafe" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) var freebsd32o64 bool diff --git a/_third_party/golang.org/x/net/ipv4/sockopt_unix.go b/_third_party/golang.org/x/net/ipv4/sockopt_unix.go index ca838b0d50..50cdbd81e2 100644 --- a/_third_party/golang.org/x/net/ipv4/sockopt_unix.go +++ b/_third_party/golang.org/x/net/ipv4/sockopt_unix.go @@ -11,7 +11,7 @@ import ( "os" "unsafe" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) func getInt(fd int, opt *sockOpt) (int, error) { diff --git a/_third_party/golang.org/x/net/ipv4/sockopt_windows.go b/_third_party/golang.org/x/net/ipv4/sockopt_windows.go index 3175bfa575..c4c2441ec5 100644 --- a/_third_party/golang.org/x/net/ipv4/sockopt_windows.go +++ b/_third_party/golang.org/x/net/ipv4/sockopt_windows.go @@ -10,7 +10,7 @@ import ( "syscall" "unsafe" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) func getInt(fd syscall.Handle, opt *sockOpt) (int, error) { diff --git a/_third_party/golang.org/x/net/ipv4/unicast_test.go b/_third_party/golang.org/x/net/ipv4/unicast_test.go index ee14fb52ad..255096a8c3 100644 --- a/_third_party/golang.org/x/net/ipv4/unicast_test.go +++ b/_third_party/golang.org/x/net/ipv4/unicast_test.go @@ -12,10 +12,10 @@ import ( "testing" "time" - "bosun.org/_third_party/golang.org/x/net/icmp" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv4" + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" ) func TestPacketConnReadWriteUnicastUDP(t *testing.T) { diff --git a/_third_party/golang.org/x/net/ipv4/unicastsockopt_test.go b/_third_party/golang.org/x/net/ipv4/unicastsockopt_test.go index 428138aded..25606f21da 100644 --- a/_third_party/golang.org/x/net/ipv4/unicastsockopt_test.go +++ b/_third_party/golang.org/x/net/ipv4/unicastsockopt_test.go @@ -9,9 +9,9 @@ import ( "runtime" "testing" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - 
"bosun.org/_third_party/golang.org/x/net/ipv4" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" ) func TestConnUnicastSocketOptions(t *testing.T) { diff --git a/_third_party/golang.org/x/net/ipv6/control_rfc2292_unix.go b/_third_party/golang.org/x/net/ipv6/control_rfc2292_unix.go index cc5649a160..ce201ce363 100644 --- a/_third_party/golang.org/x/net/ipv6/control_rfc2292_unix.go +++ b/_third_party/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -10,7 +10,7 @@ import ( "syscall" "unsafe" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte { diff --git a/_third_party/golang.org/x/net/ipv6/control_rfc3542_unix.go b/_third_party/golang.org/x/net/ipv6/control_rfc3542_unix.go index f71bb81aa3..e55c4aa973 100644 --- a/_third_party/golang.org/x/net/ipv6/control_rfc3542_unix.go +++ b/_third_party/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -10,7 +10,7 @@ import ( "syscall" "unsafe" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) func marshalTrafficClass(b []byte, cm *ControlMessage) []byte { diff --git a/_third_party/golang.org/x/net/ipv6/control_unix.go b/_third_party/golang.org/x/net/ipv6/control_unix.go index a562426587..2af5beb43e 100644 --- a/_third_party/golang.org/x/net/ipv6/control_unix.go +++ b/_third_party/golang.org/x/net/ipv6/control_unix.go @@ -10,7 +10,7 @@ import ( "os" "syscall" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { diff --git a/_third_party/golang.org/x/net/ipv6/doc.go b/_third_party/golang.org/x/net/ipv6/doc.go index 406c0597f8..1e089ce10f 100644 --- a/_third_party/golang.org/x/net/ipv6/doc.go +++ b/_third_party/golang.org/x/net/ipv6/doc.go @@ -42,7 +42,7 @@ // The outgoing packets will be labeled DiffServ assured forwarding // class 1 low drop precedence, known as AF11 packets. // -// if err := ipv6.NewConn(c).SetTrafficClass(DiffServAF11); err != nil { +// if err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil { // // error handling // } // if _, err := c.Write(data); err != nil { @@ -124,7 +124,7 @@ // // The application can also send both unicast and multicast packets. 
// -// p.SetTrafficClass(DiffServCS0) +// p.SetTrafficClass(0x0) // p.SetHopLimit(16) // if _, err := p.WriteTo(data[:n], nil, src); err != nil { // // error handling diff --git a/_third_party/golang.org/x/net/ipv6/example_test.go b/_third_party/golang.org/x/net/ipv6/example_test.go index 1b7f48a784..a2a3030c1f 100644 --- a/_third_party/golang.org/x/net/ipv6/example_test.go +++ b/_third_party/golang.org/x/net/ipv6/example_test.go @@ -11,9 +11,8 @@ import ( "os" "time" - "bosun.org/_third_party/golang.org/x/net/icmp" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/icmp" + "golang.org/x/net/ipv6" ) func ExampleConn_markingTCP() { @@ -31,7 +30,7 @@ func ExampleConn_markingTCP() { go func(c net.Conn) { defer c.Close() p := ipv6.NewConn(c) - if err := p.SetTrafficClass(iana.DiffServAF11); err != nil { + if err := p.SetTrafficClass(0x28); err != nil { // DSCP AF11 log.Fatal(err) } if err := p.SetHopLimit(128); err != nil { @@ -103,7 +102,7 @@ func ExamplePacketConn_tracingIPPacketRoute() { log.Fatal("no AAAA record found") } - c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolIPv6ICMP), "::") // ICMP for IPv6 + c, err := net.ListenPacket("ip6:58", "::") // ICMP for IPv6 if err != nil { log.Fatal(err) } @@ -156,7 +155,7 @@ func ExamplePacketConn_tracingIPPacketRoute() { } log.Fatal(err) } - rm, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]) + rm, err := icmp.ParseMessage(58, rb[:n]) if err != nil { log.Fatal(err) } @@ -178,7 +177,7 @@ func ExamplePacketConn_tracingIPPacketRoute() { } func ExamplePacketConn_advertisingOSPFHello() { - c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolOSPFIGP), "::") // OSPF for IPv6 + c, err := net.ListenPacket("ip6:89", "::") // OSPF for IPv6 if err != nil { log.Fatal(err) } @@ -205,7 +204,7 @@ func ExamplePacketConn_advertisingOSPFHello() { } cm := ipv6.ControlMessage{ - TrafficClass: iana.DiffServCS6, + TrafficClass: 0xc0, // DSCP CS6 HopLimit: 1, IfIndex: en0.Index, } diff --git a/_third_party/golang.org/x/net/ipv6/header_test.go b/_third_party/golang.org/x/net/ipv6/header_test.go index 2cc0636b53..18e0023ef2 100644 --- a/_third_party/golang.org/x/net/ipv6/header_test.go +++ b/_third_party/golang.org/x/net/ipv6/header_test.go @@ -9,8 +9,8 @@ import ( "reflect" "testing" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv6" ) var ( diff --git a/_third_party/golang.org/x/net/ipv6/icmp.go b/_third_party/golang.org/x/net/ipv6/icmp.go index 0845d4de71..a2de65a08c 100644 --- a/_third_party/golang.org/x/net/ipv6/icmp.go +++ b/_third_party/golang.org/x/net/ipv6/icmp.go @@ -4,7 +4,7 @@ package ipv6 -import "bosun.org/_third_party/golang.org/x/net/internal/iana" +import "golang.org/x/net/internal/iana" // An ICMPType represents a type of ICMP message. 
type ICMPType int diff --git a/_third_party/golang.org/x/net/ipv6/icmp_test.go b/_third_party/golang.org/x/net/ipv6/icmp_test.go index f40c2adf69..e192d6d8c2 100644 --- a/_third_party/golang.org/x/net/ipv6/icmp_test.go +++ b/_third_party/golang.org/x/net/ipv6/icmp_test.go @@ -10,8 +10,8 @@ import ( "runtime" "testing" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" ) var icmpStringTests = []struct { diff --git a/_third_party/golang.org/x/net/ipv6/multicast_test.go b/_third_party/golang.org/x/net/ipv6/multicast_test.go index ea7d563106..fc10ce109f 100644 --- a/_third_party/golang.org/x/net/ipv6/multicast_test.go +++ b/_third_party/golang.org/x/net/ipv6/multicast_test.go @@ -12,10 +12,10 @@ import ( "testing" "time" - "bosun.org/_third_party/golang.org/x/net/icmp" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" ) var packetConnReadWriteMulticastUDPTests = []struct { diff --git a/_third_party/golang.org/x/net/ipv6/multicastlistener_test.go b/_third_party/golang.org/x/net/ipv6/multicastlistener_test.go index 306e9a9cde..9711f7513f 100644 --- a/_third_party/golang.org/x/net/ipv6/multicastlistener_test.go +++ b/_third_party/golang.org/x/net/ipv6/multicastlistener_test.go @@ -10,8 +10,8 @@ import ( "runtime" "testing" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" ) var udpMultipleGroupListenerTests = []net.Addr{ diff --git a/_third_party/golang.org/x/net/ipv6/multicastsockopt_test.go b/_third_party/golang.org/x/net/ipv6/multicastsockopt_test.go index b797aea588..fe0e6e1b14 100644 --- a/_third_party/golang.org/x/net/ipv6/multicastsockopt_test.go +++ b/_third_party/golang.org/x/net/ipv6/multicastsockopt_test.go @@ -9,8 +9,8 @@ import ( "runtime" "testing" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" ) var packetConnMulticastSocketOptionTests = []struct { diff --git a/_third_party/golang.org/x/net/ipv6/readwrite_test.go b/_third_party/golang.org/x/net/ipv6/readwrite_test.go index 57454efeea..ff4ea2b590 100644 --- a/_third_party/golang.org/x/net/ipv6/readwrite_test.go +++ b/_third_party/golang.org/x/net/ipv6/readwrite_test.go @@ -11,9 +11,9 @@ import ( "sync" "testing" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" ) func benchmarkUDPListener() (net.PacketConn, net.Addr, error) { diff --git a/_third_party/golang.org/x/net/ipv6/sockopt_test.go b/_third_party/golang.org/x/net/ipv6/sockopt_test.go index edc75e511a..9c21903160 100644 --- a/_third_party/golang.org/x/net/ipv6/sockopt_test.go +++ b/_third_party/golang.org/x/net/ipv6/sockopt_test.go @@ -10,9 +10,9 @@ import ( "runtime" "testing" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv6" + 
"golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" ) var supportsIPv6 bool = nettest.SupportsIPv6() diff --git a/_third_party/golang.org/x/net/ipv6/sys_bsd.go b/_third_party/golang.org/x/net/ipv6/sys_bsd.go index 83a3e3d8d7..75a8863b3e 100644 --- a/_third_party/golang.org/x/net/ipv6/sys_bsd.go +++ b/_third_party/golang.org/x/net/ipv6/sys_bsd.go @@ -10,7 +10,7 @@ import ( "net" "syscall" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) type sysSockoptLen int32 diff --git a/_third_party/golang.org/x/net/ipv6/sys_darwin.go b/_third_party/golang.org/x/net/ipv6/sys_darwin.go index 531486adeb..411fb498c8 100644 --- a/_third_party/golang.org/x/net/ipv6/sys_darwin.go +++ b/_third_party/golang.org/x/net/ipv6/sys_darwin.go @@ -9,7 +9,7 @@ import ( "syscall" "unsafe" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) type sysSockoptLen int32 diff --git a/_third_party/golang.org/x/net/ipv6/sys_freebsd.go b/_third_party/golang.org/x/net/ipv6/sys_freebsd.go index 3acbc592cc..b68725cba6 100644 --- a/_third_party/golang.org/x/net/ipv6/sys_freebsd.go +++ b/_third_party/golang.org/x/net/ipv6/sys_freebsd.go @@ -11,7 +11,7 @@ import ( "syscall" "unsafe" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) type sysSockoptLen int32 diff --git a/_third_party/golang.org/x/net/ipv6/sys_linux.go b/_third_party/golang.org/x/net/ipv6/sys_linux.go index fcd1f8e3ce..2fa6088d0f 100644 --- a/_third_party/golang.org/x/net/ipv6/sys_linux.go +++ b/_third_party/golang.org/x/net/ipv6/sys_linux.go @@ -9,7 +9,7 @@ import ( "syscall" "unsafe" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) type sysSockoptLen int32 diff --git a/_third_party/golang.org/x/net/ipv6/sys_windows.go b/_third_party/golang.org/x/net/ipv6/sys_windows.go index 0bc0150181..fda875736f 100644 --- a/_third_party/golang.org/x/net/ipv6/sys_windows.go +++ b/_third_party/golang.org/x/net/ipv6/sys_windows.go @@ -8,7 +8,7 @@ import ( "net" "syscall" - "bosun.org/_third_party/golang.org/x/net/internal/iana" + "golang.org/x/net/internal/iana" ) const ( diff --git a/_third_party/golang.org/x/net/ipv6/unicast_test.go b/_third_party/golang.org/x/net/ipv6/unicast_test.go index 97f60e2325..61656983b4 100644 --- a/_third_party/golang.org/x/net/ipv6/unicast_test.go +++ b/_third_party/golang.org/x/net/ipv6/unicast_test.go @@ -12,10 +12,10 @@ import ( "testing" "time" - "bosun.org/_third_party/golang.org/x/net/icmp" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" ) func TestPacketConnReadWriteUnicastUDP(t *testing.T) { diff --git a/_third_party/golang.org/x/net/ipv6/unicastsockopt_test.go b/_third_party/golang.org/x/net/ipv6/unicastsockopt_test.go index 72ce6b270a..7bb2e440ac 100644 --- a/_third_party/golang.org/x/net/ipv6/unicastsockopt_test.go +++ b/_third_party/golang.org/x/net/ipv6/unicastsockopt_test.go @@ -9,9 +9,9 @@ import ( "runtime" "testing" - "bosun.org/_third_party/golang.org/x/net/internal/iana" - "bosun.org/_third_party/golang.org/x/net/internal/nettest" - "bosun.org/_third_party/golang.org/x/net/ipv6" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" ) 
func TestConnUnicastSocketOptions(t *testing.T) { diff --git a/_third_party/golang.org/x/sys/unix/creds_test.go b/_third_party/golang.org/x/sys/unix/creds_test.go index 4c55d7d3fd..eaae7c367f 100644 --- a/_third_party/golang.org/x/sys/unix/creds_test.go +++ b/_third_party/golang.org/x/sys/unix/creds_test.go @@ -13,7 +13,7 @@ import ( "syscall" "testing" - "bosun.org/_third_party/golang.org/x/sys/unix" + "golang.org/x/sys/unix" ) // TestSCMCredentials tests the sending and receiving of credentials diff --git a/_third_party/golang.org/x/sys/unix/mmap_unix_test.go b/_third_party/golang.org/x/sys/unix/mmap_unix_test.go index 79e1733c10..18ccec05f1 100644 --- a/_third_party/golang.org/x/sys/unix/mmap_unix_test.go +++ b/_third_party/golang.org/x/sys/unix/mmap_unix_test.go @@ -9,7 +9,7 @@ package unix_test import ( "testing" - "bosun.org/_third_party/golang.org/x/sys/unix" + "golang.org/x/sys/unix" ) func TestMmap(t *testing.T) { diff --git a/_third_party/golang.org/x/sys/unix/syscall_bsd_test.go b/_third_party/golang.org/x/sys/unix/syscall_bsd_test.go index e7ba5abeea..55d8843094 100644 --- a/_third_party/golang.org/x/sys/unix/syscall_bsd_test.go +++ b/_third_party/golang.org/x/sys/unix/syscall_bsd_test.go @@ -9,7 +9,7 @@ package unix_test import ( "testing" - "bosun.org/_third_party/golang.org/x/sys/unix" + "golang.org/x/sys/unix" ) const MNT_WAIT = 1 diff --git a/_third_party/golang.org/x/sys/unix/syscall_test.go b/_third_party/golang.org/x/sys/unix/syscall_test.go index 75f200af2a..95eac92aca 100644 --- a/_third_party/golang.org/x/sys/unix/syscall_test.go +++ b/_third_party/golang.org/x/sys/unix/syscall_test.go @@ -10,7 +10,7 @@ import ( "fmt" "testing" - "bosun.org/_third_party/golang.org/x/sys/unix" + "golang.org/x/sys/unix" ) func testSetGetenv(t *testing.T, key, value string) { diff --git a/_third_party/golang.org/x/sys/unix/syscall_unix_test.go b/_third_party/golang.org/x/sys/unix/syscall_unix_test.go index 8dec398eb0..bcc79d19ca 100644 --- a/_third_party/golang.org/x/sys/unix/syscall_unix_test.go +++ b/_third_party/golang.org/x/sys/unix/syscall_unix_test.go @@ -18,7 +18,7 @@ import ( "testing" "time" - "bosun.org/_third_party/golang.org/x/sys/unix" + "golang.org/x/sys/unix" ) // Tests that below functions, structures and constants are consistent diff --git a/_third_party/golang.org/x/sys/windows/registry/registry_test.go b/_third_party/golang.org/x/sys/windows/registry/registry_test.go index 617a2d29bb..6547a45b28 100644 --- a/_third_party/golang.org/x/sys/windows/registry/registry_test.go +++ b/_third_party/golang.org/x/sys/windows/registry/registry_test.go @@ -13,8 +13,9 @@ import ( "syscall" "testing" "time" + "unsafe" - "bosun.org/_third_party/golang.org/x/sys/windows/registry" + "golang.org/x/sys/windows/registry" ) func randKeyName(prefix string) string { @@ -680,3 +681,74 @@ func TestInvalidValues(t *testing.T) { } } } + +func TestGetMUIStringValue(t *testing.T) { + if err := registry.LoadRegLoadMUIString(); err != nil { + t.Skip("regLoadMUIString not supported; skipping") + } + if err := procGetDynamicTimeZoneInformation.Find(); err != nil { + t.Skipf("%s not supported; skipping", procGetDynamicTimeZoneInformation.Name) + } + var dtzi DynamicTimezoneinformation + if _, err := GetDynamicTimeZoneInformation(&dtzi); err != nil { + t.Fatal(err) + } + tzKeyName := syscall.UTF16ToString(dtzi.TimeZoneKeyName[:]) + timezoneK, err := registry.OpenKey(registry.LOCAL_MACHINE, + `SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones\`+tzKeyName, registry.READ) + if err != nil { + 
t.Fatal(err) + } + defer timezoneK.Close() + + var tests = []struct { + key registry.Key + name string + want string + }{ + {timezoneK, "MUI_Std", syscall.UTF16ToString(dtzi.StandardName[:])}, + {timezoneK, "MUI_Dlt", syscall.UTF16ToString(dtzi.DaylightName[:])}, + } + + for _, test := range tests { + got, err := test.key.GetMUIStringValue(test.name) + if err != nil { + t.Error("GetMUIStringValue:", err) + } + + if got != test.want { + t.Errorf("GetMUIStringValue: %s: Got %q, want %q", test.name, got, test.want) + } + } +} + +type DynamicTimezoneinformation struct { + Bias int32 + StandardName [32]uint16 + StandardDate syscall.Systemtime + StandardBias int32 + DaylightName [32]uint16 + DaylightDate syscall.Systemtime + DaylightBias int32 + TimeZoneKeyName [128]uint16 + DynamicDaylightTimeDisabled uint8 +} + +var ( + kernel32DLL = syscall.NewLazyDLL("kernel32") + + procGetDynamicTimeZoneInformation = kernel32DLL.NewProc("GetDynamicTimeZoneInformation") +) + +func GetDynamicTimeZoneInformation(dtzi *DynamicTimezoneinformation) (rc uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetDynamicTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(dtzi)), 0, 0) + rc = uint32(r0) + if rc == 0xffffffff { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/_third_party/golang.org/x/sys/windows/registry/syscall.go b/_third_party/golang.org/x/sys/windows/registry/syscall.go index 38e573fd22..5426cae909 100644 --- a/_third_party/golang.org/x/sys/windows/registry/syscall.go +++ b/_third_party/golang.org/x/sys/windows/registry/syscall.go @@ -19,10 +19,15 @@ const ( _ERROR_NO_MORE_ITEMS syscall.Errno = 259 ) +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + //sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW //sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW //sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW //sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW //sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW //sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/_third_party/golang.org/x/sys/windows/registry/value.go b/_third_party/golang.org/x/sys/windows/registry/value.go index bb45a23643..71d4e15bab 100644 --- a/_third_party/golang.org/x/sys/windows/registry/value.go +++ b/_third_party/golang.org/x/sys/windows/registry/value.go @@ -108,10 +108,65 @@ func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) if len(data) == 0 { return "", typ, nil } - u := (*[1 << 10]uint16)(unsafe.Pointer(&data[0]))[:] + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[:] return syscall.UTF16ToString(u), typ, nil } +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open 
key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. +// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. +func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. + + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + // ExpandString expands environment-variable strings and replaces // them with the values defined for the current user. // Use ExpandString to expand EXPAND_SZ strings. @@ -130,7 +185,7 @@ func ExpandString(value string) (string, error) { return "", err } if n <= uint32(len(r)) { - u := (*[1 << 15]uint16)(unsafe.Pointer(&r[0]))[:] + u := (*[1 << 29]uint16)(unsafe.Pointer(&r[0]))[:] return syscall.UTF16ToString(u), nil } r = make([]uint16, n) @@ -153,7 +208,7 @@ func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err err if len(data) == 0 { return nil, typ, nil } - p := (*[1 << 24]uint16)(unsafe.Pointer(&data[0]))[:len(data)/2] + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[:len(data)/2] if len(p) == 0 { return nil, typ, nil } @@ -241,7 +296,7 @@ func (k Key) setStringValue(name string, valtype uint32, value string) error { if err != nil { return err } - buf := (*[1 << 10]byte)(unsafe.Pointer(&v[0]))[:len(v)*2] + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[:len(v)*2] return k.setValue(name, valtype, buf) } @@ -271,7 +326,7 @@ func (k Key) SetStringsValue(name string, value []string) error { ss += s + "\x00" } v := utf16.Encode([]rune(ss + "\x00")) - buf := (*[1 << 10]byte)(unsafe.Pointer(&v[0]))[:len(v)*2] + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[:len(v)*2] return k.setValue(name, MULTI_SZ, buf) } @@ -310,7 +365,6 @@ loopItems: break } if err == syscall.ERROR_MORE_DATA { - println(len(buf), l) // Double buffer size and try again. 
l = uint32(2 * len(buf)) buf = make([]uint16, l) diff --git a/_third_party/golang.org/x/sys/windows/registry/zsyscall_windows.go b/_third_party/golang.org/x/sys/windows/registry/zsyscall_windows.go index 2b3de633c9..9c17675a24 100644 --- a/_third_party/golang.org/x/sys/windows/registry/zsyscall_windows.go +++ b/_third_party/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -16,6 +16,7 @@ var ( procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") ) @@ -59,6 +60,14 @@ func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { return } +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) diff --git a/_third_party/golang.org/x/sys/windows/svc/debug/service.go b/_third_party/golang.org/x/sys/windows/svc/debug/service.go index c6e2bd3db1..d5ab94b2c7 100644 --- a/_third_party/golang.org/x/sys/windows/svc/debug/service.go +++ b/_third_party/golang.org/x/sys/windows/svc/debug/service.go @@ -13,7 +13,7 @@ import ( "os/signal" "syscall" - "bosun.org/_third_party/golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc" ) // Run executes service name by calling appropriate handler function. diff --git a/_third_party/golang.org/x/sys/windows/svc/event.go b/_third_party/golang.org/x/sys/windows/svc/event.go index 74571ebdb8..0508e22881 100644 --- a/_third_party/golang.org/x/sys/windows/svc/event.go +++ b/_third_party/golang.org/x/sys/windows/svc/event.go @@ -9,7 +9,7 @@ package svc import ( "errors" - "bosun.org/_third_party/golang.org/x/sys/windows" + "golang.org/x/sys/windows" ) // event represents auto-reset, initially non-signaled Windows event. diff --git a/_third_party/golang.org/x/sys/windows/svc/eventlog/install.go b/_third_party/golang.org/x/sys/windows/svc/eventlog/install.go index 0c6f724481..c76a3760a4 100644 --- a/_third_party/golang.org/x/sys/windows/svc/eventlog/install.go +++ b/_third_party/golang.org/x/sys/windows/svc/eventlog/install.go @@ -9,8 +9,8 @@ package eventlog import ( "errors" - "bosun.org/_third_party/golang.org/x/sys/windows" - "bosun.org/_third_party/golang.org/x/sys/windows/registry" + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" ) const ( diff --git a/_third_party/golang.org/x/sys/windows/svc/eventlog/log.go b/_third_party/golang.org/x/sys/windows/svc/eventlog/log.go index 88e3232cb0..46e5153d02 100644 --- a/_third_party/golang.org/x/sys/windows/svc/eventlog/log.go +++ b/_third_party/golang.org/x/sys/windows/svc/eventlog/log.go @@ -12,7 +12,7 @@ import ( "errors" "syscall" - "bosun.org/_third_party/golang.org/x/sys/windows" + "golang.org/x/sys/windows" ) // Log provides access to the system log. 
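The registry changes earlier in this patch wire RegLoadMUIStringW up end to end: LoadRegLoadMUIString probes for the API (absent before Windows Vista), and GetMUIStringValue resolves values of the form @dllname,-strID, retrying with a %SystemRoot%\system32 search path and regrowing its buffer on ERROR_MORE_DATA. A minimal usage sketch, assuming a Windows host; the UTC time-zone key below is illustrative only, any key holding MUI string values would do:

package main

import (
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	// Bail out gracefully on systems without RegLoadMUIStringW.
	if err := registry.LoadRegLoadMUIString(); err != nil {
		log.Fatal("regLoadMUIString not supported: ", err)
	}
	k, err := registry.OpenKey(registry.LOCAL_MACHINE,
		`SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones\UTC`, registry.READ)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()
	// Resolves an indirect value such as "@tzres.dll,-1920" to its localized string.
	std, err := k.GetMUIStringValue("MUI_Std")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("localized standard name:", std)
}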
diff --git a/_third_party/golang.org/x/sys/windows/svc/eventlog/log_test.go b/_third_party/golang.org/x/sys/windows/svc/eventlog/log_test.go index 25e42716fc..4dd8ad9e74 100644 --- a/_third_party/golang.org/x/sys/windows/svc/eventlog/log_test.go +++ b/_third_party/golang.org/x/sys/windows/svc/eventlog/log_test.go @@ -9,7 +9,7 @@ package eventlog_test import ( "testing" - "bosun.org/_third_party/golang.org/x/sys/windows/svc/eventlog" + "golang.org/x/sys/windows/svc/eventlog" ) func TestLog(t *testing.T) { diff --git a/_third_party/golang.org/x/sys/windows/svc/mgr/config.go b/_third_party/golang.org/x/sys/windows/svc/mgr/config.go index 50ebafc05c..0a6edba4f5 100644 --- a/_third_party/golang.org/x/sys/windows/svc/mgr/config.go +++ b/_third_party/golang.org/x/sys/windows/svc/mgr/config.go @@ -11,7 +11,7 @@ import ( "unicode/utf16" "unsafe" - "bosun.org/_third_party/golang.org/x/sys/windows" + "golang.org/x/sys/windows" ) const ( diff --git a/_third_party/golang.org/x/sys/windows/svc/mgr/mgr.go b/_third_party/golang.org/x/sys/windows/svc/mgr/mgr.go index d7749269b4..4d7e72ec46 100644 --- a/_third_party/golang.org/x/sys/windows/svc/mgr/mgr.go +++ b/_third_party/golang.org/x/sys/windows/svc/mgr/mgr.go @@ -15,7 +15,7 @@ import ( "syscall" "unicode/utf16" - "bosun.org/_third_party/golang.org/x/sys/windows" + "golang.org/x/sys/windows" ) // Mgr is used to manage Windows service. diff --git a/_third_party/golang.org/x/sys/windows/svc/mgr/mgr_test.go b/_third_party/golang.org/x/sys/windows/svc/mgr/mgr_test.go index 4876e4548b..78be970c05 100644 --- a/_third_party/golang.org/x/sys/windows/svc/mgr/mgr_test.go +++ b/_third_party/golang.org/x/sys/windows/svc/mgr/mgr_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - "bosun.org/_third_party/golang.org/x/sys/windows/svc/mgr" + "golang.org/x/sys/windows/svc/mgr" ) func TestOpenLanManServer(t *testing.T) { diff --git a/_third_party/golang.org/x/sys/windows/svc/mgr/service.go b/_third_party/golang.org/x/sys/windows/svc/mgr/service.go index c8994e1bee..465f3c3d23 100644 --- a/_third_party/golang.org/x/sys/windows/svc/mgr/service.go +++ b/_third_party/golang.org/x/sys/windows/svc/mgr/service.go @@ -9,8 +9,8 @@ package mgr import ( "syscall" - "bosun.org/_third_party/golang.org/x/sys/windows" - "bosun.org/_third_party/golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/svc" ) // TODO(brainman): Use EnumDependentServices to enumerate dependent services. diff --git a/_third_party/golang.org/x/sys/windows/svc/security.go b/_third_party/golang.org/x/sys/windows/svc/security.go index 08cb78e0e0..6fbc9236ed 100644 --- a/_third_party/golang.org/x/sys/windows/svc/security.go +++ b/_third_party/golang.org/x/sys/windows/svc/security.go @@ -9,7 +9,7 @@ package svc import ( "unsafe" - "bosun.org/_third_party/golang.org/x/sys/windows" + "golang.org/x/sys/windows" ) func allocSid(subAuth0 uint32) (*windows.SID, error) { diff --git a/_third_party/golang.org/x/sys/windows/svc/service.go b/_third_party/golang.org/x/sys/windows/svc/service.go index 6699fe82f7..9864f7a72f 100644 --- a/_third_party/golang.org/x/sys/windows/svc/service.go +++ b/_third_party/golang.org/x/sys/windows/svc/service.go @@ -14,7 +14,7 @@ import ( "syscall" "unsafe" - "bosun.org/_third_party/golang.org/x/sys/windows" + "golang.org/x/sys/windows" ) // State describes service execution state (Stopped, Running and so on). 
diff --git a/_third_party/golang.org/x/sys/windows/svc/svc_test.go b/_third_party/golang.org/x/sys/windows/svc/svc_test.go index 070cd9f9a3..764da54a54 100644 --- a/_third_party/golang.org/x/sys/windows/svc/svc_test.go +++ b/_third_party/golang.org/x/sys/windows/svc/svc_test.go @@ -14,8 +14,8 @@ import ( "testing" "time" - "bosun.org/_third_party/golang.org/x/sys/windows/svc" - "bosun.org/_third_party/golang.org/x/sys/windows/svc/mgr" + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/mgr" ) func getState(t *testing.T, s *mgr.Service) svc.State { diff --git a/_third_party/golang.org/x/sys/windows/syscall_test.go b/_third_party/golang.org/x/sys/windows/syscall_test.go index 86d2e4823a..62588b91bb 100644 --- a/_third_party/golang.org/x/sys/windows/syscall_test.go +++ b/_third_party/golang.org/x/sys/windows/syscall_test.go @@ -9,7 +9,7 @@ package windows_test import ( "testing" - "bosun.org/_third_party/golang.org/x/sys/windows" + "golang.org/x/sys/windows" ) func testSetGetenv(t *testing.T, key, value string) { diff --git a/_third_party/golang.org/x/sys/windows/syscall_windows_test.go b/_third_party/golang.org/x/sys/windows/syscall_windows_test.go index 2adaa86efb..0f73c11ba4 100644 --- a/_third_party/golang.org/x/sys/windows/syscall_windows_test.go +++ b/_third_party/golang.org/x/sys/windows/syscall_windows_test.go @@ -12,7 +12,7 @@ import ( "testing" "unsafe" - "bosun.org/_third_party/golang.org/x/sys/windows" + "golang.org/x/sys/windows" ) func TestWin32finddata(t *testing.T) { diff --git a/_third_party/gopkg.in/yaml.v1/decode_test.go b/_third_party/gopkg.in/yaml.v1/decode_test.go index e649dd84d6..ef3d37fb30 100644 --- a/_third_party/gopkg.in/yaml.v1/decode_test.go +++ b/_third_party/gopkg.in/yaml.v1/decode_test.go @@ -1,8 +1,8 @@ package yaml_test import ( - . "bosun.org/_third_party/gopkg.in/check.v1" - "bosun.org/_third_party/gopkg.in/yaml.v1" + . "gopkg.in/check.v1" + "gopkg.in/yaml.v1" "math" "reflect" "strings" diff --git a/_third_party/gopkg.in/yaml.v1/encode_test.go b/_third_party/gopkg.in/yaml.v1/encode_test.go index 4d25f48ee5..c9febc22a4 100644 --- a/_third_party/gopkg.in/yaml.v1/encode_test.go +++ b/_third_party/gopkg.in/yaml.v1/encode_test.go @@ -7,8 +7,8 @@ import ( "strings" "time" - . "bosun.org/_third_party/gopkg.in/check.v1" - "bosun.org/_third_party/gopkg.in/yaml.v1" + . "gopkg.in/check.v1" + "gopkg.in/yaml.v1" ) var marshalIntTest = 123 diff --git a/_third_party/gopkg.in/yaml.v1/suite_test.go b/_third_party/gopkg.in/yaml.v1/suite_test.go index 3be8e1fe86..c5cf1ed4f6 100644 --- a/_third_party/gopkg.in/yaml.v1/suite_test.go +++ b/_third_party/gopkg.in/yaml.v1/suite_test.go @@ -1,7 +1,7 @@ package yaml_test import ( - . "bosun.org/_third_party/gopkg.in/check.v1" + . 
"gopkg.in/check.v1" "testing" ) From e6b11a4e0c0ef116accf111738fc6b924037fb2e Mon Sep 17 00:00:00 2001 From: Craig Peterson Date: Fri, 2 Oct 2015 13:44:51 -0600 Subject: [PATCH 2/2] a bunch more dependencies --- .../github.com/PuerkitoBio/goquery/array.go | 2 +- .../github.com/PuerkitoBio/goquery/expand.go | 2 +- .../github.com/PuerkitoBio/goquery/filter.go | 2 +- .../PuerkitoBio/goquery/iteration_test.go | 2 +- .../PuerkitoBio/goquery/manipulation.go | 2 +- .../PuerkitoBio/goquery/property.go | 2 +- .../github.com/PuerkitoBio/goquery/query.go | 2 +- .../PuerkitoBio/goquery/traversal.go | 2 +- .../github.com/PuerkitoBio/goquery/type.go | 2 +- .../PuerkitoBio/goquery/type_test.go | 2 +- .../PuerkitoBio/goquery/utilities.go | 2 +- .../andybalholm/cascadia/benchmark_test.go | 2 +- .../github.com/andybalholm/cascadia/parser.go | 2 +- .../andybalholm/cascadia/selector.go | 2 +- .../andybalholm/cascadia/selector_test.go | 2 +- .../aymerick/douceur/inliner/inliner.go | 2 +- .../boltdb/bolt/bolt_unix_solaris.go | 2 +- .../github.com/golang/freetype/freetype.go | 4 +- .../github.com/golang/freetype/raster/geom.go | 2 +- .../golang/freetype/raster/raster.go | 2 +- .../golang/freetype/raster/stroke.go | 2 +- .../golang/freetype/truetype/face.go | 4 +- .../golang/freetype/truetype/face_test.go | 4 +- .../golang/freetype/truetype/glyph.go | 4 +- .../golang/freetype/truetype/hint.go | 2 +- .../golang/freetype/truetype/hint_test.go | 2 +- .../golang/freetype/truetype/truetype.go | 2 +- .../golang/freetype/truetype/truetype_test.go | 4 +- .../go-msgpack/codec/ext_dep_test.go | 2 +- .../github.com/influxdb/influxdb/CHANGELOG.md | 1705 +++++ .../influxdb/influxdb/CONTRIBUTING.md | 247 + .../github.com/influxdb/influxdb/DOCKER.md | 44 + .../github.com/influxdb/influxdb/Dockerfile | 24 + .../influxdb/Dockerfile_test_ubuntu32 | 12 + .../github.com/influxdb/influxdb/LICENSE | 20 + .../influxdb/LICENSE_OF_DEPENDENCIES.md | 19 + .../github.com/influxdb/influxdb/QUERIES.md | 180 + .../github.com/influxdb/influxdb/README.md | 72 + .../github.com/influxdb/influxdb/balancer.go | 78 + .../influxdb/influxdb/balancer_test.go | 115 + .../influxdb/influxdb/build-docker.sh | 9 + .../influxdb/influxdb/circle-test.sh | 95 + .../github.com/influxdb/influxdb/circle.yml | 16 + .../influxdb/influxdb/client/README.md | 4 +- .../influxdb/influxdb/client/influxdb.go | 30 +- .../github.com/influxdb/influxdb/errors.go | 82 + .../influxdb/influxdb/influxql/ast.go | 383 +- .../influxdb/influxdb/influxql/ast_test.go | 2 +- .../influxdb/influxdb/influxql/parser.go | 115 +- .../influxdb/influxdb/influxql/parser_test.go | 312 +- .../influxdb/influxdb/influxql/result.go | 67 +- .../influxdb/influxdb/influxql/scanner.go | 2 + .../influxdb/influxql/scanner_test.go | 1 + .../influxdb/influxdb/influxql/token.go | 4 + .../github.com/influxdb/influxdb/influxvar.go | 45 + .../github.com/influxdb/influxdb/meta/data.go | 86 +- .../influxdb/influxdb/meta/data_test.go | 60 +- .../influxdb/influxdb/meta/errors.go | 62 +- .../influxdb/meta/internal/meta.pb.go | 31 +- .../influxdb/meta/internal/meta.proto | 9 +- .../influxdb/influxdb/meta/rpc_test.go | 2 +- .../influxdb/meta/statement_executor.go | 74 +- .../influxdb/meta/statement_executor_test.go | 84 +- .../influxdb/influxdb/meta/store.go | 63 +- .../influxdb/influxdb/meta/store_test.go | 99 +- .../influxdb/influxdb/models/points.go | 1340 ++++ .../influxdb/influxdb/models/points_test.go | 1421 ++++ .../influxdb/influxdb/models/rows.go | 59 + .../github.com/influxdb/influxdb/nightly.sh | 17 + 
.../github.com/influxdb/influxdb/package.sh | 624 ++ .../influxdb/influxdb/pkg/escape/bytes.go | 45 + .../influxdb/influxdb/pkg/escape/strings.go | 34 + .../influxdb/influxdb/pkg/slices/strings.go | 37 + .../influxdb/influxdb/test-32bit-docker.sh | 4 + .../influxdb/influxdb/tsdb/batcher.go | 25 +- .../influxdb/influxdb/tsdb/batcher_test.go | 42 +- .../influxdb/influxdb/tsdb/config.go | 18 +- .../influxdb/influxdb/tsdb/cursor.go | 319 +- .../influxdb/influxdb/tsdb/cursor_test.go | 244 +- .../influxdb/influxdb/tsdb/engine.go | 15 +- .../influxdb/influxdb/tsdb/executor.go | 342 +- .../influxdb/influxdb/tsdb/executor_test.go | 324 +- .../influxdb/influxdb/tsdb/functions.go | 1722 +++++ .../influxdb/influxdb/tsdb/functions_test.go | 862 +++ .../influxdb/influxdb/tsdb/mapper.go | 1220 ++- .../influxdb/influxdb/tsdb/mapper_test.go | 114 +- .../github.com/influxdb/influxdb/tsdb/meta.go | 264 +- .../influxdb/influxdb/tsdb/query_executor.go | 200 +- .../influxdb/tsdb/query_executor_test.go | 41 +- .../influxdb/influxdb/tsdb/shard.go | 61 +- .../influxdb/influxdb/tsdb/shard_test.go | 31 +- .../influxdb/tsdb/show_measurements.go | 69 +- .../influxdb/influxdb/tsdb/show_tag_keys.go | 315 + .../influxdb/influxdb/tsdb/store.go | 65 +- .../influxdb/influxdb/tsdb/store_test.go | 9 +- .../github.com/jordan-wright/email/email.go | 204 +- .../llgcode/draw2d/draw2dimg/ftgc.go | 4 +- .../llgcode/draw2d/draw2dimg/ftpath.go | 2 +- .../llgcode/draw2d/draw2dimg/text.go | 2 +- .../siddontang/go/bson/bson_test.go | 4 +- .../tatsushid/go-fastping/fastping.go | 6 +- .../github.com/vdobler/chart/imgg/image.go | 6 +- .../golang.org/x/crypto/bcrypt/bcrypt.go | 2 +- _third_party/golang.org/x/image/draw/draw.go | 79 + .../golang.org/x/image/draw/example_test.go | 118 + _third_party/golang.org/x/image/draw/gen.go | 1403 ++++ _third_party/golang.org/x/image/draw/impl.go | 6668 +++++++++++++++++ _third_party/golang.org/x/image/draw/scale.go | 527 ++ .../golang.org/x/image/draw/scale_test.go | 731 ++ .../golang.org/x/image/draw/stdlib_test.go | 96 + _third_party/golang.org/x/image/font/font.go | 202 + .../golang.org/x/image/math/f64/f64.go | 37 + .../golang.org/x/image/math/fixed/fixed.go | 172 + .../x/image/math/fixed/fixed_test.go | 25 + .../golang.org/x/net/html/example_test.go | 2 +- _third_party/golang.org/x/net/html/node.go | 2 +- _third_party/golang.org/x/net/html/parse.go | 2 +- .../golang.org/x/net/html/parse_test.go | 2 +- _third_party/golang.org/x/net/html/token.go | 2 +- .../golang.org/x/net/icmp/endpoint.go | 4 +- .../golang.org/x/net/icmp/example_test.go | 4 +- .../golang.org/x/net/icmp/extension_test.go | 2 +- .../golang.org/x/net/icmp/interface.go | 2 +- _third_party/golang.org/x/net/icmp/ipv4.go | 2 +- .../golang.org/x/net/icmp/ipv4_test.go | 2 +- _third_party/golang.org/x/net/icmp/ipv6.go | 2 +- .../golang.org/x/net/icmp/listen_posix.go | 6 +- _third_party/golang.org/x/net/icmp/message.go | 6 +- .../golang.org/x/net/icmp/message_test.go | 8 +- .../golang.org/x/net/icmp/multipart.go | 2 +- .../golang.org/x/net/icmp/multipart_test.go | 8 +- .../golang.org/x/net/icmp/paramprob.go | 2 +- .../golang.org/x/net/icmp/ping_test.go | 10 +- .../golang.org/x/net/ipv4/control_bsd.go | 2 +- .../golang.org/x/net/ipv4/control_pktinfo.go | 2 +- .../golang.org/x/net/ipv4/control_unix.go | 2 +- .../golang.org/x/net/ipv4/example_test.go | 4 +- _third_party/golang.org/x/net/ipv4/icmp.go | 2 +- .../golang.org/x/net/ipv4/icmp_test.go | 4 +- .../golang.org/x/net/ipv4/multicast_test.go | 8 +- .../x/net/ipv4/multicastlistener_test.go | 
4 +- .../x/net/ipv4/multicastsockopt_test.go | 4 +- .../golang.org/x/net/ipv4/readwrite_test.go | 4 +- .../x/net/ipv4/sockopt_asmreq_unix.go | 2 +- .../x/net/ipv4/sockopt_asmreq_windows.go | 2 +- .../x/net/ipv4/sockopt_asmreqn_unix.go | 2 +- .../x/net/ipv4/sockopt_ssmreq_unix.go | 2 +- .../golang.org/x/net/ipv4/sockopt_unix.go | 2 +- .../golang.org/x/net/ipv4/sockopt_windows.go | 2 +- .../golang.org/x/net/ipv4/unicast_test.go | 8 +- .../x/net/ipv4/unicastsockopt_test.go | 6 +- .../x/net/ipv6/control_rfc2292_unix.go | 2 +- .../x/net/ipv6/control_rfc3542_unix.go | 2 +- .../golang.org/x/net/ipv6/control_unix.go | 2 +- .../golang.org/x/net/ipv6/example_test.go | 4 +- .../golang.org/x/net/ipv6/header_test.go | 4 +- _third_party/golang.org/x/net/ipv6/icmp.go | 2 +- .../golang.org/x/net/ipv6/icmp_test.go | 4 +- .../golang.org/x/net/ipv6/multicast_test.go | 8 +- .../x/net/ipv6/multicastlistener_test.go | 4 +- .../x/net/ipv6/multicastsockopt_test.go | 4 +- .../golang.org/x/net/ipv6/readwrite_test.go | 6 +- .../golang.org/x/net/ipv6/sockopt_test.go | 6 +- _third_party/golang.org/x/net/ipv6/sys_bsd.go | 2 +- .../golang.org/x/net/ipv6/sys_darwin.go | 2 +- .../golang.org/x/net/ipv6/sys_freebsd.go | 2 +- .../golang.org/x/net/ipv6/sys_linux.go | 2 +- .../golang.org/x/net/ipv6/sys_windows.go | 2 +- .../golang.org/x/net/ipv6/unicast_test.go | 8 +- .../x/net/ipv6/unicastsockopt_test.go | 6 +- .../golang.org/x/sys/unix/creds_test.go | 2 +- .../golang.org/x/sys/unix/mmap_unix_test.go | 2 +- .../golang.org/x/sys/unix/syscall_bsd_test.go | 2 +- .../golang.org/x/sys/unix/syscall_test.go | 2 +- .../x/sys/unix/syscall_unix_test.go | 2 +- .../x/sys/windows/registry/registry_test.go | 2 +- .../x/sys/windows/svc/debug/service.go | 2 +- .../golang.org/x/sys/windows/svc/event.go | 2 +- .../x/sys/windows/svc/eventlog/install.go | 4 +- .../x/sys/windows/svc/eventlog/log.go | 2 +- .../x/sys/windows/svc/eventlog/log_test.go | 2 +- .../x/sys/windows/svc/mgr/config.go | 2 +- .../golang.org/x/sys/windows/svc/mgr/mgr.go | 2 +- .../x/sys/windows/svc/mgr/mgr_test.go | 2 +- .../x/sys/windows/svc/mgr/service.go | 4 +- .../golang.org/x/sys/windows/svc/security.go | 2 +- .../golang.org/x/sys/windows/svc/service.go | 2 +- .../golang.org/x/sys/windows/svc/svc_test.go | 4 +- .../golang.org/x/sys/windows/syscall_test.go | 2 +- .../x/sys/windows/syscall_windows_test.go | 2 +- _third_party/gopkg.in/yaml.v1/decode_test.go | 4 +- _third_party/gopkg.in/yaml.v1/encode_test.go | 4 +- _third_party/gopkg.in/yaml.v1/suite_test.go | 2 +- cmd/bosun/expr/influx.go | 5 +- 194 files changed, 22971 insertions(+), 1786 deletions(-) create mode 100644 _third_party/github.com/influxdb/influxdb/CHANGELOG.md create mode 100644 _third_party/github.com/influxdb/influxdb/CONTRIBUTING.md create mode 100644 _third_party/github.com/influxdb/influxdb/DOCKER.md create mode 100644 _third_party/github.com/influxdb/influxdb/Dockerfile create mode 100644 _third_party/github.com/influxdb/influxdb/Dockerfile_test_ubuntu32 create mode 100644 _third_party/github.com/influxdb/influxdb/LICENSE create mode 100644 _third_party/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md create mode 100644 _third_party/github.com/influxdb/influxdb/QUERIES.md create mode 100644 _third_party/github.com/influxdb/influxdb/README.md create mode 100644 _third_party/github.com/influxdb/influxdb/balancer.go create mode 100644 _third_party/github.com/influxdb/influxdb/balancer_test.go create mode 100755 _third_party/github.com/influxdb/influxdb/build-docker.sh create mode 100755 
_third_party/github.com/influxdb/influxdb/circle-test.sh create mode 100644 _third_party/github.com/influxdb/influxdb/circle.yml create mode 100644 _third_party/github.com/influxdb/influxdb/errors.go create mode 100644 _third_party/github.com/influxdb/influxdb/influxvar.go create mode 100644 _third_party/github.com/influxdb/influxdb/models/points.go create mode 100644 _third_party/github.com/influxdb/influxdb/models/points_test.go create mode 100644 _third_party/github.com/influxdb/influxdb/models/rows.go create mode 100755 _third_party/github.com/influxdb/influxdb/nightly.sh create mode 100755 _third_party/github.com/influxdb/influxdb/package.sh create mode 100644 _third_party/github.com/influxdb/influxdb/pkg/escape/bytes.go create mode 100644 _third_party/github.com/influxdb/influxdb/pkg/escape/strings.go create mode 100644 _third_party/github.com/influxdb/influxdb/pkg/slices/strings.go create mode 100755 _third_party/github.com/influxdb/influxdb/test-32bit-docker.sh create mode 100644 _third_party/github.com/influxdb/influxdb/tsdb/functions.go create mode 100644 _third_party/github.com/influxdb/influxdb/tsdb/functions_test.go create mode 100644 _third_party/github.com/influxdb/influxdb/tsdb/show_tag_keys.go create mode 100644 _third_party/golang.org/x/image/draw/draw.go create mode 100644 _third_party/golang.org/x/image/draw/example_test.go create mode 100644 _third_party/golang.org/x/image/draw/gen.go create mode 100644 _third_party/golang.org/x/image/draw/impl.go create mode 100644 _third_party/golang.org/x/image/draw/scale.go create mode 100644 _third_party/golang.org/x/image/draw/scale_test.go create mode 100644 _third_party/golang.org/x/image/draw/stdlib_test.go create mode 100644 _third_party/golang.org/x/image/font/font.go create mode 100644 _third_party/golang.org/x/image/math/f64/f64.go create mode 100644 _third_party/golang.org/x/image/math/fixed/fixed.go create mode 100644 _third_party/golang.org/x/image/math/fixed/fixed_test.go diff --git a/_third_party/github.com/PuerkitoBio/goquery/array.go b/_third_party/github.com/PuerkitoBio/goquery/array.go index d7af5eee1c..78646539a5 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/array.go +++ b/_third_party/github.com/PuerkitoBio/goquery/array.go @@ -1,7 +1,7 @@ package goquery import ( - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) // First reduces the set of matched elements to the first in the set. diff --git a/_third_party/github.com/PuerkitoBio/goquery/expand.go b/_third_party/github.com/PuerkitoBio/goquery/expand.go index 286ee284ab..c42c75e58b 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/expand.go +++ b/_third_party/github.com/PuerkitoBio/goquery/expand.go @@ -2,7 +2,7 @@ package goquery import ( "bosun.org/_third_party/github.com/andybalholm/cascadia" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) // Add adds the selector string's matching nodes to those in the current diff --git a/_third_party/github.com/PuerkitoBio/goquery/filter.go b/_third_party/github.com/PuerkitoBio/goquery/filter.go index 75da9c32c4..5b4da99efe 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/filter.go +++ b/_third_party/github.com/PuerkitoBio/goquery/filter.go @@ -2,7 +2,7 @@ package goquery import ( "bosun.org/_third_party/github.com/andybalholm/cascadia" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) // Filter reduces the set of matched elements to those that match the selector string. 
diff --git a/_third_party/github.com/PuerkitoBio/goquery/iteration_test.go b/_third_party/github.com/PuerkitoBio/goquery/iteration_test.go index 9b6aafb7b6..33434047db 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/iteration_test.go +++ b/_third_party/github.com/PuerkitoBio/goquery/iteration_test.go @@ -3,7 +3,7 @@ package goquery import ( "testing" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) func TestEach(t *testing.T) { diff --git a/_third_party/github.com/PuerkitoBio/goquery/manipulation.go b/_third_party/github.com/PuerkitoBio/goquery/manipulation.go index e673334f4e..8b278f0d49 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/manipulation.go +++ b/_third_party/github.com/PuerkitoBio/goquery/manipulation.go @@ -4,7 +4,7 @@ import ( "strings" "bosun.org/_third_party/github.com/andybalholm/cascadia" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) // After applies the selector from the root document and inserts the matched elements diff --git a/_third_party/github.com/PuerkitoBio/goquery/property.go b/_third_party/github.com/PuerkitoBio/goquery/property.go index af3a9eacb5..ca8641a84c 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/property.go +++ b/_third_party/github.com/PuerkitoBio/goquery/property.go @@ -5,7 +5,7 @@ import ( "regexp" "strings" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) var rxClassTrim = regexp.MustCompile("[\t\r\n]") diff --git a/_third_party/github.com/PuerkitoBio/goquery/query.go b/_third_party/github.com/PuerkitoBio/goquery/query.go index d16e323341..98b81dccfe 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/query.go +++ b/_third_party/github.com/PuerkitoBio/goquery/query.go @@ -2,7 +2,7 @@ package goquery import ( "bosun.org/_third_party/github.com/andybalholm/cascadia" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) // Is checks the current matched set of elements against a selector and diff --git a/_third_party/github.com/PuerkitoBio/goquery/traversal.go b/_third_party/github.com/PuerkitoBio/goquery/traversal.go index f039755cd5..72f645ae28 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/traversal.go +++ b/_third_party/github.com/PuerkitoBio/goquery/traversal.go @@ -2,7 +2,7 @@ package goquery import ( "bosun.org/_third_party/github.com/andybalholm/cascadia" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) type siblingType int diff --git a/_third_party/github.com/PuerkitoBio/goquery/type.go b/_third_party/github.com/PuerkitoBio/goquery/type.go index 2f871204e2..880a78cfc9 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/type.go +++ b/_third_party/github.com/PuerkitoBio/goquery/type.go @@ -6,7 +6,7 @@ import ( "net/http" "net/url" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) // Document represents an HTML document to be manipulated. 
Unlike jQuery, which diff --git a/_third_party/github.com/PuerkitoBio/goquery/type_test.go b/_third_party/github.com/PuerkitoBio/goquery/type_test.go index 98ee3a64d1..36b142bdfd 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/type_test.go +++ b/_third_party/github.com/PuerkitoBio/goquery/type_test.go @@ -6,7 +6,7 @@ import ( "os" "testing" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) // Test helper functions and members diff --git a/_third_party/github.com/PuerkitoBio/goquery/utilities.go b/_third_party/github.com/PuerkitoBio/goquery/utilities.go index aa509e6ab3..29f8ec4abd 100644 --- a/_third_party/github.com/PuerkitoBio/goquery/utilities.go +++ b/_third_party/github.com/PuerkitoBio/goquery/utilities.go @@ -1,7 +1,7 @@ package goquery import ( - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) func getChildren(n *html.Node) (result []*html.Node) { diff --git a/_third_party/github.com/andybalholm/cascadia/benchmark_test.go b/_third_party/github.com/andybalholm/cascadia/benchmark_test.go index 42bf50006b..df1c4c2bf6 100644 --- a/_third_party/github.com/andybalholm/cascadia/benchmark_test.go +++ b/_third_party/github.com/andybalholm/cascadia/benchmark_test.go @@ -4,7 +4,7 @@ import ( "strings" "testing" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) func MustParseHTML(doc string) *html.Node { diff --git a/_third_party/github.com/andybalholm/cascadia/parser.go b/_third_party/github.com/andybalholm/cascadia/parser.go index 42af28c5bb..3973f959b6 100644 --- a/_third_party/github.com/andybalholm/cascadia/parser.go +++ b/_third_party/github.com/andybalholm/cascadia/parser.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) // a parser for CSS selectors diff --git a/_third_party/github.com/andybalholm/cascadia/selector.go b/_third_party/github.com/andybalholm/cascadia/selector.go index aeffdbd54d..7331709fff 100644 --- a/_third_party/github.com/andybalholm/cascadia/selector.go +++ b/_third_party/github.com/andybalholm/cascadia/selector.go @@ -6,7 +6,7 @@ import ( "regexp" "strings" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) // the Selector type, and functions for creating them diff --git a/_third_party/github.com/andybalholm/cascadia/selector_test.go b/_third_party/github.com/andybalholm/cascadia/selector_test.go index 8438d384bd..62b5417f42 100644 --- a/_third_party/github.com/andybalholm/cascadia/selector_test.go +++ b/_third_party/github.com/andybalholm/cascadia/selector_test.go @@ -4,7 +4,7 @@ import ( "strings" "testing" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) type selectorTest struct { diff --git a/_third_party/github.com/aymerick/douceur/inliner/inliner.go b/_third_party/github.com/aymerick/douceur/inliner/inliner.go index 43dc73d231..d0f6dce510 100644 --- a/_third_party/github.com/aymerick/douceur/inliner/inliner.go +++ b/_third_party/github.com/aymerick/douceur/inliner/inliner.go @@ -8,7 +8,7 @@ import ( "bosun.org/_third_party/github.com/PuerkitoBio/goquery" "bosun.org/_third_party/github.com/aymerick/douceur/css" "bosun.org/_third_party/github.com/aymerick/douceur/parser" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) const ( diff --git a/_third_party/github.com/boltdb/bolt/bolt_unix_solaris.go b/_third_party/github.com/boltdb/bolt/bolt_unix_solaris.go index 3360e4ff89..6134ea9539 100644 --- 
a/_third_party/github.com/boltdb/bolt/bolt_unix_solaris.go +++ b/_third_party/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -1,8 +1,8 @@ package bolt import ( + "bosun.org/_third_party/golang.org/x/sys/unix" "fmt" - "golang.org/x/sys/unix" "os" "syscall" "time" diff --git a/_third_party/github.com/golang/freetype/freetype.go b/_third_party/github.com/golang/freetype/freetype.go index a2d385c795..e6e941124b 100644 --- a/_third_party/github.com/golang/freetype/freetype.go +++ b/_third_party/github.com/golang/freetype/freetype.go @@ -15,8 +15,8 @@ import ( "bosun.org/_third_party/github.com/golang/freetype/raster" "bosun.org/_third_party/github.com/golang/freetype/truetype" - "golang.org/x/image/font" - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/font" + "bosun.org/_third_party/golang.org/x/image/math/fixed" ) // These constants determine the size of the glyph cache. The cache is keyed diff --git a/_third_party/github.com/golang/freetype/raster/geom.go b/_third_party/github.com/golang/freetype/raster/geom.go index f3696ea983..8669b55bd0 100644 --- a/_third_party/github.com/golang/freetype/raster/geom.go +++ b/_third_party/github.com/golang/freetype/raster/geom.go @@ -9,7 +9,7 @@ import ( "fmt" "math" - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/math/fixed" ) // maxAbs returns the maximum of abs(a) and abs(b). diff --git a/_third_party/github.com/golang/freetype/raster/raster.go b/_third_party/github.com/golang/freetype/raster/raster.go index 3503b650eb..d07f9369cc 100644 --- a/_third_party/github.com/golang/freetype/raster/raster.go +++ b/_third_party/github.com/golang/freetype/raster/raster.go @@ -18,7 +18,7 @@ package raster // import "bosun.org/_third_party/github.com/golang/freetype/rast import ( "strconv" - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/math/fixed" ) // A cell is part of a linked list (for a given yi co-ordinate) of accumulated diff --git a/_third_party/github.com/golang/freetype/raster/stroke.go b/_third_party/github.com/golang/freetype/raster/stroke.go index 8d43797573..319104cc7e 100644 --- a/_third_party/github.com/golang/freetype/raster/stroke.go +++ b/_third_party/github.com/golang/freetype/raster/stroke.go @@ -6,7 +6,7 @@ package raster import ( - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/math/fixed" ) // Two points are considered practically equal if the square of the distance diff --git a/_third_party/github.com/golang/freetype/truetype/face.go b/_third_party/github.com/golang/freetype/truetype/face.go index 0f056e6872..856666f312 100644 --- a/_third_party/github.com/golang/freetype/truetype/face.go +++ b/_third_party/github.com/golang/freetype/truetype/face.go @@ -9,8 +9,8 @@ import ( "image" "bosun.org/_third_party/github.com/golang/freetype/raster" - "golang.org/x/image/font" - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/font" + "bosun.org/_third_party/golang.org/x/image/math/fixed" ) func powerOf2(i int) bool { diff --git a/_third_party/github.com/golang/freetype/truetype/face_test.go b/_third_party/github.com/golang/freetype/truetype/face_test.go index 856581dff4..6aa35fc4a7 100644 --- a/_third_party/github.com/golang/freetype/truetype/face_test.go +++ b/_third_party/github.com/golang/freetype/truetype/face_test.go @@ -12,8 +12,8 @@ import ( "strings" "testing" - "golang.org/x/image/font" - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/font" + 
"bosun.org/_third_party/golang.org/x/image/math/fixed" ) func BenchmarkDrawString(b *testing.B) { diff --git a/_third_party/github.com/golang/freetype/truetype/glyph.go b/_third_party/github.com/golang/freetype/truetype/glyph.go index c2935a58eb..9f70bcfc03 100644 --- a/_third_party/github.com/golang/freetype/truetype/glyph.go +++ b/_third_party/github.com/golang/freetype/truetype/glyph.go @@ -6,8 +6,8 @@ package truetype import ( - "golang.org/x/image/font" - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/font" + "bosun.org/_third_party/golang.org/x/image/math/fixed" ) // TODO: implement VerticalHinting. diff --git a/_third_party/github.com/golang/freetype/truetype/hint.go b/_third_party/github.com/golang/freetype/truetype/hint.go index 0315de511c..1c8ee26885 100644 --- a/_third_party/github.com/golang/freetype/truetype/hint.go +++ b/_third_party/github.com/golang/freetype/truetype/hint.go @@ -12,7 +12,7 @@ import ( "errors" "math" - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/math/fixed" ) const ( diff --git a/_third_party/github.com/golang/freetype/truetype/hint_test.go b/_third_party/github.com/golang/freetype/truetype/hint_test.go index 7eb43dde07..11e48cb068 100644 --- a/_third_party/github.com/golang/freetype/truetype/hint_test.go +++ b/_third_party/github.com/golang/freetype/truetype/hint_test.go @@ -10,7 +10,7 @@ import ( "strings" "testing" - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/math/fixed" ) func TestBytecode(t *testing.T) { diff --git a/_third_party/github.com/golang/freetype/truetype/truetype.go b/_third_party/github.com/golang/freetype/truetype/truetype.go index 76b911782a..7c1d1add40 100644 --- a/_third_party/github.com/golang/freetype/truetype/truetype.go +++ b/_third_party/github.com/golang/freetype/truetype/truetype.go @@ -20,7 +20,7 @@ package truetype // import "bosun.org/_third_party/github.com/golang/freetype/tr import ( "fmt" - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/math/fixed" ) // An Index is a Font's index of a rune. 
diff --git a/_third_party/github.com/golang/freetype/truetype/truetype_test.go b/_third_party/github.com/golang/freetype/truetype/truetype_test.go index bd62d1da16..606892c4d7 100644 --- a/_third_party/github.com/golang/freetype/truetype/truetype_test.go +++ b/_third_party/github.com/golang/freetype/truetype/truetype_test.go @@ -15,8 +15,8 @@ import ( "strings" "testing" - "golang.org/x/image/font" - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/font" + "bosun.org/_third_party/golang.org/x/image/math/fixed" ) func parseTestdataFont(name string) (f *Font, testdataIsOptional bool, err error) { diff --git a/_third_party/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go b/_third_party/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go index bdf448d521..df2e49f851 100644 --- a/_third_party/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go +++ b/_third_party/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go @@ -19,7 +19,7 @@ package codec import ( "testing" - vmsgpack "gopkg.in/vmihailenco/msgpack.v2" + vmsgpack "bosun.org/_third_party/gopkg.in/vmihailenco/msgpack.v2" "labix.org/v2/mgo/bson" ) diff --git a/_third_party/github.com/influxdb/influxdb/CHANGELOG.md b/_third_party/github.com/influxdb/influxdb/CHANGELOG.md new file mode 100644 index 0000000000..ba2bed39d6 --- /dev/null +++ b/_third_party/github.com/influxdb/influxdb/CHANGELOG.md @@ -0,0 +1,1705 @@ +## v0.9.5 [unreleased] + +### Features +- [#4141](https://github.com/influxdb/influxdb/pull/4141): Control whether each query should be logged +- [#4065](https://github.com/influxdb/influxdb/pull/4065): Added precision support in cmd client. Thanks @sbouchex +- [#4140](https://github.com/influxdb/influxdb/pull/4140): Make storage engine configurable +- [#4161](https://github.com/influxdb/influxdb/pull/4161): Implement bottom selector function +- [#4204](https://github.com/influxdb/influxdb/pull/4204): Allow module-level selection for SHOW STATS +- [#4208](https://github.com/influxdb/influxdb/pull/4208): Allow module-level selection for SHOW DIAGNOSTICS +- [#4196](https://github.com/influxdb/influxdb/pull/4196): Export tsdb.Iterator +- [#4198](https://github.com/influxdb/influxdb/pull/4198): Add basic cluster-service stats +- [#4262](https://github.com/influxdb/influxdb/pull/4262): Allow configuration of UDP retention policy +- [#4265](https://github.com/influxdb/influxdb/pull/4265): Add statistics for Hinted-Handoff +- [#4284](https://github.com/influxdb/influxdb/pull/4284): Add exponential backoff for hinted-handoff failures + +### Bugfixes +- [#4166](https://github.com/influxdb/influxdb/pull/4166): Fix parser error on invalid SHOW +- [#3457](https://github.com/influxdb/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + "." that match the measurement name +- [#4225](https://github.com/influxdb/influxdb/pull/4225): Always display diags in name-sorted order +- [#4111](https://github.com/influxdb/influxdb/pull/4111): Update pre-commit hook for go vet composites +- [#4136](https://github.com/influxdb/influxdb/pull/4136): Return an error-on-write if target retention policy does not exist. Thanks for the report @ymettier +- [#4228](https://github.com/influxdb/influxdb/pull/4228): Add build timestamp to version information. +- [#4124](https://github.com/influxdb/influxdb/issues/4124): Missing defer/recover/panic idiom in HTTPD service +- [#4238](https://github.com/influxdb/influxdb/pull/4238): Fully disable hinted-handoff service if so requested. 
+- [#4165](https://github.com/influxdb/influxdb/pull/4165): Tag all Go runtime stats when writing to internal database. +- [#4118](https://github.com/influxdb/influxdb/issues/4118): Return consistent, correct result for SHOW MEASUREMENTS with multiple AND conditions +- [#4191](https://github.com/influxdb/influxdb/pull/4191): Correctly marshal remote mapper responses. Fixes [#4170](https://github.com/influxdb/influxdb/issues/4170) +- [#4222](https://github.com/influxdb/influxdb/pull/4222): Graphite TCP connections should not block shutdown +- [#4180](https://github.com/influxdb/influxdb/pull/4180): Cursor & SelectMapper Refactor +- [#1577](https://github.com/influxdb/influxdb/issues/1577): selectors (e.g. min, max, first, last) should have equivalents to return the actual point +- [#4264](https://github.com/influxdb/influxdb/issues/4264): Refactor map functions to use list of values +- [#4278](https://github.com/influxdb/influxdb/pull/4278): Fix error marshalling across the cluster +- [#4149](https://github.com/influxdb/influxdb/pull/4149): Fix derivative unnecessarily requires aggregate function. Thanks @peekeri! +- [#4237](https://github.com/influxdb/influxdb/issues/4237): DERIVATIVE() edge conditions +- [#4263](https://github.com/influxdb/influxdb/issues/4263): derivative does not work when data is missing +- [#4293](https://github.com/influxdb/influxdb/pull/4293): Ensure shell is invoked when touching PID file. Thanks @christopherjdickson +- [#4296](https://github.com/influxdb/influxdb/pull/4296): Reject line protocol ending with '-'. Fixes [#4272](https://github.com/influxdb/influxdb/issues/4272) + +## v0.9.4 [2015-09-14] + +### Release Notes +With this release InfluxDB is moving to Go 1.5. + +### Features +- [#4050](https://github.com/influxdb/influxdb/pull/4050): Add stats to collectd +- [#3771](https://github.com/influxdb/influxdb/pull/3771): Close idle Graphite TCP connections +- [#3755](https://github.com/influxdb/influxdb/issues/3755): Add option to build script. Thanks @fg2it +- [#3863](https://github.com/influxdb/influxdb/pull/3863): Move to Go 1.5 +- [#3892](https://github.com/influxdb/influxdb/pull/3892): Support IF NOT EXISTS for CREATE DATABASE +- [#3916](https://github.com/influxdb/influxdb/pull/3916): New statistics and diagnostics support. Graphite first to be instrumented. +- [#3901](https://github.com/influxdb/influxdb/pull/3901): Add consistency level option to influx cli Thanks @takayuki +- [#4048](https://github.com/influxdb/influxdb/pull/4048): Add statistics to Continuous Query service +- [#4049](https://github.com/influxdb/influxdb/pull/4049): Add stats to the UDP input +- [#3876](https://github.com/influxdb/influxdb/pull/3876): Allow the following syntax in CQs: INTO "1hPolicy".:MEASUREMENT +- [#3975](https://github.com/influxdb/influxdb/pull/3975): Add shard copy service +- [#3986](https://github.com/influxdb/influxdb/pull/3986): Support sorting by time desc +- [#3930](https://github.com/influxdb/influxdb/pull/3930): Wire up TOP aggregate function - fixes [#1821](https://github.com/influxdb/influxdb/issues/1821) +- [#4045](https://github.com/influxdb/influxdb/pull/4045): Instrument cluster-level points writer +- [#3996](https://github.com/influxdb/influxdb/pull/3996): Add statistics to httpd package +- [#4003](https://github.com/influxdb/influxdb/pull/4033): Add logrotate configuration. 
+- [#4043](https://github.com/influxdb/influxdb/pull/4043): Add stats and batching to openTSDB input +- [#4042](https://github.com/influxdb/influxdb/pull/4042): Add pending batches control to batcher +- [#4006](https://github.com/influxdb/influxdb/pull/4006): Add basic statistics for shards +- [#4072](https://github.com/influxdb/influxdb/pull/4072): Add statistics for the WAL. + +### Bugfixes +- [#4042](https://github.com/influxdb/influxdb/pull/4042): Set UDP input batching defaults as needed. +- [#3785](https://github.com/influxdb/influxdb/issues/3785): Invalid time stamp in graphite metric causes panic +- [#3804](https://github.com/influxdb/influxdb/pull/3804): init.d script fixes, fixes issue 3803. +- [#3823](https://github.com/influxdb/influxdb/pull/3823): Deterministic ordering for first() and last() +- [#3869](https://github.com/influxdb/influxdb/issues/3869): Seemingly deadlocked when ingesting metrics via graphite plugin +- [#3856](https://github.com/influxdb/influxdb/pull/3856): Minor changes to retention enforcement. +- [#3884](https://github.com/influxdb/influxdb/pull/3884): Fix two panics in WAL that can happen at server startup +- [#3868](https://github.com/influxdb/influxdb/pull/3868): Add shell option to start the daemon on CentOS. Thanks @SwannCroiset. +- [#3886](https://github.com/influxdb/influxdb/pull/3886): Prevent write timeouts due to lock contention in WAL +- [#3574](https://github.com/influxdb/influxdb/issues/3574): Querying data node causes panic +- [#3913](https://github.com/influxdb/influxdb/issues/3913): Convert meta shard owners to objects +- [#4026](https://github.com/influxdb/influxdb/pull/4026): Support multiple Graphite inputs. Fixes issue [#3636](https://github.com/influxdb/influxdb/issues/3636) +- [#3927](https://github.com/influxdb/influxdb/issues/3927): Add WAL lock to prevent timing lock contention +- [#3928](https://github.com/influxdb/influxdb/issues/3928): Write fails for multiple points when tag starts with quote +- [#3901](https://github.com/influxdb/influxdb/pull/3901): Unblock relaxed write consistency level Thanks @takayuki! +- [#3950](https://github.com/influxdb/influxdb/pull/3950): Limit bz1 quickcheck tests to 10 iterations on CI +- [#3977](https://github.com/influxdb/influxdb/pull/3977): Silence wal logging during testing +- [#3931](https://github.com/influxdb/influxdb/pull/3931): Don't precreate shard groups entirely in the past +- [#3960](https://github.com/influxdb/influxdb/issues/3960): possible "catch up" bug with nodes down in a cluster +- [#3980](https://github.com/influxdb/influxdb/pull/3980): 'service stop' waits until service actually stops. Fixes issue #3548. +- [#4016](https://github.com/influxdb/influxdb/pull/4016): Shutdown Graphite UDP on SIGTERM. +- [#4034](https://github.com/influxdb/influxdb/pull/4034): Rollback bolt tx on mapper open error +- [#3848](https://github.com/influxdb/influxdb/issues/3848): restart influxdb causing panic +- [#3881](https://github.com/influxdb/influxdb/issues/3881): panic: runtime error: invalid memory address or nil pointer dereference +- [#3926](https://github.com/influxdb/influxdb/issues/3926): First or last value of `GROUP BY time(x)` is often null. Fixed by [#4038](https://github.com/influxdb/influxdb/pull/4038) +- [#4053](https://github.com/influxdb/influxdb/pull/4053): Prohibit dropping default retention policy. +- [#4060](https://github.com/influxdb/influxdb/pull/4060): Don't log EOF error in openTSDB input. 
+- [#3978](https://github.com/influxdb/influxdb/issues/3978): [0.9.3] (regression) cannot use GROUP BY * with more than a single field in SELECT clause +- [#4058](https://github.com/influxdb/influxdb/pull/4058): Disable bz1 recompression +- [#3902](https://github.com/influxdb/influxdb/issues/3902): [0.9.3] DB should not crash when using invalid expression "GROUP BY time" +- [#3718](https://github.com/influxdb/influxdb/issues/3718): Derivative query with group by time but no aggregate function should fail parse + +## v0.9.3 [2015-08-26] + +### Release Notes + +There are breaking changes in this release. + - To store data points as integers you must now append `i` to the number if using the line protocol. + - If you have a UDP input configured, you should check the UDP section of [the new sample configuration file](https://github.com/influxdb/influxdb/blob/master/etc/config.sample.toml) to learn how to modify existing configuration files, as 0.9.3 now expects multiple UDP inputs. + - Configuration files must now have an entry for `wal-dir` in the `[data]` section. Check [new sample configuration file](https://github.com/influxdb/influxdb/blob/master/etc/config.sample.toml) for more details. + - The implicit `GROUP BY *` that was added to every `SELECT *` has been removed. Instead any tags in the data are now part of the columns in the returned query. + +Please see the *Features* section below for full details. + +### Features +- [#3376](https://github.com/influxdb/influxdb/pull/3376): Support for remote shard query mapping +- [#3372](https://github.com/influxdb/influxdb/pull/3372): Support joining nodes to existing cluster +- [#3426](https://github.com/influxdb/influxdb/pull/3426): Additional logging for continuous queries. Thanks @jhorwit2 +- [#3478](https://github.com/influxdb/influxdb/pull/3478): Support incremental cluster joins +- [#3519](https://github.com/influxdb/influxdb/pull/3519): **--BREAKING CHANGE--** Update line protocol to require trailing i for field values that are integers +- [#3529](https://github.com/influxdb/influxdb/pull/3529): Add TLS support for OpenTSDB plugin. Thanks @nathanielc +- [#3421](https://github.com/influxdb/influxdb/issues/3421): Should update metastore and cluster if IP or hostname changes +- [#3502](https://github.com/influxdb/influxdb/pull/3502): Importer for 0.8.9 data via the CLI +- [#3564](https://github.com/influxdb/influxdb/pull/3564): Fix alias, maintain column sort order +- [#3585](https://github.com/influxdb/influxdb/pull/3585): Additional test coverage for non-existent fields +- [#3246](https://github.com/influxdb/influxdb/issues/3246): Allow overriding of configuration parameters using environment variables +- [#3599](https://github.com/influxdb/influxdb/pull/3599): **--BREAKING CHANGE--** Support multiple UDP inputs. Thanks @tpitale +- [#3636](https://github.com/influxdb/influxdb/pull/3639): Cap auto-created retention policy replica count at 3 +- [#3641](https://github.com/influxdb/influxdb/pull/3641): Logging enhancements and single-node rename +- [#3635](https://github.com/influxdb/influxdb/pull/3635): Add build branch to version output. +- [#3115](https://github.com/influxdb/influxdb/pull/3115): Various init.d script improvements. Thanks @KoeSystems. 
+- [#3628](https://github.com/influxdb/influxdb/pull/3628): Wildcard expansion of tags and fields for raw queries +- [#3721](https://github.com/influxdb/influxdb/pull/3721): interpret number literals compared against time as nanoseconds from epoch +- [#3514](https://github.com/influxdb/influxdb/issues/3514): Implement WAL outside BoltDB with compaction +- [#3544](https://github.com/influxdb/influxdb/pull/3544): Implement compression on top of BoltDB +- [#3795](https://github.com/influxdb/influxdb/pull/3795): Throttle import +- [#3584](https://github.com/influxdb/influxdb/pull/3584): Import/export documenation + +### Bugfixes +- [#3405](https://github.com/influxdb/influxdb/pull/3405): Prevent database panic when fields are missing. Thanks @jhorwit2 +- [#3411](https://github.com/influxdb/influxdb/issues/3411): 500 timeout on write +- [#3420](https://github.com/influxdb/influxdb/pull/3420): Catch opentsdb malformed tags. Thanks @nathanielc. +- [#3404](https://github.com/influxdb/influxdb/pull/3404): Added support for escaped single quotes in query string. Thanks @jhorwit2 +- [#3414](https://github.com/influxdb/influxdb/issues/3414): Shard mappers perform query re-writing +- [#3525](https://github.com/influxdb/influxdb/pull/3525): check if fields are valid during parse time. +- [#3511](https://github.com/influxdb/influxdb/issues/3511): Sending a large number of tag causes panic +- [#3288](https://github.com/influxdb/influxdb/issues/3288): Run go fuzz on the line-protocol input +- [#3545](https://github.com/influxdb/influxdb/issues/3545): Fix parsing string fields with newlines +- [#3579](https://github.com/influxdb/influxdb/issues/3579): Revert breaking change to `client.NewClient` function +- [#3580](https://github.com/influxdb/influxdb/issues/3580): Do not allow wildcards with fields in select statements +- [#3530](https://github.com/influxdb/influxdb/pull/3530): Aliasing a column no longer works +- [#3436](https://github.com/influxdb/influxdb/issues/3436): Fix panic in hinted handoff queue processor +- [#3401](https://github.com/influxdb/influxdb/issues/3401): Derivative on non-numeric fields panics db +- [#3583](https://github.com/influxdb/influxdb/issues/3583): Inserting value in scientific notation with a trailing i causes panic +- [#3611](https://github.com/influxdb/influxdb/pull/3611): Fix query arithmetic with integers +- [#3326](https://github.com/influxdb/influxdb/issues/3326): simple regex query fails with cryptic error +- [#3618](https://github.com/influxdb/influxdb/pull/3618): Fix collectd stats panic on i386. Thanks @richterger +- [#3625](https://github.com/influxdb/influxdb/pull/3625): Don't panic when aggregate and raw queries are in a single statement +- [#3629](https://github.com/influxdb/influxdb/pull/3629): Use sensible batching defaults for Graphite. +- [#3638](https://github.com/influxdb/influxdb/pull/3638): Cluster config fixes and removal of meta.peers config field +- [#3640](https://github.com/influxdb/influxdb/pull/3640): Shutdown Graphite service when signal received. +- [#3632](https://github.com/influxdb/influxdb/issues/3632): Make single-node host renames more seamless +- [#3656](https://github.com/influxdb/influxdb/issues/3656): Silence snapshotter logger for testing +- [#3651](https://github.com/influxdb/influxdb/pull/3651): Fully remove series when dropped. +- [#3517](https://github.com/influxdb/influxdb/pull/3517): Batch CQ writes to avoid timeouts. Thanks @dim. 
+- [#3522](https://github.com/influxdb/influxdb/pull/3522): Consume CQ results on request timeouts. Thanks @dim. +- [#3646](https://github.com/influxdb/influxdb/pull/3646): Fix nil FieldCodec panic. +- [#3672](https://github.com/influxdb/influxdb/pull/3672): Reduce in-memory index by 20%-30% +- [#3673](https://github.com/influxdb/influxdb/pull/3673): Improve query performance by removing unnecessary tagset sorting. +- [#3676](https://github.com/influxdb/influxdb/pull/3676): Improve query performance by memomizing mapper output keys. +- [#3686](https://github.com/influxdb/influxdb/pull/3686): Ensure 'p' parameter is not logged, even on OPTIONS requests. +- [#3687](https://github.com/influxdb/influxdb/issues/3687): Fix panic: runtime error: makeslice: len out of range in hinted handoff +- [#3697](https://github.com/influxdb/influxdb/issues/3697): Correctly merge non-chunked results for same series. Fix issue #3242. +- [#3708](https://github.com/influxdb/influxdb/issues/3708): Fix double escaping measurement name during cluster replication +- [#3704](https://github.com/influxdb/influxdb/issues/3704): cluster replication issue for measurement name containing backslash +- [#3681](https://github.com/influxdb/influxdb/issues/3681): Quoted measurement names fail +- [#3681](https://github.com/influxdb/influxdb/issues/3682): Fix inserting string value with backslashes +- [#3735](https://github.com/influxdb/influxdb/issues/3735): Append to small bz1 blocks +- [#3736](https://github.com/influxdb/influxdb/pull/3736): Update shard group duration with retention policy changes. Thanks for the report @papylhomme +- [#3539](https://github.com/influxdb/influxdb/issues/3539): parser incorrectly accepts NaN as numerical value, but not always +- [#3790](https://github.com/influxdb/influxdb/pull/3790): Fix line protocol parsing equals in measurements and NaN values +- [#3778](https://github.com/influxdb/influxdb/pull/3778): Don't panic if SELECT on time. +- [#3824](https://github.com/influxdb/influxdb/issues/3824): tsdb.Point.MarshalBinary needs to support all number types +- [#3828](https://github.com/influxdb/influxdb/pull/3828): Support all number types when decoding a point +- [#3853](https://github.com/influxdb/influxdb/pull/3853): Use 4KB default block size for bz1 +- [#3607](https://github.com/influxdb/influxdb/issues/3607): Fix unable to query influxdb due to deadlock in metastore. Thanks @ccutrer! + +## v0.9.2 [2015-07-24] + +### Features +- [#3177](https://github.com/influxdb/influxdb/pull/3177): Client supports making HTTPS requests. Thanks @jipperinbham +- [#3299](https://github.com/influxdb/influxdb/pull/3299): Refactor query engine for distributed query support. +- [#3334](https://github.com/influxdb/influxdb/pull/3334): Clean shutdown of influxd. Thanks @mcastilho + +### Bugfixes + +- [#3180](https://github.com/influxdb/influxdb/pull/3180): Log GOMAXPROCS, version, and commit on startup. +- [#3218](https://github.com/influxdb/influxdb/pull/3218): Allow write timeouts to be configurable. +- [#3184](https://github.com/influxdb/influxdb/pull/3184): Support basic auth in admin interface. Thanks @jipperinbham! +- [#3236](https://github.com/influxdb/influxdb/pull/3236): Fix display issues in admin interface. +- [#3232](https://github.com/influxdb/influxdb/pull/3232): Set logging prefix for metastore. 
+- [#3230](https://github.com/influxdb/influxdb/issues/3230): panic: unable to parse bool value +- [#3245](https://github.com/influxdb/influxdb/issues/3245): Error using graphite plugin with multiple filters +- [#3223](https://github.com/influxdb/influxdb/issues/323): default graphite template cannot have extra tags +- [#3255](https://github.com/influxdb/influxdb/pull/3255): Flush WAL on start-up as soon as possible. +- [#3289](https://github.com/influxdb/influxdb/issues/3289): InfluxDB crashes on floats without decimal +- [#3298](https://github.com/influxdb/influxdb/pull/3298): Corrected WAL & flush parameters in default config. Thanks @jhorwit2 +- [#3152](https://github.com/influxdb/influxdb/issues/3159): High CPU Usage with unsorted writes +- [#3307](https://github.com/influxdb/influxdb/pull/3307): Fix regression parsing boolean values True/False +- [#3304](https://github.com/influxdb/influxdb/pull/3304): Fixed httpd logger to log user from query params. Thanks @jhorwit2 +- [#3332](https://github.com/influxdb/influxdb/pull/3332): Add SLIMIT and SOFFSET to string version of AST. +- [#3335](https://github.com/influxdb/influxdb/pull/3335): Don't drop all data on DROP DATABASE. Thanks to @PierreF for the report +- [#2761](https://github.com/influxdb/influxdb/issues/2761): Make SHOW RETENTION POLICIES consistent with other queries. +- [#3356](https://github.com/influxdb/influxdb/pull/3356): Disregard semicolons after database name in use command. Thanks @timraymond. +- [#3351](https://github.com/influxdb/influxdb/pull/3351): Handle malformed regex comparisons during parsing. Thanks @rnubel +- [#3244](https://github.com/influxdb/influxdb/pull/3244): Wire up admin privilege grant and revoke. +- [#3259](https://github.com/influxdb/influxdb/issues/3259): Respect privileges for queries. +- [#3256](https://github.com/influxdb/influxdb/pull/3256): Remove unnecessary timeout in WaitForLeader(). Thanks @cannium. +- [#3380](https://github.com/influxdb/influxdb/issue/3380): Parser fix, only allow ORDER BY ASC and ORDER BY time ASC. +- [#3319](https://github.com/influxdb/influxdb/issues/3319): restarting process irrevocably BREAKS measurements with spaces +- [#3453](https://github.com/influxdb/influxdb/issues/3453): Remove outdated `dump` command from CLI. +- [#3463](https://github.com/influxdb/influxdb/issues/3463): Fix aggregate queries and time precision on where clauses. + +## v0.9.1 [2015-07-02] + +### Features + +- [2650](https://github.com/influxdb/influxdb/pull/2650): Add SHOW GRANTS FOR USER statement. Thanks @n1tr0g. +- [3125](https://github.com/influxdb/influxdb/pull/3125): Graphite Input Protocol Parsing +- [2746](https://github.com/influxdb/influxdb/pull/2746): New Admin UI/interface +- [3036](https://github.com/influxdb/influxdb/pull/3036): Write Ahead Log (WAL) +- [3014](https://github.com/influxdb/influxdb/issues/3014): Implement Raft snapshots + +### Bugfixes + +- [3013](https://github.com/influxdb/influxdb/issues/3013): Panic error with inserting values with commas +- [#2956](https://github.com/influxdb/influxdb/issues/2956): Type mismatch in derivative +- [#2908](https://github.com/influxdb/influxdb/issues/2908): Field mismatch error messages need to be updated +- [#2931](https://github.com/influxdb/influxdb/pull/2931): Services and reporting should wait until cluster has leader. 
+- [#2943](https://github.com/influxdb/influxdb/issues/2943): Ensure default retention policies are fully replicated +- [#2948](https://github.com/influxdb/influxdb/issues/2948): Field mismatch error message to include measurement name +- [#2919](https://github.com/influxdb/influxdb/issues/2919): Unable to insert negative floats +- [#2935](https://github.com/influxdb/influxdb/issues/2935): Hook CPU and memory profiling back up. +- [#2960](https://github.com/influxdb/influxdb/issues/2960): Cluster Write Errors. +- [#2928](https://github.com/influxdb/influxdb/pull/2928): Start work to set InfluxDB version in HTTP response headers. Thanks @neonstalwart. +- [#2969](https://github.com/influxdb/influxdb/pull/2969): Actually set HTTP version in responses. +- [#2993](https://github.com/influxdb/influxdb/pull/2993): Don't log each UDP batch. +- [#2994](https://github.com/influxdb/influxdb/pull/2994): Don't panic during wilcard expansion if no default database specified. +- [#3002](https://github.com/influxdb/influxdb/pull/3002): Remove measurement from shard's index on DROP MEASUREMENT. +- [#3021](https://github.com/influxdb/influxdb/pull/3021): Correct set HTTP write trace logging. Thanks @vladlopes. +- [#3027](https://github.com/influxdb/influxdb/pull/3027): Enforce minimum retention policy duration of 1 hour. +- [#3030](https://github.com/influxdb/influxdb/pull/3030): Fix excessive logging of shard creation. +- [#3038](https://github.com/influxdb/influxdb/pull/3038): Don't check deleted shards for precreation. Thanks @vladlopes. +- [#3033](https://github.com/influxdb/influxdb/pull/3033): Add support for marshaling `uint64` in client. +- [#3090](https://github.com/influxdb/influxdb/pull/3090): Remove database from TSDB index on DROP DATABASE. +- [#2944](https://github.com/influxdb/influxdb/issues/2944): Don't require "WHERE time" when creating continuous queries. +- [#3075](https://github.com/influxdb/influxdb/pull/3075): GROUP BY correctly when different tags have same value. +- [#3078](https://github.com/influxdb/influxdb/pull/3078): Fix CLI panic on malformed INSERT. +- [#2102](https://github.com/influxdb/influxdb/issues/2102): Re-work Graphite input and metric processing +- [#2996](https://github.com/influxdb/influxdb/issues/2996): Graphite Input Parsing +- [#3136](https://github.com/influxdb/influxdb/pull/3136): Fix various issues with init.d script. Thanks @ miguelcnf. +- [#2996](https://github.com/influxdb/influxdb/issues/2996): Graphite Input Parsing +- [#3127](https://github.com/influxdb/influxdb/issues/3127): Trying to insert a number larger than the largest signed 64-bit number kills influxd +- [#3131](https://github.com/influxdb/influxdb/pull/3131): Copy batch tags to each point before marshalling +- [#3155](https://github.com/influxdb/influxdb/pull/3155): Instantiate UDP batcher before listening for UDP traffic, otherwise a panic may result. 
+- [#2678](https://github.com/influxdb/influxdb/issues/2678): Server allows tags with an empty string for the key and/or value
+- [#3061](https://github.com/influxdb/influxdb/issues/3061): syntactically incorrect line protocol insert panics the database
+- [#2608](https://github.com/influxdb/influxdb/issues/2608): Dropping a measurement while writing points to it has a race condition that can panic
+- [#3183](https://github.com/influxdb/influxdb/issues/3183): Using line protocol, measurement names cannot contain commas
+- [#3193](https://github.com/influxdb/influxdb/pull/3193): Fix panic for SHOW STATS and in collectd
+- [#3102](https://github.com/influxdb/influxdb/issues/3102): Add authentication cache
+- [#3209](https://github.com/influxdb/influxdb/pull/3209): Dump Run() errors to stderr
+- [#3217](https://github.com/influxdb/influxdb/pull/3217): Allow WAL partition flush delay to be configurable.
+
+## v0.9.0 [2015-06-11]
+
+### Bugfixes
+
+- [#2869](https://github.com/influxdb/influxdb/issues/2869): Adding field to existing measurement causes panic
+- [#2849](https://github.com/influxdb/influxdb/issues/2849): RC32: Frequent write errors
+- [#2700](https://github.com/influxdb/influxdb/issues/2700): Incorrect error message in database EncodeFields
+- [#2897](https://github.com/influxdb/influxdb/pull/2897): Ensure target Graphite database exists
+- [#2898](https://github.com/influxdb/influxdb/pull/2898): Ensure target openTSDB database exists
+- [#2895](https://github.com/influxdb/influxdb/pull/2895): Use Graphite input defaults where necessary
+- [#2900](https://github.com/influxdb/influxdb/pull/2900): Use openTSDB input defaults where necessary
+- [#2886](https://github.com/influxdb/influxdb/issues/2886): Refactor backup & restore
+- [#2804](https://github.com/influxdb/influxdb/pull/2804): BREAKING: change time literals to be single quoted in InfluxQL. Thanks @nvcook42!
+- [#2906](https://github.com/influxdb/influxdb/pull/2906): Restrict replication factor to the cluster size
+- [#2905](https://github.com/influxdb/influxdb/pull/2905): Restrict clusters to 3 peers
+- [#2904](https://github.com/influxdb/influxdb/pull/2904): Re-enable server reporting.
+- [#2917](https://github.com/influxdb/influxdb/pull/2917): Fix int64 field values.
+- [#2920](https://github.com/influxdb/influxdb/issues/2920): Ensure collectd database exists
+
+## v0.9.0-rc33 [2015-06-09]
+
+### Bugfixes
+
+- [#2816](https://github.com/influxdb/influxdb/pull/2816): Enable UDP service. Thanks @renan-
+- [#2824](https://github.com/influxdb/influxdb/pull/2824): Add missing call to WaitGroup.Done in execConn. Thanks @liyichao
+- [#2823](https://github.com/influxdb/influxdb/pull/2823): Convert OpenTSDB to a service.
+- [#2838](https://github.com/influxdb/influxdb/pull/2838): Set auto-created retention policy period to infinite.
+- [#2829](https://github.com/influxdb/influxdb/pull/2829): Re-enable Graphite support as a new Service-style component.
+- [#2814](https://github.com/influxdb/influxdb/issues/2814): Convert collectd to a service.
+- [#2852](https://github.com/influxdb/influxdb/pull/2852): Don't panic when altering retention policies. Thanks for the report @huhongbo
+- [#2857](https://github.com/influxdb/influxdb/issues/2857): Fix parsing commas in string field values.
+- [#2833](https://github.com/influxdb/influxdb/pull/2833): Make the default config valid.
+- [#2859](https://github.com/influxdb/influxdb/pull/2859): Fix panic on aggregate functions.
+- [#2878](https://github.com/influxdb/influxdb/pull/2878): Re-enable shard precreation.
+- [#2865](https://github.com/influxdb/influxdb/pull/2865): Return an empty set of results if database does not exist in shard metadata.
+
+### Features
+- [#2858](https://github.com/influxdb/influxdb/pull/2858): Support setting openTSDB write consistency.
+
+## v0.9.0-rc32 [2015-06-07]
+
+### Release Notes
+
+This release introduced an updated write path and clustering design. The data format has also changed, so you'll need to wipe out your data to upgrade from RC31. There should be no other data changes before v0.9.0 is released.
+
+### Features
+- [#1997](https://github.com/influxdb/influxdb/pull/1997): Update SELECT * to return tag values.
+- [#2599](https://github.com/influxdb/influxdb/issues/2599): Add "epoch" URL param and return JSON time values as epoch instead of date strings.
+- [#2682](https://github.com/influxdb/influxdb/issues/2682): Add PR checklist to CONTRIBUTING.md
+- [#2683](https://github.com/influxdb/influxdb/issues/2683): Add batching support to Graphite inputs.
+- [#2687](https://github.com/influxdb/influxdb/issues/2687): Add batching support to Collectd inputs.
+- [#2696](https://github.com/influxdb/influxdb/pull/2696): Add line protocol. This is now the preferred way to write data.
+- [#2751](https://github.com/influxdb/influxdb/pull/2751): Add UDP input. UDP only supports the line protocol now.
+- [#2684](https://github.com/influxdb/influxdb/pull/2684): Include client timeout configuration. Thanks @vladlopes!
+
+### Bugfixes
+- [#2776](https://github.com/influxdb/influxdb/issues/2776): Re-implement retention policy enforcement.
+- [#2635](https://github.com/influxdb/influxdb/issues/2635): Fix querying against boolean field in WHERE clause.
+- [#2644](https://github.com/influxdb/influxdb/issues/2644): Make SHOW queries work with FROM /<regex>/.
+- [#2501](https://github.com/influxdb/influxdb/issues/2501): Name the FlagSet for the shell and add a version flag. Thanks @neonstalwart
+- [#2647](https://github.com/influxdb/influxdb/issues/2647): Fix typos in sample config file - thanks @claws!
+
+## v0.9.0-rc31 [2015-05-21]
+
+### Features
+- [#1822](https://github.com/influxdb/influxdb/issues/1822): Wire up DERIVATIVE aggregate
+- [#1477](https://github.com/influxdb/influxdb/issues/1477): Wire up non_negative_derivative function
+- [#2557](https://github.com/influxdb/influxdb/issues/2557): Fix false positive error with `GROUP BY time`
+- [#1891](https://github.com/influxdb/influxdb/issues/1891): Wire up COUNT DISTINCT aggregate
+- [#1989](https://github.com/influxdb/influxdb/issues/1989): Implement `SELECT tagName FROM m`
+
+### Bugfixes
+- [#2545](https://github.com/influxdb/influxdb/pull/2545): Use "value" as the field name for graphite input. Thanks @cannium.
+- [#2558](https://github.com/influxdb/influxdb/pull/2558): Fix client response check - thanks @vladlopes!
+- [#2566](https://github.com/influxdb/influxdb/pull/2566): Wait until each data write has been committed by the Raft cluster.
+- [#2602](https://github.com/influxdb/influxdb/pull/2602): CLI execute command exits without cleaning up liner package.
+- [#2610](https://github.com/influxdb/influxdb/pull/2610): Fix shard group creation
+- [#2596](https://github.com/influxdb/influxdb/pull/2596): RC30: `panic: runtime error: index out of range` when inserting data points.
+- [#2592](https://github.com/influxdb/influxdb/pull/2592): Should return an error if user attempts to group by a field.
+- [#2499](https://github.com/influxdb/influxdb/pull/2499): Issuing a select query with a tag as a value causes panic.
+- [#2612](https://github.com/influxdb/influxdb/pull/2612): Query planner should validate distinct is passed a field.
+- [#2531](https://github.com/influxdb/influxdb/issues/2531): Fix select with 3 or more terms in where clause.
+- [#2564](https://github.com/influxdb/influxdb/issues/2564): Change "name" to "measurement" in JSON for writes.
+
+### PRs
+- [#2569](https://github.com/influxdb/influxdb/pull/2569): Add derivative functions
+- [#2598](https://github.com/influxdb/influxdb/pull/2598): Implement tag support in SELECT statements
+- [#2624](https://github.com/influxdb/influxdb/pull/2624): Remove references to SeriesID in `DROP SERIES` handlers.
+
+## v0.9.0-rc30 [2015-05-12]
+
+### Release Notes
+
+This release has a breaking API change for writes -- the field previously called `timestamp` has been renamed to `time`.
+
+### Features
+- [#2254](https://github.com/influxdb/influxdb/pull/2254): Add Support for OpenTSDB HTTP interface. Thanks @tcolgate
+- [#2525](https://github.com/influxdb/influxdb/pull/2525): Serve broker diagnostics over HTTP
+- [#2186](https://github.com/influxdb/influxdb/pull/2186): The default status code for queries is now `200 OK`
+- [#2298](https://github.com/influxdb/influxdb/pull/2298): Successful writes now return a status code of `204 No Content` - thanks @neonstalwart!
+- [#2549](https://github.com/influxdb/influxdb/pull/2549): Raise Raft election timeout to 5 seconds, so the system is more forgiving of CPU loads.
+- [#2568](https://github.com/influxdb/influxdb/pull/2568): Wire up SELECT DISTINCT.
+
+### Bugfixes
+- [#2535](https://github.com/influxdb/influxdb/pull/2535): Return exit status 0 if influxd already running. Thanks @haim0n.
+- [#2521](https://github.com/influxdb/influxdb/pull/2521): Don't truncate topic data until fully replicated.
+- [#2509](https://github.com/influxdb/influxdb/pull/2509): Parse config file correctly during restore. Thanks @neonstalwart
+- [#2532](https://github.com/influxdb/influxdb/issues/2532): Set leader ID on restart of single-node cluster.
+- [#2448](https://github.com/influxdb/influxdb/pull/2448): Fix inconsistent data type - thanks @cannium!
+- [#2108](https://github.com/influxdb/influxdb/issues/2108): Change `timestamp` to `time` - thanks @neonstalwart!
+- [#2539](https://github.com/influxdb/influxdb/issues/2539): Add additional vote request logging.
+- [#2541](https://github.com/influxdb/influxdb/issues/2541): Update messaging client connection index with every message.
+- [#2542](https://github.com/influxdb/influxdb/issues/2542): Throw parser error for invalid aggregate without where time.
+- [#2548](https://github.com/influxdb/influxdb/issues/2548): Return an error when numeric aggregate applied to non-numeric data.
+- [#2487](https://github.com/influxdb/influxdb/issues/2487): Aggregate query with exact timestamp causes panic. Thanks @neonstalwart!
+- [#2552](https://github.com/influxdb/influxdb/issues/2552): Run CQ that is actually passed into go-routine.
+- [#2553](https://github.com/influxdb/influxdb/issues/2553): Fix race condition during CQ execution.
+- [#2557](https://github.com/influxdb/influxdb/issues/2557): RC30 WHERE time filter Regression.
+
+## v0.9.0-rc29 [2015-05-05]
+
+### Features
+- [#2410](https://github.com/influxdb/influxdb/pull/2410): If needed, brokers respond with data nodes for peer shard replication.
+- [#2469](https://github.com/influxdb/influxdb/pull/2469): Reduce default max topic size from 1GB to 50MB.
+- [#1824](https://github.com/influxdb/influxdb/pull/1824): Wire up MEDIAN aggregate. Thanks @neonstalwart!
+
+### Bugfixes
+- [#2446](https://github.com/influxdb/influxdb/pull/2446): Correctly count number of queries executed. Thanks @neonstalwart
+- [#2452](https://github.com/influxdb/influxdb/issues/2452): Fix panic with shard stats on multiple clusters
+- [#2453](https://github.com/influxdb/influxdb/pull/2453): Do not require snapshot on Log.WriteEntriesTo().
+- [#2460](https://github.com/influxdb/influxdb/issues/2460): Collectd input should use "value" for field values. Fixes #2412. Thanks @josh-padnick
+- [#2465](https://github.com/influxdb/influxdb/pull/2465): HTTP response logging panicked with chunked requests. Thanks @Jackkoz
+- [#2475](https://github.com/influxdb/influxdb/pull/2475): RLock server when checking if shard groups are required during write.
+- [#2471](https://github.com/influxdb/influxdb/issues/2471): Function calls normalized to be lower case. Fixes percentile not working when called uppercase. Thanks @neonstalwart
+- [#2281](https://github.com/influxdb/influxdb/issues/2281): Fix Bad Escape error when parsing regex
+
+## v0.9.0-rc28 [2015-04-27]
+
+### Features
+- [#2410](https://github.com/influxdb/influxdb/pull/2410): Allow configuration of Raft timers
+- [#2354](https://github.com/influxdb/influxdb/pull/2354): Wire up STDDEV. Thanks @neonstalwart!
+
+### Bugfixes
+- [#2374](https://github.com/influxdb/influxdb/issues/2374): Two different panics during SELECT percentile
+- [#2404](https://github.com/influxdb/influxdb/pull/2404): Mean and percentile function fixes
+- [#2408](https://github.com/influxdb/influxdb/pull/2408): Fix snapshot 500 error
+- [#1896](https://github.com/influxdb/influxdb/issues/1896): Excessive heartbeater logging of "connection refused" on cluster node stop
+- [#2418](https://github.com/influxdb/influxdb/pull/2418): Fix raft node getting stuck in candidate state
+- [#2415](https://github.com/influxdb/influxdb/pull/2415): Raft leader ID now set on election after failover. Thanks @xiaost
+- [#2426](https://github.com/influxdb/influxdb/pull/2426): Fix race condition around listener address in openTSDB server.
+- [#2426](https://github.com/influxdb/influxdb/pull/2426): Fix race condition around listener address in Graphite server.
+- [#2429](https://github.com/influxdb/influxdb/pull/2429): Ensure no field value is null.
+- [#2431](https://github.com/influxdb/influxdb/pull/2431): Always append shard path in diags. Thanks @marcosnils
+- [#2441](https://github.com/influxdb/influxdb/pull/2441): Correctly release server RLock during "drop series".
+- [#2445](https://github.com/influxdb/influxdb/pull/2445): Read locks and data race fixes
+
+## v0.9.0-rc27 [2015-04-23]
+
+### Features
+- [#2398](https://github.com/influxdb/influxdb/pull/2398): Track more stats and report errors for shards.
+
+### Bugfixes
+- [#2370](https://github.com/influxdb/influxdb/pull/2370): Fix data race in openTSDB endpoint.
+- [#2371](https://github.com/influxdb/influxdb/pull/2371): Don't set client to nil when closing broker. Fixes #2352
+- [#2372](https://github.com/influxdb/influxdb/pull/2372): Fix data race in graphite endpoint.
+- [#2373](https://github.com/influxdb/influxdb/pull/2373): Actually allow HTTP logging to be controlled.
+- [#2376](https://github.com/influxdb/influxdb/pull/2376): Encode all types of integers. Thanks @jtakkala.
+- [#2376](https://github.com/influxdb/influxdb/pull/2376): Add shard path to existing diags value. Fix issue #2369.
+- [#2386](https://github.com/influxdb/influxdb/pull/2386): Fix shard datanodes stats getting appended too many times
+- [#2393](https://github.com/influxdb/influxdb/pull/2393): Fix default hostname for connecting to cluster.
+- [#2390](https://github.com/influxdb/influxdb/pull/2390): Handle large sums when calculating means - thanks @neonstalwart!
+- [#2391](https://github.com/influxdb/influxdb/pull/2391): Unable to write points through Go client when authentication enabled
+- [#2400](https://github.com/influxdb/influxdb/pull/2400): Always send auth headers for client requests if present
+
+## v0.9.0-rc26 [2015-04-21]
+
+### Features
+- [#2301](https://github.com/influxdb/influxdb/pull/2301): Distributed query load balancing and failover
+- [#2336](https://github.com/influxdb/influxdb/pull/2336): Handle distributed queries when shards != data nodes
+- [#2353](https://github.com/influxdb/influxdb/pull/2353): Distributed Query/Clustering Fixes
+
+### Bugfixes
+- [#2297](https://github.com/influxdb/influxdb/pull/2297): Create /var/run during startup. Thanks @neonstalwart.
+- [#2312](https://github.com/influxdb/influxdb/pull/2312): Re-use httpclient for continuous queries
+- [#2318](https://github.com/influxdb/influxdb/pull/2318): Remove pointless use of 'done' channel for collectd.
+- [#2242](https://github.com/influxdb/influxdb/pull/2242): Distributed Query should balance requests
+- [#2243](https://github.com/influxdb/influxdb/pull/2243): Use Limit Reader instead of fixed 1MB/1GB slice for DQ
+- [#2190](https://github.com/influxdb/influxdb/pull/2190): Implement failover to other data nodes for distributed queries
+- [#2324](https://github.com/influxdb/influxdb/issues/2324): Race in Broker.Close()/Broker.RunContinousQueryProcessing()
+- [#2325](https://github.com/influxdb/influxdb/pull/2325): Cluster open fixes
+- [#2326](https://github.com/influxdb/influxdb/pull/2326): Fix parse error in CREATE CONTINUOUS QUERY
+- [#2300](https://github.com/influxdb/influxdb/pull/2300): Refactor integration tests. Properly close Graphite/OpenTSDB listeners.
+- [#2338](https://github.com/influxdb/influxdb/pull/2338): Fix panic if tag key isn't double quoted when it should have been
+- [#2340](https://github.com/influxdb/influxdb/pull/2340): Fix SHOW DIAGNOSTICS panic if any shard was non-local.
+- [#2351](https://github.com/influxdb/influxdb/pull/2351): Fix data race by rlocking shard during diagnostics.
+- [#2348](https://github.com/influxdb/influxdb/pull/2348): Data node fails to join cluster in 0.9.0rc25
+- [#2343](https://github.com/influxdb/influxdb/pull/2343): Node falls behind Metastore updates
+- [#2334](https://github.com/influxdb/influxdb/pull/2334): Test Partial replication is very problematic
+- [#2272](https://github.com/influxdb/influxdb/pull/2272): clustering: influxdb 0.9.0-rc23 panics when doing a GET with merge_metrics in a
+- [#2350](https://github.com/influxdb/influxdb/pull/2350): Issue fix for `influxd -hostname localhost`.
+- [#2367](https://github.com/influxdb/influxdb/pull/2367): PR for issue #2350 - Always use localhost, not host name.
+
+## v0.9.0-rc25 [2015-04-15]
+
+### Bugfixes
+- [#2282](https://github.com/influxdb/influxdb/pull/2282): Use "value" as field name for OpenTSDB input.
+- [#2283](https://github.com/influxdb/influxdb/pull/2283): Fix bug when restarting an entire existing cluster.
+- [#2293](https://github.com/influxdb/influxdb/pull/2293): Open cluster listener before starting broker.
+- [#2287](https://github.com/influxdb/influxdb/pull/2287): Fix data race during SHOW RETENTION POLICIES.
+- [#2288](https://github.com/influxdb/influxdb/pull/2288): Fix expression parsing bug.
+- [#2294](https://github.com/influxdb/influxdb/pull/2294): Fix async response flushing (invalid chunked response error).
+
+### Features
+- [#2276](https://github.com/influxdb/influxdb/pull/2276): Broker topic truncation.
+- [#2292](https://github.com/influxdb/influxdb/pull/2292): Wire up drop CQ statement - thanks @neonstalwart!
+- [#2290](https://github.com/influxdb/influxdb/pull/2290): Allow hostname argument to override default config - thanks @neonstalwart!
+- [#2295](https://github.com/influxdb/influxdb/pull/2295): Use nil as default return value for MapCount - thanks @neonstalwart!
+- [#2246](https://github.com/influxdb/influxdb/pull/2246): Allow HTTP logging to be controlled.
+
+## v0.9.0-rc24 [2015-04-13]
+
+### Bugfixes
+- [#2255](https://github.com/influxdb/influxdb/pull/2255): Fix panic when changing default retention policy.
+- [#2257](https://github.com/influxdb/influxdb/pull/2257): Add "snapshotting" pseudo state & log entry cache.
+- [#2261](https://github.com/influxdb/influxdb/pull/2261): Support int64 value types.
+- [#2191](https://github.com/influxdb/influxdb/pull/2191): Case-insensitive check for "fill"
+- [#2274](https://github.com/influxdb/influxdb/pull/2274): Snapshot and HTTP API endpoints
+- [#2265](https://github.com/influxdb/influxdb/pull/2265): Fix auth for CLI.
+
+## v0.9.0-rc23 [2015-04-11]
+
+### Features
+- [#2202](https://github.com/influxdb/influxdb/pull/2202): Initial implementation of Distributed Queries
+- [#2202](https://github.com/influxdb/influxdb/pull/2202): 64-bit Series IDs. INCOMPATIBLE WITH PREVIOUS DATASTORES.
+
+### Bugfixes
+- [#2225](https://github.com/influxdb/influxdb/pull/2225): Make keywords completely case insensitive
+- [#2228](https://github.com/influxdb/influxdb/pull/2228): Accept keyword default unquoted in ALTER RETENTION POLICY statement
+- [#2236](https://github.com/influxdb/influxdb/pull/2236): Immediate term changes, fix stale write issue, net/http/pprof
+- [#2213](https://github.com/influxdb/influxdb/pull/2213): Seed random number generator for election timeout. Thanks @cannium.
+
+## v0.9.0-rc22 [2015-04-09]
+
+### Features
+- [#2214](https://github.com/influxdb/influxdb/pull/2214): Added an option to the influx CLI to execute a single command and exit. Thanks @n1tr0g
+
+### Bugfixes
+- [#2223](https://github.com/influxdb/influxdb/pull/2223): Always notify term change on RequestVote
+
+## v0.9.0-rc21 [2015-04-09]
+
+### Features
+- [#870](https://github.com/influxdb/influxdb/pull/870): Add support for OpenTSDB telnet input protocol. Thanks @tcolgate
+- [#2180](https://github.com/influxdb/influxdb/pull/2180): Allow http write handler to decode gzipped body
+- [#2175](https://github.com/influxdb/influxdb/pull/2175): Separate broker and data nodes
+- [#2158](https://github.com/influxdb/influxdb/pull/2158): Allow user password to be changed. Thanks @n1tr0g
+- [#2201](https://github.com/influxdb/influxdb/pull/2201): Bring back config join URLs
+- [#2121](https://github.com/influxdb/influxdb/pull/2121): Parser refactor
+
+### Bugfixes
+- [#2181](https://github.com/influxdb/influxdb/pull/2181): Fix panic on "SHOW DIAGNOSTICS".
+- [#2170](https://github.com/influxdb/influxdb/pull/2170): Make sure queries on missing tags return 200 status.
+- [#2197](https://github.com/influxdb/influxdb/pull/2197): Lock server during Open().
+- [#2200](https://github.com/influxdb/influxdb/pull/2200): Re-enable Continuous Queries.
+- [#2203](https://github.com/influxdb/influxdb/pull/2203): Fix race condition on continuous queries.
+- [#2217](https://github.com/influxdb/influxdb/pull/2217): Only revert to follower if new term is greater.
+- [#2219](https://github.com/influxdb/influxdb/pull/2219): Persist term change to disk when candidate. Thanks @cannium
+
+## v0.9.0-rc20 [2015-04-04]
+
+### Features
+- [#2128](https://github.com/influxdb/influxdb/pull/2128): Data node discovery from brokers
+- [#2142](https://github.com/influxdb/influxdb/pull/2142): Support chunked queries
+- [#2154](https://github.com/influxdb/influxdb/pull/2154): Node redirection
+- [#2168](https://github.com/influxdb/influxdb/pull/2168): Return raft term from vote, add term logging
+
+### Bugfixes
+- [#2147](https://github.com/influxdb/influxdb/pull/2147): Set Go Max procs in a better location
+- [#2137](https://github.com/influxdb/influxdb/pull/2137): Refactor `results` to `response`. Breaking Go Client change.
+- [#2151](https://github.com/influxdb/influxdb/pull/2151): Ignore replay commands on the metastore.
+- [#2152](https://github.com/influxdb/influxdb/issues/2152): Influxd process with stats enabled crashing with 'Unsuported protocol scheme for ""'
+- [#2156](https://github.com/influxdb/influxdb/pull/2156): Propagate error when resolving UDP address in Graphite UDP server.
+- [#2163](https://github.com/influxdb/influxdb/pull/2163): Fix up paths for default data and run storage.
+- [#2164](https://github.com/influxdb/influxdb/pull/2164): Append STDOUT/STDERR in initscript.
+- [#2165](https://github.com/influxdb/influxdb/pull/2165): Better name for config section for stats and diags.
+- [#2165](https://github.com/influxdb/influxdb/pull/2165): Monitoring database and retention policy are not configurable.
+- [#2167](https://github.com/influxdb/influxdb/pull/2167): Add broker log recovery.
+- [#2166](https://github.com/influxdb/influxdb/pull/2166): Don't panic if presented with a field of unknown type.
+- [#2149](https://github.com/influxdb/influxdb/pull/2149): Fix unit tests for win32 when directory doesn't exist.
+- [#2150](https://github.com/influxdb/influxdb/pull/2150): Fix unit tests for win32 when a connection is refused.
+
+## v0.9.0-rc19 [2015-04-01]
+
+### Features
+- [#2143](https://github.com/influxdb/influxdb/pull/2143): Add raft term logging.
+
+### Bugfixes
+- [#2145](https://github.com/influxdb/influxdb/pull/2145): Encode toml durations correctly which fixes default configuration generation `influxd config`.
+
+## v0.9.0-rc18 [2015-03-31]
+
+### Bugfixes
+- [#2100](https://github.com/influxdb/influxdb/pull/2100): Use channel to synchronize collectd shutdown.
+- [#2100](https://github.com/influxdb/influxdb/pull/2100): Synchronize access to shard index.
+- [#2131](https://github.com/influxdb/influxdb/pull/2131): Optimize marshalTags().
+- [#2130](https://github.com/influxdb/influxdb/pull/2130): Make fewer calls to marshalTags().
+- [#2105](https://github.com/influxdb/influxdb/pull/2105): Support != for tag values. Fix issue #2097, thanks to @smonkewitz for bug report.
+- [#2105](https://github.com/influxdb/influxdb/pull/2105): Support !~ for tag values. See the example below.
+- [#2138](https://github.com/influxdb/influxdb/pull/2138): Use map for marshaledTags cache.
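+
+As a quick illustration of the two tag-comparison operators above, a hypothetical query sketch (the `cpu` measurement and `host` tag are invented for this example, not taken from the issues):
+
+```sql
+-- Exclude a single host by exact tag value (per #2105).
+SELECT value FROM cpu WHERE host != 'server01'
+
+-- Exclude every host whose tag value matches a regex (also per #2105).
+SELECT value FROM cpu WHERE host !~ /^server-0[0-9]$/
+```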
+
+## v0.9.0-rc17 [2015-03-29]
+
+### Features
+- [#2076](https://github.com/influxdb/influxdb/pull/2076): Separate stdout and stderr output in init.d script
+- [#2091](https://github.com/influxdb/influxdb/pull/2091): Support disabling snapshot endpoint.
+- [#2081](https://github.com/influxdb/influxdb/pull/2081): Support writing diagnostic data into the internal database.
+- [#2095](https://github.com/influxdb/influxdb/pull/2095): Improved InfluxDB client docs. Thanks @derailed
+
+### Bugfixes
+- [#2093](https://github.com/influxdb/influxdb/pull/2093): Point precision not marshalled correctly. Thanks @derailed
+- [#2084](https://github.com/influxdb/influxdb/pull/2084): Allow leading underscores in identifiers.
+- [#2080](https://github.com/influxdb/influxdb/pull/2080): Graphite logs in seconds, not milliseconds.
+- [#2101](https://github.com/influxdb/influxdb/pull/2101): SHOW DATABASES should name returned series "databases".
+- [#2104](https://github.com/influxdb/influxdb/pull/2104): Include NEQ when calculating field filters.
+- [#2112](https://github.com/influxdb/influxdb/pull/2112): Set GOMAXPROCS on startup. This may have been causing extra leader elections, which would cause a number of other bugs or instability.
+- [#2111](https://github.com/influxdb/influxdb/pull/2111) and [#2025](https://github.com/influxdb/influxdb/issues/2025): Raft stability fixes. Non-contiguous log error and others.
+- [#2114](https://github.com/influxdb/influxdb/pull/2114): Correctly start influxd on platforms without start-stop-daemon.
+
+## v0.9.0-rc16 [2015-03-24]
+
+### Features
+- [#2058](https://github.com/influxdb/influxdb/pull/2058): Track number of queries executed in stats.
+- [#2059](https://github.com/influxdb/influxdb/pull/2059): Retention policies sorted by name on return to client.
+- [#2061](https://github.com/influxdb/influxdb/pull/2061): Implement SHOW DIAGNOSTICS.
+- [#2064](https://github.com/influxdb/influxdb/pull/2064): Allow init.d script to return influxd version.
+- [#2053](https://github.com/influxdb/influxdb/pull/2053): Implement backup and restore.
+- [#1631](https://github.com/influxdb/influxdb/pull/1631): Wire up DROP CONTINUOUS QUERY.
+
+### Bugfixes
+- [#2037](https://github.com/influxdb/influxdb/pull/2037): Don't check 'configExists' at Run() level.
+- [#2039](https://github.com/influxdb/influxdb/pull/2039): Don't panic if getting current user fails.
+- [#2034](https://github.com/influxdb/influxdb/pull/2034): GROUP BY should require an aggregate.
+- [#2040](https://github.com/influxdb/influxdb/pull/2040): Add missing top-level help for config command.
+- [#2057](https://github.com/influxdb/influxdb/pull/2057): Move racy "in order" test to integration test suite.
+- [#2060](https://github.com/influxdb/influxdb/pull/2060): Reload server shard map on restart.
+- [#2068](https://github.com/influxdb/influxdb/pull/2068): Fix misspelled JSON field.
+- [#2067](https://github.com/influxdb/influxdb/pull/2067): Fixed issue where some queries didn't properly pull back data (introduced in RC15). Fixing intervals for GROUP BY.
+
+## v0.9.0-rc15 [2015-03-19]
+
+### Features
+- [#2000](https://github.com/influxdb/influxdb/pull/2000): Log broker path when broker fails to start. Thanks @gst.
+- [#2007](https://github.com/influxdb/influxdb/pull/2007): Track shard-level stats.
+
+### Bugfixes
+- [#2001](https://github.com/influxdb/influxdb/pull/2001): Ensure measurement not found returns status code 200.
+- [#1985](https://github.com/influxdb/influxdb/pull/1985): Set content-type JSON header before actually writing header. Thanks @dstrek.
+- [#2003](https://github.com/influxdb/influxdb/pull/2003): Set timestamp when writing monitoring stats.
+- [#2004](https://github.com/influxdb/influxdb/pull/2004): Limit group by to MaxGroupByPoints (currently 100,000).
+- [#2016](https://github.com/influxdb/influxdb/pull/2016): Fix bucket alignment for group by. Thanks @jnutzmann
+- [#2021](https://github.com/influxdb/influxdb/pull/2021): Remove unnecessary formatting from log message. Thanks @simonkern
+
+## v0.9.0-rc14 [2015-03-18]
+
+### Bugfixes
+- [#1999](https://github.com/influxdb/influxdb/pull/1999): Return status code 200 for measurement not found errors on show series.
+
+## v0.9.0-rc13 [2015-03-17]
+
+### Features
+- [#1974](https://github.com/influxdb/influxdb/pull/1974): Add time taken for request to the http server logs.
+
+### Bugfixes
+- [#1971](https://github.com/influxdb/influxdb/pull/1971): Fix leader id initialization.
+- [#1975](https://github.com/influxdb/influxdb/pull/1975): Require `q` parameter for query endpoint.
+- [#1969](https://github.com/influxdb/influxdb/pull/1969): Print loaded config.
+- [#1987](https://github.com/influxdb/influxdb/pull/1987): Fix config print startup statement for when no config is provided.
+- [#1990](https://github.com/influxdb/influxdb/pull/1990): Drop measurement was taking too long due to transactions.
+
+## v0.9.0-rc12 [2015-03-15]
+
+### Bugfixes
+- [#1942](https://github.com/influxdb/influxdb/pull/1942): Sort wildcard names.
+- [#1957](https://github.com/influxdb/influxdb/pull/1957): Graphite numbers are always float64.
+- [#1955](https://github.com/influxdb/influxdb/pull/1955): Prohibit creation of databases with no name. Thanks @dullgiulio
+- [#1952](https://github.com/influxdb/influxdb/pull/1952): Handle delete statement with an error. Thanks again to @dullgiulio
+
+### Features
+- [#1935](https://github.com/influxdb/influxdb/pull/1935): Implement stateless broker for Raft.
+- [#1936](https://github.com/influxdb/influxdb/pull/1936): Implement "SHOW STATS" and self-monitoring
+- [#1909](https://github.com/influxdb/influxdb/pull/1909): Implement a dump command.
+
+## v0.9.0-rc11 [2015-03-13]
+
+### Bugfixes
+- [#1917](https://github.com/influxdb/influxdb/pull/1917): Creating Infinite Retention Policy Failed.
+- [#1758](https://github.com/influxdb/influxdb/pull/1758): Add Graphite Integration Test.
+- [#1929](https://github.com/influxdb/influxdb/pull/1929): Default Retention Policy incorrectly auto created.
+- [#1930](https://github.com/influxdb/influxdb/pull/1930): Auto create database for graphite if not specified.
+- [#1908](https://github.com/influxdb/influxdb/pull/1908): Cosmetic CLI output fixes.
+- [#1931](https://github.com/influxdb/influxdb/pull/1931): Add default column to SHOW RETENTION POLICIES.
+- [#1937](https://github.com/influxdb/influxdb/pull/1937): OFFSET should be allowed to be 0.
+
+### Features
+- [#1902](https://github.com/influxdb/influxdb/pull/1902): Enforce retention policies to have a minimum duration.
+- [#1906](https://github.com/influxdb/influxdb/pull/1906): Add show servers to query language.
+- [#1925](https://github.com/influxdb/influxdb/pull/1925): Add `fill(none)`, `fill(previous)`, and `fill(<num>)` to queries. See the example below.
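+
+A sketch of the three fill modes added in #1925 (the measurement, field, and interval here are hypothetical):
+
+```sql
+-- Buckets with no points produce no rows at all.
+SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(10m) fill(none)
+
+-- Empty buckets repeat the previous bucket's value.
+SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(10m) fill(previous)
+
+-- Empty buckets are filled with a constant, here 0.
+SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(10m) fill(0)
+```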
+
+## v0.9.0-rc10 [2015-03-09]
+
+### Bugfixes
+- [#1867](https://github.com/influxdb/influxdb/pull/1867): Fix race accessing topic replicas map
+- [#1864](https://github.com/influxdb/influxdb/pull/1864): Fix race in startStateLoop
+- [#1874](https://github.com/influxdb/influxdb/pull/1874): Do Not Panic on Missing Dirs
+- [#1877](https://github.com/influxdb/influxdb/pull/1877): Broker clients track broker leader
+- [#1862](https://github.com/influxdb/influxdb/pull/1862): Fix memory leak in `httpd.serveWait`. Thanks @mountkin
+- [#1883](https://github.com/influxdb/influxdb/pull/1883): RLock server during retention policy enforcement. Thanks @grisha
+- [#1868](https://github.com/influxdb/influxdb/pull/1868): Use `BatchPoints` for `client.Write` method. Thanks @vladlopes, @georgmu, @d2g, @evanphx, @akolosov.
+- [#1881](https://github.com/influxdb/influxdb/pull/1881): Update documentation for `client` package. Misc library tweaks.
+- Fix queries with multiple where clauses on tags, times, and fields. Fix queries that have where clauses on fields not in the select.
+
+### Features
+- [#1875](https://github.com/influxdb/influxdb/pull/1875): Support trace logging of Raft.
+- [#1895](https://github.com/influxdb/influxdb/pull/1895): Auto-create a retention policy when a database is created.
+- [#1897](https://github.com/influxdb/influxdb/pull/1897): Pre-create shard groups.
+- [#1900](https://github.com/influxdb/influxdb/pull/1900): Change `LIMIT` to `SLIMIT` and implement `LIMIT` and `OFFSET`. See the example after the v0.9.0-rc6 notes below.
+
+## v0.9.0-rc9 [2015-03-06]
+
+### Bugfixes
+- [#1872](https://github.com/influxdb/influxdb/pull/1872): Fix "stale term" errors with raft
+
+## v0.9.0-rc8 [2015-03-05]
+
+### Bugfixes
+- [#1836](https://github.com/influxdb/influxdb/pull/1836): Store each parsed shell command in history file.
+- [#1789](https://github.com/influxdb/influxdb/pull/1789): Add --config-files option to fpm command. Thanks @kylezh
+- [#1859](https://github.com/influxdb/influxdb/pull/1859): Queries with a `GROUP BY *` clause were returning a 500 if done against a measurement that didn't exist
+
+### Features
+- [#1848](https://github.com/influxdb/influxdb/pull/1848): Support JSON data ingest over UDP
+- [#1857](https://github.com/influxdb/influxdb/pull/1857): Support retention policies with infinite duration
+- [#1858](https://github.com/influxdb/influxdb/pull/1858): Enable detailed tracing of write path
+
+## v0.9.0-rc7 [2015-03-02]
+
+### Features
+- [#1813](https://github.com/influxdb/influxdb/pull/1813): Queries for missing measurements or fields now return a 200 with an error message in the series JSON.
+- [#1826](https://github.com/influxdb/influxdb/pull/1826), [#1827](https://github.com/influxdb/influxdb/pull/1827): Fixed queries with `WHERE` clauses against fields.
+
+### Bugfixes
+
+- [#1744](https://github.com/influxdb/influxdb/pull/1744): Allow retention policies to be modified without specifying replication factor. Thanks @kylezh
+- [#1809](https://github.com/influxdb/influxdb/pull/1809): Packaging post-install script unconditionally removes init.d symlink. Thanks @sineos
+
+## v0.9.0-rc6 [2015-02-27]
+
+### Bugfixes
+
+- [#1780](https://github.com/influxdb/influxdb/pull/1780): Malformed identifiers get through the parser
+- [#1775](https://github.com/influxdb/influxdb/pull/1775): Panic "index out of range" on some queries
+- [#1744](https://github.com/influxdb/influxdb/pull/1744): Select shard groups which completely encompass time range. Thanks @kylezh.
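+
+Referring back to the `SLIMIT` change in #1900 above, a sketch of how the two forms differ, using the syntax of later 0.9 builds (the measurement name is hypothetical):
+
+```sql
+-- LIMIT/OFFSET now page through the points within each series.
+SELECT value FROM cpu LIMIT 10 OFFSET 10
+
+-- SLIMIT/SOFFSET page through the matching series themselves.
+SELECT value FROM cpu SLIMIT 2 SOFFSET 2
+```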
+
+## v0.9.0-rc5 [2015-02-27]
+
+### Bugfixes
+
+- [#1752](https://github.com/influxdb/influxdb/pull/1752): Remove debug log output from collectd.
+- [#1720](https://github.com/influxdb/influxdb/pull/1720): Parse Series IDs as unsigned 32-bits.
+- [#1767](https://github.com/influxdb/influxdb/pull/1767): Drop Series was failing across shards. Issue #1761.
+- [#1773](https://github.com/influxdb/influxdb/pull/1773): Fix bug when merging series together that have unequal number of points in a group by interval
+- [#1771](https://github.com/influxdb/influxdb/pull/1771): Make `SHOW SERIES` return IDs and support `LIMIT` and `OFFSET`
+
+### Features
+
+- [#1698](https://github.com/influxdb/influxdb/pull/1698): Wire up DROP MEASUREMENT
+
+## v0.9.0-rc4 [2015-02-24]
+
+### Bugfixes
+
+- Fix authentication issue with continuous queries
+- Print version in the log on startup
+
+## v0.9.0-rc3 [2015-02-23]
+
+### Features
+
+- [#1659](https://github.com/influxdb/influxdb/pull/1659): WHERE against regexes: `WHERE =~ '.*asdf'`
+- [#1580](https://github.com/influxdb/influxdb/pull/1580): Add support for fields with bool, int, or string data types
+- [#1687](https://github.com/influxdb/influxdb/pull/1687): Change `Rows` to `Series` in results output. BREAKING API CHANGE
+- [#1629](https://github.com/influxdb/influxdb/pull/1629): Add support for `DROP SERIES` queries
+- [#1632](https://github.com/influxdb/influxdb/pull/1632): Add support for `GROUP BY *` to return all series within a measurement
+- [#1689](https://github.com/influxdb/influxdb/pull/1689): Change `SHOW TAG VALUES WITH KEY="foo"` to use the key name in the result. BREAKING API CHANGE
+- [#1699](https://github.com/influxdb/influxdb/pull/1699): Add CPU and memory profiling options to daemon
+- [#1672](https://github.com/influxdb/influxdb/pull/1672): Add index tracking to metastore. Makes downed node recovery actually work
+- [#1591](https://github.com/influxdb/influxdb/pull/1591): Add `spread` aggregate function
+- [#1576](https://github.com/influxdb/influxdb/pull/1576): Add `first` and `last` aggregate functions
+- [#1573](https://github.com/influxdb/influxdb/pull/1573): Add `stddev` aggregate function
+- [#1565](https://github.com/influxdb/influxdb/pull/1565): Add the admin interface back into the server and update for new API
+- [#1562](https://github.com/influxdb/influxdb/pull/1562): Enforce retention policies
+- [#1700](https://github.com/influxdb/influxdb/pull/1700): Change `Values` to `Fields` on writes. BREAKING API CHANGE
+- [#1706](https://github.com/influxdb/influxdb/pull/1706): Add support for `LIMIT` and `OFFSET`, which work on the number of series returned in a query. To limit the number of data points use a `WHERE time` clause
+
+### Bugfixes
+
+- [#1636](https://github.com/influxdb/influxdb/issues/1636): Don't store number of fields in raw data. THIS IS A BREAKING DATA CHANGE. YOU MUST START WITH A FRESH DATABASE
+- [#1701](https://github.com/influxdb/influxdb/pull/1701), [#1667](https://github.com/influxdb/influxdb/pull/1667), [#1663](https://github.com/influxdb/influxdb/pull/1663), [#1615](https://github.com/influxdb/influxdb/pull/1615): Raft fixes
+- [#1644](https://github.com/influxdb/influxdb/pull/1644): Add batching support for significantly improved write performance
+- [#1704](https://github.com/influxdb/influxdb/pull/1704): Fix queries that pull back raw data (i.e. ones without aggregate functions)
+- [#1718](https://github.com/influxdb/influxdb/pull/1718): Return an error on write if any of the points don't have at least one field
+- [#1806](https://github.com/influxdb/influxdb/pull/1806): Fix regex parsing. Change regex syntax to use / delimiters.
+
+## v0.9.0-rc1,2 [no public release]
+
+### Features
+
+- Support for tags added
+- New queries for showing measurement names, tag keys, and tag values
+- Renamed shard spaces to retention policies
+- Deprecated matching against regex in favor of explicit writing and querying on retention policies
+- Pure Go InfluxQL parser
+- Switch to BoltDB as underlying datastore
+- BoltDB backed metastore to store schema information
+- Updated HTTP API to only have two endpoints `/query` and `/write`
+- Added all administrative functions to the query language
+- Change cluster architecture to have brokers and data nodes
+- Switch to streaming Raft implementation
+- In memory inverted index of the tag data
+- Pure Go implementation!
+
+## v0.8.6 [2014-11-15]
+
+### Features
+
+- [Issue #973](https://github.com/influxdb/influxdb/issues/973). Support joining using a regex or list of time series
+- [Issue #1068](https://github.com/influxdb/influxdb/issues/1068). Print the processor chain when the query is started
+
+### Bugfixes
+
+- [Issue #584](https://github.com/influxdb/influxdb/issues/584). Don't panic if the process died while initializing
+- [Issue #663](https://github.com/influxdb/influxdb/issues/663). Make sure all sub services are closed when stopping InfluxDB
+- [Issue #671](https://github.com/influxdb/influxdb/issues/671). Fix the Makefile package target for Mac OSX
+- [Issue #800](https://github.com/influxdb/influxdb/issues/800). Use su instead of sudo in the init script. This fixes the startup problem on RHEL 6.
+- [Issue #925](https://github.com/influxdb/influxdb/issues/925). Don't generate invalid query strings for single point queries
+- [Issue #943](https://github.com/influxdb/influxdb/issues/943). Don't take two snapshots at the same time
+- [Issue #947](https://github.com/influxdb/influxdb/issues/947). Exit nicely if the daemon doesn't have permission to write to the log.
+- [Issue #959](https://github.com/influxdb/influxdb/issues/959). Stop using closed connections in the protobuf client.
+- [Issue #978](https://github.com/influxdb/influxdb/issues/978). Check for valgrind and mercurial in the configure script
+- [Issue #996](https://github.com/influxdb/influxdb/issues/996). Fill should fill the time range even if no points exist in the given time range
+- [Issue #1008](https://github.com/influxdb/influxdb/issues/1008). Return an appropriate exit status code depending on whether the process exits due to an error or exits gracefully.
+- [Issue #1024](https://github.com/influxdb/influxdb/issues/1024). Hitting open files limit causes influxdb to create shards in a loop.
+- [Issue #1069](https://github.com/influxdb/influxdb/issues/1069). Fix deprecated interface endpoint in Admin UI.
+- [Issue #1076](https://github.com/influxdb/influxdb/issues/1076). Fix the timestamps of data points written by the collectd plugin. (Thanks, @renchap for reporting this bug)
+- [Issue #1078](https://github.com/influxdb/influxdb/issues/1078). Make sure we don't resurrect shard directories for shards that have already expired
+- [Issue #1085](https://github.com/influxdb/influxdb/issues/1085). Set the connection string of the local raft node
+- [Issue #1092](https://github.com/influxdb/influxdb/issues/1092). Set the connection string of the local node in the raft snapshot.
+- [Issue #1100](https://github.com/influxdb/influxdb/issues/1100). Removing a non-existent shard space causes the cluster to panic.
+- [Issue #1113](https://github.com/influxdb/influxdb/issues/1113). A nil engine.ProcessorChain causes a panic.
+
+## v0.8.5 [2014-10-27]
+
+### Features
+
+- [Issue #1055](https://github.com/influxdb/influxdb/issues/1055). Allow graphite and collectd input plugins to have separate binding addresses
+
+### Bugfixes
+
+- [Issue #1058](https://github.com/influxdb/influxdb/issues/1058). Use the query language instead of the continuous query endpoints that were removed in 0.8.4
+- [Issue #1022](https://github.com/influxdb/influxdb/issues/1022). Return an +Inf or NaN instead of panicking when we encounter a divide by zero
+- [Issue #821](https://github.com/influxdb/influxdb/issues/821). Don't scan through points when we hit the limit
+- [Issue #1051](https://github.com/influxdb/influxdb/issues/1051). Fix timestamps when collectd is used and low-resolution timestamps are set.
+
+## v0.8.4 [2014-10-24]
+
+### Bugfixes
+
+- Remove the continuous query api endpoints since the query language has all the features needed to list and delete continuous queries.
+- [Issue #778](https://github.com/influxdb/influxdb/issues/778). Selecting from a non-existent series should give a better error message indicating that the series doesn't exist
+- [Issue #988](https://github.com/influxdb/influxdb/issues/988). Check the arguments of `top()` and `bottom()`
+- [Issue #1021](https://github.com/influxdb/influxdb/issues/1021). Make redirecting to standard output and standard error optional instead of going to `/dev/null`. This can now be configured by setting `$STDOUT` in `/etc/default/influxdb`
+- [Issue #985](https://github.com/influxdb/influxdb/issues/985). Make sure we drop a shard only when there's no one using it. Otherwise, the shard can be closed when another goroutine is writing to it which will cause random errors and possibly corruption of the database.
+
+### Features
+
+- [Issue #1047](https://github.com/influxdb/influxdb/issues/1047). Allow merge() to take a list of series (as opposed to a regex in #72)
+
+## v0.8.4-rc.1 [2014-10-21]
+
+### Bugfixes
+
+- [Issue #1040](https://github.com/influxdb/influxdb/issues/1040). Revert to older raft snapshot if the latest one is corrupted
+- [Issue #1004](https://github.com/influxdb/influxdb/issues/1004). Querying for data outside of existing shards returns an empty response instead of throwing a `Couldn't lookup columns` error
+- [Issue #1020](https://github.com/influxdb/influxdb/issues/1020). Change init script exit codes to conform to the lsb standards. (Thanks, @spuder)
+- [Issue #1011](https://github.com/influxdb/influxdb/issues/1011). Fix the tarball for homebrew so that rocksdb is included and the directory structure is clean
+- [Issue #1007](https://github.com/influxdb/influxdb/issues/1007). Fix the content type when an error occurs and the client requests compression.
+- [Issue #916](https://github.com/influxdb/influxdb/issues/916). Set the ulimit in the init script with a way to override the limit
+- [Issue #742](https://github.com/influxdb/influxdb/issues/742). Fix rocksdb for Mac OSX
+- [Issue #387](https://github.com/influxdb/influxdb/issues/387). Aggregations with group by time(1w), time(1m) and time(1y) (for week, month and year respectively) will cause the start time and end time of the bucket to fall on the logical boundaries of the week, month or year.
+- [Issue #334](https://github.com/influxdb/influxdb/issues/334). Derivative for queries with group by time() and fill() will take the difference between the first value in the bucket and the first value of the next bucket.
+- [Issue #972](https://github.com/influxdb/influxdb/issues/972). Don't assign duplicate server ids
+
+### Features
+
+- [Issue #722](https://github.com/influxdb/influxdb/issues/722). Add an install target to the Makefile
+- [Issue #1032](https://github.com/influxdb/influxdb/issues/1032). Include the admin ui static assets in the binary
+- [Issue #1019](https://github.com/influxdb/influxdb/issues/1019). Upgrade to rocksdb 3.5.1
+- [Issue #992](https://github.com/influxdb/influxdb/issues/992). Add an input plugin for collectd. (Thanks, @kimor79)
+- [Issue #72](https://github.com/influxdb/influxdb/issues/72). Support merge for multiple series using regex syntax
+
+## v0.8.3 [2014-09-24]
+
+### Bugfixes
+
+- [Issue #885](https://github.com/influxdb/influxdb/issues/885). Multiple queries separated by semicolons work as expected. Queries are processed sequentially
+- [Issue #652](https://github.com/influxdb/influxdb/issues/652). Return an error if an invalid column is used in the where clause
+- [Issue #794](https://github.com/influxdb/influxdb/issues/794). Fix case insensitive regex matching
+- [Issue #853](https://github.com/influxdb/influxdb/issues/853). Move cluster config from raft to API.
+- [Issue #714](https://github.com/influxdb/influxdb/issues/714). Don't panic on invalid boolean operators.
+- [Issue #843](https://github.com/influxdb/influxdb/issues/843). Prevent blank database names
+- [Issue #780](https://github.com/influxdb/influxdb/issues/780). Fix fill() for all aggregators
+- [Issue #923](https://github.com/influxdb/influxdb/issues/923). Enclose table names in double quotes in the result of GetQueryString()
+- [Issue #967](https://github.com/influxdb/influxdb/issues/967). Return an error if the storage engine can't be created
+- [Issue #954](https://github.com/influxdb/influxdb/issues/954). Don't automatically create shards which was causing too many shards to be created when used with grafana
+- [Issue #939](https://github.com/influxdb/influxdb/issues/939). Aggregation should ignore null values and invalid values, e.g. strings with mean().
+- [Issue #964](https://github.com/influxdb/influxdb/issues/964). Parse big int in queries properly.
+
+## v0.8.2 [2014-09-08]
+
+### Features
+
+- Added API endpoint to update shard space definitions
+- [Issue #867](https://github.com/influxdb/influxdb/issues/867). Add option to return shard space mappings in list series
+
+### Bugfixes
+
+- [Issue #886](https://github.com/influxdb/influxdb/issues/886). Update shard space to not set defaults
+- [Issue #886](https://github.com/influxdb/influxdb/issues/886). Shard space regexes reset after restart of InfluxDB
+- [Issue #652](https://github.com/influxdb/influxdb/issues/652). Return a meaningful error if an invalid column is used in where clause after joining multiple series
+
+## v0.8.1 [2014-09-03]
+
+### Features
+
+- [Issue #896](https://github.com/influxdb/influxdb/issues/896). Allow logging to syslog. Thanks @malthe
+
+### Bugfixes
+
+- [Issue #868](https://github.com/influxdb/influxdb/issues/868). Don't panic when upgrading a snapshot from 0.7.x
+- [Issue #887](https://github.com/influxdb/influxdb/issues/887). The first continuous query shouldn't trigger backfill if it had backfill disabled
+- [Issue #674](https://github.com/influxdb/influxdb/issues/674). Graceful exit when config file is invalid. (Thanks, @DavidBord)
+- [Issue #857](https://github.com/influxdb/influxdb/issues/857). More informative list servers api. (Thanks, @oliveagle)
+
+## v0.8.0 [2014-08-22]
+
+### Features
+
+- [Issue #850](https://github.com/influxdb/influxdb/issues/850). Make the server listing more informative
+
+### Bugfixes
+
+- [Issue #779](https://github.com/influxdb/influxdb/issues/779). Deleting expired shards isn't thread safe.
+- [Issue #860](https://github.com/influxdb/influxdb/issues/860). Load database config should validate shard spaces.
+- [Issue #862](https://github.com/influxdb/influxdb/issues/862). Data migrator should have option to set delay time.
+
+## v0.8.0-rc.5 [2014-08-15]
+
+### Features
+
+- [Issue #376](https://github.com/influxdb/influxdb/issues/376). List series should support regex filtering
+- [Issue #745](https://github.com/influxdb/influxdb/issues/745). Add continuous queries to the database config
+- [Issue #746](https://github.com/influxdb/influxdb/issues/746). Add data migration tool for 0.8.0
+
+### Bugfixes
+
+- [Issue #426](https://github.com/influxdb/influxdb/issues/426). Fill should fill the entire time range that is requested
+- [Issue #740](https://github.com/influxdb/influxdb/issues/740). Don't emit non-existent fields when joining series with different fields
+- [Issue #744](https://github.com/influxdb/influxdb/issues/744). Admin site should have all assets locally
+- [Issue #767](https://github.com/influxdb/influxdb/issues/767). Remove shards whenever they expire
+- [Issue #781](https://github.com/influxdb/influxdb/issues/781). Don't emit non-existent fields when joining series with different fields
+- [Issue #791](https://github.com/influxdb/influxdb/issues/791). Move database config loader to be an API endpoint
+- [Issue #809](https://github.com/influxdb/influxdb/issues/809). Migration path from 0.7 -> 0.8
+- [Issue #811](https://github.com/influxdb/influxdb/issues/811). Gogoprotobuf removed `ErrWrongType`, which is depended on by Raft
+- [Issue #820](https://github.com/influxdb/influxdb/issues/820). Query non-local shard with time range to avoid getting back points not in time range
+- [Issue #827](https://github.com/influxdb/influxdb/issues/827). Don't leak file descriptors in the WAL
+- [Issue #830](https://github.com/influxdb/influxdb/issues/830). List series should return series in lexicographic sorted order
+- [Issue #831](https://github.com/influxdb/influxdb/issues/831). Move create shard space to be db specific
+
+## v0.8.0-rc.4 [2014-07-29]
+
+### Bugfixes
+
+- [Issue #774](https://github.com/influxdb/influxdb/issues/774). Don't try to parse "inf" shard retention policy
+- [Issue #769](https://github.com/influxdb/influxdb/issues/769). Use retention duration when determining expired shards. (Thanks, @shugo)
+- [Issue #736](https://github.com/influxdb/influxdb/issues/736). Only db admins should be able to drop a series
+- [Issue #713](https://github.com/influxdb/influxdb/issues/713). Null should be a valid fill value
+- [Issue #644](https://github.com/influxdb/influxdb/issues/644). Graphite api should write data in batches to the coordinator
+- [Issue #740](https://github.com/influxdb/influxdb/issues/740). Panic when distinct fields are selected from an inner join
+- [Issue #781](https://github.com/influxdb/influxdb/issues/781). Panic when distinct fields are added after an inner join
+
+## v0.8.0-rc.3 [2014-07-21]
+
+### Bugfixes
+
+- [Issue #752](https://github.com/influxdb/influxdb/issues/752). `./configure` should use goroot to find gofmt
+- [Issue #758](https://github.com/influxdb/influxdb/issues/758). Clarify the reason behind graphite input plugin not starting. (Thanks, @otoolep)
+- [Issue #759](https://github.com/influxdb/influxdb/issues/759). Don't revert the regex in the shard space. (Thanks, @shugo)
+- [Issue #760](https://github.com/influxdb/influxdb/issues/760). Removing a server should remove it from the shard server ids. (Thanks, @shugo)
+- [Issue #772](https://github.com/influxdb/influxdb/issues/772). Add sentinel values to all db. Their absence caused the last key in the db to not be fetched properly.
+
+## v0.8.0-rc.2 [2014-07-15]
+
+- This release is to fix a build error in rc1 which caused rocksdb to not be available
+- Bump up the `max-open-files` option to 1000 on all storage engines
+- Lower the `write-buffer-size` to 1000
+
+## v0.8.0-rc.1 [2014-07-15]
+
+### Features
+
+- [Issue #643](https://github.com/influxdb/influxdb/issues/643). Support pretty print json. (Thanks, @otoolep)
+- [Issue #641](https://github.com/influxdb/influxdb/issues/641). Support multiple storage engines
+- [Issue #665](https://github.com/influxdb/influxdb/issues/665). Make build tmp directory configurable in the make file. (Thanks, @dgnorton)
+- [Issue #667](https://github.com/influxdb/influxdb/issues/667). Enable compression on all GET requests and when writing data
+- [Issue #648](https://github.com/influxdb/influxdb/issues/648). Return permissions when listing db users. (Thanks, @nicolai86)
+- [Issue #682](https://github.com/influxdb/influxdb/issues/682). Allow continuous queries to run without backfill (Thanks, @dhammika)
+- [Issue #689](https://github.com/influxdb/influxdb/issues/689). **REQUIRES DATA MIGRATION** Move metadata into raft
+- [Issue #255](https://github.com/influxdb/influxdb/issues/255). Support millisecond precision using `ms` suffix
+- [Issue #95](https://github.com/influxdb/influxdb/issues/95). Drop database should not be synchronous
+- [Issue #571](https://github.com/influxdb/influxdb/issues/571). Add support for arbitrary number of shard spaces and retention policies
+- Default storage engine changed to RocksDB
+
+### Bugfixes
+
+- [Issue #651](https://github.com/influxdb/influxdb/issues/651). Change permissions of symlink which fixes some installation issues. (Thanks, @Dieterbe)
+- [Issue #670](https://github.com/influxdb/influxdb/issues/670). Don't warn on missing influxdb user on fresh installs
+- [Issue #676](https://github.com/influxdb/influxdb/issues/676). Allow storing high precision integer values without losing any information
+- [Issue #695](https://github.com/influxdb/influxdb/issues/695). Prevent having duplicate field names in the write payload. (Thanks, @seunglee150)
+- [Issue #731](https://github.com/influxdb/influxdb/issues/731). Don't enable the udp plugin if the `enabled` option is set to false
+- [Issue #733](https://github.com/influxdb/influxdb/issues/733). Print an `INFO` message when the input plugin is disabled
+- [Issue #707](https://github.com/influxdb/influxdb/issues/707). Graphite input plugin should work with payloads delimited by any whitespace character
+- [Issue #734](https://github.com/influxdb/influxdb/issues/734). Don't buffer non-replicated writes
+- [Issue #465](https://github.com/influxdb/influxdb/issues/465). Recreating a db or series that is currently being deleted no longer brings back the old data
+- [Issue #358](https://github.com/influxdb/influxdb/issues/358). **BREAKING** List series should return as a single series
+- [Issue #499](https://github.com/influxdb/influxdb/issues/499). **BREAKING** Querying non-existent database or series will return an error
+- [Issue #570](https://github.com/influxdb/influxdb/issues/570). InfluxDB crashes during delete/drop of database
+- [Issue #592](https://github.com/influxdb/influxdb/issues/592). Drop series is inefficient
+
+## v0.7.3 [2014-06-13]
+
+### Bugfixes
+
+- [Issue #637](https://github.com/influxdb/influxdb/issues/637). Truncate log files if the last request wasn't written properly
+- [Issue #646](https://github.com/influxdb/influxdb/issues/646). CRITICAL: Duplicate shard ids for new shards if old shards are deleted.
+
+## v0.7.2 [2014-05-30]
+
+### Features
+
+- [Issue #521](https://github.com/influxdb/influxdb/issues/521). MODE works on all datatypes (Thanks, @richthegeek)
+
+### Bugfixes
+
+- [Issue #418](https://github.com/influxdb/influxdb/pull/418). Requests or responses larger than MAX_REQUEST_SIZE break things.
+- [Issue #606](https://github.com/influxdb/influxdb/issues/606). InfluxDB will fail to start with invalid permission if log.txt didn't exist
+- [Issue #602](https://github.com/influxdb/influxdb/issues/602). Merge will fail to work across shards
+
+## v0.7.1 [2014-05-29]
+
+### Bugfixes
+
+- [Issue #579](https://github.com/influxdb/influxdb/issues/579). Reject writes to nonexistent databases
+- [Issue #597](https://github.com/influxdb/influxdb/issues/597). Force compaction after deleting data
+
+### Features
+
+- [Issue #476](https://github.com/influxdb/influxdb/issues/476). Support ARM architecture
+- [Issue #578](https://github.com/influxdb/influxdb/issues/578). Support aliasing for expressions in parentheses
+- [Issue #544](https://github.com/influxdb/influxdb/pull/544). Support forcing node removal from a cluster
+- [Issue #591](https://github.com/influxdb/influxdb/pull/591). Support multiple udp input plugins (Thanks, @tpitale)
+- [Issue #600](https://github.com/influxdb/influxdb/pull/600). Report version, os, arch, and raftName once per day.
+
+## v0.7.0 [2014-05-23]
+
+### Bugfixes
+
+- [Issue #557](https://github.com/influxdb/influxdb/issues/557). Group by time(1y) doesn't work while time(365d) works
+- [Issue #547](https://github.com/influxdb/influxdb/issues/547). Add difference function (Thanks, @mboelstra)
+- [Issue #550](https://github.com/influxdb/influxdb/issues/550). Fix tests on 32-bit ARM
+- [Issue #524](https://github.com/influxdb/influxdb/issues/524). Arithmetic operators and where conditions don't play nice together
+- [Issue #561](https://github.com/influxdb/influxdb/issues/561). Fix missing query in parsing errors
+- [Issue #563](https://github.com/influxdb/influxdb/issues/563). Add sample config for graphite over udp
+- [Issue #537](https://github.com/influxdb/influxdb/issues/537). Incorrect query syntax causes internal error
+- [Issue #565](https://github.com/influxdb/influxdb/issues/565). Empty series names shouldn't cause a panic
+- [Issue #575](https://github.com/influxdb/influxdb/issues/575). Single point select doesn't interpret timestamps correctly
+- [Issue #576](https://github.com/influxdb/influxdb/issues/576). We shouldn't set timestamps and sequence numbers when listing cq
+- [Issue #560](https://github.com/influxdb/influxdb/issues/560). Use /dev/urandom instead of /dev/random
+- [Issue #502](https://github.com/influxdb/influxdb/issues/502). Fix a
+  race condition in assigning id to db+series+field (Thanks @ohurvitz
+  for reporting this bug and providing a script to repro)
+
+### Features
+
+- [Issue #567](https://github.com/influxdb/influxdb/issues/567). Allow selecting from multiple series names by separating them with commas (Thanks, @peekeri)
+
+### Deprecated
+
+- [Issue #460](https://github.com/influxdb/influxdb/issues/460). Don't start automatically after installing
+- [Issue #529](https://github.com/influxdb/influxdb/issues/529). Don't run influxdb as root
+- [Issue #443](https://github.com/influxdb/influxdb/issues/443). Use `name` instead of `username` when returning cluster admins
+
+## v0.6.5 [2014-05-19]
+
+### Features
+
+- [Issue #551](https://github.com/influxdb/influxdb/issues/551). Add TOP and BOTTOM aggregate functions (Thanks, @chobie)
+
+### Bugfixes
+
+- [Issue #555](https://github.com/influxdb/influxdb/issues/555). Fix a regression introduced in the raft snapshot format
+
+## v0.6.4 [2014-05-16]
+
+### Features
+
+- Make the write batch size configurable (also applies to deletes)
+- Optimize writing to multiple series
+- [Issue #546](https://github.com/influxdb/influxdb/issues/546). Add UDP support for the Graphite API (Thanks, @peekeri)
+
+### Bugfixes
+
+- Fix a bug in shard logic that caused short-term shards to be clobbered with long-term shards
+- [Issue #489](https://github.com/influxdb/influxdb/issues/489). Remove replication factor from CreateDatabase command
+
+## v0.6.3 [2014-05-13]
+
+### Features
+
+- [Issue #505](https://github.com/influxdb/influxdb/issues/505). Return a version header with the http response (Thanks, @majst01)
+- [Issue #520](https://github.com/influxdb/influxdb/issues/520). Print the version to the log file
+
+### Bugfixes
+
+- [Issue #516](https://github.com/influxdb/influxdb/issues/516). Close WAL log/index files when they aren't being used
+- [Issue #532](https://github.com/influxdb/influxdb/issues/532). Don't log graphite connection EOF as an error
+- [Issue #535](https://github.com/influxdb/influxdb/issues/535). WAL Replay hangs if response isn't received
+- [Issue #538](https://github.com/influxdb/influxdb/issues/538). Don't panic if the same series existed twice in the request with different columns
+- [Issue #536](https://github.com/influxdb/influxdb/issues/536). Joining the cluster after shards are created shouldn't cause new nodes to panic
+- [Issue #539](https://github.com/influxdb/influxdb/issues/539). count(distinct()) with fill shouldn't panic on empty groups
+- [Issue #534](https://github.com/influxdb/influxdb/issues/534). Create a new series when interpolating
+
+## v0.6.2 [2014-05-09]
+
+### Bugfixes
+
+- [Issue #511](https://github.com/influxdb/influxdb/issues/511). Don't automatically create the database when a db user is created
+- [Issue #512](https://github.com/influxdb/influxdb/issues/512). Group by should respect null values
+- [Issue #518](https://github.com/influxdb/influxdb/issues/518). Filter Infinities and NaNs from the returned json
+- [Issue #522](https://github.com/influxdb/influxdb/issues/522). Committing requests while replaying caused the WAL to skip some log files
+- [Issue #369](https://github.com/influxdb/influxdb/issues/369). Fix some edge cases with WAL recovery
+
+## v0.6.1 [2014-05-06]
+
+### Bugfixes
+
+- [Issue #500](https://github.com/influxdb/influxdb/issues/500). Support `y` suffix in time durations
+- [Issue #501](https://github.com/influxdb/influxdb/issues/501). Writes with an invalid payload should be rejected
+- [Issue #507](https://github.com/influxdb/influxdb/issues/507). New cluster admin passwords don't propagate properly to other nodes in a cluster
+- [Issue #508](https://github.com/influxdb/influxdb/issues/508). Don't replay WAL entries for servers with no shards
+- [Issue #464](https://github.com/influxdb/influxdb/issues/464). Admin UI shouldn't draw graphs for string columns
+- [Issue #480](https://github.com/influxdb/influxdb/issues/480). Large values on the y-axis get cut off
+
+## v0.6.0 [2014-05-02]
+
+### Features
+
+- [Issue #477](https://github.com/influxdb/influxdb/issues/477). Add a udp json interface (Thanks, Julien Ammous)
+- [Issue #491](https://github.com/influxdb/influxdb/issues/491). Make initial root password settable through an env variable (Thanks, Edward Muller)
+
+### Bugfixes
+
+- [Issue #469](https://github.com/influxdb/influxdb/issues/469). Drop continuous queries when a database is dropped
+- [Issue #431](https://github.com/influxdb/influxdb/issues/431). Don't log to standard output if a log file is specified in the config file
+- [Issue #483](https://github.com/influxdb/influxdb/issues/483). Return 409 if a database already exists (Thanks, Edward Muller)
+- [Issue #486](https://github.com/influxdb/influxdb/issues/486). Columns used in the target of a continuous query shouldn't be inserted in the time series
+- [Issue #490](https://github.com/influxdb/influxdb/issues/490). Database user passwords cannot be changed (Thanks, Edward Muller)
+- [Issue #495](https://github.com/influxdb/influxdb/issues/495). Enforce write permissions properly
+
+## v0.5.12 [2014-04-29]
+
+### Bugfixes
+
+- [Issue #419](https://github.com/influxdb/influxdb/issues/419), [Issue #478](https://github.com/influxdb/influxdb/issues/478). Allow hostname, raft and protobuf ports to be changed without requiring manual intervention from the user
+
+## v0.5.11 [2014-04-25]
+
+### Features
+
+- [Issue #471](https://github.com/influxdb/influxdb/issues/471). Read and write permissions should be settable through the http api
+
+### Bugfixes
+
+- [Issue #323](https://github.com/influxdb/influxdb/issues/323). Continuous queries should guard against data loops
+- [Issue #473](https://github.com/influxdb/influxdb/issues/473). Engine memory optimization
+
+## v0.5.10 [2014-04-22]
+
+### Features
+
+- [Issue #463](https://github.com/influxdb/influxdb/issues/463). Allow series names to use any character (escape by wrapping in double quotes)
+- [Issue #447](https://github.com/influxdb/influxdb/issues/447). Allow @ in usernames
+- [Issue #466](https://github.com/influxdb/influxdb/issues/466). Allow column names to use any character (escape by wrapping in double quotes)
+
+### Bugfixes
+
+- [Issue #458](https://github.com/influxdb/influxdb/issues/458). Continuous queries with group by time() and a column should insert sequence numbers of 1
+- [Issue #457](https://github.com/influxdb/influxdb/issues/457). Deleting series that start with capital letters should work
+
+## v0.5.9 [2014-04-18]
+
+### Bugfixes
+
+- [Issue #446](https://github.com/influxdb/influxdb/issues/446). Check for (de)serialization errors
+- [Issue #456](https://github.com/influxdb/influxdb/issues/456). Continuous queries failed if one of the group by columns had a null value
+- [Issue #455](https://github.com/influxdb/influxdb/issues/455). Comparison operators should ignore null values
+
+## v0.5.8 [2014-04-17]
+
+- Renamed config.toml.sample to config.sample.toml
+
+### Bugfixes
+
+- [Issue #244](https://github.com/influxdb/influxdb/issues/244). Reconstruct the query from the ast
+- [Issue #449](https://github.com/influxdb/influxdb/issues/449). Heartbeat timeouts can cause reading from connection to lock up
+- [Issue #451](https://github.com/influxdb/influxdb/issues/451). Reduce the aggregation state that is kept in memory so that
+  aggregation queries over large periods of time don't take an insane amount of memory
+
+## v0.5.7 [2014-04-15]
+
+### Features
+
+- Queries are now logged as INFO in the log file before they run
+
+### Bugfixes
+
+- [Issue #328](https://github.com/influxdb/influxdb/issues/328). Join queries with math expressions don't work
+- [Issue #440](https://github.com/influxdb/influxdb/issues/440). Heartbeat timeouts in logs
+- [Issue #442](https://github.com/influxdb/influxdb/issues/442). shouldQuerySequentially didn't work as expected
+  causing count(*) queries on large time series to use
+  lots of memory
+- [Issue #437](https://github.com/influxdb/influxdb/issues/437). Queries with negative constants don't parse properly
+- [Issue #432](https://github.com/influxdb/influxdb/issues/432). Data deleted using a delete query is resurrected after a server restart
+- [Issue #439](https://github.com/influxdb/influxdb/issues/439). Report the right location of the error in the query
+- Fix some bugs with the WAL recovery on startup
+
+## v0.5.6 [2014-04-08]
+
+### Features
+
+- [Issue #310](https://github.com/influxdb/influxdb/issues/310). Request should support multiple timeseries
+- [Issue #416](https://github.com/influxdb/influxdb/issues/416). Improve the time it takes to drop a database
+
+### Bugfixes
+
+- [Issue #413](https://github.com/influxdb/influxdb/issues/413). Don't assume that the group by interval is greater than a second
+- [Issue #415](https://github.com/influxdb/influxdb/issues/415). Include the database when sending an auth error back to the user
+- [Issue #421](https://github.com/influxdb/influxdb/issues/421). Make read timeout a config option
+- [Issue #392](https://github.com/influxdb/influxdb/issues/392). Different columns in different shards return invalid results when a query spans those shards
+
+## v0.5.5 [2014-04-04]
+
+- Upgrade leveldb 1.10 -> 1.15
+
+  This should be a backward compatible change, but is here for documentation only
+
+### Features
+
+- Add a command line option to repair corrupted leveldb databases on startup
+- [Issue #401](https://github.com/influxdb/influxdb/issues/401). No limit on the number of columns in the group by clause
+
+### Bugfixes
+
+- [Issue #398](https://github.com/influxdb/influxdb/issues/398). Support now() and NOW() in the query lang
+- [Issue #403](https://github.com/influxdb/influxdb/issues/403). Filtering should work with join queries
+- [Issue #404](https://github.com/influxdb/influxdb/issues/404). Filtering with an invalid condition shouldn't crash the server
+- [Issue #405](https://github.com/influxdb/influxdb/issues/405). Percentile shouldn't crash for a small number of values
+- [Issue #408](https://github.com/influxdb/influxdb/issues/408). Make InfluxDB recover from internal bugs and panics
+- [Issue #390](https://github.com/influxdb/influxdb/issues/390). Multiple response.WriteHeader when querying as admin
+- [Issue #407](https://github.com/influxdb/influxdb/issues/407). Start processing continuous queries only after the WAL is initialized
+- Close leveldb databases properly if we couldn't create a new Shard. See leveldb\_shard\_datastore\_test:131
+
+## v0.5.4 [2014-04-02]
+
+### Bugfixes
+
+- [Issue #386](https://github.com/influxdb/influxdb/issues/386). Drop series should work with series containing dots
+- [Issue #389](https://github.com/influxdb/influxdb/issues/389). Filtering shouldn't stop prematurely
+- [Issue #341](https://github.com/influxdb/influxdb/issues/341). Make the number of shards that are queried in parallel configurable
+- [Issue #394](https://github.com/influxdb/influxdb/issues/394). Support count(distinct) and count(DISTINCT)
+- [Issue #362](https://github.com/influxdb/influxdb/issues/362). Limit should be enforced after aggregation
+
+## v0.5.3 [2014-03-31]
+
+### Bugfixes
+
+- [Issue #378](https://github.com/influxdb/influxdb/issues/378). Indexing should return if there are no requests added since the last index
+- [Issue #370](https://github.com/influxdb/influxdb/issues/370). Filtering and limit should be enforced on the shards
+- [Issue #379](https://github.com/influxdb/influxdb/issues/379). Boolean columns should be usable in where clauses
+- [Issue #381](https://github.com/influxdb/influxdb/issues/381). Should be able to do deletes as a cluster admin
+
+## v0.5.2 [2014-03-28]
+
+### Bugfixes
+
+- [Issue #342](https://github.com/influxdb/influxdb/issues/342). Data resurrected after a server restart
+- [Issue #367](https://github.com/influxdb/influxdb/issues/367). Influxdb won't start if the api port is commented out
+- [Issue #355](https://github.com/influxdb/influxdb/issues/355). Return an error on wrong time strings
+- [Issue #331](https://github.com/influxdb/influxdb/issues/331). Allow negative time values in the where clause
+- [Issue #371](https://github.com/influxdb/influxdb/issues/371). Series index isn't deleted when the series is dropped
+- [Issue #360](https://github.com/influxdb/influxdb/issues/360). Store and recover continuous queries
+
+## v0.5.1 [2014-03-24]
+
+### Bugfixes
+
+- Revert the version of goraft due to a bug found in the latest version
+
+## v0.5.0 [2014-03-24]
+
+### Features
+
+- [Issue #293](https://github.com/influxdb/influxdb/pull/293). Implement a Graphite listener
+
+### Bugfixes
+
+- [Issue #340](https://github.com/influxdb/influxdb/issues/340). Writing many requests while replaying seems to cause commits out of order
+
+## v0.5.0-rc.6 [2014-03-20]
+
+### Bugfixes
+
+- Increase raft election timeout to avoid unnecessary re-elections
+- Sort points before writing them to avoid an explosion in the request
+  number when the points are written randomly
+- [Issue #335](https://github.com/influxdb/influxdb/issues/335). Fix regexp for interpolating more than one column value in continuous queries
+- [Issue #318](https://github.com/influxdb/influxdb/pull/318). Support EXPLAIN queries
+- [Issue #333](https://github.com/influxdb/influxdb/pull/333). Fail
+  when the password is too short or too long instead of passing it to
+  the crypto library
+
+## v0.5.0-rc.5 [2014-03-11]
+
+### Bugfixes
+
+- [Issue #312](https://github.com/influxdb/influxdb/issues/312). WAL should wait for server id to be set before recovering
+- [Issue #301](https://github.com/influxdb/influxdb/issues/301). Use ref counting to guard against race conditions in the shard cache
+- [Issue #319](https://github.com/influxdb/influxdb/issues/319). Propagate engine creation errors correctly to the user
+- [Issue #316](https://github.com/influxdb/influxdb/issues/316). Make
+  sure we don't starve goroutines if we get an access denied error
+  from one of the shards
+- [Issue #306](https://github.com/influxdb/influxdb/issues/306). Deleting/Dropping a database takes a lot of memory
+- [Issue #302](https://github.com/influxdb/influxdb/issues/302). Should be able to set negative timestamps on points
+- [Issue #327](https://github.com/influxdb/influxdb/issues/327). Make delete queries not use WAL. This addresses #315, #317 and #314
+- [Issue #321](https://github.com/influxdb/influxdb/issues/321). Make sure we split points on shards properly
+
+## v0.5.0-rc.4 [2014-03-07]
+
+### Bugfixes
+
+- [Issue #298](https://github.com/influxdb/influxdb/issues/298). Fix limit when querying multiple shards
+- [Issue #305](https://github.com/influxdb/influxdb/issues/305). Shard ids not unique after restart
+- [Issue #309](https://github.com/influxdb/influxdb/issues/309). Don't relog the requests on the remote server
+- Fix a few bugs in the WAL and refactor the way it works (this requires purging the WAL from the previous rc)
+
+## v0.5.0-rc.3 [2014-03-03]
+
+### Bugfixes
+
+- [Issue #69](https://github.com/influxdb/influxdb/issues/69). Support column aliases
+- [Issue #287](https://github.com/influxdb/influxdb/issues/287). Make the lru cache size configurable
+- [Issue #38](https://github.com/influxdb/influxdb/issues/38). Fix a memory leak discussed in this story
+- [Issue #286](https://github.com/influxdb/influxdb/issues/286). Make the number of open shards configurable
+- Make LevelDB use the max open files configuration option.
+
+## v0.5.0-rc.2 [2014-02-27]
+
+### Bugfixes
+
+- [Issue #274](https://github.com/influxdb/influxdb/issues/274). Crash after restart
+- [Issue #277](https://github.com/influxdb/influxdb/issues/277). Ensure duplicate shards won't be created
+- [Issue #279](https://github.com/influxdb/influxdb/issues/279). Limits not working on regex queries
+- [Issue #281](https://github.com/influxdb/influxdb/issues/281). `./influxdb -v` should print the sha when building from source
+- [Issue #283](https://github.com/influxdb/influxdb/issues/283). Dropping a shard and restarting in a cluster causes a panic.
+- [Issue #288](https://github.com/influxdb/influxdb/issues/288). Sequence numbers should be unique per server id
+
+## v0.5.0-rc.1 [2014-02-25]
+
+### Bugfixes
+
+- Ensure large deletes don't take too much memory
+- [Issue #240](https://github.com/influxdb/influxdb/pull/240). Unable to query against columns with `.` in the name.
+- [Issue #250](https://github.com/influxdb/influxdb/pull/250). Different results between a normal and a continuous query with a "group by" clause
+- [Issue #216](https://github.com/influxdb/influxdb/pull/216). Results with no points should exclude columns and points
+
+### Features
+
+- [Issue #243](https://github.com/influxdb/influxdb/issues/243). Should have endpoint to GET a user's attributes.
+- [Issue #269](https://github.com/influxdb/influxdb/pull/269), [Issue #65](https://github.com/influxdb/influxdb/issues/65) New clustering architecture (see docs), with the side effect that queries can be distributed between multiple shards
+- [Issue #164](https://github.com/influxdb/influxdb/pull/269), [Issue #103](https://github.com/influxdb/influxdb/pull/269), [Issue #166](https://github.com/influxdb/influxdb/pull/269), [Issue #165](https://github.com/influxdb/influxdb/pull/269), [Issue #132](https://github.com/influxdb/influxdb/pull/269) Make request log a log file instead of leveldb with recovery on startup
+
+### Deprecated
+
+- [Issue #189](https://github.com/influxdb/influxdb/issues/189). `/cluster_admins` and `/db/:db/users` return usernames in a `name` key instead of a `username` key.
+- [Issue #216](https://github.com/influxdb/influxdb/pull/216). Results with no points should exclude columns and points
+
+## v0.4.4 [2014-02-05]
+
+### Features
+
+- Make the leveldb max open files configurable in the toml file
+
+## v0.4.3 [2014-01-31]
+
+### Bugfixes
+
+- [Issue #225](https://github.com/influxdb/influxdb/issues/225). Remove a hard limit on the points returned by the datastore
+- [Issue #223](https://github.com/influxdb/influxdb/issues/223). Null values caused count(distinct()) to panic
+- [Issue #224](https://github.com/influxdb/influxdb/issues/224). Null values broke replication due to a protobuf limitation
+
+## v0.4.1 [2014-01-30]
+
+### Features
+
+- [Issue #193](https://github.com/influxdb/influxdb/issues/193). Allow logging to stdout. (Thanks, @schmurfy)
+- [Issue #190](https://github.com/influxdb/influxdb/pull/190). Add support for SSL.
+- [Issue #194](https://github.com/influxdb/influxdb/pull/194). Should be able to disable the Admin interface.
+
+### Bugfixes
+
+- [Issue #33](https://github.com/influxdb/influxdb/issues/33). Don't call WriteHeader more than once per request
+- [Issue #195](https://github.com/influxdb/influxdb/issues/195). Allow the bind address to be configurable. (Thanks, @schmurfy)
+- [Issue #199](https://github.com/influxdb/influxdb/issues/199). Make the test timeout configurable
+- [Issue #200](https://github.com/influxdb/influxdb/issues/200). Selecting `time` or `sequence_number` silently fails
+- [Issue #215](https://github.com/influxdb/influxdb/pull/215). Server fails to start up after Raft log compaction and restart.
+
+## v0.4.0 [2014-01-17]
+
+### Features
+
+- [Issue #86](https://github.com/influxdb/influxdb/issues/86). Support arithmetic expressions in the select clause
+- [Issue #92](https://github.com/influxdb/influxdb/issues/92). Change '==' to '=' and '!=' to '<>'
+- [Issue #88](https://github.com/influxdb/influxdb/issues/88). Support datetime strings
+- [Issue #64](https://github.com/influxdb/influxdb/issues/64). Shard writes and queries across the cluster with replay for briefly downed nodes (< 24 hrs)
+- [Issue #78](https://github.com/influxdb/influxdb/issues/78). Sequence numbers persist across restarts so they're not reused
+- [Issue #102](https://github.com/influxdb/influxdb/issues/102). Support expressions in the where condition
+- [Issue #101](https://github.com/influxdb/influxdb/issues/101). Support expressions in aggregates
+- [Issue #62](https://github.com/influxdb/influxdb/issues/62). Support updating and deleting column values
+- [Issue #96](https://github.com/influxdb/influxdb/issues/96). Replicate deletes in a cluster
+- [Issue #94](https://github.com/influxdb/influxdb/issues/94). Delete queries
+- [Issue #116](https://github.com/influxdb/influxdb/issues/116). Use proper logging
+- [Issue #40](https://github.com/influxdb/influxdb/issues/40). Use TOML instead of JSON in the config file
+- [Issue #99](https://github.com/influxdb/influxdb/issues/99). Support list series in the query language
+- [Issue #149](https://github.com/influxdb/influxdb/issues/149). Cluster admins should be able to perform reads and writes.
+- [Issue #108](https://github.com/influxdb/influxdb/issues/108). Querying one point using `time =`
+- [Issue #114](https://github.com/influxdb/influxdb/issues/114). Servers should periodically check that they're consistent.
+- [Issue #93](https://github.com/influxdb/influxdb/issues/93). Should be able to drop a time series
+- [Issue #177](https://github.com/influxdb/influxdb/issues/177). Support drop series in the query language.
+- [Issue #184](https://github.com/influxdb/influxdb/issues/184). Implement Raft log compaction.
+- [Issue #153](https://github.com/influxdb/influxdb/issues/153). Implement continuous queries
+
+### Bugfixes
+
+- [Issue #90](https://github.com/influxdb/influxdb/issues/90). Group by multiple columns panic
+- [Issue #89](https://github.com/influxdb/influxdb/issues/89). 'Group by' combined with 'where' not working
+- [Issue #106](https://github.com/influxdb/influxdb/issues/106). Don't panic if we only see one point and can't calculate a derivative
+- [Issue #105](https://github.com/influxdb/influxdb/issues/105). Panic when using a where clause that references columns with null values
+- [Issue #61](https://github.com/influxdb/influxdb/issues/61). Remove default limits from queries
+- [Issue #118](https://github.com/influxdb/influxdb/issues/118). Make column names starting with '_' legal
+- [Issue #121](https://github.com/influxdb/influxdb/issues/121). Don't fall back to the cluster admin auth if the db user auth fails
+- [Issue #127](https://github.com/influxdb/influxdb/issues/127). Return an error on delete queries with a where condition that doesn't have time
+- [Issue #117](https://github.com/influxdb/influxdb/issues/117). Fill empty groups with default values
+- [Issue #150](https://github.com/influxdb/influxdb/pull/150). Fix parser for when multiple divisions look like a regex.
+- [Issue #158](https://github.com/influxdb/influxdb/issues/158). Logged deletes should be stored with the time range if missing.
+- [Issue #136](https://github.com/influxdb/influxdb/issues/136). Make sure writes are replicated in order to avoid triggering replays
+- [Issue #145](https://github.com/influxdb/influxdb/issues/145). Server fails to join the cluster if all nodes start at the same time.
+- [Issue #176](https://github.com/influxdb/influxdb/issues/176). Drop database should take effect on all nodes
+- [Issue #180](https://github.com/influxdb/influxdb/issues/180). Column names not returned when running a multi-node cluster and writing more than one point.
+- [Issue #182](https://github.com/influxdb/influxdb/issues/182). Queries with an invalid limit clause crash the server
+
+### Deprecated
+
+- Deprecate '==' and '!=' in favor of '=' and '<>', respectively
+- Deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint
+- Deprecate the `username` field for a more consistent `name` field in `/db/:db/users` and `/cluster_admins`
+- Deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should
+  be used to update user flags, password, etc.
+- Querying for column names that don't exist no longer throws an error.
+
+## v0.3.2
+
+### Features
+
+- [Issue #82](https://github.com/influxdb/influxdb/issues/82). Add endpoint for listing available admin interfaces.
+- [Issue #80](https://github.com/influxdb/influxdb/issues/80). Support durations when specifying start and end time
+- [Issue #81](https://github.com/influxdb/influxdb/issues/81). Add support for IN
+
+### Bugfixes
+
+- [Issue #75](https://github.com/influxdb/influxdb/issues/75). Don't allow time series names that start with underscore
+- [Issue #85](https://github.com/influxdb/influxdb/issues/85). Non-existing columns exist after they have been queried before
+
+## v0.3.0
+
+### Features
+
+- [Issue #51](https://github.com/influxdb/influxdb/issues/51). Implement first and last aggregates
+- [Issue #35](https://github.com/influxdb/influxdb/issues/35). Support table aliases in Join Queries
+- [Issue #71](https://github.com/influxdb/influxdb/issues/71). Add WillReturnSingleSeries to the Query
+- [Issue #61](https://github.com/influxdb/influxdb/issues/61). Limit should default to 10k
+- [Issue #59](https://github.com/influxdb/influxdb/issues/59). Add histogram aggregate function
+
+### Bugfixes
+
+- Fix joins and merges when the query is a descending order query
+- [Issue #57](https://github.com/influxdb/influxdb/issues/57). Don't panic when type of time != float
+- [Issue #63](https://github.com/influxdb/influxdb/issues/63). Aggregate queries should not have a sequence_number column
+
+## v0.2.0
+
+### Features
+
+- [Issue #37](https://github.com/influxdb/influxdb/issues/37). Support the negation of the regex matcher !~
+- [Issue #47](https://github.com/influxdb/influxdb/issues/47). Spill out query and database detail at the time of bug report
+
+### Bugfixes
+
+- [Issue #36](https://github.com/influxdb/influxdb/issues/36). The regex operator should be =~ not ~=
+- [Issue #39](https://github.com/influxdb/influxdb/issues/39). Return proper content types from the http api
+- [Issue #42](https://github.com/influxdb/influxdb/issues/42). Make the api consistent with the docs
+- [Issue #41](https://github.com/influxdb/influxdb/issues/41). Table/Points not deleted when database is dropped
+- [Issue #45](https://github.com/influxdb/influxdb/issues/45). Aggregation shouldn't mess up the order of the points
+- [Issue #44](https://github.com/influxdb/influxdb/issues/44). Fix crashes on RHEL 5.9
+- [Issue #34](https://github.com/influxdb/influxdb/issues/34). Ascending order always returns null for columns that have a null value
+- [Issue #55](https://github.com/influxdb/influxdb/issues/55). Limit should limit the points that match the Where clause
+- [Issue #53](https://github.com/influxdb/influxdb/issues/53). Writing null values via HTTP API fails
+
+### Deprecated
+
+- Preparing to deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint
+- Preparing to deprecate the `username` field for a more consistent `name` field in `/db/:db/users`
+- Preparing to deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should
+  be used to update user flags, password, etc.
+
+## v0.1.0
+
+### Features
+
+- [Issue #29](https://github.com/influxdb/influxdb/issues/29). Semicolon is now optional in queries
+- [Issue #31](https://github.com/influxdb/influxdb/issues/31). Support Basic Auth as well as query params for authentication.
+
+### Bugfixes
+
+- Don't allow creating users with an empty username
+- [Issue #22](https://github.com/influxdb/influxdb/issues/22). Don't set goroot if it was set
+- [Issue #25](https://github.com/influxdb/influxdb/issues/25). Fix queries that use the median aggregator
+- [Issue #26](https://github.com/influxdb/influxdb/issues/26). Default log and db directories should be in /opt/influxdb/shared/data
+- [Issue #27](https://github.com/influxdb/influxdb/issues/27). Group by should not blow up if one of the columns in group by has null values
+- [Issue #30](https://github.com/influxdb/influxdb/issues/30). Column indexes/names getting off somehow
+- [Issue #32](https://github.com/influxdb/influxdb/issues/32). Fix many typos in the codebase. (Thanks, @pborreli)
+
+## v0.0.9
+
+#### Features
+
+- Add stddev(...) support
+- Better docs. (Thanks, @auxesis and @d-snp)
+
+#### Bugfixes
+
+- Set PYTHONPATH and CC appropriately on Mac OS X.
+- [Issue #18](https://github.com/influxdb/influxdb/issues/18). Fix 386 debian and redhat packages
+- [Issue #23](https://github.com/influxdb/influxdb/issues/23). Fix the init scripts on redhat
+
+## v0.0.8
+
+#### Features
+
+- Add a way to reset the root password from the command line.
+- Add distinct(..) and derivative(...) support
+- Print test coverage if running go1.2
+
+#### Bugfixes
+
+- Fix the default admin site path in the .deb and .rpm packages.
+- Fix the configuration filename in the .tar.gz package.
+
+## v0.0.7
+
+#### Features
+
+- Include the admin site in the repo to make it easier for newcomers.
+
+## v0.0.6
+
+#### Features
+
+- Add count(distinct(..)) support
+
+#### Bugfixes
+
+- Reuse levigo read/write options.
+
+## v0.0.5
+
+#### Features
+
+- Cache passwords in memory to speed up password verification
+- Add MERGE and INNER JOIN support
+
+#### Bugfixes
+
+- All columns should be returned if `select *` was used
+- Read/Write benchmarks
+
+## v0.0.2
+
+#### Features
+
+- Add an admin UI
+- Deb and RPM packages
+
+#### Bugfixes
+
+- Fix some nil pointer dereferences
+- Clean up the aggregators implementation
+
+## v0.0.1 [2013-10-22]
+
+  * Initial Release
diff --git a/_third_party/github.com/influxdb/influxdb/CONTRIBUTING.md b/_third_party/github.com/influxdb/influxdb/CONTRIBUTING.md
new file mode 100644
index 0000000000..83c59e8e85
--- /dev/null
+++ b/_third_party/github.com/influxdb/influxdb/CONTRIBUTING.md
@@ -0,0 +1,247 @@
+Contributing to InfluxDB
+========================
+
+Bug reports
+---------------
+Before you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed. If you file an issue, please include the following.
+* Full details of your operating system (or distribution), e.g. 64-bit Ubuntu 14.04.
+* The version of InfluxDB you are running.
+* Whether you installed it using a pre-built package, or built it from source.
+* A small test case, if applicable, that demonstrates the issue.
+
+Remember the golden rule of bug reports: **The easier you make it for us to reproduce the problem, the faster it will get fixed.**
+If you have never written a bug report before, or if you want to brush up on your bug reporting skills, we recommend reading [Simon Tatham's essay "How to Report Bugs Effectively."](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html)
+
+Test cases should be in the form of `curl` commands. For example:
+```
+# create database
+curl -G http://localhost:8086/query --data-urlencode "q=CREATE DATABASE mydb"
+
+# create retention policy
+curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY myrp ON mydb DURATION 365d REPLICATION 1 DEFAULT"
+
+# write data
+curl -X POST http://localhost:8086/write --data-urlencode "db=mydb" --data-binary "cpu,region=useast,host=server_1,service=redis value=61"
+
+# Delete a Measurement
+curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=DROP MEASUREMENT cpu'
+
+# Query the Measurement
+# Bug: expected it to return no data, but data comes back.
+curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=SELECT * from cpu'
+```
+**If you don't include a clear test case like this, your issue may not be investigated, and may even be closed**. If writing the data is too difficult, please zip up your data directory and include a link to it in your bug report.
+
+Please note that issues are *not the place to file general questions* such as "how do I use collectd with InfluxDB?" Questions of this nature should be sent to the [Google Group](https://groups.google.com/forum/#!forum/influxdb), not filed as issues. Issues like this will be closed.
+
+Feature requests
+---------------
+We really like to receive feature requests, as it helps us prioritize our work. Please be clear about your requirements, as incomplete feature requests may simply be closed if we don't understand what you would like to see added to InfluxDB.
+
+Contributing to the source code
+---------------
+
+InfluxDB follows the standard Go project structure. This means that all
+your Go development is done in `$GOPATH/src`. GOPATH can be any
+directory under which InfluxDB and all its dependencies will be
+cloned. For more details on the recommended Go project structure, see
+[How to Write Go Code](http://golang.org/doc/code.html) and
+[Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/), or you can just follow
+the steps below.
+
+Submitting a pull request
+------------
+To submit a pull request you should fork the InfluxDB repository, and make your change on a feature branch of your fork. Then generate a pull request from your branch against *master* of the InfluxDB repository. Include in your pull request details of your change -- the why *and* the how -- as well as the testing you performed. Also, be sure to run the test suite with your change in place. Changes that cause tests to fail cannot be merged.
+
+There will usually be some back and forth as we finalize the change, but once that completes it may be merged.
+
+To assist in review for the PR, please add the following to your pull request comment:
+
+```md
+- [ ] CHANGELOG.md updated
+- [ ] Rebased/mergeable
+- [ ] Tests pass
+- [ ] Sign [CLA](http://influxdb.com/community/cla.html) (if not already signed)
+```
+
+Use of third-party packages
+------------
+A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking, we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessary. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) as the storage engine. So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use.
+
+For rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/).
+
+Signing the CLA
+---------------
+
+If you are going to be contributing back to InfluxDB please take a
+second to sign our CLA, which can be found
+[on our website](http://influxdb.com/community/cla.html).
+
+Installing Go
+-------------
+InfluxDB requires Go 1.5 or greater.
+
+At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions
+on how to install it, see [the gvm page on github](https://github.com/moovweb/gvm).
+
+After installing gvm you can install and set the default Go version by
+running the following:
+
+    gvm install go1.5
+    gvm use go1.5 --default
+
+Revision Control Systems
+-------------
+Go has the ability to import remote packages via revision control systems with the `go get` command. To ensure that you can retrieve any remote package, be sure to install the following RCS software on your system.
+Currently the project only depends on `git` and `mercurial`.
+
+* [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git)
+* [Install Mercurial](http://mercurial.selenic.com/wiki/Download)
+
+Getting the source
+------
+Set up the project structure and fetch the repo like so:
+
+    mkdir $HOME/gocodez
+    export GOPATH=$HOME/gocodez
+    go get github.com/influxdb/influxdb
+
+You can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh file to have it set for every shell instead of having to run it manually every time.
+
+Cloning a fork
+-------------
+If you wish to work with a fork of InfluxDB, your own fork for example, you must still follow the directory structure above. But instead of cloning the main repo, clone your fork. Follow the steps below to work with a fork:
+
+    export GOPATH=$HOME/gocodez
+    mkdir -p $GOPATH/src/github.com/influxdb
+    cd $GOPATH/src/github.com/influxdb
+    git clone git@github.com:<username>/influxdb
+
+Retaining the directory structure `$GOPATH/src/github.com/influxdb` is necessary so that Go imports work correctly.
+
+Pre-commit checks
+-------------
+
+We have a pre-commit hook to make sure code is formatted properly
+and vetted before you commit any changes. We strongly recommend using the
+pre-commit hook to guard against accidentally committing unformatted
+code. To use the pre-commit hook, run the following:
+
+    cd $GOPATH/src/github.com/influxdb/influxdb
+    cp .hooks/pre-commit .git/hooks/
+
+In case the commit is rejected because it's not formatted, you can run
+the following to format and vet the code:
+
+```
+go fmt ./...
+go vet ./...
+```
+
+To install go vet, run the following command:
+```
+go get golang.org/x/tools/cmd/vet
+```
+
+NOTE: If you have not installed mercurial, the above command will fail. See [Revision Control Systems](#revision-control-systems) above.
+
+For more information on `go vet`, [read the GoDoc](https://godoc.org/golang.org/x/tools/cmd/vet).
+
+Build and Test
+-----
+
+Make sure you have Go installed and the project structure as shown above. To then build the project, execute the following commands:
+
+```bash
+cd $GOPATH/src/github.com/influxdb
+go get -u -f -t ./...
+go build ./...
+```
+
+To then install the binaries, run the following command. They can be found in `$GOPATH/bin`. Please note that the InfluxDB binary is named `influxd`, not `influxdb`.
+
+```bash
+go install ./...
+```
+
+To set the version and commit flags during the build, pass the following to the build command:
+
+```bash
+-ldflags="-X main.version=$VERSION -X main.branch=$BRANCH -X main.commit=$COMMIT -X main.buildTime=$TIME"
+```
+
+where `$VERSION` is the version, `$BRANCH` is the branch, `$COMMIT` is the git commit hash, and `$TIME` is the build timestamp.
+
+If you want to build packages, see the `package.sh` help:
+```bash
+package.sh -h
+```
+
+To run the tests, execute the following command:
+
+```bash
+cd $GOPATH/src/github.com/influxdb/influxdb
+go test -v ./...
+
+# run tests that match some pattern
+go test -run=TestDatabase . -v
+
+# run tests and show coverage
+go test -coverprofile /tmp/cover . && go tool cover -html /tmp/cover
+```
+
+To install go cover, run the following command:
+```
+go get golang.org/x/tools/cmd/cover
+```
+
+Generated Google Protobuf code
+-----------------
+Most changes to the source do not require that the generated protocol buffer code be changed. But if you need to modify the protocol buffer code, you'll first need to install the protocol buffers toolchain.
+
+First install the [protocol buffer compiler](https://developers.google.com/protocol-buffers/) 2.6.1 or later for your OS.
+
+Then install the Go plugins:
+
+```bash
+go get github.com/gogo/protobuf/proto
+go get github.com/gogo/protobuf/protoc-gen-gogo
+go get github.com/gogo/protobuf/gogoproto
+```
+
+Finally, run `go generate` after updating any `*.proto` file:
+
+```bash
+go generate ./...
+```
+
+**Troubleshooting**
+
+If generating the protobuf code is failing for you, check each of the following:
+ * Ensure the protobuf library can be found. Make sure that `LD_LIBRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed.
+ * Ensure the command `protoc-gen-gogo`, found in `GOPATH/bin`, is on your path. This can be done by adding `GOPATH/bin` to `PATH`.
+
+Profiling
+-----
+When troubleshooting problems with CPU or memory, the Go toolchain can be helpful. You can start InfluxDB with CPU or memory profiling turned on. For example:
+
+```sh
+# start influx with profiling
+./influxd -cpuprofile influxd.prof
+# run queries, writes, whatever you're testing
+# open up pprof
+go tool pprof influxd influxd.prof
+# once inside run "web", opens up browser with the CPU graph
+# can also run "web <function name>" to zoom in, or "list <function name>" to see specific lines
+```
+
+Continuous Integration testing
+-----
+InfluxDB uses CircleCI for continuous integration testing. To see how the code is built and tested, check out [this file](https://github.com/influxdb/influxdb/blob/master/circle-test.sh). It closely follows the build and test process outlined above. You can see the exact version of Go InfluxDB uses for testing by consulting that file.
+
+Useful links
+------------
+- [Useful techniques in Go](http://arslan.io/ten-useful-techniques-in-go)
+- [Go in production](http://peter.bourgon.org/go-in-production/)
+- [Principles of designing Go APIs with channels](https://inconshreveable.com/07-08-2014/principles-of-designing-go-apis-with-channels/)
+- [Common mistakes in Golang](http://soryy.com/blog/2014/common-mistakes-with-go-lang/). Especially this section: `Loops, Closures, and Local Variables`
diff --git a/_third_party/github.com/influxdb/influxdb/DOCKER.md b/_third_party/github.com/influxdb/influxdb/DOCKER.md
new file mode 100644
index 0000000000..e78187d9b7
--- /dev/null
+++ b/_third_party/github.com/influxdb/influxdb/DOCKER.md
@@ -0,0 +1,44 @@
+# Docker Setup
+
+This document describes how to build and run a minimal InfluxDB container under Docker. Currently, it has only been tested for local development and assumes that you have a working Docker environment.
+
+## Building Image
+
+To build a Docker image for InfluxDB from your current checkout, run the following:
+
+```
+$ ./build-docker.sh
+```
+
+This script uses the `golang:1.5` image to build a fully static binary of `influxd` and then adds it to a minimal `scratch` image.
+
+To build the image using a different version of Go:
+
+```
+$ GO_VER=1.4.2 ./build-docker.sh
+```
+
+Available versions can be found [here](https://hub.docker.com/_/golang/).
+
+## Single Node Container
+
+This will start an interactive, single-node container that publishes the container's ports `8086` and `8088` to the host's ports `8086` and `8088`, respectively. This is identical to starting `influxd` manually.
+
+```
+$ docker run -it -p 8086:8086 -p 8088:8088 influxdb
+```
+
+## Multi-Node Cluster
+
+This will create a simple 3-node cluster. The data is stored within the container and will be lost when the container is removed. This is only useful for test clusters.
+
+The `HOST_IP` env variable should be your host IP if running under Linux, or the VirtualBox VM IP if running under OS X. On OS X, this would be something like `$(docker-machine ip dev)` or `$(boot2docker ip)`, depending on which Docker tool you are using.
+
+```
+$ export HOST_IP=<your host IP>
+$ docker run -it -p 8086:8086 -p 8088:8088 influxdb -hostname $HOST_IP:8088
+$ docker run -it -p 8186:8086 -p 8188:8088 influxdb -hostname $HOST_IP:8188 -join $HOST_IP:8088
+$ docker run -it -p 8286:8086 -p 8288:8088 influxdb -hostname $HOST_IP:8288 -join $HOST_IP:8088
+```
+
diff --git a/_third_party/github.com/influxdb/influxdb/Dockerfile b/_third_party/github.com/influxdb/influxdb/Dockerfile
new file mode 100644
index 0000000000..d30cd300db
--- /dev/null
+++ b/_third_party/github.com/influxdb/influxdb/Dockerfile
@@ -0,0 +1,24 @@
+FROM busybox:ubuntu-14.04
+
+MAINTAINER Jason Wilder ""
+
+# admin, http, udp, cluster, graphite, opentsdb, collectd
+EXPOSE 8083 8086 8086/udp 8088 2003 4242 25826
+
+WORKDIR /app
+
+# copy binary into image
+COPY influxd /app/
+
+# Add influxd to the PATH
+ENV PATH=/app:$PATH
+
+# Generate a default config
+RUN influxd config > /etc/influxdb.toml
+
+# Use /data for all disk storage
+RUN sed -i 's/dir = "\/.*influxdb/dir = "\/data/' /etc/influxdb.toml
+
+VOLUME ["/data"]
+
+ENTRYPOINT ["influxd", "--config", "/etc/influxdb.toml"]
diff --git a/_third_party/github.com/influxdb/influxdb/Dockerfile_test_ubuntu32 b/_third_party/github.com/influxdb/influxdb/Dockerfile_test_ubuntu32
new file mode 100644
index 0000000000..caaf81dc9c
--- /dev/null
+++ b/_third_party/github.com/influxdb/influxdb/Dockerfile_test_ubuntu32
@@ -0,0 +1,12 @@
+FROM 32bit/ubuntu:14.04
+
+RUN apt-get update && apt-get install -y python-software-properties software-properties-common git
+RUN add-apt-repository ppa:evarlast/golang1.5
+RUN apt-get update && apt-get install -y -o Dpkg::Options::="--force-overwrite" golang-go
+
+ENV GOPATH=/root/go
+RUN mkdir -p /root/go/src/github.com/influxdb/influxdb
+RUN mkdir -p /tmp/artifacts
+
+VOLUME /root/go/src/github.com/influxdb/influxdb
+VOLUME /tmp/artifacts
diff --git a/_third_party/github.com/influxdb/influxdb/LICENSE b/_third_party/github.com/influxdb/influxdb/LICENSE
new file mode 100644
index 0000000000..d50222706c
--- /dev/null
+++ b/_third_party/github.com/influxdb/influxdb/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2015 Errplane Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/_third_party/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md b/_third_party/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md
new file mode 100644
index 0000000000..abba2b2414
--- /dev/null
+++ b/_third_party/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md
@@ -0,0 +1,19 @@
+# List
+- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
+- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
+- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
+- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
+- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
+- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
+- github.com/rakyll/statik/fs [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
+- github.com/kimor79/gollectd [BSD LICENSE](https://github.com/kimor79/gollectd/blob/master/LICENSE)
+- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
+- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE)
+- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
+- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
+- glyphicons [LICENSE](http://glyphicons.com/license/)
+- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
+- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
+- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
+- golang.org/x/crypto/bcrypt [BSD LICENSE](https://go.googlesource.com/crypto/+/master/LICENSE)
+
diff --git a/_third_party/github.com/influxdb/influxdb/QUERIES.md b/_third_party/github.com/influxdb/influxdb/QUERIES.md
new file mode 100644
index 0000000000..8491aa7ec2
--- /dev/null
+++ b/_third_party/github.com/influxdb/influxdb/QUERIES.md
@@ -0,0 +1,180 @@
+The top level name is called a measurement. These names can contain any characters. Then there are field names, field values, tag keys and tag values, which can also contain any characters. However, if the measurement, field, or tag contains any character other than [A-Z,a-z,0-9,_], or if it starts with a digit, it must be double-quoted. Therefore anywhere a measurement name, field name, field value, tag name, or tag value appears it should be wrapped in double quotes.
+
+# Databases & retention policies
+
+```sql
+-- create a database
+CREATE DATABASE <name>
+
+-- create a retention policy
+CREATE RETENTION POLICY <rp-name> ON <db-name> DURATION <duration> REPLICATION <n> [DEFAULT]
+
+-- alter retention policy
+ALTER RETENTION POLICY <rp-name> ON <db-name> (DURATION <duration> | REPLICATION <n> | DEFAULT)+
+
+-- drop a database
+DROP DATABASE <name>
+
+-- drop a retention policy
+DROP RETENTION POLICY <rp-name> ON <db-name>
+```
+where `<duration>` is either `INF` for infinite retention, or an integer followed by the desired unit of time: u,ms,s,m,h,d,w for microseconds, milliseconds, seconds, minutes, hours, days, or weeks, respectively. `<n>` must be an integer.
+
+If present, `DEFAULT` sets the retention policy as the default retention policy for writes and reads.
+
+# Users and permissions
+
+```sql
+-- create user
+CREATE USER <username> WITH PASSWORD '<password>'
+
+-- grant privilege on a database
+GRANT <privilege> ON <db> TO <username>
+
+-- grant cluster admin privileges
+GRANT ALL [PRIVILEGES] TO <username>
+
+-- revoke privilege
+REVOKE <privilege> ON <db> FROM <username>
+
+-- revoke all privileges for a DB
+REVOKE ALL [PRIVILEGES] ON <db> FROM <username>
+
+-- revoke all privileges including cluster admin
+REVOKE ALL [PRIVILEGES] FROM <username>
+
+-- combine db creation with privilege assignment (user must already exist)
+CREATE DATABASE <name> GRANT <privilege> TO <username>
+CREATE DATABASE <name> REVOKE <privilege> FROM <username>
+
+-- delete a user
+DROP USER <username>
+```
+where `<privilege> := READ | WRITE | ALL`.
+
+Authentication must be enabled in the influxdb.conf file for user permissions to be in effect.
+
+By default, newly created users have no privileges to any databases.
+
+Cluster administration privileges automatically grant full read and write permissions to all databases, regardless of subsequent database-specific privilege revocation statements.
+
+# Select
+
+```sql
+SELECT mean(value) from cpu WHERE host = 'serverA' AND time > now() - 4h GROUP BY time(5m)
+
+SELECT mean(value) from cpu WHERE time > now() - 4h GROUP BY time(5m), region
+```
+
+## Group By
+
+# Delete
+
+# Series
+
+## Destroy
+
+```sql
+DROP MEASUREMENT <name>
+DROP MEASUREMENT cpu WHERE region = 'uswest'
+```
+
+## Show
+
+Show series queries are for pulling out individual series from measurement names and tag data. They're useful for discovery.
+
+```sql
+-- show all databases
+SHOW DATABASES
+
+-- show measurement names
+SHOW MEASUREMENTS
+SHOW MEASUREMENTS LIMIT 15
+SHOW MEASUREMENTS LIMIT 10 OFFSET 40
+SHOW MEASUREMENTS WHERE service = 'redis'
+-- LIMIT and OFFSET can be applied to any of the SHOW type queries
+
+-- show all series across all measurements/tagsets
+SHOW SERIES
+
+-- show all series for any measurements where tag key region = tag value 'uswest'
+SHOW SERIES WHERE region = 'uswest'
+
+SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10
+
+-- returns rows 100 - 109 in the result. In the case of SHOW SERIES, the results are
+-- series split into measurements. Each series counts as a row. So you could see only a
+-- single measurement returned, but 10 series within it.
+SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 OFFSET 100
+
+-- show all retention policies on a database
+SHOW RETENTION POLICIES ON mydb
+
+-- show all tag keys across all measurements
+SHOW TAG KEYS
+
+-- show all the tag keys for a given measurement
+SHOW TAG KEYS FROM cpu
+SHOW TAG KEYS FROM temperature, wind_speed
+
+-- show all the tag values. note that a single WHERE TAG KEY = '...' clause is required
+SHOW TAG VALUES WITH TAG KEY = 'region'
+SHOW TAG VALUES FROM cpu WHERE region = 'uswest' WITH TAG KEY = 'host'
+
+-- and you can do stuff against fields
+SHOW FIELD KEYS FROM cpu
+
+-- but you can't do this
+SHOW FIELD VALUES
+-- we don't index field values, so this query should be invalid.
+
+-- show all users
+SHOW USERS
+```
+
+Note that `FROM` and `WHERE` are optional clauses in most of the show series queries.
+
+And the show series output looks like this:
+
+```json
+[
+    {
+        "name": "cpu",
+        "columns": ["id", "region", "host"],
+        "values": [
+            1, "uswest", "servera",
+            2, "uswest", "serverb"
+        ]
+    },
+    {
+        "name": "response_time",
+        "columns": ["id", "application", "host"],
+        "values": [
+            3, "myRailsApp", "servera"
+        ]
+    }
+]
+```
+
+# Continuous Queries
+
+Continuous queries are going to be inspired by MySQL `TRIGGER` syntax:
+
+http://dev.mysql.com/doc/refman/5.0/en/trigger-syntax.html
+
+Instead of having automatically-assigned ids, named continuous queries allow for some level of duplication prevention,
+particularly in the case where creation is scripted.
+
+## Create
+
+    CREATE CONTINUOUS QUERY <name> AS SELECT ... FROM ...
+
+## Destroy
+
+    DROP CONTINUOUS QUERY <name>
+
+## List
+
+    SHOW CONTINUOUS QUERIES
diff --git a/_third_party/github.com/influxdb/influxdb/README.md b/_third_party/github.com/influxdb/influxdb/README.md
new file mode 100644
index 0000000000..94b8547abf
--- /dev/null
+++ b/_third_party/github.com/influxdb/influxdb/README.md
@@ -0,0 +1,72 @@
+# InfluxDB [![Circle CI](https://circleci.com/gh/influxdb/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdb/influxdb/tree/master)
+
+## An Open-Source, Distributed, Time Series Database
+
+> InfluxDB v0.9.0 is now out. Going forward, the 0.9.x series of releases will not make breaking API changes or breaking changes to the underlying data storage. However, 0.9.0 clustering should be considered an alpha release.
+
+InfluxDB is an open source **distributed time series database** with
+**no external dependencies**. It's useful for recording metrics and
+events, and performing analytics.
+
+## Features
+
+* Built-in [HTTP API](http://influxdb.com/docs/v0.9/concepts/reading_and_writing_data.html) so you don't have to write any server-side code to get up and running.
+* Data can be tagged, allowing very flexible querying.
+* SQL-like query language.
+* Clustering is supported out of the box, so that you can scale horizontally to handle your data.
+* Simple to install and manage, and fast to get data in and out.
+* It aims to answer queries in real time. That means every data point is
+  indexed as it comes in and is immediately available in queries that
+  should return in < 100ms.
+
+## Getting Started
+*The following directions apply only to the 0.9.0 release or building from the source on master.*
+
+### Building
+
+You don't need to build the project to use it - you can use any of our
+[pre-built packages](http://influxdb.com/download/index.html) to install InfluxDB. That's
+the recommended way to get it running. However, if you want to contribute to the core of InfluxDB, you'll need to build.
+For those adventurous enough, you can
+[follow along on our docs](http://github.com/influxdb/influxdb/blob/master/CONTRIBUTING.md).
+
+### Starting InfluxDB
+* `service influxdb start` if you have installed InfluxDB using an official Debian or RPM package.
+* `systemctl start influxdb` if you have installed InfluxDB using an official Debian or RPM package, and are running a distro with `systemd`. For example, Ubuntu 15 or later.
+* `$GOPATH/bin/influxd` if you have built InfluxDB from source.
+
+### Creating your first database
+
+```
+curl -G 'http://localhost:8086/query' --data-urlencode "q=CREATE DATABASE mydb"
+```
+
+### Insert some data
+
+```
+curl -XPOST 'http://localhost:8086/write?db=mydb' \
+-d 'cpu,host=server01,region=uswest load=42 1434055562000000000'
+
+curl -XPOST 'http://localhost:8086/write?db=mydb' \
+-d 'cpu,host=server02,region=uswest load=78 1434055562000000000'
+
+curl -XPOST 'http://localhost:8086/write?db=mydb' \
+-d 'cpu,host=server03,region=useast load=15.4 1434055562000000000'
+```
+
+### Query for the data
+
+```
+curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \
+--data-urlencode "q=SELECT * FROM cpu WHERE host='server01' AND time < now() - 1d"
+```
+
+### Analyze the data
+
+```
+curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \
+--data-urlencode "q=SELECT mean(load) FROM cpu WHERE region='uswest'"
+```
+
+## Helpful Links
+
+* Understand the [design goals and motivations of the project](http://influxdb.com/docs/v0.9/introduction/overview.html).
+* Follow the [getting started guide](http://influxdb.com/docs/v0.9/introduction/getting_started.html) to find out how to install InfluxDB, start writing more data, and issue more queries - in just a few minutes.
+* See the [HTTP API documentation to start writing a library for your favorite language](http://influxdb.com/docs/v0.9/concepts/reading_and_writing_data.html).
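The curl calls in the README above also serve as a reference for writing a client in any language. As a rough sketch only (not part of the vendored README), the same write and query can be issued from Go using nothing but the standard library; it assumes a local `influxd` listening on `localhost:8086` and the `mydb` database created above:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// Write one point in line protocol, mirroring the curl -XPOST example above.
	point := "cpu,host=server01,region=uswest load=42 1434055562000000000"
	resp, err := http.Post("http://localhost:8086/write?db=mydb", "text/plain", strings.NewReader(point))
	if err != nil {
		panic(err)
	}
	resp.Body.Close() // a 204 response means the point was accepted

	// Query it back, mirroring the curl -G example above.
	q := url.Values{"db": {"mydb"}, "q": {"SELECT * FROM cpu WHERE host='server01'"}}
	resp, err = http.Get("http://localhost:8086/query?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // raw JSON results, as in the curl examples
}
```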
diff --git a/_third_party/github.com/influxdb/influxdb/balancer.go b/_third_party/github.com/influxdb/influxdb/balancer.go
new file mode 100644
index 0000000000..d4286edae4
--- /dev/null
+++ b/_third_party/github.com/influxdb/influxdb/balancer.go
@@ -0,0 +1,78 @@
+package influxdb
+
+import (
+	"math/rand"
+
+	"bosun.org/_third_party/github.com/influxdb/influxdb/meta"
+)
+
+// Balancer represents a load-balancing algorithm for a set of nodes
+type Balancer interface {
+	// Next returns the next Node according to the balancing method
+	// or nil if there are no nodes available
+	Next() *meta.NodeInfo
+}
+
+type nodeBalancer struct {
+	nodes []meta.NodeInfo // data nodes to balance between
+	p     int             // current node index
+}
+
+// NewNodeBalancer creates a shuffled, round-robin balancer so that
+// multiple instances will return nodes in randomized order and
+// each returned node will be repeated in a cycle
+func NewNodeBalancer(nodes []meta.NodeInfo) Balancer {
+	// make a copy of the node slice so we can randomize it
+	// without affecting the original instance as well as ensure
+	// that each Balancer returns nodes in a different order
+	b := &nodeBalancer{}
+
+	b.nodes = make([]meta.NodeInfo, len(nodes))
+	copy(b.nodes, nodes)
+
+	b.shuffle()
+	return b
+}
+
+// shuffle randomizes the ordering of the balancer's available nodes
+func (b *nodeBalancer) shuffle() {
+	for i := range b.nodes {
+		j := rand.Intn(i + 1)
+		b.nodes[i], b.nodes[j] = b.nodes[j], b.nodes[i]
+	}
+}
+
+// online returns a slice of the nodes that are online
+func (b *nodeBalancer) online() []meta.NodeInfo {
+	return b.nodes
+	// now := time.Now().UTC()
+	// up := []meta.NodeInfo{}
+	// for _, n := range b.nodes {
+	// 	if n.OfflineUntil.After(now) {
+	// 		continue
+	// 	}
+	// 	up = append(up, n)
+	// }
+	// return up
+}
+
+// Next returns the next available node
+func (b *nodeBalancer) Next() *meta.NodeInfo {
+	// only use online nodes
+	up := b.online()
+
+	// no nodes online
+	if len(up) == 0 {
+		return nil
+	}
+
+	// rollover back to the beginning
+	if b.p >= len(up) {
+		b.p = 0
+	}
+
+	d := &up[b.p]
+	b.p++
+
+	return d
+}
diff --git a/_third_party/github.com/influxdb/influxdb/balancer_test.go b/_third_party/github.com/influxdb/influxdb/balancer_test.go
new file mode 100644
index 0000000000..fca170c41f
--- /dev/null
+++ b/_third_party/github.com/influxdb/influxdb/balancer_test.go
@@ -0,0 +1,115 @@
+package influxdb_test
+
+import (
+	"fmt"
+	"testing"
+
+	"bosun.org/_third_party/github.com/influxdb/influxdb"
+	"bosun.org/_third_party/github.com/influxdb/influxdb/meta"
+)
+
+func NewNodes() []meta.NodeInfo {
+	var nodes []meta.NodeInfo
+	for i := 1; i <= 2; i++ {
+		nodes = append(nodes, meta.NodeInfo{
+			ID:   uint64(i),
+			Host: fmt.Sprintf("localhost:999%d", i),
+		})
+	}
+	return nodes
+}
+
+func TestBalancerEmptyNodes(t *testing.T) {
+	b := influxdb.NewNodeBalancer([]meta.NodeInfo{})
+	got := b.Next()
+	if got != nil {
+		t.Errorf("expected nil, got %v", got)
+	}
+}
+
+func TestBalancerUp(t *testing.T) {
+	nodes := NewNodes()
+	b := influxdb.NewNodeBalancer(nodes)
+
+	// First node in randomized round-robin order
+	first := b.Next()
+	if first == nil {
+		t.Errorf("expected datanode, got %v", first)
+	}
+
+	// Second node in randomized round-robin order
+	second := b.Next()
+	if second == nil {
+		t.Errorf("expected datanode, got %v", second)
+	}
+
+	// Should never get the same node in order twice
+	if first.ID == second.ID {
+		t.Errorf("expected first != second. 
got %v = %v", first.ID, second.ID)
+	}
+}
+
+/*
+func TestBalancerDown(t *testing.T) {
+	nodes := NewNodes()
+	b := influxdb.NewNodeBalancer(nodes)
+
+	nodes[0].Down()
+
+	// First node in randomized round-robin order
+	first := b.Next()
+	if first == nil {
+		t.Errorf("expected datanode, got %v", first)
+	}
+
+	// Second node should rollover to the first up node
+	second := b.Next()
+	if second == nil {
+		t.Errorf("expected datanode, got %v", second)
+	}
+
+	// Healthy node should be returned each time
+	if first.ID != 2 && first.ID != second.ID {
+		t.Errorf("expected first != second. got %v = %v", first.ID, second.ID)
+	}
+}
+*/
+
+/*
+func TestBalancerBackUp(t *testing.T) {
+	nodes := NewNodes()
+	b := influxdb.NewNodeBalancer(nodes)
+
+	nodes[0].Down()
+
+	for i := 0; i < 3; i++ {
+		got := b.Next()
+		if got == nil {
+			t.Errorf("expected datanode, got %v", got)
+		}
+
+		if exp := uint64(2); got.ID != exp {
+			t.Errorf("wrong node id: exp %v, got %v", exp, got.ID)
+		}
+	}
+
+	nodes[0].Up()
+
+	// First node in randomized round-robin order
+	first := b.Next()
+	if first == nil {
+		t.Errorf("expected datanode, got %v", first)
+	}
+
+	// Second node should rollover to the first up node
+	second := b.Next()
+	if second == nil {
+		t.Errorf("expected datanode, got %v", second)
+	}
+
+	// Should get both nodes returned
+	if first.ID == second.ID {
+		t.Errorf("expected first != second. got %v = %v", first.ID, second.ID)
+	}
+}
+*/
diff --git a/_third_party/github.com/influxdb/influxdb/build-docker.sh b/_third_party/github.com/influxdb/influxdb/build-docker.sh
new file mode 100755
index 0000000000..0dea62d2a1
--- /dev/null
+++ b/_third_party/github.com/influxdb/influxdb/build-docker.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+set -e -x
+
+GO_VER=${GO_VER:-1.5}
+
+docker run -it -v "${GOPATH}":/gopath -v "$(pwd)":/app -e "GOPATH=/gopath" -w /app golang:$GO_VER sh -c 'CGO_ENABLED=0 go build -a --installsuffix cgo --ldflags="-s" -o influxd ./cmd/influxd'
+
+docker build -t influxdb .
diff --git a/_third_party/github.com/influxdb/influxdb/circle-test.sh b/_third_party/github.com/influxdb/influxdb/circle-test.sh
new file mode 100755
index 0000000000..092582c443
--- /dev/null
+++ b/_third_party/github.com/influxdb/influxdb/circle-test.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+#
+# This is the InfluxDB CircleCI test script. Using this script allows total control
+# of the environment in which the build and test are run, and matches the official
+# build process for InfluxDB.
+
+BUILD_DIR=$HOME/influxdb-build
+GO_VERSION=go1.5
+PARALLELISM="-parallel 256"
+TIMEOUT="-timeout 480s"
+
+# Executes the given statement, and exits if the command returns a non-zero code.
+function exit_if_fail {
+    command=$@
+    echo "Executing '$command'"
+    $command
+    rc=$?
+    if [ $rc -ne 0 ]; then
+        echo "'$command' returned $rc."
+        exit $rc
+    fi
+}
+
+# Check that go fmt has been run.
+function check_go_fmt {
+    fmtcount=`git ls-files | grep '\.go$' | xargs gofmt -l 2>&1 | wc -l`
+    if [ $fmtcount -gt 0 ]; then
+        echo "run 'go fmt ./...' to format your source code."
+        exit 1
+    fi
+}
+
+# Check that go vet passes.
+function check_go_vet {
+    # Due to the way composites work, vet will fail for some of our tests so we ignore it
+    vetcount=`go tool vet --composites=false ./ 2>&1 | wc -l`
+    if [ $vetcount -gt 0 ]; then
+        echo "run 'go tool vet --composites=false ./' to see the errors it flags and correct your source code."
+        exit 1
+    fi
+}
+
+source $HOME/.gvm/scripts/gvm
+exit_if_fail gvm use $GO_VERSION
+
+# Set up the build directory, and then GOPATH. 
+exit_if_fail mkdir $BUILD_DIR +export GOPATH=$BUILD_DIR +exit_if_fail mkdir -p $GOPATH/src/github.com/influxdb + +# Dump some test config to the log. +echo "Test configuration" +echo "========================================" +echo "\$HOME: $HOME" +echo "\$GOPATH: $GOPATH" +echo "\$CIRCLE_BRANCH: $CIRCLE_BRANCH" + +# Move the checked-out source to a better location. +exit_if_fail mv $HOME/influxdb $GOPATH/src/github.com/influxdb +exit_if_fail cd $GOPATH/src/github.com/influxdb/influxdb +exit_if_fail git branch --set-upstream-to=origin/$CIRCLE_BRANCH $CIRCLE_BRANCH + +# Install the code. +exit_if_fail cd $GOPATH/src/github.com/influxdb/influxdb +exit_if_fail go get -t -d -v ./... +exit_if_fail git checkout $CIRCLE_BRANCH # 'go get' switches to master. Who knew? Switch back. +check_go_fmt +check_go_vet +exit_if_fail go build -v ./... + +# Run the tests. +case $CIRCLE_NODE_INDEX in + 0) + go test $PARALLELISM $TIMEOUT -v ./... 2>&1 | tee $CIRCLE_ARTIFACTS/test_logs.txt + rc=${PIPESTATUS[0]} + ;; + 1) + # 32bit tests. + if [[ -e ~/docker/image.tar ]]; then docker load -i ~/docker/image.tar; fi + docker build -f Dockerfile_test_ubuntu32 -t ubuntu-32-influxdb-test . + mkdir -p ~/docker; docker save ubuntu-32-influxdb-test > ~/docker/image.tar + exit_if_fail docker build -f Dockerfile_test_ubuntu32 -t ubuntu-32-influxdb-test . + docker run -v $(pwd):/root/go/src/github.com/influxdb/influxdb -e "CI=${CI}" \ + -v ${CIRCLE_ARTIFACTS}:/tmp/artifacts \ + -t ubuntu-32-influxdb-test bash \ + -c "cd /root/go/src/github.com/influxdb/influxdb && go get -t -d -v ./... && go build -v ./... && go test ${PARALLELISM} ${TIMEOUT} -v ./... 2>&1 | tee /tmp/artifacts/test_logs_i386.txt && exit \${PIPESTATUS[0]}" + rc=$? + ;; + 2) + GORACE="halt_on_error=1" go test $PARALLELISM $TIMEOUT -v -race ./... 
2>&1 | tee $CIRCLE_ARTIFACTS/test_logs_race.txt + rc=${PIPESTATUS[0]} + ;; +esac + +exit $rc diff --git a/_third_party/github.com/influxdb/influxdb/circle.yml b/_third_party/github.com/influxdb/influxdb/circle.yml new file mode 100644 index 0000000000..01a5a161a3 --- /dev/null +++ b/_third_party/github.com/influxdb/influxdb/circle.yml @@ -0,0 +1,16 @@ +machine: + services: + - docker + pre: + - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer) + - source $HOME/.gvm/scripts/gvm; gvm install go1.5 --binary + +dependencies: + override: + - mkdir -p ~/docker + cache_directories: + - "~/docker" +test: + override: + - bash circle-test.sh: + parallel: true diff --git a/_third_party/github.com/influxdb/influxdb/client/README.md b/_third_party/github.com/influxdb/influxdb/client/README.md index 8d94a7579d..27e7afdbba 100644 --- a/_third_party/github.com/influxdb/influxdb/client/README.md +++ b/_third_party/github.com/influxdb/influxdb/client/README.md @@ -195,8 +195,8 @@ for i, row := range res[0].Series[0].Values { if err != nil { log.Fatal(err) } - val, err := row[1].(json.Number).Int64() - log.Printf("[%2d] %s: %03d\n", i, t.Format(time.Stamp), val) + val := row[1].(string) + log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val) } ``` diff --git a/_third_party/github.com/influxdb/influxdb/client/influxdb.go b/_third_party/github.com/influxdb/influxdb/client/influxdb.go index 594e73286c..efab541ccd 100644 --- a/_third_party/github.com/influxdb/influxdb/client/influxdb.go +++ b/_third_party/github.com/influxdb/influxdb/client/influxdb.go @@ -13,8 +13,7 @@ import ( "strings" "time" - "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" - "bosun.org/_third_party/github.com/influxdb/influxdb/tsdb" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" ) const ( @@ -79,6 +78,7 @@ type Config struct { Password string UserAgent string Timeout time.Duration + Precision string } // NewConfig will create a config to be used in connecting to the client @@ -95,6 +95,7 @@ type Client struct { password string httpClient *http.Client userAgent string + precision string } const ( @@ -112,6 +113,7 @@ func NewClient(c Config) (*Client, error) { password: c.Password, httpClient: &http.Client{Timeout: c.Timeout}, userAgent: c.UserAgent, + precision: c.Precision, } if client.userAgent == "" { client.userAgent = "InfluxDBClient" @@ -125,6 +127,11 @@ func (c *Client) SetAuth(u, p string) { c.password = p } +// SetPrecision will update the precision +func (c *Client) SetPrecision(precision string) { + c.precision = precision +} + // Query sends a command to the server and returns the Response func (c *Client) Query(q Query) (*Response, error) { u := c.url @@ -133,6 +140,9 @@ func (c *Client) Query(q Query) (*Response, error) { values := u.Query() values.Set("q", q.Command) values.Set("db", q.Database) + if c.precision != "" { + values.Set("epoch", c.precision) + } u.RawQuery = values.Encode() req, err := http.NewRequest("GET", u.String(), nil) @@ -314,7 +324,7 @@ func (c *Client) Ping() (time.Duration, string, error) { // Result represents a resultset returned from a single statement. type Result struct { - Series []influxql.Row + Series []models.Row Err error } @@ -322,8 +332,8 @@ type Result struct { func (r *Result) MarshalJSON() ([]byte, error) { // Define a struct that outputs "error" as a string. 
 var o struct {
-		Series []influxql.Row `json:"series,omitempty"`
-		Err    string         `json:"error,omitempty"`
+		Series []models.Row `json:"series,omitempty"`
+		Err    string       `json:"error,omitempty"`
 	}
 
 	// Copy fields to output struct.
@@ -338,8 +348,8 @@ func (r *Result) MarshalJSON() ([]byte, error) {
 // UnmarshalJSON decodes the data into the Result struct
 func (r *Result) UnmarshalJSON(b []byte) error {
 	var o struct {
-		Series []influxql.Row `json:"series,omitempty"`
-		Err    string         `json:"error,omitempty"`
+		Series []models.Row `json:"series,omitempty"`
+		Err    string       `json:"error,omitempty"`
 	}
 
 	dec := json.NewDecoder(bytes.NewBuffer(b))
@@ -449,7 +459,11 @@ func (p *Point) MarshalJSON() ([]byte, error) {
 }
 
 func (p *Point) MarshalString() string {
-	return tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time).String()
+	pt := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time)
+	if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
+		return pt.String()
+	}
+	return pt.PrecisionString(p.Precision)
 }
 
 // UnmarshalJSON decodes the data into the Point struct
diff --git a/_third_party/github.com/influxdb/influxdb/errors.go b/_third_party/github.com/influxdb/influxdb/errors.go
new file mode 100644
index 0000000000..ff925d68c9
--- /dev/null
+++ b/_third_party/github.com/influxdb/influxdb/errors.go
@@ -0,0 +1,82 @@
+package influxdb
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+var (
+	// ErrFieldsRequired is returned when a point does not have any fields.
+	ErrFieldsRequired = errors.New("fields required")
+
+	// ErrFieldTypeConflict is returned when a new field already exists with a different type.
+	ErrFieldTypeConflict = errors.New("field type conflict")
+)
+
+func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) }
+
+func ErrRetentionPolicyNotFound(name string) error {
+	return fmt.Errorf("retention policy not found: %s", name)
+}
+
+func ErrMeasurementNotFound(name string) error { return fmt.Errorf("measurement not found: %s", name) }
+
+func Errorf(format string, a ...interface{}) (err error) {
+	if _, file, line, ok := runtime.Caller(2); ok {
+		a = append(a, file, line)
+		err = fmt.Errorf(format+" (%s:%d)", a...)
+	} else {
+		err = fmt.Errorf(format, a...)
+	}
+	return
+}
+
+// IsClientError indicates whether an error is a known client error.
+func IsClientError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	if err == ErrFieldsRequired {
+		return true
+	}
+	if err == ErrFieldTypeConflict {
+		return true
+	}
+
+	if strings.Contains(err.Error(), ErrFieldTypeConflict.Error()) {
+		return true
+	}
+
+	return false
+}
+
+// mustMarshalJSON encodes a value to JSON.
+// This will panic if an error occurs. This should only be used internally when
+// an invalid marshal will cause corruption and a panic is appropriate.
+func mustMarshalJSON(v interface{}) []byte {
+	b, err := json.Marshal(v)
+	if err != nil {
+		panic("marshal: " + err.Error())
+	}
+	return b
+}
+
+// mustUnmarshalJSON decodes a value from JSON.
+// This will panic if an error occurs. This should only be used internally when
+// an invalid unmarshal will cause corruption and a panic is appropriate.
+func mustUnmarshalJSON(b []byte, v interface{}) {
+	if err := json.Unmarshal(b, v); err != nil {
+		panic("unmarshal: " + err.Error())
+	}
+}
+
+// assert will panic with a given formatted message if the given condition is false. 
+func assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assert failed: "+msg, v...)) + } +} diff --git a/_third_party/github.com/influxdb/influxdb/influxql/ast.go b/_third_party/github.com/influxdb/influxdb/influxql/ast.go index 2c139b98ac..4aea4a9a4d 100644 --- a/_third_party/github.com/influxdb/influxdb/influxql/ast.go +++ b/_third_party/github.com/influxdb/influxdb/influxql/ast.go @@ -9,6 +9,8 @@ import ( "strconv" "strings" "time" + + "bosun.org/_third_party/github.com/influxdb/influxdb/pkg/slices" ) // DataType represents the primitive data types available in InfluxQL. @@ -105,6 +107,7 @@ func (*ShowFieldKeysStatement) node() {} func (*ShowRetentionPoliciesStatement) node() {} func (*ShowMeasurementsStatement) node() {} func (*ShowSeriesStatement) node() {} +func (*ShowShardsStatement) node() {} func (*ShowStatsStatement) node() {} func (*ShowDiagnosticsStatement) node() {} func (*ShowTagKeysStatement) node() {} @@ -206,6 +209,7 @@ func (*ShowFieldKeysStatement) stmt() {} func (*ShowMeasurementsStatement) stmt() {} func (*ShowRetentionPoliciesStatement) stmt() {} func (*ShowSeriesStatement) stmt() {} +func (*ShowShardsStatement) stmt() {} func (*ShowStatsStatement) stmt() {} func (*ShowDiagnosticsStatement) stmt() {} func (*ShowTagKeysStatement) stmt() {} @@ -274,7 +278,7 @@ type SortField struct { // String returns a string representation of a sort field func (field *SortField) String() string { var buf bytes.Buffer - if field.Name == "" { + if field.Name != "" { _, _ = buf.WriteString(field.Name) _, _ = buf.WriteString(" ") } @@ -714,6 +718,18 @@ type SelectStatement struct { FillValue interface{} } +// SourceNames returns a list of source names. +func (s *SelectStatement) SourceNames() []string { + a := make([]string, 0, len(s.Sources)) + for _, src := range s.Sources { + switch src := src.(type) { + case *Measurement: + a = append(a, src.Name) + } + } + return a +} + // HasDerivative returns true if one of the function calls in the statement is a // derivative aggregate func (s *SelectStatement) HasDerivative() bool { @@ -739,6 +755,11 @@ func (s *SelectStatement) IsSimpleDerivative() bool { return false } +// TimeAscending returns true if the time field is sorted in chronological order. +func (s *SelectStatement) TimeAscending() bool { + return len(s.SortFields) == 0 || s.SortFields[0].Ascending +} + // Clone returns a deep copy of the statement. func (s *SelectStatement) Clone() *SelectStatement { clone := &SelectStatement{ @@ -855,6 +876,48 @@ func (s *SelectStatement) RewriteDistinct() { } } +// ColumnNames will walk all fields and functions and return the appropriate field names for the select statement +// while maintaining order of the field names +func (s *SelectStatement) ColumnNames() []string { + // Always set the first column to be time, even if they didn't specify it + columnNames := []string{"time"} + + // First walk each field + for _, field := range s.Fields { + switch f := field.Expr.(type) { + case *Call: + if f.Name == "top" || f.Name == "bottom" { + if len(f.Args) == 2 { + columnNames = append(columnNames, f.Name) + continue + } + // We have a special case now where we have to add the column names for the fields TOP or BOTTOM asked for as well + columnNames = slices.Union(columnNames, f.Fields(), true) + continue + } + columnNames = append(columnNames, field.Name()) + default: + // time is always first, and we already added it, so ignore it if they asked for it anywhere else. 
+ if field.Name() != "time" { + columnNames = append(columnNames, field.Name()) + } + } + } + + return columnNames +} + +// HasTimeFieldSpecified will walk all fields and determine if the user explicitly asked for time +// This is needed to determine re-write behaviors for functions like TOP and BOTTOM +func (s *SelectStatement) HasTimeFieldSpecified() bool { + for _, f := range s.Fields { + if f.Name() == "time" { + return true + } + } + return false +} + // String returns a string representation of the select statement. func (s *SelectStatement) String() string { var buf bytes.Buffer @@ -996,6 +1059,10 @@ func (s *SelectStatement) validate(tr targetRequirement) error { return err } + if err := s.validateDimensions(); err != nil { + return err + } + if err := s.validateDistinct(); err != nil { return err } @@ -1012,10 +1079,6 @@ func (s *SelectStatement) validate(tr targetRequirement) error { return err } - if err := s.validateWildcard(); err != nil { - return err - } - return nil } @@ -1027,40 +1090,155 @@ func (s *SelectStatement) validateFields() error { return nil } -func (s *SelectStatement) validateAggregates(tr targetRequirement) error { - // First, if 1 field is an aggregate, then all fields must be an aggregate. This is - // a explicit limitation of the current system. +func (s *SelectStatement) validateDimensions() error { + var dur time.Duration + for _, dim := range s.Dimensions { + switch expr := dim.Expr.(type) { + case *Call: + // Ensure the call is time() and it only has one duration argument. + // If we already have a duration + if expr.Name != "time" { + return errors.New("only time() calls allowed in dimensions") + } else if len(expr.Args) != 1 { + return errors.New("time dimension expected one argument") + } else if lit, ok := expr.Args[0].(*DurationLiteral); !ok { + return errors.New("time dimension must have one duration argument") + } else if dur != 0 { + return errors.New("multiple time dimensions not allowed") + } else { + dur = lit.Val + } + case *VarRef: + if strings.ToLower(expr.Val) == "time" { + return errors.New("time() is a function and expects at least one argument") + } + case *Wildcard: + default: + return errors.New("only time and tag dimensions allowed") + } + } + return nil +} + +// validSelectWithAggregate determines if a SELECT statement has the correct +// combination of aggregate functions combined with selected fields and tags +// Currently we don't have support for all aggregates, but aggregates that +// can be combined with fields/tags are: +// TOP, BOTTOM, MAX, MIN, FIRST, LAST +func (s *SelectStatement) validSelectWithAggregate() error { + calls := map[string]struct{}{} numAggregates := 0 for _, f := range s.Fields { - if _, ok := f.Expr.(*Call); ok { + if c, ok := f.Expr.(*Call); ok { + calls[c.Name] = struct{}{} numAggregates++ } } + // For TOP, BOTTOM, MAX, MIN, FIRST, LAST (selector functions) it is ok to ask for fields and tags + // but only if one function is specified. 
Combining multiple functions and fields and tags is not currently supported + onlySelectors := true + for k := range calls { + switch k { + case "top", "bottom", "max", "min", "first", "last": + default: + onlySelectors = false + break + } + } + if onlySelectors { + // If they only have one selector, they can have as many fields or tags as they want + if numAggregates == 1 { + return nil + } + // If they have multiple selectors, they are not allowed to have any other fields or tags specified + if numAggregates > 1 && len(s.Fields) != numAggregates { + return fmt.Errorf("mixing multiple selector functions with tags or fields is not supported") + } + } + if numAggregates != 0 && numAggregates != len(s.Fields) { return fmt.Errorf("mixing aggregate and non-aggregate queries is not supported") } + return nil +} - // Secondly, determine if specific calls have at least one and only one argument +func (s *SelectStatement) validateAggregates(tr targetRequirement) error { for _, f := range s.Fields { - if c, ok := f.Expr.(*Call); ok { - switch c.Name { + switch expr := f.Expr.(type) { + case *Call: + switch expr.Name { case "derivative", "non_negative_derivative": - if min, max, got := 1, 2, len(c.Args); got > max || got < min { - return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", c.Name, min, max, got) + if err := s.validSelectWithAggregate(); err != nil { + return err + } + if min, max, got := 1, 2, len(expr.Args); got > max || got < min { + return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", expr.Name, min, max, got) + } + // Validate that if they have grouping by time, they need a sub-call like min/max, etc. + groupByInterval, _ := s.GroupByInterval() + if groupByInterval > 0 { + if _, ok := expr.Args[0].(*Call); !ok { + return fmt.Errorf("aggregate function required inside the call to %s", expr.Name) + } } + case "percentile": - if exp, got := 2, len(c.Args); got != exp { - return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", c.Name, exp, got) + if err := s.validSelectWithAggregate(); err != nil { + return err + } + if exp, got := 2, len(expr.Args); got != exp { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) + } + _, ok := expr.Args[1].(*NumberLiteral) + if !ok { + return fmt.Errorf("expected float argument in percentile()") + } + case "top", "bottom": + if exp, got := 2, len(expr.Args); got < exp { + return fmt.Errorf("invalid number of arguments for %s, expected at least %d, got %d", expr.Name, exp, got) + } + if len(expr.Args) > 1 { + callLimit, ok := expr.Args[len(expr.Args)-1].(*NumberLiteral) + if !ok { + return fmt.Errorf("expected integer as last argument in %s(), found %s", expr.Name, expr.Args[len(expr.Args)-1]) + } + // Check if they asked for a limit smaller than what they passed into the call + if int64(callLimit.Val) > int64(s.Limit) && s.Limit != 0 { + return fmt.Errorf("limit (%d) in %s function can not be larger than the LIMIT (%d) in the select statement", int64(callLimit.Val), expr.Name, int64(s.Limit)) + } + + for _, v := range expr.Args[:len(expr.Args)-1] { + if _, ok := v.(*VarRef); !ok { + return fmt.Errorf("only fields or tags are allowed in %s(), found %s", expr.Name, v) + } + } } default: - if exp, got := 1, len(c.Args); got != exp { - return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", c.Name, exp, got) + if err := s.validSelectWithAggregate(); err != nil { + 
return err + } + if exp, got := 1, len(expr.Args); got != exp { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) + } + switch fc := expr.Args[0].(type) { + case *VarRef: + // do nothing + case *Call: + if fc.Name != "distinct" { + return fmt.Errorf("expected field argument in %s()", expr.Name) + } + case *Distinct: + if expr.Name != "count" { + return fmt.Errorf("expected field argument in %s()", expr.Name) + } + default: + return fmt.Errorf("expected field argument in %s()", expr.Name) } } } } - // Now, check that we have valid duration and where clauses for aggregates + // Check that we have valid duration and where clauses for aggregates // fetch the group by duration groupByDuration, _ := s.GroupByInterval() @@ -1079,13 +1257,6 @@ func (s *SelectStatement) validateAggregates(tr targetRequirement) error { return nil } -func (s *SelectStatement) validateWildcard() error { - if s.HasWildcard() && len(s.Fields) > 1 { - return fmt.Errorf("wildcards can not be combined with other fields") - } - return nil -} - func (s *SelectStatement) HasDistinct() bool { // determine if we have a call named distinct for _, f := range s.Fields { @@ -1385,6 +1556,25 @@ func (s *SelectStatement) NamesInDimension() []string { return a } +// LimitTagSets returns a tag set list with SLIMIT and SOFFSET applied. +func (s *SelectStatement) LimitTagSets(a []*TagSet) []*TagSet { + // Ignore if no limit or offset is specified. + if s.SLimit == 0 && s.SOffset == 0 { + return a + } + + // If offset is beyond the number of tag sets then return nil. + if s.SOffset > len(a) { + return nil + } + + // Clamp limit to the max number of tag sets. + if s.SOffset+s.SLimit > len(a) { + s.SLimit = len(a) - s.SOffset + } + return a[s.SOffset : s.SOffset+s.SLimit] +} + // walkNames will walk the Expr and return the database fields func walkNames(exp Expr) []string { switch expr := exp.(type) { @@ -1421,6 +1611,15 @@ func (s *SelectStatement) FunctionCalls() []*Call { return a } +// FunctionCallsByPosition returns the Call objects from the query in the order they appear in the select statement +func (s *SelectStatement) FunctionCallsByPosition() [][]*Call { + var a [][]*Call + for _, f := range s.Fields { + a = append(a, walkFunctionCalls(f.Expr)) + } + return a +} + // walkFunctionCalls walks the Field of a query for any function calls made func walkFunctionCalls(exp Expr) []*Call { switch expr := exp.(type) { @@ -1508,6 +1707,9 @@ func (t *Target) String() string { var buf bytes.Buffer _, _ = buf.WriteString("INTO ") _, _ = buf.WriteString(t.Measurement.String()) + if t.Measurement.Name == "" { + _, _ = buf.WriteString(":MEASUREMENT") + } return buf.String() } @@ -1817,18 +2019,19 @@ func (s *ShowRetentionPoliciesStatement) RequiredPrivileges() ExecutionPrivilege return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} } -// ShowRetentionPoliciesStatement represents a command for displaying stats for a given server. +// ShowStats statement displays statistics for a given module. type ShowStatsStatement struct { - // Hostname or IP of the server for stats. - Host string + // Module + Module string } // String returns a string representation of a ShowStatsStatement. 
 func (s *ShowStatsStatement) String() string {
 	var buf bytes.Buffer
 	_, _ = buf.WriteString("SHOW STATS ")
-	if s.Host != "" {
-		_, _ = buf.WriteString(s.Host)
+	if s.Module != "" {
+		_, _ = buf.WriteString("FOR ")
+		_, _ = buf.WriteString(s.Module)
 	}
 	return buf.String()
 }
@@ -1838,11 +2041,33 @@ func (s *ShowStatsStatement) RequiredPrivileges() ExecutionPrivileges {
 	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
 }
 
+// ShowShardsStatement represents a command for displaying shards in the cluster.
+type ShowShardsStatement struct{}
+
+// String returns a string representation.
+func (s *ShowShardsStatement) String() string { return "SHOW SHARDS" }
+
+// RequiredPrivileges returns the privileges required to execute the statement.
+func (s *ShowShardsStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
+}
+
 // ShowDiagnosticsStatement represents a command for show node diagnostics.
-type ShowDiagnosticsStatement struct{}
+type ShowDiagnosticsStatement struct {
+	// Module
+	Module string
+}
 
 // String returns a string representation of the ShowDiagnosticsStatement.
-func (s *ShowDiagnosticsStatement) String() string { return "SHOW DIAGNOSTICS" }
+func (s *ShowDiagnosticsStatement) String() string {
+	var buf bytes.Buffer
+	_, _ = buf.WriteString("SHOW DIAGNOSTICS ")
+	if s.Module != "" {
+		_, _ = buf.WriteString("FOR ")
+		_, _ = buf.WriteString(s.Module)
+	}
+	return buf.String()
+}
 
 // RequiredPrivileges returns the privilege required to execute a ShowDiagnosticsStatement
 func (s *ShowDiagnosticsStatement) RequiredPrivileges() ExecutionPrivileges {
@@ -1860,12 +2085,17 @@ type ShowTagKeysStatement struct {
 	// Fields to sort results by
 	SortFields SortFields
 
-	// Maximum number of rows to be returned.
-	// Unlimited if zero.
+	// Maximum number of tag keys per measurement. Unlimited if zero.
 	Limit int
 
-	// Returns rows starting at an offset from the first row.
+	// Returns tag keys starting at an offset from the first row.
 	Offset int
+
+	// Maximum number of series to be returned. Unlimited if zero.
+	SLimit int
+
+	// Returns series starting at an offset from the first one.
+	SOffset int
 }
 
 // String returns a string representation of the statement.
@@ -1893,6 +2123,14 @@ func (s *ShowTagKeysStatement) String() string {
 		_, _ = buf.WriteString(" OFFSET ")
 		_, _ = buf.WriteString(strconv.Itoa(s.Offset))
 	}
+	if s.SLimit > 0 {
+		_, _ = buf.WriteString(" SLIMIT ")
+		_, _ = buf.WriteString(strconv.Itoa(s.SLimit))
+	}
+	if s.SOffset > 0 {
+		_, _ = buf.WriteString(" SOFFSET ")
+		_, _ = buf.WriteString(strconv.Itoa(s.SOffset))
+	}
 	return buf.String()
 }
@@ -2107,37 +2345,21 @@ func (a Dimensions) String() string {
 
 // Normalize returns the interval and tag dimensions separately.
 // Returns 0 if no time interval is specified.
-// Returns an error if multiple time dimensions exist or if non-VarRef dimensions are specified.
-func (a Dimensions) Normalize() (time.Duration, []string, error) {
+func (a Dimensions) Normalize() (time.Duration, []string) {
 	var dur time.Duration
 	var tags []string
 
 	for _, dim := range a {
 		switch expr := dim.Expr.(type) {
 		case *Call:
-			// Ensure the call is time() and it only has one duration argument. 
- // If we already have a duration - if expr.Name != "time" { - return 0, nil, errors.New("only time() calls allowed in dimensions") - } else if len(expr.Args) != 1 { - return 0, nil, errors.New("time dimension expected one argument") - } else if lit, ok := expr.Args[0].(*DurationLiteral); !ok { - return 0, nil, errors.New("time dimension must have one duration argument") - } else if dur != 0 { - return 0, nil, errors.New("multiple time dimensions not allowed") - } else { - dur = lit.Val - } - + lit, _ := expr.Args[0].(*DurationLiteral) + dur = lit.Val case *VarRef: tags = append(tags, expr.Val) - - default: - return 0, nil, errors.New("only time and tag dimensions allowed") } } - return dur, tags, nil + return dur, tags } // Dimension represents an expression that a select statement is grouped by. @@ -2166,6 +2388,7 @@ type Measurement struct { RetentionPolicy string Name string Regex *RegexLiteral + IsTarget bool } // String returns a string representation of the measurement. @@ -2224,6 +2447,47 @@ func (c *Call) String() string { return fmt.Sprintf("%s(%s)", c.Name, strings.Join(str, ", ")) } +// Fields will extract any field names from the call. Only specific calls support this. +func (c *Call) Fields() []string { + switch c.Name { + case "top", "bottom": + // maintain the order the user specified in the query + keyMap := make(map[string]struct{}) + keys := []string{} + for i, a := range c.Args { + if i == 0 { + // special case, first argument is always the name of the function regardless of the field name + keys = append(keys, c.Name) + continue + } + switch v := a.(type) { + case *VarRef: + if _, ok := keyMap[v.Val]; !ok { + keyMap[v.Val] = struct{}{} + keys = append(keys, v.Val) + } + } + } + return keys + case "min", "max", "first", "last", "sum", "mean": + // maintain the order the user specified in the query + keyMap := make(map[string]struct{}) + keys := []string{} + for _, a := range c.Args { + switch v := a.(type) { + case *VarRef: + if _, ok := keyMap[v.Val]; !ok { + keyMap[v.Val] = struct{}{} + keys = append(keys, v.Val) + } + } + } + return keys + default: + panic(fmt.Sprintf("*call.Fields is unable to provide information on %s", c.Name)) + } +} + // Distinct represents a DISTINCT expression. type Distinct struct { // Identifier following DISTINCT @@ -2779,6 +3043,13 @@ func evalBinaryExpr(expr *BinaryExpr, m map[string]interface{}) interface{} { return nil } +// EvalBool evaluates expr and returns true if result is a boolean true. +// Otherwise returns false. +func EvalBool(expr Expr, m map[string]interface{}) bool { + v, _ := Eval(expr, m).(bool) + return v +} + // Reduce evaluates expr using the available values in valuer. // References that don't exist in valuer are ignored. 
 func Reduce(expr Expr, valuer Valuer) Expr {
diff --git a/_third_party/github.com/influxdb/influxdb/influxql/ast_test.go b/_third_party/github.com/influxdb/influxdb/influxql/ast_test.go
index f6337c79b3..cd5cc2f6ff 100644
--- a/_third_party/github.com/influxdb/influxdb/influxql/ast_test.go
+++ b/_third_party/github.com/influxdb/influxdb/influxql/ast_test.go
@@ -451,7 +451,7 @@ func TestSelectStatement_IsRawQuerySet(t *testing.T) {
 			isRaw: false,
 		},
 		{
-			stmt:  "select mean(*) from foo group by *",
+			stmt:  "select mean(value) from foo group by *",
 			isRaw: false,
 		},
 	}
diff --git a/_third_party/github.com/influxdb/influxdb/influxql/parser.go b/_third_party/github.com/influxdb/influxdb/influxql/parser.go
index bafe42d83d..7809689d85 100644
--- a/_third_party/github.com/influxdb/influxdb/influxql/parser.go
+++ b/_third_party/github.com/influxdb/influxdb/influxql/parser.go
@@ -7,6 +7,7 @@ import (
 	"io"
 	"math"
 	"regexp"
+	"sort"
 	"strconv"
 	"strings"
 	"time"
@@ -129,6 +130,8 @@ func (p *Parser) parseShowStatement() (Statement, error) {
 			return nil, newParseError(tokstr(tok, lit), []string{"POLICIES"}, pos)
 		case SERIES:
 			return p.parseShowSeriesStatement()
+		case SHARDS:
+			return p.parseShowShardsStatement()
 		case STATS:
 			return p.parseShowStatsStatement()
 		case DIAGNOSTICS:
@@ -145,7 +148,24 @@ func (p *Parser) parseShowStatement() (Statement, error) {
 		return p.parseShowUsersStatement()
 	}
 
-	return nil, newParseError(tokstr(tok, lit), []string{"CONTINUOUS", "DATABASES", "FIELD", "GRANTS", "MEASUREMENTS", "RETENTION", "SERIES", "SERVERS", "TAG", "USERS"}, pos)
+	showQueryKeywords := []string{
+		"CONTINUOUS",
+		"DATABASES",
+		"FIELD",
+		"GRANTS",
+		"MEASUREMENTS",
+		"RETENTION",
+		"SERIES",
+		"SERVERS",
+		"TAG",
+		"USERS",
+		"STATS",
+		"DIAGNOSTICS",
+		"SHARDS",
+	}
+	sort.Strings(showQueryKeywords)
+
+	return nil, newParseError(tokstr(tok, lit), showQueryKeywords, pos)
 }
 
 // parseCreateStatement parses a string and returns a create statement.
@@ -488,6 +508,9 @@ func (p *Parser) parseSegmentedIdents() ([]string, error) {
 			if ch := p.peekRune(); ch == '/' {
 				// Next segment is a regex so we're done.
 				break
+			} else if ch == ':' {
+				// Next segment is context-specific so let caller handle it.
+				break
 			} else if ch == '.' {
 				// Add an empty identifier.
 				idents = append(idents, "")
@@ -799,7 +822,18 @@ func (p *Parser) parseTarget(tr targetRequirement) (*Target, error) {
 		return nil, err
 	}
 
-	t := &Target{Measurement: &Measurement{}}
+	if len(idents) < 3 {
+		// Check for source measurement reference.
+		if ch := p.peekRune(); ch == ':' {
+			if err := p.parseTokens([]Token{COLON, MEASUREMENT}); err != nil {
+				return nil, err
+			}
+			// Append empty measurement name.
+			idents = append(idents, "")
+		}
+	}
+
+	t := &Target{Measurement: &Measurement{IsTarget: true}}
 
 	switch len(idents) {
 	case 1:
@@ -963,6 +997,16 @@ func (p *Parser) parseShowTagKeysStatement() (*ShowTagKeysStatement, error) {
 		return nil, err
 	}
 
+	// Parse series limit: "SLIMIT <n>".
+	if stmt.SLimit, err = p.parseOptionalTokenAndInt(SLIMIT); err != nil {
+		return nil, err
+	}
+
+	// Parse series offset: "SOFFSET <n>".
+	if stmt.SOffset, err = p.parseOptionalTokenAndInt(SOFFSET); err != nil {
+		return nil, err
+	}
+
 	return stmt, nil
 }
@@ -1395,14 +1439,20 @@ func (p *Parser) parseRetentionPolicy() (name string, dfault bool, err error) {
 	return
 }
 
+// parseShowShardsStatement parses a string for a "SHOW SHARDS" statement.
+// This function assumes the "SHOW SHARDS" tokens have already been consumed. 
+func (p *Parser) parseShowShardsStatement() (*ShowShardsStatement, error) { + return &ShowShardsStatement{}, nil +} + // parseShowStatsStatement parses a string and returns a ShowStatsStatement. // This function assumes the "SHOW STATS" tokens have already been consumed. func (p *Parser) parseShowStatsStatement() (*ShowStatsStatement, error) { stmt := &ShowStatsStatement{} var err error - if tok, _, _ := p.scanIgnoreWhitespace(); tok == ON { - stmt.Host, err = p.parseString() + if tok, _, _ := p.scanIgnoreWhitespace(); tok == FOR { + stmt.Module, err = p.parseString() } else { p.unscan() } @@ -1413,7 +1463,15 @@ func (p *Parser) parseShowStatsStatement() (*ShowStatsStatement, error) { // parseShowDiagnostics parses a string and returns a ShowDiagnosticsStatement. func (p *Parser) parseShowDiagnosticsStatement() (*ShowDiagnosticsStatement, error) { stmt := &ShowDiagnosticsStatement{} - return stmt, nil + var err error + + if tok, _, _ := p.scanIgnoreWhitespace(); tok == FOR { + stmt.Module, err = p.parseString() + } else { + p.unscan() + } + + return stmt, err } // parseDropContinuousQueriesStatement parses a string and returns a DropContinuousQueryStatement. @@ -1451,13 +1509,6 @@ func (p *Parser) parseDropContinuousQueryStatement() (*DropContinuousQueryStatem func (p *Parser) parseFields() (Fields, error) { var fields Fields - // Check for "*" (i.e., "all fields") - if tok, _, _ := p.scanIgnoreWhitespace(); tok == MUL { - fields = append(fields, &Field{&Wildcard{}, ""}) - return fields, nil - } - p.unscan() - for { // Parse the field. f, err := p.parseField() @@ -1787,24 +1838,29 @@ func (p *Parser) parseOrderBy() (SortFields, error) { func (p *Parser) parseSortFields() (SortFields, error) { var fields SortFields - // If first token is ASC or DESC, all fields are sorted. - if tok, pos, lit := p.scanIgnoreWhitespace(); tok == ASC || tok == DESC { - if tok == DESC { - // Token must be ASC, until other sort orders are supported. - return nil, errors.New("only ORDER BY time ASC supported at this time") + tok, pos, lit := p.scanIgnoreWhitespace() + + switch tok { + // The first field after an order by may not have a field name (e.g. ORDER BY ASC) + case ASC, DESC: + fields = append(fields, &SortField{Ascending: (tok == ASC)}) + // If it's a token, parse it as a sort field. At least one is required. + case IDENT: + p.unscan() + field, err := p.parseSortField() + if err != nil { + return nil, err } - return append(fields, &SortField{Ascending: (tok == ASC)}), nil - } else if tok != IDENT { - return nil, newParseError(tokstr(tok, lit), []string{"identifier", "ASC", "DESC"}, pos) - } - p.unscan() - // At least one field is required. - field, err := p.parseSortField() - if err != nil { - return nil, err + if lit != "time" { + return nil, errors.New("only ORDER BY time supported at this time") + } + + fields = append(fields, field) + // Parse error... + default: + return nil, newParseError(tokstr(tok, lit), []string{"identifier", "ASC", "DESC"}, pos) } - fields = append(fields, field) // Parse additional fields. for { @@ -1823,9 +1879,8 @@ func (p *Parser) parseSortFields() (SortFields, error) { fields = append(fields, field) } - // First SortField must be time ASC, until other sort orders are supported. 
- if len(fields) > 1 || fields[0].Name != "time" || !fields[0].Ascending { - return nil, errors.New("only ORDER BY time ASC supported at this time") + if len(fields) > 1 { + return nil, errors.New("only ORDER BY time supported at this time") } return fields, nil diff --git a/_third_party/github.com/influxdb/influxdb/influxql/parser_test.go b/_third_party/github.com/influxdb/influxdb/influxql/parser_test.go index 0660257631..2fbc33d937 100644 --- a/_third_party/github.com/influxdb/influxdb/influxql/parser_test.go +++ b/_third_party/github.com/influxdb/influxdb/influxql/parser_test.go @@ -73,11 +73,45 @@ func TestParser_ParseStatement(t *testing.T) { Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, }, }, + { + s: `SELECT * FROM myseries GROUP BY *`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + {Expr: &influxql.Wildcard{}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Dimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}}, + }, + }, + { + s: `SELECT field1, * FROM myseries GROUP BY *`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + {Expr: &influxql.VarRef{Val: "field1"}}, + {Expr: &influxql.Wildcard{}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Dimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}}, + }, + }, + { + s: `SELECT *, field1 FROM myseries GROUP BY *`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + {Expr: &influxql.Wildcard{}}, + {Expr: &influxql.VarRef{Val: "field1"}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Dimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}}, + }, + }, // SELECT statement { - skip: true, - s: fmt.Sprintf(`SELECT mean(field1), sum(field2) ,count(field3) AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' and time > '%s' GROUP BY time(10h) ORDER BY ASC LIMIT 20 OFFSET 10;`, now.UTC().Format(time.RFC3339Nano)), + s: fmt.Sprintf(`SELECT mean(field1), sum(field2) ,count(field3) AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' and time > '%s' GROUP BY time(10h) ORDER BY DESC LIMIT 20 OFFSET 10;`, now.UTC().Format(time.RFC3339Nano)), stmt: &influxql.SelectStatement{ IsRawQuery: false, Fields: []*influxql.Field{ @@ -101,12 +135,32 @@ func TestParser_ParseStatement(t *testing.T) { }, Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 10 * time.Hour}}}}}, SortFields: []*influxql.SortField{ - {Ascending: true}, + {Ascending: false}, }, Limit: 20, Offset: 10, }, }, + { + s: `SELECT "foo.bar.baz" AS foo FROM myseries`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + {Expr: &influxql.VarRef{Val: "foo.bar.baz"}, Alias: "foo"}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + }, + }, + { + s: `SELECT "foo.bar.baz" AS foo FROM foo`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + {Expr: &influxql.VarRef{Val: "foo.bar.baz"}, Alias: "foo"}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "foo"}}, + }, + }, // derivative { @@ -120,6 +174,22 @@ func TestParser_ParseStatement(t *testing.T) { }, }, + { + s: fmt.Sprintf(`SELECT derivative(field1, 1h) FROM myseries WHERE time > '%s'`, now.UTC().Format(time.RFC3339Nano)), + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: 
&influxql.Call{Name: "derivative", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.DurationLiteral{Val: time.Hour}}}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Condition: &influxql.BinaryExpr{ + Op: influxql.GT, + LHS: &influxql.VarRef{Val: "time"}, + RHS: &influxql.TimeLiteral{Val: now.UTC()}, + }, + }, + }, + { s: `SELECT derivative(mean(field1), 1h) FROM myseries;`, stmt: &influxql.SelectStatement{ @@ -214,6 +284,65 @@ func TestParser_ParseStatement(t *testing.T) { }, }, + // select percentile statements + { + s: `select percentile("field1", 2.0) from cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{Name: "percentile", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2.0}}}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + }, + }, + + // select top statements + { + s: `select top("field1", 2) from cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + }, + }, + + { + s: `select top(field1, 2) from cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + }, + }, + + { + s: `select top(field1, 2), tag1 from cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}}, + {Expr: &influxql.VarRef{Val: "tag1"}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + }, + }, + + { + s: `select top(field1, tag1, 2), tag1 from cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.VarRef{Val: "tag1"}, &influxql.NumberLiteral{Val: 2}}}}, + {Expr: &influxql.VarRef{Val: "tag1"}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + }, + }, + // select distinct statements { s: `select distinct(field1) from cpu`, @@ -587,17 +716,17 @@ func TestParser_ParseStatement(t *testing.T) { // SHOW SERIES WHERE with ORDER BY and LIMIT { skip: true, - s: `SHOW SERIES WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`, + s: `SHOW SERIES WHERE region = 'order by desc' ORDER BY DESC, field1, field2 DESC LIMIT 10`, stmt: &influxql.ShowSeriesStatement{ Condition: &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "region"}, - RHS: &influxql.StringLiteral{Val: "uswest"}, + RHS: &influxql.StringLiteral{Val: "order by desc"}, }, SortFields: []*influxql.SortField{ - {Ascending: true}, - {Name: "field1"}, - {Name: "field2"}, + &influxql.SortField{Ascending: false}, + &influxql.SortField{Name: "field1", Ascending: true}, + &influxql.SortField{Name: "field2"}, }, Limit: 10, }, @@ -638,6 +767,74 @@ func TestParser_ParseStatement(t *testing.T) { }, }, + // SHOW TAG KEYS with LIMIT + { + s: `SHOW TAG KEYS FROM src LIMIT 2`, + stmt: &influxql.ShowTagKeysStatement{ + Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}, 
+ Limit: 2, + }, + }, + + // SHOW TAG KEYS with OFFSET + { + s: `SHOW TAG KEYS FROM src OFFSET 1`, + stmt: &influxql.ShowTagKeysStatement{ + Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}, + Offset: 1, + }, + }, + + // SHOW TAG KEYS with LIMIT and OFFSET + { + s: `SHOW TAG KEYS FROM src LIMIT 2 OFFSET 1`, + stmt: &influxql.ShowTagKeysStatement{ + Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}, + Limit: 2, + Offset: 1, + }, + }, + + // SHOW TAG KEYS with SLIMIT + { + s: `SHOW TAG KEYS FROM src SLIMIT 2`, + stmt: &influxql.ShowTagKeysStatement{ + Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}, + SLimit: 2, + }, + }, + + // SHOW TAG KEYS with SOFFSET + { + s: `SHOW TAG KEYS FROM src SOFFSET 1`, + stmt: &influxql.ShowTagKeysStatement{ + Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}, + SOffset: 1, + }, + }, + + // SHOW TAG KEYS with SLIMIT and SOFFSET + { + s: `SHOW TAG KEYS FROM src SLIMIT 2 SOFFSET 1`, + stmt: &influxql.ShowTagKeysStatement{ + Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}, + SLimit: 2, + SOffset: 1, + }, + }, + + // SHOW TAG KEYS with LIMIT, OFFSET, SLIMIT, and SOFFSET + { + s: `SHOW TAG KEYS FROM src LIMIT 4 OFFSET 3 SLIMIT 2 SOFFSET 1`, + stmt: &influxql.ShowTagKeysStatement{ + Sources: []influxql.Source{&influxql.Measurement{Name: "src"}}, + Limit: 4, + Offset: 3, + SLimit: 2, + SOffset: 1, + }, + }, + // SHOW TAG KEYS FROM // { s: `SHOW TAG KEYS FROM /[cg]pu/`, @@ -830,7 +1027,7 @@ func TestParser_ParseStatement(t *testing.T) { Database: "testdb", Source: &influxql.SelectStatement{ Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}}}, - Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1"}}, + Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1", IsTarget: true}}, Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, Dimensions: []*influxql.Dimension{ { @@ -854,7 +1051,7 @@ func TestParser_ParseStatement(t *testing.T) { Source: &influxql.SelectStatement{ IsRawQuery: true, Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}}, - Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1"}}, + Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1", IsTarget: true}}, Sources: []influxql.Source{&influxql.Measurement{Name: "cpu_load_short"}}, }, }, @@ -869,7 +1066,7 @@ func TestParser_ParseStatement(t *testing.T) { Source: &influxql.SelectStatement{ Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}}}, Target: &influxql.Target{ - Measurement: &influxql.Measurement{RetentionPolicy: "1h.policy1", Name: "cpu.load"}, + Measurement: &influxql.Measurement{RetentionPolicy: "1h.policy1", Name: "cpu.load", IsTarget: true}, }, Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, Dimensions: []*influxql.Dimension{ @@ -896,7 +1093,7 @@ func TestParser_ParseStatement(t *testing.T) { IsRawQuery: true, Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "value"}}}, Target: &influxql.Target{ - Measurement: &influxql.Measurement{RetentionPolicy: "policy1", Name: "value"}, + Measurement: &influxql.Measurement{RetentionPolicy: "policy1", Name: "value", IsTarget: true}, }, Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, }, @@ -914,13 +1111,39 @@ func TestParser_ParseStatement(t *testing.T) { Fields: []*influxql.Field{{Expr: 
&influxql.VarRef{Val: "transmit_rx"}}, {Expr: &influxql.VarRef{Val: "transmit_tx"}}}, Target: &influxql.Target{ - Measurement: &influxql.Measurement{RetentionPolicy: "policy1", Name: "network"}, + Measurement: &influxql.Measurement{RetentionPolicy: "policy1", Name: "network", IsTarget: true}, }, Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, }, }, }, + // CREATE CONTINUOUS QUERY with backreference measurement name + { + s: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT mean(value) INTO "policy1".:measurement FROM /^[a-z]+.*/ GROUP BY time(1m) END`, + stmt: &influxql.CreateContinuousQueryStatement{ + Name: "myquery", + Database: "testdb", + Source: &influxql.SelectStatement{ + Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "mean", Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}}, + Target: &influxql.Target{ + Measurement: &influxql.Measurement{RetentionPolicy: "policy1", IsTarget: true}, + }, + Sources: []influxql.Source{&influxql.Measurement{Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`^[a-z]+.*`)}}}, + Dimensions: []*influxql.Dimension{ + { + Expr: &influxql.Call{ + Name: "time", + Args: []influxql.Expr{ + &influxql.DurationLiteral{Val: 1 * time.Minute}, + }, + }, + }, + }, + }, + }, + }, + // CREATE DATABASE statement { s: `CREATE DATABASE testdb`, @@ -1189,20 +1412,20 @@ func TestParser_ParseStatement(t *testing.T) { { s: `SHOW STATS`, stmt: &influxql.ShowStatsStatement{ - Host: "", + Module: "", }, }, { - s: `SHOW STATS ON 'servera'`, + s: `SHOW STATS FOR 'cluster'`, stmt: &influxql.ShowStatsStatement{ - Host: "servera", + Module: "cluster", }, }, + + // SHOW SHARDS { - s: `SHOW STATS ON '192.167.1.44'`, - stmt: &influxql.ShowStatsStatement{ - Host: "192.167.1.44", - }, + s: `SHOW SHARDS`, + stmt: &influxql.ShowShardsStatement{}, }, // SHOW DIAGNOSTICS @@ -1210,6 +1433,12 @@ func TestParser_ParseStatement(t *testing.T) { s: `SHOW DIAGNOSTICS`, stmt: &influxql.ShowDiagnosticsStatement{}, }, + { + s: `SHOW DIAGNOSTICS FOR 'build'`, + stmt: &influxql.ShowDiagnosticsStatement{ + Module: "build", + }, + }, // Errors {s: ``, err: `found EOF, expected SELECT, DELETE, SHOW, CREATE, DROP, GRANT, REVOKE, ALTER, SET at line 1, char 1`}, @@ -1221,6 +1450,21 @@ func TestParser_ParseStatement(t *testing.T) { {s: `SELECT field1 FROM myseries GROUP`, err: `found EOF, expected BY at line 1, char 35`}, {s: `SELECT field1 FROM myseries LIMIT`, err: `found EOF, expected number at line 1, char 35`}, {s: `SELECT field1 FROM myseries LIMIT 10.5`, err: `fractional parts not allowed in LIMIT at line 1, char 35`}, + {s: `SELECT top() FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 0`}, + {s: `SELECT top(field1) FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 1`}, + {s: `SELECT top(field1,foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`}, + {s: `SELECT top(field1,host,server,foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`}, + {s: `SELECT top(field1,5,server,2) FROM myseries`, err: `only fields or tags are allowed in top(), found 5.000`}, + {s: `SELECT top(field1,max(foo),server,2) FROM myseries`, err: `only fields or tags are allowed in top(), found max(foo)`}, + {s: `SELECT bottom() FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 0`}, + {s: `SELECT bottom(field1) FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, + {s: `SELECT 
bottom(field1,foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`}, + {s: `SELECT bottom(field1,host,server,foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`}, + {s: `SELECT bottom(field1,5,server,2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found 5.000`}, + {s: `SELECT bottom(field1,max(foo),server,2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found max(foo)`}, + {s: `SELECT percentile() FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 0`}, + {s: `SELECT percentile(field1) FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 1`}, + {s: `SELECT percentile(field1, foo) FROM myseries`, err: `expected float argument in percentile()`}, {s: `SELECT field1 FROM myseries OFFSET`, err: `found EOF, expected number at line 1, char 36`}, {s: `SELECT field1 FROM myseries OFFSET 10.5`, err: `fractional parts not allowed in OFFSET at line 1, char 36`}, {s: `SELECT field1 FROM myseries ORDER`, err: `found EOF, expected BY at line 1, char 35`}, @@ -1228,19 +1472,20 @@ func TestParser_ParseStatement(t *testing.T) { {s: `SELECT field1 FROM myseries ORDER BY /`, err: `found /, expected identifier, ASC, DESC at line 1, char 38`}, {s: `SELECT field1 FROM myseries ORDER BY 1`, err: `found 1, expected identifier, ASC, DESC at line 1, char 38`}, {s: `SELECT field1 FROM myseries ORDER BY time ASC,`, err: `found EOF, expected identifier at line 1, char 47`}, - {s: `SELECT field1 FROM myseries ORDER BY DESC`, err: `only ORDER BY time ASC supported at this time`}, - {s: `SELECT field1 FROM myseries ORDER BY field1`, err: `only ORDER BY time ASC supported at this time`}, - {s: `SELECT field1 FROM myseries ORDER BY time DESC`, err: `only ORDER BY time ASC supported at this time`}, - {s: `SELECT field1 FROM myseries ORDER BY time, field1`, err: `only ORDER BY time ASC supported at this time`}, + {s: `SELECT field1 FROM myseries ORDER BY time, field1`, err: `only ORDER BY time supported at this time`}, {s: `SELECT field1 AS`, err: `found EOF, expected identifier at line 1, char 18`}, {s: `SELECT field1 FROM foo group by time(1s)`, err: `GROUP BY requires at least one aggregate function`}, {s: `SELECT count(value), value FROM foo`, err: `mixing aggregate and non-aggregate queries is not supported`}, {s: `SELECT count(value) FROM foo group by time(1s)`, err: `aggregate functions with GROUP BY time require a WHERE time clause`}, {s: `SELECT count(value) FROM foo group by time(1s) where host = 'hosta.influxdb.org'`, err: `aggregate functions with GROUP BY time require a WHERE time clause`}, + {s: `SELECT count(value) FROM foo group by time`, err: `time() is a function and expects at least one argument`}, + {s: `SELECT count(value) FROM foo group by 'time'`, err: `only time and tag dimensions allowed`}, + {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time()`, err: `time dimension expected one argument`}, + {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(b)`, err: `time dimension must have one duration argument`}, + {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s), time(2s)`, err: `multiple time dimensions not allowed`}, {s: `SELECT field1 FROM 12`, err: `found 12, expected identifier at line 1, char 20`}, {s: `SELECT 
1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 FROM myseries`, err: `unable to parse number at line 1, char 8`}, {s: `SELECT 10.5h FROM myseries`, err: `found h, expected FROM at line 1, char 12`}, - {s: `SELECT derivative(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, {s: `SELECT distinct(field1), sum(field1) FROM myseries`, err: `aggregate function distinct() can not be combined with other functions or fields`}, {s: `SELECT distinct(field1), field2 FROM myseries`, err: `aggregate function distinct() can not be combined with other functions or fields`}, {s: `SELECT distinct(field1, field2) FROM myseries`, err: `distinct function can only have one argument`}, @@ -1251,15 +1496,18 @@ func TestParser_ParseStatement(t *testing.T) { {s: `SELECT count(distinct field1, field2) FROM myseries`, err: `count(distinct ) can only have one argument`}, {s: `select count(distinct(too, many, arguments)) from myseries`, err: `count(distinct ) can only have one argument`}, {s: `select count() from myseries`, err: `invalid number of arguments for count, expected 1, got 0`}, + {s: `SELECT derivative(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, {s: `select derivative() from myseries`, err: `invalid number of arguments for derivative, expected at least 1 but no more than 2, got 0`}, {s: `select derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of arguments for derivative, expected at least 1 but no more than 2, got 3`}, + {s: `SELECT derivative(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to derivative`}, + {s: `SELECT non_negative_derivative(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `select non_negative_derivative() from myseries`, err: `invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 0`}, + {s: `select non_negative_derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 3`}, + {s: `SELECT non_negative_derivative(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to non_negative_derivative`}, {s: `SELECT field1 from myseries WHERE host =~ 'asd' LIMIT 1`, err: `found asd, expected regex at line 1, char 42`}, {s: `SELECT value > 2 FROM cpu`, err: `invalid operator > in SELECT clause at line 1, char 8; operator is intended for WHERE clause`}, {s: `SELECT value = 2 FROM cpu`, err: `invalid operator = in SELECT clause at line 1, char 8; operator is intended for WHERE clause`}, {s: `SELECT s =~ /foo/ FROM cpu`, err: `invalid operator =~ in SELECT clause at line 1, char 8; operator is intended for WHERE clause`}, - {s: `SELECT foo, * from cpu`, err: `wildcards can not be combined with other fields`}, - {s: `SELECT *, * from cpu`, err: `found ,, expected FROM at line 1, char 9`}, - {s: `SELECT *, foo from cpu`, err: `found ,, expected FROM at line 1, 
char 9`}, {s: `DELETE`, err: `found EOF, expected FROM at line 1, char 8`}, {s: `DELETE FROM`, err: `found EOF, expected identifier at line 1, char 13`}, {s: `DELETE FROM myseries WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`}, @@ -1273,8 +1521,9 @@ func TestParser_ParseStatement(t *testing.T) { {s: `SHOW RETENTION POLICIES`, err: `found EOF, expected ON at line 1, char 25`}, {s: `SHOW RETENTION POLICIES mydb`, err: `found mydb, expected ON at line 1, char 25`}, {s: `SHOW RETENTION POLICIES ON`, err: `found EOF, expected identifier at line 1, char 28`}, - {s: `SHOW FOO`, err: `found FOO, expected CONTINUOUS, DATABASES, FIELD, GRANTS, MEASUREMENTS, RETENTION, SERIES, SERVERS, TAG, USERS at line 1, char 6`}, - {s: `SHOW STATS ON`, err: `found EOF, expected string at line 1, char 15`}, + {s: `SHOW FOO`, err: `found FOO, expected CONTINUOUS, DATABASES, DIAGNOSTICS, FIELD, GRANTS, MEASUREMENTS, RETENTION, SERIES, SERVERS, SHARDS, STATS, TAG, USERS at line 1, char 6`}, + {s: `SHOW STATS FOR`, err: `found EOF, expected string at line 1, char 16`}, + {s: `SHOW DIAGNOSTICS FOR`, err: `found EOF, expected string at line 1, char 22`}, {s: `SHOW GRANTS`, err: `found EOF, expected FOR at line 1, char 13`}, {s: `SHOW GRANTS FOR`, err: `found EOF, expected identifier at line 1, char 17`}, {s: `DROP CONTINUOUS`, err: `found EOF, expected QUERY at line 1, char 17`}, @@ -1408,7 +1657,8 @@ func TestParser_ParseStatement(t *testing.T) { if !reflect.DeepEqual(tt.err, errstring(err)) { t.Errorf("%d. %q: error mismatch:\n exp=%s\n got=%s\n\n", i, tt.s, tt.err, err) } else if tt.err == "" && !reflect.DeepEqual(tt.stmt, stmt) { - t.Logf("\nexp=%s\ngot=%s\n", mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt)) + t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt)) + t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.stmt.String(), stmt.String()) t.Errorf("%d. %q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt) } } diff --git a/_third_party/github.com/influxdb/influxdb/influxql/result.go b/_third_party/github.com/influxdb/influxdb/influxql/result.go index a9a8cd561c..554f725987 100644 --- a/_third_party/github.com/influxdb/influxdb/influxql/result.go +++ b/_third_party/github.com/influxdb/influxdb/influxql/result.go @@ -3,8 +3,8 @@ package influxql import ( "encoding/json" "errors" - "hash/fnv" - "sort" + + "bosun.org/_third_party/github.com/influxdb/influxdb/models" ) // TagSet is a fundamental concept within the query system. It represents a composite series, @@ -22,66 +22,13 @@ func (t *TagSet) AddFilter(key string, filter Expr) { t.Filters = append(t.Filters, filter) } -// Row represents a single row returned from the execution of a statement. -type Row struct { - Name string `json:"name,omitempty"` - Tags map[string]string `json:"tags,omitempty"` - Columns []string `json:"columns,omitempty"` - Values [][]interface{} `json:"values,omitempty"` - Err error `json:"err,omitempty"` -} - -// SameSeries returns true if r contains values for the same series as o. -func (r *Row) SameSeries(o *Row) bool { - return r.tagsHash() == o.tagsHash() && r.Name == o.Name -} - -// tagsHash returns a hash of tag key/value pairs. -func (r *Row) tagsHash() uint64 { - h := fnv.New64a() - keys := r.tagsKeys() - for _, k := range keys { - h.Write([]byte(k)) - h.Write([]byte(r.Tags[k])) - } - return h.Sum64() -} - -// tagKeys returns a sorted list of tag keys. 
-func (r *Row) tagsKeys() []string { - a := make([]string, 0, len(r.Tags)) - for k := range r.Tags { - a = append(a, k) - } - sort.Strings(a) - return a -} - // Rows represents a list of rows that can be sorted consistently by name/tag. -type Rows []*Row - -func (p Rows) Len() int { return len(p) } - -func (p Rows) Less(i, j int) bool { - // Sort by name first. - if p[i].Name != p[j].Name { - return p[i].Name < p[j].Name - } - - // Sort by tag set hash. Tags don't have a meaningful sort order so we - // just compute a hash and sort by that instead. This allows the tests - // to receive rows in a predictable order every time. - return p[i].tagsHash() < p[j].tagsHash() -} - -func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - // Result represents a resultset returned from a single statement. type Result struct { // StatementID is just the statement's position in the query. It's used // to combine statement results if they're being buffered in memory. StatementID int `json:"-"` - Series Rows + Series models.Rows Err error } @@ -89,8 +36,8 @@ type Result struct { func (r *Result) MarshalJSON() ([]byte, error) { // Define a struct that outputs "error" as a string. var o struct { - Series []*Row `json:"series,omitempty"` - Err string `json:"error,omitempty"` + Series []*models.Row `json:"series,omitempty"` + Err string `json:"error,omitempty"` } // Copy fields to output struct. @@ -105,8 +52,8 @@ func (r *Result) MarshalJSON() ([]byte, error) { // UnmarshalJSON decodes the data into the Result struct func (r *Result) UnmarshalJSON(b []byte) error { var o struct { - Series []*Row `json:"series,omitempty"` - Err string `json:"error,omitempty"` + Series []*models.Row `json:"series,omitempty"` + Err string `json:"error,omitempty"` } err := json.Unmarshal(b, &o) diff --git a/_third_party/github.com/influxdb/influxdb/influxql/scanner.go b/_third_party/github.com/influxdb/influxdb/influxql/scanner.go index c6dab019c8..d071c85717 100644 --- a/_third_party/github.com/influxdb/influxdb/influxql/scanner.go +++ b/_third_party/github.com/influxdb/influxdb/influxql/scanner.go @@ -95,6 +95,8 @@ func (s *Scanner) Scan() (tok Token, pos Pos, lit string) { return COMMA, pos, "" case ';': return SEMICOLON, pos, "" + case ':': + return COLON, pos, "" } return ILLEGAL, pos, string(ch0) diff --git a/_third_party/github.com/influxdb/influxdb/influxql/scanner_test.go b/_third_party/github.com/influxdb/influxdb/influxql/scanner_test.go index fbe85ba7e3..17c0676a45 100644 --- a/_third_party/github.com/influxdb/influxdb/influxql/scanner_test.go +++ b/_third_party/github.com/influxdb/influxdb/influxql/scanner_test.go @@ -136,6 +136,7 @@ func TestScanner_Scan(t *testing.T) { {s: `KEYS`, tok: influxql.KEYS}, {s: `LIMIT`, tok: influxql.LIMIT}, {s: `SHOW`, tok: influxql.SHOW}, + {s: `SHARDS`, tok: influxql.SHARDS}, {s: `MEASUREMENT`, tok: influxql.MEASUREMENT}, {s: `MEASUREMENTS`, tok: influxql.MEASUREMENTS}, {s: `NOT`, tok: influxql.NOT}, diff --git a/_third_party/github.com/influxdb/influxdb/influxql/token.go b/_third_party/github.com/influxdb/influxdb/influxql/token.go index ef5a473af6..795c7b169e 100644 --- a/_third_party/github.com/influxdb/influxdb/influxql/token.go +++ b/_third_party/github.com/influxdb/influxdb/influxql/token.go @@ -50,6 +50,7 @@ const ( LPAREN // ( RPAREN // ) COMMA // , + COLON // : SEMICOLON // ; DOT // . 
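The COLON token added above is what lets the parser accept the `:measurement` backreference in CREATE CONTINUOUS QUERY targets (see the `INTO "policy1".:measurement` test case in parser_test.go earlier in this patch). A minimal tokenization sketch, not part of the patch, assuming the vendored influxql package and its exported NewScanner constructor:

    package main

    import (
        "fmt"
        "strings"

        "bosun.org/_third_party/github.com/influxdb/influxdb/influxql"
    )

    func main() {
        // Before this change ':' scanned as ILLEGAL; it now scans as COLON,
        // which is what allows the parser to recognize the :measurement
        // backreference in a continuous query's INTO clause.
        s := influxql.NewScanner(strings.NewReader(`"policy1".:measurement`))
        for {
            tok, _, lit := s.Scan()
            if tok == influxql.EOF {
                break
            }
            fmt.Println(tok, lit)
        }
    }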
@@ -110,6 +111,7 @@ const ( SERVERS SET SHOW + SHARDS SLIMIT STATS DIAGNOSTICS @@ -160,6 +162,7 @@ var tokens = [...]string{ LPAREN: "(", RPAREN: ")", COMMA: ",", + COLON: ":", SEMICOLON: ";", DOT: ".", @@ -218,6 +221,7 @@ var tokens = [...]string{ SERVERS: "SERVERS", SET: "SET", SHOW: "SHOW", + SHARDS: "SHARDS", SLIMIT: "SLIMIT", SOFFSET: "SOFFSET", STATS: "STATS", diff --git a/_third_party/github.com/influxdb/influxdb/influxvar.go b/_third_party/github.com/influxdb/influxdb/influxvar.go new file mode 100644 index 0000000000..58455515ac --- /dev/null +++ b/_third_party/github.com/influxdb/influxdb/influxvar.go @@ -0,0 +1,45 @@ +package influxdb + +import ( + "expvar" + "sync" +) + +var expvarMu sync.Mutex + +// NewStatistics returns an expvar-based map with the given key. Within that map +// is another map. Within there "name" is the Measurement name, "tags" are the tags, +// and values are placed at the key "values". +func NewStatistics(key, name string, tags map[string]string) *expvar.Map { + expvarMu.Lock() + defer expvarMu.Unlock() + + // Add expvar for this service. + var v expvar.Var + if v = expvar.Get(key); v == nil { + v = expvar.NewMap(key) + } + m := v.(*expvar.Map) + + // Set the name + nameVar := &expvar.String{} + nameVar.Set(name) + m.Set("name", nameVar) + + // Set the tags + tagsVar := &expvar.Map{} + tagsVar.Init() + for k, v := range tags { + value := &expvar.String{} + value.Set(v) + tagsVar.Set(k, value) + } + m.Set("tags", tagsVar) + + // Create and set the values entry used for actual stats. + statMap := &expvar.Map{} + statMap.Init() + m.Set("values", statMap) + + return statMap +} diff --git a/_third_party/github.com/influxdb/influxdb/meta/data.go b/_third_party/github.com/influxdb/influxdb/meta/data.go index 1a880ee0d1..31ac1c46e7 100644 --- a/_third_party/github.com/influxdb/influxdb/meta/data.go +++ b/_third_party/github.com/influxdb/influxdb/meta/data.go @@ -132,7 +132,7 @@ func (data *Data) RetentionPolicy(database, name string) (*RetentionPolicyInfo, return &di.RetentionPolicies[i], nil } } - return nil, ErrRetentionPolicyNotFound + return nil, nil } // CreateRetentionPolicy creates a new retention policy on a database. @@ -172,6 +172,11 @@ func (data *Data) DropRetentionPolicy(database, name string) error { return ErrDatabaseNotFound } + // Prohibit dropping the default retention policy. + if di.DefaultRetentionPolicy == name { + return ErrRetentionPolicyDefault + } + // Remove from list. for i := range di.RetentionPolicies { if di.RetentionPolicies[i].Name == name { @@ -273,7 +278,6 @@ func (data *Data) ShardGroupsByTimeRange(database, policy string, tmin, tmax tim } groups = append(groups, g) } - sort.Sort(ShardGroupInfos(groups)) return groups, nil } @@ -344,13 +348,16 @@ func (data *Data) CreateShardGroup(database, policy string, timestamp time.Time) si := &sgi.Shards[i] for j := 0; j < replicaN; j++ { nodeID := data.Nodes[nodeIndex%len(data.Nodes)].ID - si.OwnerIDs = append(si.OwnerIDs, nodeID) + si.Owners = append(si.Owners, ShardOwner{NodeID: nodeID}) nodeIndex++ } } - // Retention policy has a new shard group, so update the policy. + // Retention policy has a new shard group, so update the policy. Shard + // Groups must be stored in sorted order, as other parts of the system + // assume this to be the case. rpi.ShardGroups = append(rpi.ShardGroups, sgi) + sort.Sort(ShardGroupInfos(rpi.ShardGroups)) return nil } @@ -942,14 +949,14 @@ func (sgi *ShardGroupInfo) unmarshal(pb *internal.ShardGroupInfo) { // ShardInfo represents metadata about a shard. 
type ShardInfo struct { - ID uint64 - OwnerIDs []uint64 + ID uint64 + Owners []ShardOwner } // OwnedBy returns whether the shard's owner IDs includes nodeID. func (si ShardInfo) OwnedBy(nodeID uint64) bool { - for _, id := range si.OwnerIDs { - if id == nodeID { + for _, so := range si.Owners { + if so.NodeID == nodeID { return true } } @@ -960,9 +967,11 @@ func (si ShardInfo) OwnedBy(nodeID uint64) bool { func (si ShardInfo) clone() ShardInfo { other := si - if si.OwnerIDs != nil { - other.OwnerIDs = make([]uint64, len(si.OwnerIDs)) - copy(other.OwnerIDs, si.OwnerIDs) + if si.Owners != nil { + other.Owners = make([]ShardOwner, len(si.Owners)) + for i := range si.Owners { + other.Owners[i] = si.Owners[i].clone() + } } return other @@ -974,17 +983,64 @@ func (si ShardInfo) marshal() *internal.ShardInfo { ID: proto.Uint64(si.ID), } - pb.OwnerIDs = make([]uint64, len(si.OwnerIDs)) - copy(pb.OwnerIDs, si.OwnerIDs) + pb.Owners = make([]*internal.ShardOwner, len(si.Owners)) + for i := range si.Owners { + pb.Owners[i] = si.Owners[i].marshal() + } return pb } +// UnmarshalBinary decodes the object from a binary format. +func (si *ShardInfo) UnmarshalBinary(buf []byte) error { + var pb internal.ShardInfo + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + si.unmarshal(&pb) + return nil +} + // unmarshal deserializes from a protobuf representation. func (si *ShardInfo) unmarshal(pb *internal.ShardInfo) { si.ID = pb.GetID() - si.OwnerIDs = make([]uint64, len(pb.GetOwnerIDs())) - copy(si.OwnerIDs, pb.GetOwnerIDs()) + + // If deprecated "OwnerIDs" exists then convert it to "Owners" format. + if len(pb.GetOwnerIDs()) > 0 { + si.Owners = make([]ShardOwner, len(pb.GetOwnerIDs())) + for i, x := range pb.GetOwnerIDs() { + si.Owners[i].unmarshal(&internal.ShardOwner{ + NodeID: proto.Uint64(x), + }) + } + } else if len(pb.GetOwners()) > 0 { + si.Owners = make([]ShardOwner, len(pb.GetOwners())) + for i, x := range pb.GetOwners() { + si.Owners[i].unmarshal(x) + } + } +} + +// ShardOwner represents a node that owns a shard. +type ShardOwner struct { + NodeID uint64 +} + +// clone returns a deep copy of so. +func (so ShardOwner) clone() ShardOwner { + return so +} + +// marshal serializes to a protobuf representation. +func (so ShardOwner) marshal() *internal.ShardOwner { + return &internal.ShardOwner{ + NodeID: proto.Uint64(so.NodeID), + } +} + +// unmarshal deserializes from a protobuf representation. +func (so *ShardOwner) unmarshal(pb *internal.ShardOwner) { + so.NodeID = pb.GetNodeID() } // ContinuousQueryInfo represents metadata about a continuous query. diff --git a/_third_party/github.com/influxdb/influxdb/meta/data_test.go b/_third_party/github.com/influxdb/influxdb/meta/data_test.go index cccc91b223..91a99fd553 100644 --- a/_third_party/github.com/influxdb/influxdb/meta/data_test.go +++ b/_third_party/github.com/influxdb/influxdb/meta/data_test.go @@ -9,8 +9,10 @@ import ( "time" "bosun.org/_third_party/github.com/davecgh/go-spew/spew" + "bosun.org/_third_party/github.com/gogo/protobuf/proto" "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" "bosun.org/_third_party/github.com/influxdb/influxdb/meta" + "bosun.org/_third_party/github.com/influxdb/influxdb/meta/internal" ) // Ensure a node can be created. 
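The shape change above is the core of the meta migration: the bare OwnerIDs []uint64 slice becomes Owners []ShardOwner, and the unmarshal path converts deprecated OwnerIDs payloads on the fly. A small usage sketch, not part of the patch, using only names visible in this diff (meta.ShardInfo, meta.ShardOwner, OwnedBy); the data_test.go hunks that follow exercise the same migration:

    package main

    import (
        "fmt"

        "bosun.org/_third_party/github.com/influxdb/influxdb/meta"
    )

    func main() {
        // Each owner is now a struct rather than a bare node ID, so more
        // per-owner fields can be added without another format migration.
        si := meta.ShardInfo{
            ID:     1,
            Owners: []meta.ShardOwner{{NodeID: 10}, {NodeID: 20}},
        }
        fmt.Println(si.OwnedBy(10)) // true
        fmt.Println(si.OwnedBy(30)) // false
    }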
@@ -299,7 +301,13 @@ func TestData_CreateShardGroup(t *testing.T) { StartTime: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), EndTime: time.Date(2000, time.January, 1, 1, 0, 0, 0, time.UTC), Shards: []meta.ShardInfo{ - {ID: 1, OwnerIDs: []uint64{1, 2}}, + { + ID: 1, + Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + }, + }, }, }) { t.Fatalf("unexpected shard group: %#v", sgi) @@ -570,8 +578,12 @@ func TestData_Clone(t *testing.T) { EndTime: time.Date(2000, time.February, 1, 0, 0, 0, 0, time.UTC), Shards: []meta.ShardInfo{ { - ID: 200, - OwnerIDs: []uint64{1, 3, 4}, + ID: 200, + Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 3}, + {NodeID: 4}, + }, }, }, }, @@ -605,8 +617,8 @@ func TestData_Clone(t *testing.T) { } // Ensure that changing data in the clone does not affect the original. - other.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].OwnerIDs[1] = 9 - if v := data.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].OwnerIDs[1]; v != 3 { + other.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].Owners[1].NodeID = 9 + if v := data.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].Owners[1].NodeID; v != 3 { t.Fatalf("editing clone changed original: %v", v) } } @@ -637,8 +649,12 @@ func TestData_MarshalBinary(t *testing.T) { EndTime: time.Date(2000, time.February, 1, 0, 0, 0, 0, time.UTC), Shards: []meta.ShardInfo{ { - ID: 200, - OwnerIDs: []uint64{1, 3, 4}, + ID: 200, + Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 3}, + {NodeID: 4}, + }, }, }, }, @@ -682,3 +698,33 @@ func TestData_MarshalBinary(t *testing.T) { t.Fatalf("unexpected users: %#v", other.Users) } } + +// Ensure shards with deprecated "OwnerIDs" can be decoded. +func TestShardInfo_UnmarshalBinary_OwnerIDs(t *testing.T) { + // Encode deprecated form to bytes. + buf, err := proto.Marshal(&internal.ShardInfo{ + ID: proto.Uint64(1), + OwnerIDs: []uint64{10, 20, 30}, + }) + if err != nil { + t.Fatal(err) + } + + // Decode deprecated form. + var si meta.ShardInfo + if err := si.UnmarshalBinary(buf); err != nil { + t.Fatal(err) + } + + // Verify data is migrated correctly. + if !reflect.DeepEqual(si, meta.ShardInfo{ + ID: 1, + Owners: []meta.ShardOwner{ + {NodeID: 10}, + {NodeID: 20}, + {NodeID: 30}, + }, + }) { + t.Fatalf("unexpected shard info: %s", spew.Sdump(si)) + } +} diff --git a/_third_party/github.com/influxdb/influxdb/meta/errors.go b/_third_party/github.com/influxdb/influxdb/meta/errors.go index 44c54f964e..6c726af634 100644 --- a/_third_party/github.com/influxdb/influxdb/meta/errors.go +++ b/_third_party/github.com/influxdb/influxdb/meta/errors.go @@ -7,102 +7,100 @@ import ( var ( // ErrStoreOpen is returned when opening an already open store. - ErrStoreOpen = errors.New("store already open") + ErrStoreOpen = newError("store already open") // ErrStoreClosed is returned when closing an already closed store. - ErrStoreClosed = errors.New("raft store already closed") + ErrStoreClosed = newError("raft store already closed") // ErrTooManyPeers is returned when more than 3 peers are used. - ErrTooManyPeers = errors.New("too many peers; influxdb v0.9.0 is limited to 3 nodes in a cluster") + ErrTooManyPeers = newError("too many peers; influxdb v0.9.0 is limited to 3 nodes in a cluster") ) var ( // ErrNodeExists is returned when creating an already existing node. - ErrNodeExists = errors.New("node already exists") + ErrNodeExists = newError("node already exists") // ErrNodeNotFound is returned when mutating a node that doesn't exist. 
- ErrNodeNotFound = errors.New("node not found") + ErrNodeNotFound = newError("node not found") // ErrNodesRequired is returned when at least one node is required for an operation. // This occurs when creating a shard group. - ErrNodesRequired = errors.New("at least one node required") + ErrNodesRequired = newError("at least one node required") ) var ( // ErrDatabaseExists is returned when creating an already existing database. - ErrDatabaseExists = errors.New("database already exists") + ErrDatabaseExists = newError("database already exists") // ErrDatabaseNotFound is returned when mutating a database that doesn't exist. - ErrDatabaseNotFound = errors.New("database not found") + ErrDatabaseNotFound = newError("database not found") // ErrDatabaseNameRequired is returned when creating a database without a name. - ErrDatabaseNameRequired = errors.New("database name required") + ErrDatabaseNameRequired = newError("database name required") ) var ( // ErrRetentionPolicyExists is returned when creating an already existing policy. - ErrRetentionPolicyExists = errors.New("retention policy already exists") + ErrRetentionPolicyExists = newError("retention policy already exists") + + // ErrRetentionPolicyDefault is returned when attempting a prohibited operation + // on a default retention policy. + ErrRetentionPolicyDefault = newError("retention policy is default") // ErrRetentionPolicyNotFound is returned when mutating a policy that doesn't exist. - ErrRetentionPolicyNotFound = errors.New("retention policy not found") + ErrRetentionPolicyNotFound = newError("retention policy not found") // ErrRetentionPolicyNameRequired is returned when creating a policy without a name. - ErrRetentionPolicyNameRequired = errors.New("retention policy name required") + ErrRetentionPolicyNameRequired = newError("retention policy name required") // ErrRetentionPolicyNameExists is returned when renaming a policy to // the same name as another existing policy. - ErrRetentionPolicyNameExists = errors.New("retention policy name already exists") + ErrRetentionPolicyNameExists = newError("retention policy name already exists") // ErrRetentionPolicyDurationTooLow is returned when updating a retention // policy that has a duration lower than the allowed minimum. - ErrRetentionPolicyDurationTooLow = errors.New(fmt.Sprintf("retention policy duration must be at least %s", + ErrRetentionPolicyDurationTooLow = newError(fmt.Sprintf("retention policy duration must be at least %s", RetentionPolicyMinDuration)) // ErrReplicationFactorTooLow is returned when the replication factor is not in an // acceptable range. - ErrReplicationFactorTooLow = errors.New("replication factor must be greater than 0") + ErrReplicationFactorTooLow = newError("replication factor must be greater than 0") ) var ( // ErrShardGroupExists is returned when creating an already existing shard group. - ErrShardGroupExists = errors.New("shard group already exists") + ErrShardGroupExists = newError("shard group already exists") // ErrShardGroupNotFound is returned when mutating a shard group that doesn't exist. - ErrShardGroupNotFound = errors.New("shard group not found") + ErrShardGroupNotFound = newError("shard group not found") ) var ( // ErrContinuousQueryExists is returned when creating an already existing continuous query. - ErrContinuousQueryExists = errors.New("continuous query already exists") + ErrContinuousQueryExists = newError("continuous query already exists") // ErrContinuousQueryNotFound is returned when removing a continuous query that doesn't exist. 
- ErrContinuousQueryNotFound = errors.New("continuous query not found") + ErrContinuousQueryNotFound = newError("continuous query not found") ) var ( // ErrUserExists is returned when creating an already existing user. - ErrUserExists = errors.New("user already exists") + ErrUserExists = newError("user already exists") // ErrUserNotFound is returned when mutating a user that doesn't exist. - ErrUserNotFound = errors.New("user not found") + ErrUserNotFound = newError("user not found") // ErrUsernameRequired is returned when creating a user without a username. - ErrUsernameRequired = errors.New("username required") + ErrUsernameRequired = newError("username required") ) -var errs = [...]error{ - ErrStoreOpen, ErrStoreClosed, - ErrNodeExists, ErrNodeNotFound, - ErrDatabaseExists, ErrDatabaseNotFound, ErrDatabaseNameRequired, -} - // errLookup stores a mapping of error strings to well defined error types. var errLookup = make(map[string]error) -func init() { - for _, err := range errs { - errLookup[err.Error()] = err - } +func newError(msg string) error { + err := errors.New(msg) + errLookup[err.Error()] = err + return err } // lookupError returns a known error reference, if one exists. diff --git a/_third_party/github.com/influxdb/influxdb/meta/internal/meta.pb.go b/_third_party/github.com/influxdb/influxdb/meta/internal/meta.pb.go index 06ceb395d7..ef3b80c8c0 100644 --- a/_third_party/github.com/influxdb/influxdb/meta/internal/meta.pb.go +++ b/_third_party/github.com/influxdb/influxdb/meta/internal/meta.pb.go @@ -15,6 +15,7 @@ It has these top-level messages: RetentionPolicyInfo ShardGroupInfo ShardInfo + ShardOwner ContinuousQueryInfo UserInfo UserPrivilege @@ -416,9 +417,10 @@ func (m *ShardGroupInfo) GetShards() []*ShardInfo { } type ShardInfo struct { - ID *uint64 `protobuf:"varint,1,req" json:"ID,omitempty"` - OwnerIDs []uint64 `protobuf:"varint,2,rep" json:"OwnerIDs,omitempty"` - XXX_unrecognized []byte `json:"-"` + ID *uint64 `protobuf:"varint,1,req" json:"ID,omitempty"` + OwnerIDs []uint64 `protobuf:"varint,2,rep" json:"OwnerIDs,omitempty"` + Owners []*ShardOwner `protobuf:"bytes,3,rep" json:"Owners,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *ShardInfo) Reset() { *m = ShardInfo{} } @@ -439,6 +441,29 @@ func (m *ShardInfo) GetOwnerIDs() []uint64 { return nil } +func (m *ShardInfo) GetOwners() []*ShardOwner { + if m != nil { + return m.Owners + } + return nil +} + +type ShardOwner struct { + NodeID *uint64 `protobuf:"varint,1,req" json:"NodeID,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ShardOwner) Reset() { *m = ShardOwner{} } +func (m *ShardOwner) String() string { return proto.CompactTextString(m) } +func (*ShardOwner) ProtoMessage() {} + +func (m *ShardOwner) GetNodeID() uint64 { + if m != nil && m.NodeID != nil { + return *m.NodeID + } + return 0 +} + type ContinuousQueryInfo struct { Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"` Query *string `protobuf:"bytes,2,req" json:"Query,omitempty"` diff --git a/_third_party/github.com/influxdb/influxdb/meta/internal/meta.proto b/_third_party/github.com/influxdb/influxdb/meta/internal/meta.proto index 2aa50244b1..3111472257 100644 --- a/_third_party/github.com/influxdb/influxdb/meta/internal/meta.proto +++ b/_third_party/github.com/influxdb/influxdb/meta/internal/meta.proto @@ -49,8 +49,13 @@ message ShardGroupInfo { } message ShardInfo { - required uint64 ID = 1; - repeated uint64 OwnerIDs = 2; + required uint64 ID = 1; + repeated uint64 OwnerIDs = 2 [deprecated=true]; + repeated 
ShardOwner Owners = 3; +} + +message ShardOwner { + required uint64 NodeID = 1; } message ContinuousQueryInfo { diff --git a/_third_party/github.com/influxdb/influxdb/meta/rpc_test.go b/_third_party/github.com/influxdb/influxdb/meta/rpc_test.go index 3f60c6bd05..40f3540aff 100644 --- a/_third_party/github.com/influxdb/influxdb/meta/rpc_test.go +++ b/_third_party/github.com/influxdb/influxdb/meta/rpc_test.go @@ -122,7 +122,7 @@ func TestRPCFetchDataMatchesBlocking(t *testing.T) { // Simulate the rmote index changing and unblocking fs.mu.Lock() - fs.md.Index = 100 + fs.md = &Data{Index: 100} fs.mu.Unlock() close(fs.blockChan) wg.Wait() diff --git a/_third_party/github.com/influxdb/influxdb/meta/statement_executor.go b/_third_party/github.com/influxdb/influxdb/meta/statement_executor.go index 359ed29cdb..5d21dfc65a 100644 --- a/_third_party/github.com/influxdb/influxdb/meta/statement_executor.go +++ b/_third_party/github.com/influxdb/influxdb/meta/statement_executor.go @@ -1,9 +1,13 @@ package meta import ( + "bytes" "fmt" + "strconv" + "time" "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" ) // StatementExecutor translates InfluxQL queries to meta store methods. @@ -80,6 +84,8 @@ func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement) *influxql. return e.executeDropContinuousQueryStatement(stmt) case *influxql.ShowContinuousQueriesStatement: return e.executeShowContinuousQueriesStatement(stmt) + case *influxql.ShowShardsStatement: + return e.executeShowShardsStatement(stmt) case *influxql.ShowStatsStatement: return e.executeShowStatsStatement(stmt) default: @@ -105,11 +111,11 @@ func (e *StatementExecutor) executeShowDatabasesStatement(q *influxql.ShowDataba return &influxql.Result{Err: err} } - row := &influxql.Row{Name: "databases", Columns: []string{"name"}} + row := &models.Row{Name: "databases", Columns: []string{"name"}} for _, di := range dis { row.Values = append(row.Values, []interface{}{di.Name}) } - return &influxql.Result{Series: []*influxql.Row{row}} + return &influxql.Result{Series: []*models.Row{row}} } func (e *StatementExecutor) executeShowGrantsForUserStatement(q *influxql.ShowGrantsForUserStatement) *influxql.Result { @@ -118,11 +124,11 @@ func (e *StatementExecutor) executeShowGrantsForUserStatement(q *influxql.ShowGr return &influxql.Result{Err: err} } - row := &influxql.Row{Columns: []string{"database", "privilege"}} + row := &models.Row{Columns: []string{"database", "privilege"}} for d, p := range priv { row.Values = append(row.Values, []interface{}{d, p.String()}) } - return &influxql.Result{Series: []*influxql.Row{row}} + return &influxql.Result{Series: []*models.Row{row}} } func (e *StatementExecutor) executeShowServersStatement(q *influxql.ShowServersStatement) *influxql.Result { @@ -136,11 +142,11 @@ func (e *StatementExecutor) executeShowServersStatement(q *influxql.ShowServersS return &influxql.Result{Err: err} } - row := &influxql.Row{Columns: []string{"id", "cluster_addr", "raft"}} + row := &models.Row{Columns: []string{"id", "cluster_addr", "raft"}} for _, ni := range nis { row.Values = append(row.Values, []interface{}{ni.ID, ni.Host, contains(peers, ni.Host)}) } - return &influxql.Result{Series: []*influxql.Row{row}} + return &influxql.Result{Series: []*models.Row{row}} } func (e *StatementExecutor) executeCreateUserStatement(q *influxql.CreateUserStatement) *influxql.Result { @@ -162,11 +168,11 @@ func (e *StatementExecutor) executeShowUsersStatement(q 
*influxql.ShowUsersState return &influxql.Result{Err: err} } - row := &influxql.Row{Columns: []string{"user", "admin"}} + row := &models.Row{Columns: []string{"user", "admin"}} for _, ui := range uis { row.Values = append(row.Values, []interface{}{ui.Name, ui.Admin}) } - return &influxql.Result{Series: []*influxql.Row{row}} + return &influxql.Result{Series: []*models.Row{row}} } func (e *StatementExecutor) executeGrantStatement(stmt *influxql.GrantStatement) *influxql.Result { @@ -248,11 +254,11 @@ func (e *StatementExecutor) executeShowRetentionPoliciesStatement(q *influxql.Sh return &influxql.Result{Err: ErrDatabaseNotFound} } - row := &influxql.Row{Columns: []string{"name", "duration", "replicaN", "default"}} + row := &models.Row{Columns: []string{"name", "duration", "replicaN", "default"}} for _, rpi := range di.RetentionPolicies { row.Values = append(row.Values, []interface{}{rpi.Name, rpi.Duration.String(), rpi.ReplicaN, di.DefaultRetentionPolicy == rpi.Name}) } - return &influxql.Result{Series: []*influxql.Row{row}} + return &influxql.Result{Series: []*models.Row{row}} } func (e *StatementExecutor) executeCreateContinuousQueryStatement(q *influxql.CreateContinuousQueryStatement) *influxql.Result { @@ -273,9 +279,9 @@ func (e *StatementExecutor) executeShowContinuousQueriesStatement(stmt *influxql return &influxql.Result{Err: err} } - rows := []*influxql.Row{} + rows := []*models.Row{} for _, di := range dis { - row := &influxql.Row{Columns: []string{"name", "query"}, Name: di.Name} + row := &models.Row{Columns: []string{"name", "query"}, Name: di.Name} for _, cqi := range di.ContinuousQueries { row.Values = append(row.Values, []interface{}{cqi.Name, cqi.Query}) } @@ -284,6 +290,50 @@ func (e *StatementExecutor) executeShowContinuousQueriesStatement(stmt *influxql return &influxql.Result{Series: rows} } +func (e *StatementExecutor) executeShowShardsStatement(stmt *influxql.ShowShardsStatement) *influxql.Result { + dis, err := e.Store.Databases() + if err != nil { + return &influxql.Result{Err: err} + } + + rows := []*models.Row{} + for _, di := range dis { + row := &models.Row{Columns: []string{"id", "start_time", "end_time", "expiry_time", "owners"}, Name: di.Name} + for _, rpi := range di.RetentionPolicies { + for _, sgi := range rpi.ShardGroups { + for _, si := range sgi.Shards { + ownerIDs := make([]uint64, len(si.Owners)) + for i, owner := range si.Owners { + ownerIDs[i] = owner.NodeID + } + + row.Values = append(row.Values, []interface{}{ + si.ID, + sgi.StartTime.UTC().Format(time.RFC3339), + sgi.EndTime.UTC().Format(time.RFC3339), + sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339), + joinUint64(ownerIDs), + }) + } + } + } + rows = append(rows, row) + } + return &influxql.Result{Series: rows} +} + func (e *StatementExecutor) executeShowStatsStatement(stmt *influxql.ShowStatsStatement) *influxql.Result { return &influxql.Result{Err: fmt.Errorf("SHOW STATS is not implemented yet")} } + +// joinUint64 returns a comma-delimited string of uint64 numbers. 
+func joinUint64(a []uint64) string { + var buf bytes.Buffer + for i, x := range a { + buf.WriteString(strconv.FormatUint(x, 10)) + if i < len(a)-1 { + buf.WriteRune(',') + } + } + return buf.String() +} diff --git a/_third_party/github.com/influxdb/influxdb/meta/statement_executor_test.go b/_third_party/github.com/influxdb/influxdb/meta/statement_executor_test.go index 0d6c001164..c2589c206f 100644 --- a/_third_party/github.com/influxdb/influxdb/meta/statement_executor_test.go +++ b/_third_party/github.com/influxdb/influxdb/meta/statement_executor_test.go @@ -9,6 +9,7 @@ import ( "bosun.org/_third_party/github.com/davecgh/go-spew/spew" "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" "bosun.org/_third_party/github.com/influxdb/influxdb/meta" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" ) // Ensure a CREATE DATABASE statement can be executed. @@ -57,7 +58,7 @@ func TestStatementExecutor_ExecuteStatement_ShowDatabases(t *testing.T) { if res := e.ExecuteStatement(influxql.MustParseStatement(`SHOW DATABASES`)); res.Err != nil { t.Fatal(res.Err) - } else if !reflect.DeepEqual(res.Series, influxql.Rows{ + } else if !reflect.DeepEqual(res.Series, models.Rows{ { Name: "databases", Columns: []string{"name"}, @@ -99,7 +100,7 @@ func TestStatementExecutor_ExecuteStatement_ShowGrantsFor(t *testing.T) { if res := e.ExecuteStatement(influxql.MustParseStatement(`SHOW GRANTS FOR dejan`)); res.Err != nil { t.Fatal(res.Err) - } else if !reflect.DeepEqual(res.Series, influxql.Rows{ + } else if !reflect.DeepEqual(res.Series, models.Rows{ { Columns: []string{"database", "privilege"}, Values: [][]interface{}{ @@ -127,7 +128,7 @@ func TestStatementExecutor_ExecuteStatement_ShowServers(t *testing.T) { if res := e.ExecuteStatement(influxql.MustParseStatement(`SHOW SERVERS`)); res.Err != nil { t.Fatal(res.Err) - } else if !reflect.DeepEqual(res.Series, influxql.Rows{ + } else if !reflect.DeepEqual(res.Series, models.Rows{ { Columns: []string{"id", "cluster_addr", "raft"}, Values: [][]interface{}{ @@ -257,7 +258,7 @@ func TestStatementExecutor_ExecuteStatement_ShowUsers(t *testing.T) { if res := e.ExecuteStatement(influxql.MustParseStatement(`SHOW USERS`)); res.Err != nil { t.Fatal(res.Err) - } else if !reflect.DeepEqual(res.Series, influxql.Rows{ + } else if !reflect.DeepEqual(res.Series, models.Rows{ { Columns: []string{"user", "admin"}, Values: [][]interface{}{ @@ -580,7 +581,7 @@ func TestStatementExecutor_ExecuteStatement_ShowRetentionPolicies(t *testing.T) if res := e.ExecuteStatement(influxql.MustParseStatement(`SHOW RETENTION POLICIES ON db0`)); res.Err != nil { t.Fatal(res.Err) - } else if !reflect.DeepEqual(res.Series, influxql.Rows{ + } else if !reflect.DeepEqual(res.Series, models.Rows{ { Columns: []string{"name", "duration", "replicaN", "default"}, Values: [][]interface{}{ @@ -625,13 +626,13 @@ func TestStatementExecutor_ExecuteStatement_CreateContinuousQuery(t *testing.T) t.Fatalf("unexpected database: %s", database) } else if name != "cq0" { t.Fatalf("unexpected name: %s", name) - } else if query != `CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(*) INTO db1 FROM db0 GROUP BY time(1h) END` { + } else if query != `CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(field1) INTO db1 FROM db0 GROUP BY time(1h) END` { t.Fatalf("unexpected query: %s", query) } return nil } - stmt := influxql.MustParseStatement(`CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(*) INTO db1 FROM db0 GROUP BY time(1h) END`) + stmt := influxql.MustParseStatement(`CREATE CONTINUOUS 
QUERY cq0 ON db0 BEGIN SELECT count(field1) INTO db1 FROM db0 GROUP BY time(1h) END`) if res := e.ExecuteStatement(stmt); res.Err != nil { t.Fatal(res.Err) } else if res.Series != nil { @@ -646,7 +647,7 @@ func TestStatementExecutor_ExecuteStatement_CreateContinuousQuery_Err(t *testing return errors.New("marker") } - stmt := influxql.MustParseStatement(`CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(*) INTO db1 FROM db0 GROUP BY time(1h) END`) + stmt := influxql.MustParseStatement(`CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(field1) INTO db1 FROM db0 GROUP BY time(1h) END`) if res := e.ExecuteStatement(stmt); res.Err == nil || res.Err.Error() != "marker" { t.Fatalf("unexpected error: %s", res.Err) } @@ -693,14 +694,14 @@ func TestStatementExecutor_ExecuteStatement_ShowContinuousQueries(t *testing.T) { Name: "db0", ContinuousQueries: []meta.ContinuousQueryInfo{ - {Name: "cq0", Query: "SELECT count(*) INTO db1 FROM db0"}, - {Name: "cq1", Query: "SELECT count(*) INTO db2 FROM db0"}, + {Name: "cq0", Query: "SELECT count(field1) INTO db1 FROM db0"}, + {Name: "cq1", Query: "SELECT count(field1) INTO db2 FROM db0"}, }, }, { Name: "db1", ContinuousQueries: []meta.ContinuousQueryInfo{ - {Name: "cq2", Query: "SELECT count(*) INTO db3 FROM db1"}, + {Name: "cq2", Query: "SELECT count(field1) INTO db3 FROM db1"}, }, }, }, nil @@ -709,20 +710,20 @@ func TestStatementExecutor_ExecuteStatement_ShowContinuousQueries(t *testing.T) stmt := influxql.MustParseStatement(`SHOW CONTINUOUS QUERIES`) if res := e.ExecuteStatement(stmt); res.Err != nil { t.Fatal(res.Err) - } else if !reflect.DeepEqual(res.Series, influxql.Rows{ + } else if !reflect.DeepEqual(res.Series, models.Rows{ { Name: "db0", Columns: []string{"name", "query"}, Values: [][]interface{}{ - {"cq0", "SELECT count(*) INTO db1 FROM db0"}, - {"cq1", "SELECT count(*) INTO db2 FROM db0"}, + {"cq0", "SELECT count(field1) INTO db1 FROM db0"}, + {"cq1", "SELECT count(field1) INTO db2 FROM db0"}, }, }, { Name: "db1", Columns: []string{"name", "query"}, Values: [][]interface{}{ - {"cq2", "SELECT count(*) INTO db3 FROM db1"}, + {"cq2", "SELECT count(field1) INTO db3 FROM db1"}, }, }, }) { @@ -755,7 +756,7 @@ func TestStatementExecutor_ExecuteStatement_Unsupported(t *testing.T) { // Execute a SELECT statement. NewStatementExecutor().ExecuteStatement( - influxql.MustParseStatement(`SELECT count(*) FROM db0`), + influxql.MustParseStatement(`SELECT count(field1) FROM db0`), ) }() @@ -765,6 +766,57 @@ func TestStatementExecutor_ExecuteStatement_Unsupported(t *testing.T) { } } +// Ensure a SHOW SHARDS statement can be executed. 
+func TestStatementExecutor_ExecuteStatement_ShowShards(t *testing.T) { + e := NewStatementExecutor() + e.Store.DatabasesFn = func() ([]meta.DatabaseInfo, error) { + return []meta.DatabaseInfo{ + { + Name: "foo", + RetentionPolicies: []meta.RetentionPolicyInfo{ + { + Duration: time.Second, + ShardGroups: []meta.ShardGroupInfo{ + { + StartTime: time.Unix(0, 0), + EndTime: time.Unix(1, 0), + Shards: []meta.ShardInfo{ + { + ID: 1, + Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }, + }, + { + ID: 2, + }, + }, + }, + }, + }, + }, + }, + }, nil + } + + if res := e.ExecuteStatement(influxql.MustParseStatement(`SHOW SHARDS`)); res.Err != nil { + t.Fatal(res.Err) + } else if !reflect.DeepEqual(res.Series, models.Rows{ + { + Name: "foo", + Columns: []string{"id", "start_time", "end_time", "expiry_time", "owners"}, + Values: [][]interface{}{ + {uint64(1), "1970-01-01T00:00:00Z", "1970-01-01T00:00:01Z", "1970-01-01T00:00:02Z", "1,2,3"}, + {uint64(2), "1970-01-01T00:00:00Z", "1970-01-01T00:00:01Z", "1970-01-01T00:00:02Z", ""}, + }, + }, + }) { + t.Fatalf("unexpected rows: %s", spew.Sdump(res.Series)) + } +} + // StatementExecutor represents a test wrapper for meta.StatementExecutor. type StatementExecutor struct { *meta.StatementExecutor diff --git a/_third_party/github.com/influxdb/influxdb/meta/store.go b/_third_party/github.com/influxdb/influxdb/meta/store.go index 9ad0b47f86..52669cd653 100644 --- a/_third_party/github.com/influxdb/influxdb/meta/store.go +++ b/_third_party/github.com/influxdb/influxdb/meta/store.go @@ -254,7 +254,10 @@ func (s *Store) Open() error { close(s.ready) } - return nil + // Wait for a leader to be elected so we know the raft log is loaded + // and up to date + <-s.ready + return s.WaitForLeader(0) } // syncNodeInfo continuously tries to update the current nodes hostname @@ -689,7 +692,7 @@ func (s *Store) handleExecConn(conn net.Conn) { // Apply against the raft log. if err := s.apply(buf); err != nil { - return fmt.Errorf("apply: %s", err) + return err } return nil }() @@ -858,6 +861,7 @@ func (s *Store) CreateDatabase(name string) (*DatabaseInfo, error) { ); err != nil { return nil, err } + s.Logger.Printf("database '%s' created", name) if s.retentionAutoCreate { // Read node count. @@ -977,6 +981,7 @@ func (s *Store) CreateRetentionPolicy(database string, rpi *RetentionPolicyInfo) return nil, err } + s.Logger.Printf("retention policy '%s' for database '%s' created", rpi.Name, database) return s.RetentionPolicy(database, rpi.Name) } @@ -1047,8 +1052,6 @@ func (s *Store) DropRetentionPolicy(database, name string) error { ) } -// FIX: CreateRetentionPolicyIfNotExists(database string, rp *RetentionPolicyInfo) (*RetentionPolicyInfo, error) - // CreateShardGroup creates a new shard group in a retention policy for a given time. func (s *Store) CreateShardGroup(database, policy string, timestamp time.Time) (*ShardGroupInfo, error) { if err := s.exec(internal.Command_CreateShardGroupCommand, internal.E_CreateShardGroupCommand_Command, @@ -1389,38 +1392,34 @@ func (s *Store) UserCount() (count int, err error) { return } -// PrecreateShardGroups creates shard groups whose endtime is before the cutoff time passed in. This -// avoid the need for these shards to be created when data for the corresponding time range arrives. -// Shard creation involves Raft consensus, and precreation avoids taking the hit at write-time. 
-func (s *Store) PrecreateShardGroups(cutoff time.Time) error { +// PrecreateShardGroups creates shard groups whose end time is before the 'to' time passed in, but +// which have not yet expired as of 'from'. This is to avoid the need for these shards to be created when data +// for the corresponding time range arrives. Shard creation involves Raft consensus, and precreation +// avoids taking the hit at write-time. +func (s *Store) PrecreateShardGroups(from, to time.Time) error { s.read(func(data *Data) error { for _, di := range data.Databases { for _, rp := range di.RetentionPolicies { - for _, g := range rp.ShardGroups { - // Check to see if it is not deleted and going to end before our interval - if !g.Deleted() && g.EndTime.Before(cutoff) { - nextShardGroupTime := g.EndTime.Add(1 * time.Nanosecond) - - // Check if successive shard group exists. - if sgi, err := s.ShardGroupByTimestamp(di.Name, rp.Name, nextShardGroupTime); err != nil { - s.Logger.Printf("failed to check if successive shard group for group exists %d: %s", - g.ID, err.Error()) - continue - } else if sgi != nil && !sgi.Deleted() { - continue - } - - // It doesn't. Create it. - if newGroup, err := s.CreateShardGroupIfNotExists(di.Name, rp.Name, nextShardGroupTime); err != nil { - s.Logger.Printf("failed to create successive shard group for group %d: %s", - g.ID, err.Error()) - } else { - s.Logger.Printf("new shard group %d successfully created for database %s, retention policy %s", - newGroup.ID, di.Name, rp.Name) - } + if len(rp.ShardGroups) == 0 { + // No data was ever written to this group, or all groups have been deleted. + continue + } + g := rp.ShardGroups[len(rp.ShardGroups)-1] // Get the last group in time. + if !g.Deleted() && g.EndTime.Before(to) && g.EndTime.After(from) { + // Group is not deleted, will end before the future time, but is still yet to expire. + // This last check is important, so the system doesn't create shard groups wholly + // in the past. + + // Create successive shard group. + nextShardGroupTime := g.EndTime.Add(1 * time.Nanosecond) + if newGroup, err := s.CreateShardGroupIfNotExists(di.Name, rp.Name, nextShardGroupTime); err != nil { + s.Logger.Printf("failed to create successive shard group for group %d: %s", + g.ID, err.Error()) + } else { + s.Logger.Printf("new shard group %d successfully created for database %s, retention policy %s", + newGroup.ID, di.Name, rp.Name) } } - } } return nil @@ -1554,7 +1553,7 @@ func (s *Store) remoteExec(b []byte) error { if err := proto.Unmarshal(buf, &resp); err != nil { return fmt.Errorf("unmarshal response: %s", err) } else if !resp.GetOK() { - return fmt.Errorf("exec failed: %s", resp.GetError()) + return lookupError(fmt.Errorf(resp.GetError())) } // Wait for local FSM to sync to index. diff --git a/_third_party/github.com/influxdb/influxdb/meta/store_test.go b/_third_party/github.com/influxdb/influxdb/meta/store_test.go index 7bb59536f7..65aaa05c7c 100644 --- a/_third_party/github.com/influxdb/influxdb/meta/store_test.go +++ b/_third_party/github.com/influxdb/influxdb/meta/store_test.go @@ -274,6 +274,47 @@ func TestStore_CreateRetentionPolicy(t *testing.T) { } } +// Ensure the store can create and get a retention policy on a database. +func TestStore_CreateAndGetRetentionPolicy(t *testing.T) { + t.Parallel() + s := MustOpenStore() + defer s.Close() + + // Create an additional node and database.
+ if _, err := s.CreateNode("hostX"); err != nil { + t.Fatal(err) + } else if _, err := s.CreateDatabase("db0"); err != nil { + t.Fatal(err) + } + + // Create policy on database. + if _, err := s.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{ + Name: "rp0", + ReplicaN: 2, + Duration: 48 * time.Hour, + }); err != nil { + t.Fatal(err) + } + + // Get the policy on database. + if rpi, err := s.RetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(rpi, &meta.RetentionPolicyInfo{ + Name: "rp0", + ReplicaN: 2, + Duration: 48 * time.Hour, + ShardGroupDuration: 24 * time.Hour, + }) { + t.Fatalf("unexpected policy: %#v", rpi) + } + + // Get non-existent policies. + if _, err := s.RetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + +} + // Ensure the store can delete a retention policy. func TestStore_DropRetentionPolicy(t *testing.T) { t.Parallel() @@ -489,30 +530,57 @@ func TestStore_PrecreateShardGroup(t *testing.T) { s := MustOpenStore() defer s.Close() - // Create node, database, policy, & group. + // Create node, database, policy, & groups. if _, err := s.CreateNode("host0"); err != nil { t.Fatal(err) } else if _, err := s.CreateDatabase("db0"); err != nil { t.Fatal(err) } else if _, err = s.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 2, Duration: 1 * time.Hour}); err != nil { t.Fatal(err) - } else if _, err := s.CreateShardGroup("db0", "rp0", time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil { + } else if _, err = s.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp1", ReplicaN: 2, Duration: 1 * time.Hour}); err != nil { + t.Fatal(err) + } else if _, err = s.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp2", ReplicaN: 2, Duration: 1 * time.Hour}); err != nil { t.Fatal(err) - } else if err := s.PrecreateShardGroups(time.Date(2001, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil { + } else if _, err := s.CreateShardGroup("db0", "rp0", time.Date(2001, time.January, 1, 1, 0, 0, 0, time.UTC)); err != nil { + t.Fatal(err) + } else if _, err := s.CreateShardGroup("db0", "rp1", time.Date(2000, time.January, 1, 1, 0, 0, 0, time.UTC)); err != nil { t.Fatal(err) } + if err := s.PrecreateShardGroups(time.Date(2001, time.January, 1, 0, 0, 0, 0, time.UTC), time.Date(2001, time.January, 1, 3, 0, 0, 0, time.UTC)); err != nil { + t.Fatal(err) + } + + // rp0 should undergo precreation. groups, err := s.ShardGroups("db0", "rp0") if err != nil { t.Fatal(err) } if len(groups) != 2 { - t.Fatalf("shard group precreation failed to create new shard group") + t.Fatalf("shard group precreation failed to create new shard group for rp0") } - if groups[1].StartTime != time.Date(2000, time.January, 1, 1, 0, 0, 0, time.UTC) { + if groups[1].StartTime != time.Date(2001, time.January, 1, 2, 0, 0, 0, time.UTC) { t.Fatalf("precreated shard group has wrong start time, exp %s, got %s", time.Date(2001, time.January, 1, 2, 0, 0, 0, time.UTC), groups[1].StartTime) } + + // rp1 should not undergo precreation since it is completely in the past. + groups, err = s.ShardGroups("db0", "rp1") + if err != nil { + t.Fatal(err) + } + if len(groups) != 1 { + t.Fatalf("shard group precreation created new shard group for rp1") + } + + // rp2 should not undergo precreation since it has no shards.
+ groups, err = s.ShardGroups("db0", "rp2") + if err != nil { + t.Fatal(err) + } + if len(groups) != 0 { + t.Fatalf("shard group precreation created new shard group for rp2") + } } // Ensure the store can create a new continuous query. @@ -828,14 +896,14 @@ func TestCluster_Restart(t *testing.T) { t.Fatal("no leader found") } - // Add 5 more ndes, 2 should become raft peers, 3 remote raft clients + // Add 5 more nodes, 2 should become raft peers, 3 remote raft clients for i := 0; i < 5; i++ { if err := c.Join(); err != nil { t.Fatalf("failed to join cluster: %v", err) } } - // The tests use a host host assigned listener port. We need to re-use + // The tests use a host assigned listener port. We need to re-use // the original ports when the new cluster is restarted so that the existing // peer store addresses can be reached. addrs := []string{} @@ -858,10 +926,25 @@ func TestCluster_Restart(t *testing.T) { // Re-create the cluster nodes from existing disk paths and addresses stores := []*Store{} + storeChan := make(chan *Store) for i, s := range c.Stores { - store := MustOpenStoreWithPath(addrs[i], s.Path()) + + // Need to start each instance asynchronously because they have existing raft peers + // store. Starting one will block indefinitely because it will not be able to become + // leader until another peer is available to hold an election. + go func(addr, path string) { + store := MustOpenStoreWithPath(addr, path) + storeChan <- store + }(addrs[i], s.Path()) + + } + + // Collect up our restart meta-stores + for range c.Stores { + store := <-storeChan stores = append(stores, store) } + c.Stores = stores // Wait for the cluster to stabilize diff --git a/_third_party/github.com/influxdb/influxdb/models/points.go b/_third_party/github.com/influxdb/influxdb/models/points.go new file mode 100644 index 0000000000..d6553a07e3 --- /dev/null +++ b/_third_party/github.com/influxdb/influxdb/models/points.go @@ -0,0 +1,1340 @@ +package models + +import ( + "bytes" + "fmt" + "hash/fnv" + "sort" + "strconv" + "time" + + "bosun.org/_third_party/github.com/influxdb/influxdb/pkg/escape" +) + +var ( + measurementEscapeCodes = map[byte][]byte{ + ',': []byte(`\,`), + ' ': []byte(`\ `), + } + + tagEscapeCodes = map[byte][]byte{ + ',': []byte(`\,`), + ' ': []byte(`\ `), + '=': []byte(`\=`), + } +) + +// Point defines the values that will be written to the database +type Point interface { + Name() string + SetName(string) + + Tags() Tags + AddTag(key, value string) + SetTags(tags Tags) + + Fields() Fields + AddField(name string, value interface{}) + + Time() time.Time + SetTime(t time.Time) + UnixNano() int64 + + HashID() uint64 + Key() []byte + + Data() []byte + SetData(buf []byte) + + // String returns a string representation of the point object, if there is a + // timestamp associated with the point then it will be specified with the default + // precision of nanoseconds + String() string + + // PrecisionString returns a string representation of the point object, if there + // is a timestamp associated with the point then it will be specified in the + // given unit + PrecisionString(precision string) string +} + +// Points represents a sortable list of points by timestamp. +type Points []Point + +func (a Points) Len() int { return len(a) } +func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) } +func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// point is the default implementation of Point. 
+type point struct { + time time.Time + + // text encoding of measurement and tags + // key must always be stored sorted by tags, if the original line was not sorted, + // we need to resort it + key []byte + + // text encoding of field data + fields []byte + + // text encoding of timestamp + ts []byte + + // binary encoded field data + data []byte + + // cached version of parsed fields from data + cachedFields map[string]interface{} + + // cached version of parsed name from key + cachedName string +} + +const ( + // the number of characters for the largest possible int64 (9223372036854775807) + maxInt64Digits = 19 + + // the number of characters for the smallest possible int64 (-9223372036854775808) + minInt64Digits = 20 + + // the number of characters required for the largest float64 before a range check + // would occur during parsing + maxFloat64Digits = 25 + + // the number of characters required for smallest float64 before a range check occur + // would occur during parsing + minFloat64Digits = 27 +) + +var () + +func ParsePointsString(buf string) ([]Point, error) { + return ParsePoints([]byte(buf)) +} + +// ParsePoints returns a slice of Points from a text representation of a point +// with each point separated by newlines. +func ParsePoints(buf []byte) ([]Point, error) { + return ParsePointsWithPrecision(buf, time.Now().UTC(), "n") +} + +func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) { + points := []Point{} + var ( + pos int + block []byte + ) + for { + pos, block = scanLine(buf, pos) + pos += 1 + + if len(block) == 0 { + break + } + + // lines which start with '#' are comments + start := skipWhitespace(block, 0) + + // If line is all whitespace, just skip it + if start >= len(block) { + continue + } + + if block[start] == '#' { + continue + } + + // strip the newline if one is present + if block[len(block)-1] == '\n' { + block = block[:len(block)-1] + } + + pt, err := parsePoint(block[start:len(block)], defaultTime, precision) + if err != nil { + return nil, fmt.Errorf("unable to parse '%s': %v", string(block[start:len(block)]), err) + } + points = append(points, pt) + + if pos >= len(buf) { + break + } + + } + return points, nil + +} + +func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) { + // scan the first block which is measurement[,tag1=value1,tag2=value=2...] + pos, key, err := scanKey(buf, 0) + if err != nil { + return nil, err + } + + // measurement name is required + if len(key) == 0 { + return nil, fmt.Errorf("missing measurement") + } + + // scan the second block is which is field1=value1[,field2=value2,...] + pos, fields, err := scanFields(buf, pos) + if err != nil { + return nil, err + } + + // at least one field is required + if len(fields) == 0 { + return nil, fmt.Errorf("missing fields") + } + + // scan the last block which is an optional integer timestamp + pos, ts, err := scanTime(buf, pos) + + if err != nil { + return nil, err + } + + pt := &point{ + key: key, + fields: fields, + ts: ts, + } + + if len(ts) == 0 { + pt.time = defaultTime + pt.SetPrecision(precision) + } else { + ts, err := strconv.ParseInt(string(ts), 10, 64) + if err != nil { + return nil, err + } + pt.time = time.Unix(0, ts*pt.GetPrecisionMultiplier(precision)) + } + return pt, nil +} + +// scanKey scans buf starting at i for the measurement and tag portion of the point. +// It returns the ending position and the byte slice of key within buf. If there +// are tags, they will be sorted if they are not already. 
+func scanKey(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+
+	i = start
+
+	// Tracks whether the tags are sorted; assume they are
+	sorted := true
+
+	// indices holds the indexes within buf of the start of each tag. For example,
+	// a buf of 'cpu,host=a,region=b,zone=c' would have an indices slice of [4,11,20],
+	// which indicates that the first tag starts at buf[4], the second at buf[11], and
+	// the last at buf[20]
+	indices := make([]int, 100)
+
+	// tracks how many commas we've seen so we know how many entries in indices
+	// are in use. Since indices is an arbitrarily large slice,
+	// we need to know how many of its values are in use.
+	commas := 0
+
+	// tracks whether we've seen an '='
+	equals := 0
+
+	// loop over each byte in buf
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			if equals == 0 && commas > 0 {
+				return i, buf[start:i], fmt.Errorf("missing tag value")
+			}
+
+			break
+		}
+
+		// equals is special in the tags section. It must be escaped if part of a tag name or value.
+		// It does not need to be escaped if part of the measurement.
+		if buf[i] == '=' && commas > 0 {
+			if i-1 < 0 || i-2 < 0 {
+				return i, buf[start:i], fmt.Errorf("missing tag name")
+			}
+
+			// Check for "cpu,=value" but allow "cpu,a\,=value"
+			if buf[i-1] == ',' && buf[i-2] != '\\' {
+				return i, buf[start:i], fmt.Errorf("missing tag name")
+			}
+
+			// Check for "cpu,\ =value"
+			if buf[i-1] == ' ' && buf[i-2] != '\\' {
+				return i, buf[start:i], fmt.Errorf("missing tag name")
+			}
+
+			i += 1
+			equals += 1
+
+			// Check for "cpu,a=1,b= value=1"
+			if i < len(buf) && buf[i] == ' ' {
+				return i, buf[start:i], fmt.Errorf("missing tag value")
+			}
+			continue
+		}
+
+		// escaped character
+		if buf[i] == '\\' {
+			i += 2
+			continue
+		}
+
+		// At a tag separator (comma), track its location
+		if buf[i] == ',' {
+			if equals == 0 && commas > 0 {
+				return i, buf[start:i], fmt.Errorf("missing tag value")
+			}
+			i += 1
+
+			// grow our indices slice if we have too many tags
+			if commas >= len(indices) {
+				newIndices := make([]int, cap(indices)*2)
+				copy(newIndices, indices)
+				indices = newIndices
+			}
+			indices[commas] = i
+			commas += 1
+
+			// Check for "cpu, value=1"
+			if i < len(buf) && buf[i] == ' ' {
+				return i, buf[start:i], fmt.Errorf("missing tag key")
+			}
+			continue
+		}
+
+		// reached end of the block? (next block would be fields)
+		if buf[i] == ' ' {
+			// check for "cpu,tag value=1"
+			if equals == 0 && commas > 0 {
+				return i, buf[start:i], fmt.Errorf("missing tag value")
+			}
+			if equals > 0 && commas-1 != equals-1 {
+				return i, buf[start:i], fmt.Errorf("missing tag value")
+			}
+
+			// grow our indices slice if we have too many tags
+			if commas >= len(indices) {
+				newIndices := make([]int, cap(indices)*2)
+				copy(newIndices, indices)
+				indices = newIndices
+			}
+
+			indices[commas] = i + 1
+			break
+		}
+
+		i += 1
+	}
+
+	// check that all tag sections had a key and value (e.g. prevent "a=1,b").
+	// We're using commas-1 because there should always be a comma after the measurement
+	if equals > 0 && commas-1 != equals-1 {
+		return i, buf[start:i], fmt.Errorf("invalid tag format")
+	}
+
+	// This check makes sure we actually received fields from the user. #3379
+	// This will catch invalid syntax such as: `cpu,host=serverA,region=us-west`
+	if i >= len(buf) {
+		return i, buf[start:i], fmt.Errorf("missing fields")
+	}
+
+	// Now that we know where the key region is within buf, and the locations of the tags, we
+	// need to determine if duplicate tags exist and if the tags are sorted.
This iterates
+	// over 1/2 of the list, comparing the tags at each end with each other and walking
+	// towards the center from both sides.
+	for j := 0; j < commas/2; j++ {
+		// get the left and right tags
+		_, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')
+		_, right := scanTo(buf[indices[commas-j-1]:indices[commas-j]-1], 0, '=')
+
+		// If the tags are equal, then there are duplicate tags, and we should abort
+		if bytes.Equal(left, right) {
+			return i, buf[start:i], fmt.Errorf("duplicate tags")
+		}
+
+		// If left is greater than right, the tags are not sorted. We must continue
+		// since there could still be duplicate tags.
+		if bytes.Compare(left, right) > 0 {
+			sorted = false
+		}
+	}
+
+	// If the tags are not sorted, then sort them. This sort is inline and
+	// uses the tag indices we created earlier. The actual buffer is not sorted; the
+	// indices merely reference the buffer for value comparisons. After the indices are
+	// sorted, the key is reconstructed from them.
+	if !sorted && commas > 0 {
+		// Get the measurement name for later
+		measurement := buf[start : indices[0]-1]
+
+		// Sort the indices
+		indices := indices[:commas]
+		insertionSort(0, commas, buf, indices)
+
+		// Create a new key using the measurement and sorted indices
+		b := make([]byte, len(buf[start:i]))
+		pos := copy(b, measurement)
+		for _, i := range indices {
+			b[pos] = ','
+			pos += 1
+			_, v := scanToSpaceOr(buf, i, ',')
+			pos += copy(b[pos:], v)
+		}
+
+		return i, b, nil
+	}
+
+	return i, buf[start:i], nil
+}
+
+func insertionSort(l, r int, buf []byte, indices []int) {
+	for i := l + 1; i < r; i++ {
+		for j := i; j > l && less(buf, indices, j, j-1); j-- {
+			indices[j], indices[j-1] = indices[j-1], indices[j]
+		}
+	}
+}
+
+func less(buf []byte, indices []int, i, j int) bool {
+	// This grabs the tag names for i & j; it ignores the values
+	_, a := scanTo(buf, indices[i], '=')
+	_, b := scanTo(buf, indices[j], '=')
+	return bytes.Compare(a, b) < 0
+}
+
+func isFieldEscapeChar(b byte) bool {
+	for c := range escape.Codes {
+		if c == b {
+			return true
+		}
+	}
+	return false
+}
+
+// scanFields scans buf, starting at i, for the fields section of a point. It returns
+// the ending position and the byte slice of the fields within buf
+func scanFields(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+	i = start
+	quoted := false
+
+	// tracks how many '=' we've seen
+	equals := 0
+
+	// tracks how many commas we've seen
+	commas := 0
+
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// escaped characters?
+		if buf[i] == '\\' && i+1 < len(buf) {
+
+			// Is this an escape char within a string field? Only " and \ are allowed.
+			if quoted && (buf[i+1] == '"' || buf[i+1] == '\\') {
+				i += 2
+				continue
+				// Non-string field escaped chars
+			} else if !quoted && isFieldEscapeChar(buf[i+1]) {
+				i += 2
+				continue
+			}
+		}
+
+		// If the value is quoted, scan until we get to the end quote
+		if buf[i] == '"' {
+			quoted = !quoted
+			i += 1
+			continue
+		}
+
+		// If we see an =, ensure that there is at least one char before and after it
+		if buf[i] == '=' && !quoted {
+			equals += 1
+
+			// check for "... =123" but allow "a\ =123"
+			if buf[i-1] == ' ' && buf[i-2] != '\\' {
+				return i, buf[start:i], fmt.Errorf("missing field name")
+			}
+
+			// check for "...a=123,=456" but allow "a=123,a\,=456"
+			if buf[i-1] == ',' && buf[i-2] != '\\' {
+				return i, buf[start:i], fmt.Errorf("missing field name")
+			}
+
+			// check for "... 
value=" + if i+1 >= len(buf) { + return i, buf[start:i], fmt.Errorf("missing field value") + } + + // check for "... value=,value2=..." + if buf[i+1] == ',' || buf[i+1] == ' ' { + return i, buf[start:i], fmt.Errorf("missing field value") + } + + if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' { + var err error + i, err = scanNumber(buf, i+1) + if err != nil { + return i, buf[start:i], err + } + continue + } + // If next byte is not a double-quote, the value must be a boolean + if buf[i+1] != '"' { + var err error + i, _, err = scanBoolean(buf, i+1) + if err != nil { + return i, buf[start:i], err + } + continue + } + } + + if buf[i] == ',' && !quoted { + commas += 1 + } + + // reached end of block? + if buf[i] == ' ' && !quoted { + break + } + i += 1 + } + + if quoted { + return i, buf[start:i], fmt.Errorf("unbalanced quotes") + } + + // check that all field sections had key and values (e.g. prevent "a=1,b" + if equals == 0 || commas != equals-1 { + return i, buf[start:i], fmt.Errorf("invalid field format") + } + + return i, buf[start:i], nil +} + +// scanTime scans buf, starting at i for the time section of a point. It returns +// the ending position and the byte slice of the fields within buf and error if the +// timestamp is not in the correct numeric format +func scanTime(buf []byte, i int) (int, []byte, error) { + start := skipWhitespace(buf, i) + i = start + for { + // reached the end of buf? + if i >= len(buf) { + break + } + + // Timestamps should integers, make sure they are so we don't need to actually + // parse the timestamp until needed + if buf[i] < '0' || buf[i] > '9' { + return i, buf[start:i], fmt.Errorf("bad timestamp") + } + + // reached end of block? + if buf[i] == '\n' { + break + } + i += 1 + } + return i, buf[start:i], nil +} + +func isNumeric(b byte) bool { + return (b >= '0' && b <= '9') || b == '.' +} + +// scanNumber returns the end position within buf, start at i after +// scanning over buf for an integer, or float. It returns an +// error if a invalid number is scanned. +func scanNumber(buf []byte, i int) (int, error) { + start := i + var isInt bool + + // Is negative number? + if i < len(buf) && buf[i] == '-' { + i += 1 + // There must be more characters now, as just '-' is illegal. + if i == len(buf) { + return i, fmt.Errorf("invalid number") + } + } + + // how many decimal points we've see + decimals := 0 + + // indicates the number is float in scientific notation + scientific := false + + for { + if i >= len(buf) { + break + } + + if buf[i] == ',' || buf[i] == ' ' { + break + } + + if buf[i] == 'i' && i > start && !isInt { + isInt = true + i += 1 + continue + } + + if buf[i] == '.' { + decimals += 1 + } + + // Can't have more than 1 decimal (e.g. 
1.1.1 should fail)
+		if decimals > 1 {
+			return i, fmt.Errorf("invalid number")
+		}
+
+		// `e` is valid for floats but not as the first char
+		if i > start && (buf[i] == 'e') {
+			scientific = true
+			i += 1
+			continue
+		}
+
+		// + and - are only valid at this point if they follow an e (scientific notation)
+		if (buf[i] == '+' || buf[i] == '-') && buf[i-1] == 'e' {
+			i += 1
+			continue
+		}
+
+		// NaN is a valid float
+		if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
+			if (buf[i+1] == 'a' || buf[i+1] == 'A') && (buf[i+2] == 'N' || buf[i+2] == 'n') {
+				i += 3
+				continue
+			}
+			return i, fmt.Errorf("invalid number")
+		}
+		if !isNumeric(buf[i]) {
+			return i, fmt.Errorf("invalid number")
+		}
+		i += 1
+	}
+	if isInt && (decimals > 0 || scientific) {
+		return i, fmt.Errorf("invalid number")
+	}
+
+	// It's more common that numbers will be within the min/max range for their type, but we need
+	// to prevent out-of-range numbers from being parsed successfully. This uses some simple
+	// heuristics to decide if we should parse the number to the actual type. It does not do it all
+	// the time because it incurs extra allocations, and we end up converting the type again when
+	// writing points to disk.
+	if isInt {
+		// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
+		if buf[i-1] != 'i' {
+			return i, fmt.Errorf("invalid number")
+		}
+		// Parse the int to check bounds if the number of digits could be larger than the max range
+		// We subtract 1 from the index to remove the `i` from our tests
+		if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
+			if _, err := strconv.ParseInt(string(buf[start:i-1]), 10, 64); err != nil {
+				return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
+			}
+		}
+	} else {
+		// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
+		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
+			if _, err := strconv.ParseFloat(string(buf[start:i]), 64); err != nil {
+				return i, fmt.Errorf("invalid float")
+			}
+		}
+	}
+
+	return i, nil
+}
+
+// scanBoolean returns the end position within buf, starting at i, after
+// scanning over buf for a boolean. Valid values for a boolean are
+// t, T, true, True, TRUE, f, F, false, False, FALSE. It returns an error if an
+// invalid boolean is scanned.
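+//
+// For example, scanning `t` or `TRUE` at i succeeds, while scanning
+// `truth` returns an "invalid boolean" error.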
+func scanBoolean(buf []byte, i int) (int, []byte, error) {
+	start := i
+
+	if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	i += 1
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ',' || buf[i] == ' ' {
+			break
+		}
+		i += 1
+	}
+
+	// Single char bool (t, T, f, F) is ok
+	if i-start == 1 {
+		return i, buf[start:i], nil
+	}
+
+	// length must be 4 for true or TRUE
+	if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	// length must be 5 for false or FALSE
+	if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	// Otherwise
+	valid := false
+	switch buf[start] {
+	case 't':
+		valid = bytes.Equal(buf[start:i], []byte("true"))
+	case 'f':
+		valid = bytes.Equal(buf[start:i], []byte("false"))
+	case 'T':
+		valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
+	case 'F':
+		valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
+	}
+
+	if !valid {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	return i, buf[start:i], nil
+
+}
+
+// skipWhitespace returns the end position within buf, starting at i, after
+// scanning over spaces and tabs
+func skipWhitespace(buf []byte, i int) int {
+	for {
+		if i >= len(buf) {
+			return i
+		}
+
+		if buf[i] == ' ' || buf[i] == '\t' {
+			i += 1
+			continue
+		}
+		break
+	}
+	return i
+}
+
+// scanLine returns the end position in buf and the next line found within
+// buf.
+func scanLine(buf []byte, i int) (int, []byte) {
+	start := i
+	quoted := false
+	fields := false
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ' ' {
+			fields = true
+		}
+
+		// If we see a double quote, make sure it is not escaped
+		if fields && buf[i] == '"' && (i-1 > 0 && buf[i-1] != '\\') {
+			i += 1
+			quoted = !quoted
+			continue
+		}
+
+		if buf[i] == '\n' && !quoted {
+			break
+		}
+
+		i += 1
+	}
+
+	return i, buf[start:i]
+}
+
+// scanTo returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with the stop byte. If there are leading
+// spaces or escaped chars, they are skipped.
+func scanTo(buf []byte, i int, stop byte) (int, []byte) {
+	start := i
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == '\\' {
+			i += 2
+			continue
+		}
+
+		// reached end of block?
+		if buf[i] == stop {
+			break
+		}
+		i += 1
+	}
+
+	return i, buf[start:i]
+}
+
+// scanToSpaceOr returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with the stop byte or a space. If there
+// are leading spaces, they are skipped.
+func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {
+	start := i
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == '\\' {
+			i += 2
+			continue
+		}
+		// reached end of block?
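+		// (unlike scanTo above, a space also terminates the block here)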
+		if buf[i] == stop || buf[i] == ' ' {
+			break
+		}
+		i += 1
+	}
+
+	return i, buf[start:i]
+}
+
+func scanTagValue(buf []byte, i int) (int, []byte) {
+	start := i
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == '\\' {
+			i += 2
+			continue
+		}
+
+		if buf[i] == ',' {
+			break
+		}
+		i += 1
+	}
+	return i, buf[start:i]
+}
+
+func scanFieldValue(buf []byte, i int) (int, []byte) {
+	start := i
+	quoted := false
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		// Only escape char for a field value is a double-quote
+		if buf[i] == '\\' && i+1 < len(buf) && buf[i+1] == '"' {
+			i += 2
+			continue
+		}
+
+		// Quoted value? (e.g. string)
+		if buf[i] == '"' {
+			i += 1
+			quoted = !quoted
+			continue
+		}
+
+		if buf[i] == ',' && !quoted {
+			break
+		}
+		i += 1
+	}
+	return i, buf[start:i]
+}
+
+func escapeMeasurement(in []byte) []byte {
+	for b, esc := range measurementEscapeCodes {
+		in = bytes.Replace(in, []byte{b}, esc, -1)
+	}
+	return in
+}
+
+func unescapeMeasurement(in []byte) []byte {
+	for b, esc := range measurementEscapeCodes {
+		in = bytes.Replace(in, esc, []byte{b}, -1)
+	}
+	return in
+}
+
+func escapeTag(in []byte) []byte {
+	for b, esc := range tagEscapeCodes {
+		in = bytes.Replace(in, []byte{b}, esc, -1)
+	}
+	return in
+}
+
+func unescapeTag(in []byte) []byte {
+	for b, esc := range tagEscapeCodes {
+		in = bytes.Replace(in, esc, []byte{b}, -1)
+	}
+	return in
+}
+
+// escapeStringField returns a copy of in with any double quotes or
+// backslashes escaped
+func escapeStringField(in string) string {
+	var out []byte
+	i := 0
+	for {
+		if i >= len(in) {
+			break
+		}
+		// escape backslashes
+		if in[i] == '\\' {
+			out = append(out, '\\')
+			out = append(out, '\\')
+			i += 1
+			continue
+		}
+		// escape double-quotes
+		if in[i] == '"' {
+			out = append(out, '\\')
+			out = append(out, '"')
+			i += 1
+			continue
+		}
+		out = append(out, in[i])
+		i += 1
+
+	}
+	return string(out)
+}
+
+// unescapeStringField returns a copy of in with any escaped double-quotes
+// or backslashes unescaped
+func unescapeStringField(in string) string {
+	var out []byte
+	i := 0
+	for {
+		if i >= len(in) {
+			break
+		}
+		// unescape backslashes
+		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' {
+			out = append(out, '\\')
+			i += 2
+			continue
+		}
+		// unescape double-quotes
+		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' {
+			out = append(out, '"')
+			i += 2
+			continue
+		}
+		out = append(out, in[i])
+		i += 1
+
+	}
+	return string(out)
+}
+
+// NewPoint returns a new point with the given measurement name, tags, fields and timestamp
+func NewPoint(name string, tags Tags, fields Fields, time time.Time) Point {
+	return &point{
+		key:    MakeKey([]byte(name), tags),
+		time:   time,
+		fields: fields.MarshalBinary(),
+	}
+}
+
+func (p *point) Data() []byte {
+	return p.data
+}
+
+func (p *point) SetData(b []byte) {
+	p.data = b
+}
+
+func (p *point) Key() []byte {
+	return p.key
+}
+
+func (p *point) name() []byte {
+	_, name := scanTo(p.key, 0, ',')
+	return name
+}
+
+// Name returns the measurement name for the point
+func (p *point) Name() string {
+	if p.cachedName != "" {
+		return p.cachedName
+	}
+	p.cachedName = string(escape.Unescape(p.name()))
+	return p.cachedName
+}
+
+// SetName updates the measurement name for the point
+func (p *point) SetName(name string) {
+	p.cachedName = ""
+	p.key = MakeKey([]byte(name), p.Tags())
+}
+
+// Time returns the timestamp for the point
+func (p *point) Time() time.Time {
+	return p.time
+}
+
+// SetTime updates the timestamp for the point
+func (p *point) SetTime(t 
time.Time) {
+	p.time = t
+}
+
+// Tags returns the tag set for the point
+func (p *point) Tags() Tags {
+	tags := map[string]string{}
+
+	if len(p.key) != 0 {
+		pos, name := scanTo(p.key, 0, ',')
+
+		// it's an empty key, so there are no tags
+		if len(name) == 0 {
+			return tags
+		}
+
+		i := pos + 1
+		var key, value []byte
+		for {
+			if i >= len(p.key) {
+				break
+			}
+			i, key = scanTo(p.key, i, '=')
+			i, value = scanTagValue(p.key, i+1)
+
+			tags[string(unescapeTag(key))] = string(unescapeTag(value))
+
+			i += 1
+		}
+	}
+	return tags
+}
+
+func MakeKey(name []byte, tags Tags) []byte {
+	// unescape the name and then re-escape it to avoid double escaping.
+	// The key should always be stored in escaped form.
+	return append(escapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...)
+}
+
+// SetTags replaces the tags for the point
+func (p *point) SetTags(tags Tags) {
+	p.key = MakeKey([]byte(p.Name()), tags)
+}
+
+// AddTag adds or replaces a tag value for a point
+func (p *point) AddTag(key, value string) {
+	tags := p.Tags()
+	tags[key] = value
+	p.key = MakeKey([]byte(p.Name()), tags)
+}
+
+// Fields returns the fields for the point
+func (p *point) Fields() Fields {
+	if p.cachedFields != nil {
+		return p.cachedFields
+	}
+	p.cachedFields = p.unmarshalBinary()
+	return p.cachedFields
+}
+
+// AddField adds or replaces a field value for a point
+func (p *point) AddField(name string, value interface{}) {
+	fields := p.Fields()
+	fields[name] = value
+	p.fields = fields.MarshalBinary()
+	p.cachedFields = nil
+}
+
+// SetPrecision will round a time to the specified precision
+func (p *point) SetPrecision(precision string) {
+	switch precision {
+	case "n": // nanoseconds are the default; nothing to truncate
+	case "u":
+		p.SetTime(p.Time().Truncate(time.Microsecond))
+	case "ms":
+		p.SetTime(p.Time().Truncate(time.Millisecond))
+	case "s":
+		p.SetTime(p.Time().Truncate(time.Second))
+	case "m":
+		p.SetTime(p.Time().Truncate(time.Minute))
+	case "h":
+		p.SetTime(p.Time().Truncate(time.Hour))
+	}
+}
+
+// GetPrecisionMultiplier will return a multiplier for the precision specified
+func (p *point) GetPrecisionMultiplier(precision string) int64 {
+	d := time.Nanosecond
+	switch precision {
+	case "u":
+		d = time.Microsecond
+	case "ms":
+		d = time.Millisecond
+	case "s":
+		d = time.Second
+	case "m":
+		d = time.Minute
+	case "h":
+		d = time.Hour
+	}
+	return int64(d)
+}
+
+func (p *point) String() string {
+	if p.Time().IsZero() {
+		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+	}
+	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), p.UnixNano())
+}
+
+func (p *point) PrecisionString(precision string) string {
+	if p.Time().IsZero() {
+		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+	}
+	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
+		p.UnixNano()/p.GetPrecisionMultiplier(precision))
+}
+
+func (p *point) unmarshalBinary() Fields {
+	return newFieldsFromBinary(p.fields)
+}
+
+func (p *point) HashID() uint64 {
+	h := fnv.New64a()
+	h.Write(p.key)
+	sum := h.Sum64()
+	return sum
+}
+
+func (p *point) UnixNano() int64 {
+	return p.Time().UnixNano()
+}
+
+type Tags map[string]string
+
+func (t Tags) HashKey() []byte {
+	// Empty maps marshal to empty bytes.
+	if len(t) == 0 {
+		return nil
+	}
+
+	escaped := Tags{}
+	for k, v := range t {
+		ek := escapeTag([]byte(k))
+		ev := escapeTag([]byte(v))
+		escaped[string(ek)] = string(ev)
+	}
+
+	// Extract keys and determine final size.
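+	// (3 bytes per tag over-estimates the ',' and '=' separators by one;
+	// the marshaled slice is trimmed to idx below)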
+	sz := len(escaped) + (len(escaped) * 2) // separators
+	keys := make([]string, len(escaped)+1)
+	i := 0
+	for k, v := range escaped {
+		keys[i] = k
+		i += 1
+		sz += len(k) + len(v)
+	}
+	keys = keys[:i]
+	sort.Strings(keys)
+	// Generate marshaled bytes.
+	b := make([]byte, sz)
+	buf := b
+	idx := 0
+	for _, k := range keys {
+		buf[idx] = ','
+		idx += 1
+		copy(buf[idx:idx+len(k)], k)
+		idx += len(k)
+		buf[idx] = '='
+		idx += 1
+		v := escaped[k]
+		copy(buf[idx:idx+len(v)], v)
+		idx += len(v)
+	}
+	return b[:idx]
+}
+
+type Fields map[string]interface{}
+
+func parseNumber(val []byte) (interface{}, error) {
+	if val[len(val)-1] == 'i' {
+		val = val[:len(val)-1]
+		return strconv.ParseInt(string(val), 10, 64)
+	}
+	for i := 0; i < len(val); i++ {
+		// If there is a decimal or an N (NaN), I (Inf), parse as float
+		if val[i] == '.' || val[i] == 'N' || val[i] == 'n' || val[i] == 'I' || val[i] == 'i' || val[i] == 'e' {
+			return strconv.ParseFloat(string(val), 64)
+		}
+		// Any other non-numeric character (a leading sign is allowed) means the
+		// value is not a number; return it as a string
+		if (val[i] < '0' || val[i] > '9') && val[i] != '-' && val[i] != '+' {
+			return string(val), nil
+		}
+	}
+	return strconv.ParseFloat(string(val), 64)
+}
+
+func newFieldsFromBinary(buf []byte) Fields {
+	fields := Fields{}
+	var (
+		i              int
+		name, valueBuf []byte
+		value          interface{}
+		err            error
+	)
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		i, name = scanTo(buf, i, '=')
+		if len(name) == 0 {
+			continue
+		}
+		name = escape.Unescape(name)
+
+		i, valueBuf = scanFieldValue(buf, i+1)
+		if len(valueBuf) == 0 {
+			fields[string(name)] = nil
+			continue
+		}
+
+		// If the first char is a double-quote, then unmarshal as string
+		if valueBuf[0] == '"' {
+			value = unescapeStringField(string(valueBuf[1 : len(valueBuf)-1]))
+			// Check for numeric characters and special NaN or Inf
+		} else if (valueBuf[0] >= '0' && valueBuf[0] <= '9') || valueBuf[0] == '-' || valueBuf[0] == '+' || valueBuf[0] == '.' ||
+			valueBuf[0] == 'N' || valueBuf[0] == 'n' || // NaN
+			valueBuf[0] == 'I' || valueBuf[0] == 'i' { // Inf
+
+			value, err = parseNumber(valueBuf)
+			if err != nil {
+				panic(fmt.Sprintf("unable to parse number value '%v': %v", string(valueBuf), err))
+			}
+
+			// Otherwise parse it as bool
+		} else {
+			value, err = strconv.ParseBool(string(valueBuf))
+			if err != nil {
+				panic(fmt.Sprintf("unable to parse bool value '%v': %v\n", string(valueBuf), err))
+			}
+		}
+		fields[string(name)] = value
+		i += 1
+	}
+	return fields
+}
+
+// MarshalBinary encodes all the fields to their proper type and returns the binary
+// representation
+// NOTE: uint64 is specifically not supported due to potential overflow when we decode
+// again later to an int64
+func (p Fields) MarshalBinary() []byte {
+	b := []byte{}
+	keys := make([]string, len(p))
+	i := 0
+	for k := range p {
+		keys[i] = k
+		i += 1
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		v := p[k]
+		b = append(b, []byte(escape.String(k))...)
+		b = append(b, '=')
+		switch t := v.(type) {
+		case int:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case int8:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case int16:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case int32:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case int64:
+			b = append(b, []byte(strconv.FormatInt(t, 10))...)
+			b = append(b, 'i')
+		case uint:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case uint8:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+ b = append(b, 'i') + case uint16: + b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) + b = append(b, 'i') + case uint32: + b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) + b = append(b, 'i') + case float32: + val := []byte(strconv.FormatFloat(float64(t), 'f', -1, 32)) + b = append(b, val...) + case float64: + val := []byte(strconv.FormatFloat(t, 'f', -1, 64)) + b = append(b, val...) + case bool: + b = append(b, []byte(strconv.FormatBool(t))...) + case []byte: + b = append(b, t...) + case string: + b = append(b, '"') + b = append(b, []byte(escapeStringField(t))...) + b = append(b, '"') + case nil: + // skip + default: + // Can't determine the type, so convert to string + b = append(b, '"') + b = append(b, []byte(escapeStringField(fmt.Sprintf("%v", v)))...) + b = append(b, '"') + + } + b = append(b, ',') + } + if len(b) > 0 { + return b[0 : len(b)-1] + } + return b +} + +type indexedSlice struct { + indices []int + b []byte +} + +func (s *indexedSlice) Less(i, j int) bool { + _, a := scanTo(s.b, s.indices[i], '=') + _, b := scanTo(s.b, s.indices[j], '=') + return bytes.Compare(a, b) < 0 +} + +func (s *indexedSlice) Swap(i, j int) { + s.indices[i], s.indices[j] = s.indices[j], s.indices[i] +} + +func (s *indexedSlice) Len() int { + return len(s.indices) +} diff --git a/_third_party/github.com/influxdb/influxdb/models/points_test.go b/_third_party/github.com/influxdb/influxdb/models/points_test.go new file mode 100644 index 0000000000..6b51c01615 --- /dev/null +++ b/_third_party/github.com/influxdb/influxdb/models/points_test.go @@ -0,0 +1,1421 @@ +package models_test + +import ( + "bytes" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "testing" + "time" + + "bosun.org/_third_party/github.com/influxdb/influxdb/models" +) + +var ( + tags = models.Tags{"foo": "bar", "apple": "orange", "host": "serverA", "region": "uswest"} + maxFloat64 = strconv.FormatFloat(math.MaxFloat64, 'f', 1, 64) + minFloat64 = strconv.FormatFloat(-math.MaxFloat64, 'f', 1, 64) +) + +func TestMarshal(t *testing.T) { + got := tags.HashKey() + if exp := ",apple=orange,foo=bar,host=serverA,region=uswest"; string(got) != exp { + t.Log("got: ", string(got)) + t.Log("exp: ", exp) + t.Error("invalid match") + } +} + +func BenchmarkMarshal(b *testing.B) { + for i := 0; i < b.N; i++ { + tags.HashKey() + } +} + +func BenchmarkParsePointNoTags(b *testing.B) { + line := `cpu value=1i 1000000000` + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointsTagsSorted2(b *testing.B) { + line := `cpu,host=serverA,region=us-west value=1i 1000000000` + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointsTagsSorted5(b *testing.B) { + line := `cpu,env=prod,host=serverA,region=us-west,target=servers,zone=1c value=1i 1000000000` + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointsTagsSorted10(b *testing.B) { + line := `cpu,env=prod,host=serverA,region=us-west,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5,target=servers,zone=1c value=1i 1000000000` + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointsTagsUnSorted2(b *testing.B) { + line := `cpu,region=us-west,host=serverA value=1i 1000000000` + for i := 0; i < b.N; i++ { + pt, _ := models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + pt[0].Key() 
+ } +} + +func BenchmarkParsePointsTagsUnSorted5(b *testing.B) { + line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c value=1i 1000000000` + for i := 0; i < b.N; i++ { + pt, _ := models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + pt[0].Key() + } +} + +func BenchmarkParsePointsTagsUnSorted10(b *testing.B) { + line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5 value=1i 1000000000` + for i := 0; i < b.N; i++ { + pt, _ := models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + pt[0].Key() + } +} + +func test(t *testing.T, line string, point models.Point) { + pts, err := models.ParsePointsWithPrecision([]byte(line), time.Unix(0, 0), "n") + if err != nil { + t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, line, err) + } + + if exp := 1; len(pts) != exp { + t.Fatalf(`ParsePoints("%s") len mismatch. got %d, exp %d`, line, len(pts), exp) + } + + if exp := point.Key(); !bytes.Equal(pts[0].Key(), exp) { + t.Errorf("ParsePoints(\"%s\") key mismatch.\ngot %v\nexp %v", line, string(pts[0].Key()), string(exp)) + } + + if exp := len(point.Tags()); len(pts[0].Tags()) != exp { + t.Errorf(`ParsePoints("%s") tags mismatch. got %v, exp %v`, line, pts[0].Tags(), exp) + } + + for tag, value := range point.Tags() { + if pts[0].Tags()[tag] != value { + t.Errorf(`ParsePoints("%s") tags mismatch. got %v, exp %v`, line, pts[0].Tags()[tag], value) + } + } + + for name, value := range point.Fields() { + val := pts[0].Fields()[name] + expfval, ok := val.(float64) + + if ok && math.IsNaN(expfval) { + gotfval, ok := value.(float64) + if ok && !math.IsNaN(gotfval) { + t.Errorf(`ParsePoints("%s") field '%s' mismatch. exp NaN`, line, name) + } + } else if !reflect.DeepEqual(pts[0].Fields()[name], value) { + t.Errorf(`ParsePoints("%s") field '%s' mismatch. got %v, exp %v`, line, name, pts[0].Fields()[name], value) + } + } + + if !pts[0].Time().Equal(point.Time()) { + t.Errorf(`ParsePoints("%s") time mismatch. got %v, exp %v`, line, pts[0].Time(), point.Time()) + } + + if !strings.HasPrefix(pts[0].String(), line) { + t.Errorf("ParsePoints string mismatch.\ngot: %v\nexp: %v", pts[0].String(), line) + } +} + +func TestParsePointNoValue(t *testing.T) { + pts, err := models.ParsePointsString("") + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, "", err) + } + + if exp := 0; len(pts) != exp { + t.Errorf(`ParsePoints("%s") len mismatch. got %v, exp %v`, "", len(pts), exp) + } +} + +func TestParsePointWhitespaceValue(t *testing.T) { + pts, err := models.ParsePointsString(" ") + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, "", err) + } + + if exp := 0; len(pts) != exp { + t.Errorf(`ParsePoints("%s") len mismatch. got %v, exp %v`, "", len(pts), exp) + } +} + +func TestParsePointSingleEquals(t *testing.T) { + pts, err := models.ParsePointsString("=") + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. expected error`, "=") + } + + if exp := 0; len(pts) != exp { + t.Errorf(`ParsePoints("%s") len mismatch. got %v, exp %v`, "", len(pts), exp) + } +} + +func TestParsePointNoFields(t *testing.T) { + _, err := models.ParsePointsString("cpu_load_short,host=server01,region=us-west") + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, "cpu_load_short,host=server01,region=us-west") + } + + _, err = models.ParsePointsString("cpu") + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. 
got nil, exp error`, "cpu") + } + + _, err = models.ParsePointsString("cpu,") + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, "cpu,") + } + + _, err = models.ParsePointsString("cpu, value=1") + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, "cpu, value=1") + } + + _, err = models.ParsePointsString("cpu,,, value=1") + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, "cpu,,, value=1") + } + +} + +func TestParsePointNoTimestamp(t *testing.T) { + test(t, "cpu value=1", models.NewPoint("cpu", nil, nil, time.Unix(0, 0))) +} + +func TestParsePointMissingQuote(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA value="test`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, "cpu") + } +} + +func TestParsePointMissingTagName(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,=us-east value=1i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,=us-east value=1i`) + } + + _, err = models.ParsePointsString(`cpu,host=serverAa\,,=us-east value=1i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverAa\,,=us-east value=1i`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA\,,=us-east value=1i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA\,,=us-east value=1i`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,\ =us-east value=1i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,\ =us-east value=1i`, err) + } +} + +func TestParsePointMissingTagValue(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host value=1i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host value=1i`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region value=1i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region value=1i`) + } + _, err = models.ParsePointsString(`cpu,host=serverA,region= value=1i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region= value=1i`) + } +} + +func TestParsePointMissingFieldName(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west =`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west =`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west =123i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west =123i`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west a\ =123i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west a\ =123i`) + } + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=123i,=456i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=123i,=456i`) + } + +} + +func TestParsePointMissingFieldValue(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. 
got nil, exp error`, `cpu,host=serverA,region=us-west value=`)
+	}
+
+	_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value= 1000000000i`)
+	if err == nil {
+		t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value= 1000000000i`)
+	}
+
+	_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=,value2=1i`)
+	if err == nil {
+		t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=,value2=1i`)
+	}
+
+	_, err = models.ParsePointsString(`cpu,host=server01,region=us-west 1434055562000000000i`)
+	if err == nil {
+		t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=server01,region=us-west 1434055562000000000i`)
+	}
+
+	_, err = models.ParsePointsString(`cpu,host=server01,region=us-west value=1i,b`)
+	if err == nil {
+		t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=server01,region=us-west value=1i,b`)
+	}
+}
+
+func TestParsePointBadNumber(t *testing.T) {
+	_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1a`)
+	if err == nil {
+		t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=1a`)
+	}
+	_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=1ii`)
+	if err == nil {
+		t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=1ii`)
+	}
+	_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0i`)
+	if err == nil {
+		t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=1.0i`)
+	}
+}
+
+func TestParsePointMaxInt64(t *testing.T) {
+	// out of range
+	_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9223372036854775808i`)
+	exp := `unable to parse 'cpu,host=serverA,region=us-west value=9223372036854775808i': unable to parse integer 9223372036854775808: strconv.ParseInt: parsing "9223372036854775808": value out of range`
+	if err == nil || (err != nil && err.Error() != exp) {
+		t.Fatalf("Error mismatch:\nexp: %s\ngot: %v", exp, err)
+	}
+
+	// max int
+	p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9223372036854775807i`)
+	if err != nil {
+		t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807i`, err)
+	}
+	if exp, got := int64(9223372036854775807), p[0].Fields()["value"].(int64); exp != got {
+		t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got)
+	}
+
+	// leading zeros
+	_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=0009223372036854775807i`)
+	if err != nil {
+		t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807i`, err)
+	}
+}
+
+func TestParsePointMinInt64(t *testing.T) {
+	// out of range
+	_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-9223372036854775809i`)
+	if err == nil {
+		t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-9223372036854775809i`)
+	}
+
+	// min int
+	_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=-9223372036854775808i`)
+	if err != nil {
+		t.Errorf(`ParsePoints("%s") mismatch. 
got %v, exp nil`, `cpu,host=serverA,region=us-west value=-9223372036854775808i`, err) + } + + // leading zeros + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=-0009223372036854775808i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-0009223372036854775808i`, err) + } +} + +func TestParsePointMaxFloat64(t *testing.T) { + // out of range + _, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "1"+string(maxFloat64))) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=...`) + } + + // max float + _, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, string(maxFloat64))) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807`, err) + } + + // leading zeros + _, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "0000"+string(maxFloat64))) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807`, err) + } +} + +func TestParsePointMinFloat64(t *testing.T) { + // out of range + _, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "-1"+string(minFloat64)[1:])) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=...`) + } + + // min float + _, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, string(minFloat64))) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=...`, err) + } + + // leading zeros + _, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "-0000000"+string(minFloat64)[1:])) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=...`, err) + } +} + +func TestParsePointNumberNonNumeric(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=.1a`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=.1a`) + } +} + +func TestParsePointNegativeWrongPlace(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=0.-1`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=0.-1`) + } +} + +func TestParsePointOnlyNegativeSign(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-`) + } +} + +func TestParsePointFloatMultipleDecimals(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.1.1`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=1.1.1`) + } +} + +func TestParsePointInteger(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. 
got %v, exp nil`, `cpu,host=serverA,region=us-west value=1i`, err) + } +} + +func TestParsePointNegativeInteger(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1i`, err) + } +} + +func TestParsePointNegativeFloat(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1.0`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0`, err) + } +} + +func TestParsePointFloatNoLeadingDigit(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=.1`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0`, err) + } +} + +func TestParsePointFloatScientific(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0e4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e4`, err) + } + + pts, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1e4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e4`, err) + } + + if pts[0].Fields()["value"] != 1e4 { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1e4`, err) + } + +} + +func TestParsePointFloatScientificDecimal(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0e-4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e-4`, err) + } +} + +func TestParsePointFloatNegativeScientific(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1.0e-4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0e-4`, err) + } +} + +func TestParsePointBooleanInvalid(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=a`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=a`) + } +} + +func TestParsePointScientificIntInvalid(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9ie10`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=9ie10`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=9e10i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. 
got nil, exp error`, `cpu,host=serverA,region=us-west value=9e10i`)
+	}
+
+}
+
+func TestParsePointUnescape(t *testing.T) {
+	test(t, `foo\,bar value=1i`,
+		models.NewPoint(
+			"foo,bar", // comma in the name
+			models.Tags{},
+			models.Fields{
+				"value": 1,
+			},
+			time.Unix(0, 0)))
+
+	// commas in measurement name
+	test(t, `cpu\,main,regions=east\,west value=1.0`,
+		models.NewPoint(
+			"cpu,main", // comma in the name
+			models.Tags{
+				"regions": "east,west",
+			},
+			models.Fields{
+				"value": 1.0,
+			},
+			time.Unix(0, 0)))
+
+	// spaces in measurement name
+	test(t, `cpu\ load,region=east value=1.0`,
+		models.NewPoint(
+			"cpu load", // space in the name
+			models.Tags{
+				"region": "east",
+			},
+			models.Fields{
+				"value": 1.0,
+			},
+			time.Unix(0, 0)))
+
+	// commas in tag names
+	test(t, `cpu,region\,zone=east value=1.0`,
+		models.NewPoint("cpu",
+			models.Tags{
+				"region,zone": "east", // comma in the tag name
+			},
+			models.Fields{
+				"value": 1.0,
+			},
+			time.Unix(0, 0)))
+
+	// spaces in tag names
+	test(t, `cpu,region\ zone=east value=1.0`,
+		models.NewPoint("cpu",
+			models.Tags{
+				"region zone": "east", // space in the tag name
+			},
+			models.Fields{
+				"value": 1.0,
+			},
+			time.Unix(0, 0)))
+
+	// commas in tag values
+	test(t, `cpu,regions=east\,west value=1.0`,
+		models.NewPoint("cpu",
+			models.Tags{
+				"regions": "east,west", // comma in the tag value
+			},
+			models.Fields{
+				"value": 1.0,
+			},
+			time.Unix(0, 0)))
+
+	// spaces in tag values
+	test(t, `cpu,regions=east\ west value=1.0`,
+		models.NewPoint("cpu",
+			models.Tags{
+				"regions": "east west", // space in the tag value
+			},
+			models.Fields{
+				"value": 1.0,
+			},
+			time.Unix(0, 0)))
+
+	// commas in field names
+	test(t, `cpu,regions=east value\,ms=1.0`,
+		models.NewPoint("cpu",
+			models.Tags{
+				"regions": "east",
+			},
+			models.Fields{
+				"value,ms": 1.0, // comma in the field name
+			},
+			time.Unix(0, 0)))
+
+	// spaces in field names
+	test(t, `cpu,regions=east value\ ms=1.0`,
+		models.NewPoint("cpu",
+			models.Tags{
+				"regions": "east",
+			},
+			models.Fields{
+				"value ms": 1.0, // space in the field name
+			},
+			time.Unix(0, 0)))
+
+	// commas in field values
+	test(t, `cpu,regions=east value="1,0"`,
+		models.NewPoint("cpu",
+			models.Tags{
+				"regions": "east",
+			},
+			models.Fields{
+				"value": "1,0", // comma in the field value
+			},
+			time.Unix(0, 0)))
+
+	// random character escaped
+	test(t, `cpu,regions=eas\t value=1.0`,
+		models.NewPoint(
+			"cpu",
+			models.Tags{
+				"regions": "eas\\t",
+			},
+			models.Fields{
+				"value": 1.0,
+			},
+			time.Unix(0, 0)))
+
+	// field name using escape char.
+	test(t, `cpu \a=1i`,
+		models.NewPoint(
+			"cpu",
+			models.Tags{},
+			models.Fields{
+				"\\a": 1, // Left as parsed since it's not a known escape sequence.
+			},
+			time.Unix(0, 0)))
+
+	// measurement, tag and tag value with equals
+	test(t, `cpu=load,equals\=foo=tag\=value value=1i`,
+		models.NewPoint(
+			"cpu=load", // Not escaped
+			models.Tags{
+				"equals=foo": "tag=value", // Tag and value unescaped
+			},
+			models.Fields{
+				"value": 1,
+			},
+			time.Unix(0, 0)))
+
+}
+
+func TestParsePointWithTags(t *testing.T) {
+	test(t,
+		"cpu,host=serverA,region=us-east value=1.0 1000000000",
+		models.NewPoint("cpu",
+			models.Tags{"host": "serverA", "region": "us-east"},
+			models.Fields{"value": 1.0}, time.Unix(1, 0)))
+}
+
+func TestParsePointWithDuplicateTags(t *testing.T) {
+	_, err := models.ParsePoints([]byte(`cpu,host=serverA,host=serverB value=1i 1000000000`))
+	if err == nil {
+		t.Fatalf(`ParsePoint() expected error. 
got nil`) + } +} + +func TestParsePointWithStringField(t *testing.T) { + test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo",str2="bar" 1000000000`, + models.NewPoint("cpu", + models.Tags{ + "host": "serverA", + "region": "us-east", + }, + models.Fields{ + "value": 1.0, + "str": "foo", + "str2": "bar", + }, + time.Unix(1, 0)), + ) + + test(t, `cpu,host=serverA,region=us-east str="foo \" bar" 1000000000`, + models.NewPoint("cpu", + models.Tags{ + "host": "serverA", + "region": "us-east", + }, + models.Fields{ + "str": `foo " bar`, + }, + time.Unix(1, 0)), + ) + +} + +func TestParsePointWithStringWithSpaces(t *testing.T) { + test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo bar" 1000000000`, + models.NewPoint( + "cpu", + models.Tags{ + "host": "serverA", + "region": "us-east", + }, + models.Fields{ + "value": 1.0, + "str": "foo bar", // spaces in string value + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointWithStringWithNewline(t *testing.T) { + test(t, "cpu,host=serverA,region=us-east value=1.0,str=\"foo\nbar\" 1000000000", + models.NewPoint( + "cpu", + models.Tags{ + "host": "serverA", + "region": "us-east", + }, + models.Fields{ + "value": 1.0, + "str": "foo\nbar", // newline in string value + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointWithStringWithCommas(t *testing.T) { + // escaped comma + test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo\,bar" 1000000000`, + models.NewPoint( + "cpu", + models.Tags{ + "host": "serverA", + "region": "us-east", + }, + models.Fields{ + "value": 1.0, + "str": `foo\,bar`, // commas in string value + }, + time.Unix(1, 0)), + ) + + // non-escaped comma + test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo,bar" 1000000000`, + models.NewPoint( + "cpu", + models.Tags{ + "host": "serverA", + "region": "us-east", + }, + models.Fields{ + "value": 1.0, + "str": "foo,bar", // commas in string value + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointQuotedMeasurement(t *testing.T) { + // non-escaped comma + test(t, `"cpu",host=serverA,region=us-east value=1.0 1000000000`, + models.NewPoint( + `"cpu"`, + models.Tags{ + "host": "serverA", + "region": "us-east", + }, + models.Fields{ + "value": 1.0, + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointQuotedTags(t *testing.T) { + test(t, `cpu,"host"="serverA",region=us-east value=1.0 1000000000`, + models.NewPoint( + "cpu", + models.Tags{ + `"host"`: `"serverA"`, + "region": "us-east", + }, + models.Fields{ + "value": 1.0, + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointsUnbalancedQuotedTags(t *testing.T) { + pts, err := models.ParsePointsString("baz,mytag=\"a x=1 1441103862125\nbaz,mytag=a z=1 1441103862126") + if err != nil { + t.Fatalf("ParsePoints failed: %v", err) + } + + if exp := 2; len(pts) != exp { + t.Fatalf("ParsePoints count mismatch. 
got %v, exp %v", len(pts), exp)
+	}
+
+	// Expected " in the tag value
+	exp := models.NewPoint("baz", models.Tags{"mytag": `"a`},
+		models.Fields{"x": float64(1)}, time.Unix(0, 1441103862125))
+
+	if pts[0].String() != exp.String() {
+		t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[0].String(), exp.String())
+	}
+
+	// Expected two points to ensure we did not overscan the line
+	exp = models.NewPoint("baz", models.Tags{"mytag": `a`},
+		models.Fields{"z": float64(1)}, time.Unix(0, 1441103862126))
+
+	if pts[1].String() != exp.String() {
+		t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[1].String(), exp.String())
+	}
+
+}
+
+func TestParsePointEscapedStringsAndCommas(t *testing.T) {
+	// non-escaped comma and quotes
+	test(t, `cpu,host=serverA,region=us-east value="{Hello\"{,}\" World}" 1000000000`,
+		models.NewPoint(
+			"cpu",
+			models.Tags{
+				"host":   "serverA",
+				"region": "us-east",
+			},
+			models.Fields{
+				"value": `{Hello"{,}" World}`,
+			},
+			time.Unix(1, 0)),
+	)
+
+	// escaped comma and quotes
+	test(t, `cpu,host=serverA,region=us-east value="{Hello\"{\,}\" World}" 1000000000`,
+		models.NewPoint(
+			"cpu",
+			models.Tags{
+				"host":   "serverA",
+				"region": "us-east",
+			},
+			models.Fields{
+				"value": `{Hello"{\,}" World}`,
+			},
+			time.Unix(1, 0)),
+	)
+}
+
+func TestParsePointWithStringWithEquals(t *testing.T) {
+	test(t, `cpu,host=serverA,region=us-east str="foo=bar",value=1.0 1000000000`,
+		models.NewPoint(
+			"cpu",
+			models.Tags{
+				"host":   "serverA",
+				"region": "us-east",
+			},
+			models.Fields{
+				"value": 1.0,
+				"str":   "foo=bar", // equals sign in string value
+			},
+			time.Unix(1, 0)),
+	)
+}
+
+func TestParsePointWithStringWithBackslash(t *testing.T) {
+	test(t, `cpu value="test\\\"" 1000000000`,
+		models.NewPoint(
+			"cpu",
+			models.Tags{},
+			models.Fields{
+				"value": `test\"`,
+			},
+			time.Unix(1, 0)),
+	)
+
+	test(t, `cpu value="test\\" 1000000000`,
+		models.NewPoint(
+			"cpu",
+			models.Tags{},
+			models.Fields{
+				"value": `test\`,
+			},
+			time.Unix(1, 0)),
+	)
+
+	test(t, `cpu value="test\\\"" 1000000000`,
+		models.NewPoint(
+			"cpu",
+			models.Tags{},
+			models.Fields{
+				"value": `test\"`,
+			},
+			time.Unix(1, 0)),
+	)
+
+	test(t, `cpu value="test\"" 1000000000`,
+		models.NewPoint(
+			"cpu",
+			models.Tags{},
+			models.Fields{
+				"value": `test"`,
+			},
+			time.Unix(1, 0)),
+	)
+}
+
+func TestParsePointWithBoolField(t *testing.T) {
+	test(t, `cpu,host=serverA,region=us-east true=true,t=t,T=T,TRUE=TRUE,True=True,false=false,f=f,F=F,FALSE=FALSE,False=False 1000000000`,
+		models.NewPoint(
+			"cpu",
+			models.Tags{
+				"host":   "serverA",
+				"region": "us-east",
+			},
+			models.Fields{
+				"t":     true,
+				"T":     true,
+				"true":  true,
+				"True":  true,
+				"TRUE":  true,
+				"f":     false,
+				"F":     false,
+				"false": false,
+				"False": false,
+				"FALSE": false,
+			},
+			time.Unix(1, 0)),
+	)
+}
+
+func TestParsePointUnicodeString(t *testing.T) {
+	test(t, `cpu,host=serverA,region=us-east value="wè" 1000000000`,
+		models.NewPoint(
+			"cpu",
+			models.Tags{
+				"host":   "serverA",
+				"region": "us-east",
+			},
+			models.Fields{
+				"value": "wè",
+			},
+			time.Unix(1, 0)),
+	)
+}
+
+func TestNewPointFloatWithoutDecimal(t *testing.T) {
+	test(t, `cpu value=1 1000000000`,
+		models.NewPoint(
+			"cpu",
+			models.Tags{},
+			models.Fields{
+				"value": 1.0,
+			},
+			time.Unix(1, 0)),
+	)
+}
+
+func TestNewPointNegativeFloat(t *testing.T) {
+	test(t, `cpu value=-0.64 1000000000`,
+		models.NewPoint(
+			"cpu",
+			models.Tags{},
+			models.Fields{
+				"value": -0.64,
+			},
+			time.Unix(1, 0)),
+	)
+}
+
+func TestNewPointFloatNoDecimal(t *testing.T) {
+	test(t, `cpu value=1. 
1000000000`, + models.NewPoint( + "cpu", + models.Tags{}, + models.Fields{ + "value": 1.0, + }, + time.Unix(1, 0)), + ) +} + +func TestNewPointFloatScientific(t *testing.T) { + test(t, `cpu value=6.632243e+06 1000000000`, + models.NewPoint( + "cpu", + models.Tags{}, + models.Fields{ + "value": float64(6632243), + }, + time.Unix(1, 0)), + ) +} + +func TestNewPointLargeInteger(t *testing.T) { + test(t, `cpu value=6632243i 1000000000`, + models.NewPoint( + "cpu", + models.Tags{}, + models.Fields{ + "value": 6632243, // if incorrectly encoded as a float, it would show up as 6.632243e+06 + }, + time.Unix(1, 0)), + ) +} + +func TestNewPointNaN(t *testing.T) { + test(t, `cpu value=NaN 1000000000`, + models.NewPoint( + "cpu", + models.Tags{}, + models.Fields{ + "value": math.NaN(), + }, + time.Unix(1, 0)), + ) + + test(t, `cpu value=nAn 1000000000`, + models.NewPoint( + "cpu", + models.Tags{}, + models.Fields{ + "value": math.NaN(), + }, + time.Unix(1, 0)), + ) + + test(t, `nan value=NaN`, + models.NewPoint( + "nan", + models.Tags{}, + models.Fields{ + "value": math.NaN(), + }, + time.Unix(0, 0)), + ) + +} + +func TestNewPointLargeNumberOfTags(t *testing.T) { + tags := "" + for i := 0; i < 255; i++ { + tags += fmt.Sprintf(",tag%d=value%d", i, i) + } + + pt, err := models.ParsePointsString(fmt.Sprintf("cpu%s value=1", tags)) + if err != nil { + t.Fatalf("ParsePoints() with max tags failed: %v", err) + } + + if len(pt[0].Tags()) != 255 { + t.Fatalf("ParsePoints() with max tags failed: %v", err) + } +} + +func TestParsePointIntsFloats(t *testing.T) { + pts, err := models.ParsePoints([]byte(`cpu,host=serverA,region=us-east int=10i,float=11.0,float2=12.1 1000000000`)) + if err != nil { + t.Fatalf(`ParsePoints() failed. got %s`, err) + } + + if exp := 1; len(pts) != exp { + t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp) + } + pt := pts[0] + + if _, ok := pt.Fields()["int"].(int64); !ok { + t.Errorf("ParsePoint() int field mismatch: got %T, exp %T", pt.Fields()["int"], int64(10)) + } + + if _, ok := pt.Fields()["float"].(float64); !ok { + t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", pt.Fields()["float64"], float64(11.0)) + } + + if _, ok := pt.Fields()["float2"].(float64); !ok { + t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", pt.Fields()["float64"], float64(12.1)) + } + +} + +func TestParsePointKeyUnsorted(t *testing.T) { + pts, err := models.ParsePoints([]byte("cpu,last=1,first=2 value=1i")) + if err != nil { + t.Fatalf(`ParsePoints() failed. got %s`, err) + } + + if exp := 1; len(pts) != exp { + t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp) + } + pt := pts[0] + + if exp := "cpu,first=2,last=1"; string(pt.Key()) != exp { + t.Errorf("ParsePoint key not sorted. got %v, exp %v", pt.Key(), exp) + } +} + +func TestParsePointToString(t *testing.T) { + line := `cpu,host=serverA,region=us-east bool=false,float=11,float2=12.123,int=10i,str="string val" 1000000000` + pts, err := models.ParsePoints([]byte(line)) + if err != nil { + t.Fatalf(`ParsePoints() failed. 
+func TestParsePointToString(t *testing.T) {
+	line := `cpu,host=serverA,region=us-east bool=false,float=11,float2=12.123,int=10i,str="string val" 1000000000`
+	pts, err := models.ParsePoints([]byte(line))
+	if err != nil {
+		t.Fatalf(`ParsePoints() failed. got %s`, err)
+	}
+	if exp := 1; len(pts) != exp {
+		t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp)
+	}
+	pt := pts[0]
+
+	got := pt.String()
+	if line != got {
+		t.Errorf("ParsePoint() to string mismatch:\n got %v\n exp %v", got, line)
+	}
+
+	pt = models.NewPoint("cpu", models.Tags{"host": "serverA", "region": "us-east"},
+		models.Fields{"int": 10, "float": float64(11.0), "float2": float64(12.123), "bool": false, "str": "string val"},
+		time.Unix(1, 0))
+
+	got = pt.String()
+	if line != got {
+		t.Errorf("NewPoint() to string mismatch:\n got %v\n exp %v", got, line)
+	}
+}
+
+func TestParsePointsWithPrecision(t *testing.T) {
+	tests := []struct {
+		name      string
+		line      string
+		precision string
+		exp       string
+	}{
+		{
+			name:      "nanosecond by default",
+			line:      `cpu,host=serverA,region=us-east value=1.0 946730096789012345`,
+			precision: "",
+			exp:       "cpu,host=serverA,region=us-east value=1.0 946730096789012345",
+		},
+		{
+			name:      "nanosecond",
+			line:      `cpu,host=serverA,region=us-east value=1.0 946730096789012345`,
+			precision: "n",
+			exp:       "cpu,host=serverA,region=us-east value=1.0 946730096789012345",
+		},
+		{
+			name:      "microsecond",
+			line:      `cpu,host=serverA,region=us-east value=1.0 946730096789012`,
+			precision: "u",
+			exp:       "cpu,host=serverA,region=us-east value=1.0 946730096789012000",
+		},
+		{
+			name:      "millisecond",
+			line:      `cpu,host=serverA,region=us-east value=1.0 946730096789`,
+			precision: "ms",
+			exp:       "cpu,host=serverA,region=us-east value=1.0 946730096789000000",
+		},
+		{
+			name:      "second",
+			line:      `cpu,host=serverA,region=us-east value=1.0 946730096`,
+			precision: "s",
+			exp:       "cpu,host=serverA,region=us-east value=1.0 946730096000000000",
+		},
+		{
+			name:      "minute",
+			line:      `cpu,host=serverA,region=us-east value=1.0 15778834`,
+			precision: "m",
+			exp:       "cpu,host=serverA,region=us-east value=1.0 946730040000000000",
+		},
+		{
+			name:      "hour",
+			line:      `cpu,host=serverA,region=us-east value=1.0 262980`,
+			precision: "h",
+			exp:       "cpu,host=serverA,region=us-east value=1.0 946728000000000000",
+		},
+	}
+	for _, test := range tests {
+		pts, err := models.ParsePointsWithPrecision([]byte(test.line), time.Now().UTC(), test.precision)
+		if err != nil {
+			t.Fatalf(`%s: ParsePoints() failed.
got %s`, test.name, err) + } + if exp := 1; len(pts) != exp { + t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, len(pts), exp) + } + pt := pts[0] + + got := pt.String() + if got != test.exp { + t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) + } + } +} + +func TestParsePointsWithPrecisionNoTime(t *testing.T) { + line := `cpu,host=serverA,region=us-east value=1.0` + tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z") + tests := []struct { + name string + precision string + exp string + }{ + { + name: "no precision", + precision: "", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + }, + { + name: "nanosecond precision", + precision: "n", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + }, + { + name: "microsecond precision", + precision: "u", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012000", + }, + { + name: "millisecond precision", + precision: "ms", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789000000", + }, + { + name: "second precision", + precision: "s", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096000000000", + }, + { + name: "minute precision", + precision: "m", + exp: "cpu,host=serverA,region=us-east value=1.0 946730040000000000", + }, + { + name: "hour precision", + precision: "h", + exp: "cpu,host=serverA,region=us-east value=1.0 946728000000000000", + }, + } + + for _, test := range tests { + pts, err := models.ParsePointsWithPrecision([]byte(line), tm, test.precision) + if err != nil { + t.Fatalf(`%s: ParsePoints() failed. got %s`, test.name, err) + } + if exp := 1; len(pts) != exp { + t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, len(pts), exp) + } + pt := pts[0] + + got := pt.String() + if got != test.exp { + t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) + } + } +} + +func TestParsePointsWithPrecisionComments(t *testing.T) { + tests := []struct { + name string + batch string + exp string + lenPoints int + }{ + { + name: "comment only", + batch: `# comment only`, + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + lenPoints: 0, + }, + { + name: "point with comment above", + batch: `# a point is below +cpu,host=serverA,region=us-east value=1.0 946730096789012345`, + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + lenPoints: 1, + }, + { + name: "point with comment below", + batch: `cpu,host=serverA,region=us-east value=1.0 946730096789012345 +# end of points`, + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + lenPoints: 1, + }, + { + name: "indented comment", + batch: ` # a point is below +cpu,host=serverA,region=us-east value=1.0 946730096789012345`, + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + lenPoints: 1, + }, + } + for _, test := range tests { + pts, err := models.ParsePointsWithPrecision([]byte(test.batch), time.Now().UTC(), "") + if err != nil { + t.Fatalf(`%s: ParsePoints() failed. 
got %s`, test.name, err) + } + pointsLength := len(pts) + if exp := test.lenPoints; pointsLength != exp { + t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, pointsLength, exp) + } + + if pointsLength > 0 { + pt := pts[0] + + got := pt.String() + if got != test.exp { + t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) + } + } + } +} + +func TestNewPointEscaped(t *testing.T) { + // commas + pt := models.NewPoint("cpu,main", models.Tags{"tag,bar": "value"}, models.Fields{"name,bar": 1.0}, time.Unix(0, 0)) + if exp := `cpu\,main,tag\,bar=value name\,bar=1 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } + + // spaces + pt = models.NewPoint("cpu main", models.Tags{"tag bar": "value"}, models.Fields{"name bar": 1.0}, time.Unix(0, 0)) + if exp := `cpu\ main,tag\ bar=value name\ bar=1 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } + + // equals + pt = models.NewPoint("cpu=main", models.Tags{"tag=bar": "value=foo"}, models.Fields{"name=bar": 1.0}, time.Unix(0, 0)) + if exp := `cpu=main,tag\=bar=value\=foo name\=bar=1 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } +} + +func TestNewPointUnhandledType(t *testing.T) { + // nil value + pt := models.NewPoint("cpu", nil, models.Fields{"value": nil}, time.Unix(0, 0)) + if exp := `cpu value= 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } + + // unsupported type gets stored as string + now := time.Unix(0, 0).UTC() + pt = models.NewPoint("cpu", nil, models.Fields{"value": now}, time.Unix(0, 0)) + if exp := `cpu value="1970-01-01 00:00:00 +0000 UTC" 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } + + if exp := "1970-01-01 00:00:00 +0000 UTC"; pt.Fields()["value"] != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } +} + +func TestMakeKeyEscaped(t *testing.T) { + if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu\ load`), models.Tags{}); string(got) != exp { + t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) + } + + if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu load`), models.Tags{}); string(got) != exp { + t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) + } + + if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu\,load`), models.Tags{}); string(got) != exp { + t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) + } + + if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu,load`), models.Tags{}); string(got) != exp { + t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) + } + +} + +func TestPrecisionString(t *testing.T) { + tags := map[string]interface{}{"value": float64(1)} + tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z") + tests := []struct { + name string + precision string + exp string + }{ + { + name: "no precision", + precision: "", + exp: "cpu value=1 946730096789012345", + }, + { + name: "nanosecond precision", + precision: "ns", + exp: "cpu value=1 946730096789012345", + }, + { + name: "microsecond precision", + precision: "u", + exp: "cpu value=1 946730096789012", + }, + { + name: "millisecond precision", + precision: "ms", + exp: "cpu value=1 946730096789", + }, + { + name: "second precision", + precision: "s", + exp: "cpu value=1 946730096", + }, + { + name: "minute precision", + 
precision: "m", + exp: "cpu value=1 15778834", + }, + { + name: "hour precision", + precision: "h", + exp: "cpu value=1 262980", + }, + } + + for _, test := range tests { + pt := models.NewPoint("cpu", nil, tags, tm) + act := pt.PrecisionString(test.precision) + + if act != test.exp { + t.Errorf("%s: PrecisionString() mismatch:\n actual: %v\n exp: %v", + test.name, act, test.exp) + } + } +} diff --git a/_third_party/github.com/influxdb/influxdb/models/rows.go b/_third_party/github.com/influxdb/influxdb/models/rows.go new file mode 100644 index 0000000000..a4350faa7a --- /dev/null +++ b/_third_party/github.com/influxdb/influxdb/models/rows.go @@ -0,0 +1,59 @@ +package models + +import ( + "hash/fnv" + "sort" +) + +// Row represents a single row returned from the execution of a statement. +type Row struct { + Name string `json:"name,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Columns []string `json:"columns,omitempty"` + Values [][]interface{} `json:"values,omitempty"` + Err error `json:"err,omitempty"` +} + +// SameSeries returns true if r contains values for the same series as o. +func (r *Row) SameSeries(o *Row) bool { + return r.tagsHash() == o.tagsHash() && r.Name == o.Name +} + +// tagsHash returns a hash of tag key/value pairs. +func (r *Row) tagsHash() uint64 { + h := fnv.New64a() + keys := r.tagsKeys() + for _, k := range keys { + h.Write([]byte(k)) + h.Write([]byte(r.Tags[k])) + } + return h.Sum64() +} + +// tagKeys returns a sorted list of tag keys. +func (r *Row) tagsKeys() []string { + a := make([]string, 0, len(r.Tags)) + for k := range r.Tags { + a = append(a, k) + } + sort.Strings(a) + return a +} + +type Rows []*Row + +func (p Rows) Len() int { return len(p) } + +func (p Rows) Less(i, j int) bool { + // Sort by name first. + if p[i].Name != p[j].Name { + return p[i].Name < p[j].Name + } + + // Sort by tag set hash. Tags don't have a meaningful sort order so we + // just compute a hash and sort by that instead. This allows the tests + // to receive rows in a predictable order every time. + return p[i].tagsHash() < p[j].tagsHash() +} + +func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/_third_party/github.com/influxdb/influxdb/nightly.sh b/_third_party/github.com/influxdb/influxdb/nightly.sh new file mode 100755 index 0000000000..2114b4618a --- /dev/null +++ b/_third_party/github.com/influxdb/influxdb/nightly.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +# Bump this whenever a release branch is created from master +MASTER_VERSION=0.9.5 + +REPO_DIR=`mktemp -d` +echo "Using $REPO_DIR for all work..." + +cd $REPO_DIR +export GOPATH=`pwd` +mkdir -p $GOPATH/src/github.com/influxdb +cd $GOPATH/src/github.com/influxdb +git clone https://github.com/influxdb/influxdb.git + +cd $GOPATH/src/github.com/influxdb/influxdb +NIGHTLY_BUILD=true ./package.sh $MASTER_VERSION-nightly-`git log --pretty=format:'%h' -n 1` +rm -rf $REPO_DIR diff --git a/_third_party/github.com/influxdb/influxdb/package.sh b/_third_party/github.com/influxdb/influxdb/package.sh new file mode 100755 index 0000000000..dd5dea77f8 --- /dev/null +++ b/_third_party/github.com/influxdb/influxdb/package.sh @@ -0,0 +1,624 @@ +#!/usr/bin/env bash + +########################################################################### +# Packaging script which creates debian and RPM packages. It optionally +# tags the repo with the given version. +# +# Requirements: GOPATH must be set. 'fpm' must be on the path, and the AWS +# CLI tools must also be installed. 
+#
+# https://github.com/jordansissel/fpm
+# http://aws.amazon.com/cli/
+#
+# Packaging process: to package a build, simply execute:
+#
+#    package.sh <version>
+#
+# where <version> is the desired version. If generation of a debian and RPM
+# package is successful, the script will offer to tag the repo using the
+# supplied version string.
+#
+# See package.sh -h for options
+#
+# AWS upload: the script will also offer to upload the packages to S3. If
+# this option is selected, the credentials should be present in the file
+# ~/aws.conf. The contents should be of the form:
+#
+#    [default]
+#    aws_access_key_id=<access key>
+#    aws_secret_access_key=<secret key>
+#    region = us-east-1
+#
+# Trim the leading spaces when creating the file. The script will exit if
+# S3 upload is requested, but this file does not exist.
+
+[ -z $DEBUG ] || set -x
+
+AWS_FILE=~/aws.conf
+
+INSTALL_ROOT_DIR=/opt/influxdb
+INFLUXDB_LOG_DIR=/var/log/influxdb
+INFLUXDB_DATA_DIR=/var/opt/influxdb
+CONFIG_ROOT_DIR=/etc/opt/influxdb
+LOGROTATE_DIR=/etc/logrotate.d
+
+SAMPLE_CONFIGURATION=etc/config.sample.toml
+INITD_SCRIPT=scripts/init.sh
+SYSTEMD_SCRIPT=scripts/influxdb.service
+LOGROTATE=scripts/logrotate
+
+TMP_WORK_DIR=`mktemp -d`
+POST_INSTALL_PATH=`mktemp`
+POST_UNINSTALL_PATH=`mktemp`
+ARCH=`uname -i`
+LICENSE=MIT
+URL=influxdb.com
+MAINTAINER=support@influxdb.com
+VENDOR=Influxdb
+DESCRIPTION="Distributed time-series database"
+
+# Allow path to FPM to be set by environment variables. Some execution contexts
+# like cron don't have PATH set correctly to pick it up.
+if [ -z "$FPM" ]; then
+	FPM=`which fpm`
+fi
+
+GO_VERSION="go1.5"
+GOPATH_INSTALL=
+BINS=(
+    influxd
+    influx
+    )
+
+###########################################################################
+# Helper functions.
+
+# usage prints simple usage information.
+usage() {
+	cat << EOF >&2
+$0 [-h] [-p|-w] [-t <dist>] [-r <number>] <version>
+
+    <version> should be a dotted version such as 0.9.5.
+
+    -r release candidate number, if any.
+       Example: -r 7
+    -p just build packages
+    -w build packages for current working directory
+       implies -p
+    -t <dist>
+       build package for <dist>
+       <dist> can be rpm, tar or deb
+       can have multiple -t
+
+    Examples:
+
+        $0 0.9.5 -r 9 # Creates 0.9.5-rc9
+        $0 0.9.4 # Creates 0.9.4
+
+EOF
+	cleanup_exit $1
+}
+
+# full_version echoes the full version string, given a version and an optional
+# RC number. If just the version is present, it alone is echoed. If the RC is
+# also provided, then "rc" and the number are appended to the version.
+# For example, 0.9.4-rc4 would be returned if version was 0.9.4 and the RC
+# number was 4.
+full_version() {
+	version=$1
+	rc=$2
+	if [ -z "$rc" ]; then
+		echo $version
+	else
+		echo ${version}-rc${rc}
+	fi
+}
+
+# rpm_release echoes the RPM release or "iteration" given an RC number.
+rpm_release() {
+	rc=$1
+	if [ -z "$rc" ]; then
+		echo 1
+	else
+		echo 0.1.rc${rc}
+	fi
+}
+
+# cleanup_exit removes all resources created during the process and exits with
+# the supplied return code.
+cleanup_exit() {
+	rm -r $TMP_WORK_DIR
+	rm $POST_INSTALL_PATH
+	rm $POST_UNINSTALL_PATH
+	exit $1
+}
+
+# current_branch echoes the current git branch.
+current_branch() {
+	echo `git rev-parse --abbrev-ref HEAD`
+}
+
+# check_gopath sanity checks the value of the GOPATH env variable, and determines
+# the path where build artifacts are installed. GOPATH may be a colon-delimited
+# list of directories.
+check_gopath() {
+	[ -z "$GOPATH" ] && echo "GOPATH is not set." && cleanup_exit 1
+	GOPATH_INSTALL=`echo $GOPATH | cut -d ':' -f 1`
-d "$GOPATH_INSTALL" ] && echo "GOPATH_INSTALL is not a directory." && cleanup_exit 1 + echo "GOPATH ($GOPATH) looks sane, using $GOPATH_INSTALL for installation." +} + +check_gvm() { + if [ -n "$GOPATH" ]; then + existing_gopath=$GOPATH + fi + + source $HOME/.gvm/scripts/gvm + which gvm + if [ $? -ne 0 ]; then + echo "gvm not found -- aborting." + cleanup_exit $1 + fi + gvm use $GO_VERSION + if [ $? -ne 0 ]; then + echo "gvm cannot find Go version $GO_VERSION -- aborting." + cleanup_exit $1 + fi + + # Keep any existing GOPATH set. + if [ -n "$existing_gopath" ]; then + GOPATH=$existing_gopath + fi +} + +# check_clean_tree ensures that no source file is locally modified. +check_clean_tree() { + modified=$(git ls-files --modified | wc -l) + if [ $modified -ne 0 ]; then + echo "The source tree is not clean -- aborting." + cleanup_exit 1 + fi + echo "Git tree is clean." +} + +# update_tree ensures the tree is in-sync with the repo. +update_tree() { + git pull origin $TARGET_BRANCH + if [ $? -ne 0 ]; then + echo "Failed to pull latest code -- aborting." + cleanup_exit 1 + fi + git fetch --tags + if [ $? -ne 0 ]; then + echo "Failed to fetch tags -- aborting." + cleanup_exit 1 + fi + echo "Git tree updated successfully." +} + +# check_tag_exists checks if the existing release already exists in the tags. +check_tag_exists () { + version=$1 + git tag | grep -q "^v$version$" + if [ $? -eq 0 ]; then + echo "Proposed version $version already exists as a tag -- aborting." + cleanup_exit 1 + fi +} + +# make_dir_tree creates the directory structure within the packages. +make_dir_tree() { + work_dir=$1 + version=$2 + mkdir -p $work_dir/$INSTALL_ROOT_DIR/versions/$version/scripts + if [ $? -ne 0 ]; then + echo "Failed to create installation directory -- aborting." + cleanup_exit 1 + fi + mkdir -p $work_dir/$CONFIG_ROOT_DIR + if [ $? -ne 0 ]; then + echo "Failed to create configuration directory -- aborting." + cleanup_exit 1 + fi + mkdir -p $work_dir/$LOGROTATE_DIR + if [ $? -ne 0 ]; then + echo "Failed to create logrotate directory -- aborting." + cleanup_exit 1 + fi +} + +# do_build builds the code. The version and commit must be passed in. +do_build() { + for b in ${BINS[*]}; do + rm -f $GOPATH_INSTALL/bin/$b + done + + if [ -n "$WORKING_DIR" ]; then + STASH=`git stash create -a` + if [ $? -ne 0 ]; then + echo "WARNING: failed to stash uncommited local changes" + fi + git reset --hard + fi + + go get -u -f -d ./... + if [ $? -ne 0 ]; then + echo "WARNING: failed to 'go get' packages." + fi + + git checkout $TARGET_BRANCH # go get switches to master, so ensure we're back. + + if [ -n "$WORKING_DIR" ]; then + git stash apply $STASH + if [ $? -ne 0 ]; then #and apply previous uncommited local changes + echo "WARNING: failed to restore uncommited local changes" + fi + fi + + version=$1 + commit=`git rev-parse HEAD` + branch=`current_branch` + if [ $? -ne 0 ]; then + echo "Unable to retrieve current commit -- aborting" + cleanup_exit 1 + fi + + date=`date -u --iso-8601=seconds` + go install -a -ldflags="-X main.version=$version -X main.branch=$branch -X main.commit=$commit -X main.buildTime='$date'" ./... + if [ $? -ne 0 ]; then + echo "Build failed, unable to create package -- aborting" + cleanup_exit 1 + fi + echo "Build completed successfully." +} + +# generate_postinstall_script creates the post-install script for the +# package. It must be passed the version. 
+generate_postinstall_script() {
+	version=$1
+	cat << EOF >$POST_INSTALL_PATH
+#!/bin/sh
+rm -f $INSTALL_ROOT_DIR/influxd
+rm -f $INSTALL_ROOT_DIR/influx
+rm -f $INSTALL_ROOT_DIR/init.sh
+ln -s $INSTALL_ROOT_DIR/versions/$version/influxd $INSTALL_ROOT_DIR/influxd
+ln -s $INSTALL_ROOT_DIR/versions/$version/influx $INSTALL_ROOT_DIR/influx
+ln -s $INSTALL_ROOT_DIR/versions/$version/scripts/init.sh $INSTALL_ROOT_DIR/init.sh
+
+if ! id influxdb >/dev/null 2>&1; then
+    useradd --system -U -M influxdb
+fi
+
+# Systemd
+if which systemctl > /dev/null 2>&1 ; then
+    cp $INSTALL_ROOT_DIR/versions/$version/scripts/influxdb.service \
+       /lib/systemd/system/influxdb.service
+    systemctl enable influxdb
+
+# Sysv
+else
+    rm -f /etc/init.d/influxdb
+    ln -sfn $INSTALL_ROOT_DIR/init.sh /etc/init.d/influxdb
+    chmod +x /etc/init.d/influxdb
+    if which update-rc.d > /dev/null 2>&1 ; then
+        update-rc.d -f influxdb remove
+        update-rc.d influxdb defaults
+    else
+        chkconfig --add influxdb
+    fi
+fi
+
+chown -R -L influxdb:influxdb $INSTALL_ROOT_DIR
+chmod -R a+rX $INSTALL_ROOT_DIR
+
+mkdir -p $INFLUXDB_LOG_DIR
+chown -R -L influxdb:influxdb $INFLUXDB_LOG_DIR
+mkdir -p $INFLUXDB_DATA_DIR
+chown -R -L influxdb:influxdb $INFLUXDB_DATA_DIR
+EOF
+	echo "Post-install script created successfully at $POST_INSTALL_PATH"
+}
+
+generate_postuninstall_script() {
+	cat << EOF >$POST_UNINSTALL_PATH
+#!/bin/sh
+if [ -d $INSTALL_ROOT_DIR ]; then
+	rm -r "$INSTALL_ROOT_DIR"
+fi
+
+EOF
+	echo "Post-uninstall script created successfully at $POST_UNINSTALL_PATH"
+}
+
+###########################################################################
+# Process options
+while :
+do
+	case $1 in
+		-h | --help)
+			usage 0
+			;;
+
+		-p | --packages-only)
+			PACKAGES_ONLY="PACKAGES_ONLY"
+			shift
+			;;
+
+		-t | --target)
+			case "$2" in
+				'tar') TAR_WANTED="gz"
+					;;
+				'deb') DEB_WANTED="deb"
+					;;
+				'rpm') RPM_WANTED="rpm"
+					;;
+				*)
+					echo "Unknown target distribution $2"
+					usage 1
+					;;
+			esac
+			shift 2
+			;;
+
+		-r)
+			RC=$2
+			if [ -z "$RC" ]; then
+				echo "RC number required"
+			fi
+			shift 2
+			;;
+
+		-w | --working-directory)
+			PACKAGES_ONLY="PACKAGES_ONLY"
+			WORKING_DIR="WORKING_DIR"
+			shift
+			;;
+
+		-*)
+			echo "Unknown option $1"
+			usage 1
+			;;
+
+		?*)
+			if [ -z $VERSION ]; then
+				VERSION=$1
+				shift
+			else
+				echo "$1 : aborting version already set to $VERSION"
+				usage 1
+			fi
+
+			echo $VERSION | grep -i '[r|c]' 2>&1 >/dev/null
+			if [ $? -ne 1 -a -z "$NIGHTLY_BUILD" ]; then
+				echo
+				echo "$VERSION contains reference to RC - specify RC separately"
+				echo
+				usage 1
+			fi
+			;;
+
+		*) break
+	esac
+done
+
+if [ -z "$DEB_WANTED$RPM_WANTED$TAR_WANTED" ]; then
+	TAR_WANTED="gz"
+	DEB_WANTED="deb"
+	RPM_WANTED="rpm"
+fi
+
+if [ -z "$VERSION" ]; then
+	echo -e "Missing version"
+	usage 1
+fi
+
+###########################################################################
+# Start the packaging process.
+
+echo -e "\nStarting package process...\n"
+
+# Ensure the current branch is correct.
+TARGET_BRANCH=`current_branch`
+if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then
+	echo -n "Current branch is $TARGET_BRANCH. Start packaging this branch? [Y/n] "
+	read response
+	response=`echo $response | tr 'A-Z' 'a-z'`
+	if [ "x$response" == "xn" ]; then
+		echo "Packaging aborted."
+ cleanup_exit 1 + fi +fi + +check_gvm +check_gopath +if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then + check_clean_tree + update_tree + check_tag_exists `full_version $VERSION $RC` +fi + +do_build `full_version $VERSION $RC` +make_dir_tree $TMP_WORK_DIR `full_version $VERSION $RC` + +########################################################################### +# Copy the assets to the installation directories. + +for b in ${BINS[*]}; do + cp $GOPATH_INSTALL/bin/$b $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/`full_version $VERSION $RC` + if [ $? -ne 0 ]; then + echo "Failed to copy binaries to packaging directory -- aborting." + cleanup_exit 1 + fi +done +echo "${BINS[*]} copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/`full_version $VERSION $RC`" + +cp $INITD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/`full_version $VERSION $RC`/scripts +if [ $? -ne 0 ]; then + echo "Failed to copy init.d script to packaging directory -- aborting." + cleanup_exit 1 +fi +echo "$INITD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/`full_version $VERSION $RC`/scripts" + +cp $SYSTEMD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/`full_version $VERSION $RC`/scripts +if [ $? -ne 0 ]; then + echo "Failed to copy systemd script to packaging directory -- aborting." + cleanup_exit 1 +fi +echo "$SYSTEMD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/`full_version $VERSION $RC`/scripts" + +cp $SAMPLE_CONFIGURATION $TMP_WORK_DIR/$CONFIG_ROOT_DIR/influxdb.conf +if [ $? -ne 0 ]; then + echo "Failed to copy $SAMPLE_CONFIGURATION to packaging directory -- aborting." + cleanup_exit 1 +fi + +cp $LOGROTATE $TMP_WORK_DIR/$LOGROTATE_DIR/influxd +if [ $? -ne 0 ]; then + echo "Failed to copy logrotate configuration to packaging directory -- aborting." + cleanup_exit 1 +fi + +generate_postinstall_script `full_version $VERSION $RC` +generate_postuninstall_script + +########################################################################### +# Create the actual packages. + +if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then + echo -n "Commence creation of $ARCH packages, version `full_version $VERSION $RC`? [Y/n] " + read response + response=`echo $response | tr 'A-Z' 'a-z'` + if [ "x$response" == "xn" ]; then + echo "Packaging aborted." + cleanup_exit 1 + fi +fi + +if [ $ARCH == "i386" ]; then + rpm_package=influxdb-${VERSION}-1.i686.rpm # RPM packages use 1 for default package release. + debian_package=influxdb_`full_version $VERSION $RC`_i686.deb + deb_args="-a i686" + rpm_args="setarch i686" +elif [ $ARCH == "arm" ]; then + rpm_package=influxdb-${VERSION}-1.armel.rpm + debian_package=influxdb_`full_version $VERSION $RC`_armel.deb +else + rpm_package=influxdb-${VERSION}-1.x86_64.rpm + debian_package=influxdb_`full_version $VERSION $RC`_amd64.deb +fi + +COMMON_FPM_ARGS="\ +--log error \ +-C $TMP_WORK_DIR \ +--vendor $VENDOR \ +--url $URL \ +--license $LICENSE \ +--maintainer $MAINTAINER \ +--after-install $POST_INSTALL_PATH \ +--after-remove $POST_UNINSTALL_PATH \ +--name influxdb \ +--description "$DESCRIPTION" \ +--config-files $CONFIG_ROOT_DIR \ +--config-files $LOGROTATE_DIR" + +if [ -n "$DEB_WANTED" ]; then + $FPM -s dir -t deb $deb_args $COMMON_FPM_ARGS --version `full_version $VERSION $RC` . + if [ $? -ne 0 ]; then + echo "Failed to create Debian package -- aborting." + cleanup_exit 1 + fi + echo "Debian package created successfully." 
+fi + +if [ -n "$TAR_WANTED" ]; then + $FPM -s dir -t tar --prefix influxdb_`full_version $VERSION $RC`_${ARCH} -p influxdb_`full_version $VERSION $RC`_${ARCH}.tar.gz $COMMON_FPM_ARGS --version `full_version $VERSION $RC ` . + if [ $? -ne 0 ]; then + echo "Failed to create Tar package -- aborting." + cleanup_exit 1 + fi + echo "Tar package created successfully." +fi + +if [ -n "$RPM_WANTED" ]; then + $rpm_args $FPM -s dir -t rpm $COMMON_FPM_ARGS --depends coreutils --version $VERSION --iteration `rpm_release $RC` . + if [ $? -ne 0 ]; then + echo "Failed to create RPM package -- aborting." + cleanup_exit 1 + fi + echo "RPM package created successfully." +fi + +########################################################################### +# Offer to tag the repo. + +if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then + echo -n "Tag source tree with v`full_version $VERSION $RC` and push to repo? [y/N] " + read response + response=`echo $response | tr 'A-Z' 'a-z'` + if [ "x$response" == "xy" ]; then + echo "Creating tag v`full_version $VERSION $RC` and pushing to repo" + git tag v`full_version $VERSION $RC` + if [ $? -ne 0 ]; then + echo "Failed to create tag v`full_version $VERSION $RC` -- aborting" + cleanup_exit 1 + fi + echo "Tag v`full_version $VERSION $RC` created" + git push origin v`full_version $VERSION $RC` + if [ $? -ne 0 ]; then + echo "Failed to push tag v`full_version $VERSION $RC` to repo -- aborting" + cleanup_exit 1 + fi + echo "Tag v`full_version $VERSION $RC` pushed to repo" + else + echo "Not creating tag v`full_version $VERSION $RC`." + fi +fi + +########################################################################### +# Offer to publish the packages. + +if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then + echo -n "Publish packages to S3? [y/N] " + read response + response=`echo $response | tr 'A-Z' 'a-z'` +fi + +if [ "x$response" == "xy" -o -n "$NIGHTLY_BUILD" ]; then + echo "Publishing packages to S3." + if [ ! -e "$AWS_FILE" ]; then + echo "$AWS_FILE does not exist -- aborting." + cleanup_exit 1 + fi + + for filepath in `ls *.{$DEB_WANTED,$RPM_WANTED,$TAR_WANTED} 2> /dev/null`; do + filename=`basename $filepath` + + if [ -n "$NIGHTLY_BUILD" ]; then + # Replace the version string in the filename with "nightly". + v=`full_version $VERSION $RC` + v_underscored=`echo "$v" | tr - _` + v_rpm=$VERSION-`rpm_release $RC` + + # It's ok to run each of these since only 1 will match, leaving + # filename untouched otherwise. + filename=`echo $filename | sed s/$v/nightly/` + filename=`echo $filename | sed s/$v_underscored/nightly/` + filename=`echo $filename | sed s/$v_rpm/nightly-1/` + fi + + AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $filepath s3://influxdb/$filename --acl public-read --region us-east-1 + if [ $? -ne 0 ]; then + echo "Upload failed ($filename) -- aborting". + cleanup_exit 1 + fi + echo "$filename uploaded" + done +else + echo "Not publishing packages to S3." +fi + +########################################################################### +# All done. + +echo -e "\nPackaging process complete." 
+cleanup_exit 0 diff --git a/_third_party/github.com/influxdb/influxdb/pkg/escape/bytes.go b/_third_party/github.com/influxdb/influxdb/pkg/escape/bytes.go new file mode 100644 index 0000000000..15e9cf29d5 --- /dev/null +++ b/_third_party/github.com/influxdb/influxdb/pkg/escape/bytes.go @@ -0,0 +1,45 @@ +package escape + +import "bytes" + +func Bytes(in []byte) []byte { + for b, esc := range Codes { + in = bytes.Replace(in, []byte{b}, esc, -1) + } + return in +} + +func Unescape(in []byte) []byte { + i := 0 + inLen := len(in) + var out []byte + + for { + if i >= inLen { + break + } + if in[i] == '\\' && i+1 < inLen { + switch in[i+1] { + case ',': + out = append(out, ',') + i += 2 + continue + case '"': + out = append(out, '"') + i += 2 + continue + case ' ': + out = append(out, ' ') + i += 2 + continue + case '=': + out = append(out, '=') + i += 2 + continue + } + } + out = append(out, in[i]) + i += 1 + } + return out +} diff --git a/_third_party/github.com/influxdb/influxdb/pkg/escape/strings.go b/_third_party/github.com/influxdb/influxdb/pkg/escape/strings.go new file mode 100644 index 0000000000..330fbf4226 --- /dev/null +++ b/_third_party/github.com/influxdb/influxdb/pkg/escape/strings.go @@ -0,0 +1,34 @@ +package escape + +import "strings" + +var ( + Codes = map[byte][]byte{ + ',': []byte(`\,`), + '"': []byte(`\"`), + ' ': []byte(`\ `), + '=': []byte(`\=`), + } + + codesStr = map[string]string{} +) + +func init() { + for k, v := range Codes { + codesStr[string(k)] = string(v) + } +} + +func UnescapeString(in string) string { + for b, esc := range codesStr { + in = strings.Replace(in, esc, b, -1) + } + return in +} + +func String(in string) string { + for b, esc := range codesStr { + in = strings.Replace(in, b, esc, -1) + } + return in +} diff --git a/_third_party/github.com/influxdb/influxdb/pkg/slices/strings.go b/_third_party/github.com/influxdb/influxdb/pkg/slices/strings.go new file mode 100644 index 0000000000..16d6a13f74 --- /dev/null +++ b/_third_party/github.com/influxdb/influxdb/pkg/slices/strings.go @@ -0,0 +1,37 @@ +package slices + +import "strings" + +func Union(setA, setB []string, ignoreCase bool) []string { + for _, b := range setB { + if ignoreCase { + if !ExistsIgnoreCase(setA, b) { + setA = append(setA, b) + } + continue + } + if !Exists(setA, b) { + setA = append(setA, b) + } + } + return setA +} + +func Exists(set []string, find string) bool { + for _, s := range set { + if s == find { + return true + } + } + return false +} + +func ExistsIgnoreCase(set []string, find string) bool { + find = strings.ToLower(find) + for _, s := range set { + if strings.ToLower(s) == find { + return true + } + } + return false +} diff --git a/_third_party/github.com/influxdb/influxdb/test-32bit-docker.sh b/_third_party/github.com/influxdb/influxdb/test-32bit-docker.sh new file mode 100755 index 0000000000..145ee37676 --- /dev/null +++ b/_third_party/github.com/influxdb/influxdb/test-32bit-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +docker build -f Dockerfile_test_ubuntu32 -t ubuntu-32-influxdb-test . +docker run -v $(pwd):/root/go/src/github.com/influxdb/influxdb -t ubuntu-32-influxdb-test bash -c "cd /root/go/src/github.com/influxdb/influxdb && go get -t -d -v ./... && go build -v ./... && go test -v ./..." 
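The pkg/escape package added above centralizes line-protocol escaping of the four special bytes (comma, double quote, space, equals). A small standalone sketch of the byte-level round trip, using only the Bytes and Unescape functions shown in bytes.go:

package main

import (
	"fmt"

	"bosun.org/_third_party/github.com/influxdb/influxdb/pkg/escape"
)

func main() {
	// A measurement key containing a space, a comma and an equals sign.
	raw := []byte(`cpu load,host=a`)

	// Each special byte gains a backslash: cpu\ load\,host\=a
	esc := escape.Bytes(raw)
	fmt.Printf("%s\n", esc)

	// Unescape strips the backslashes again, restoring the original bytes.
	back := escape.Unescape(esc)
	fmt.Println(string(back) == string(raw)) // true
}
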
diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/batcher.go b/_third_party/github.com/influxdb/influxdb/tsdb/batcher.go index aefbea723e..940b96cbfe 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/batcher.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/batcher.go @@ -4,6 +4,8 @@ import ( "sync" "sync/atomic" "time" + + "bosun.org/_third_party/github.com/influxdb/influxdb/models" ) // PointBatcher accepts Points and will emit a batch of those points when either @@ -15,21 +17,24 @@ type PointBatcher struct { duration time.Duration stop chan struct{} - in chan Point - out chan []Point + in chan models.Point + out chan []models.Point flush chan struct{} wg *sync.WaitGroup } -// NewPointBatcher returns a new PointBatcher. -func NewPointBatcher(sz int, d time.Duration) *PointBatcher { +// NewPointBatcher returns a new PointBatcher. sz is the batching size, +// bp is the maximum number of batches that may be pending. d is the time +// after which a batch will be emitted after the first point is received +// for the batch, regardless of its size. +func NewPointBatcher(sz int, bp int, d time.Duration) *PointBatcher { return &PointBatcher{ size: sz, duration: d, stop: make(chan struct{}), - in: make(chan Point), - out: make(chan []Point), + in: make(chan models.Point, bp*sz), + out: make(chan []models.Point), flush: make(chan struct{}), } } @@ -51,7 +56,7 @@ func (b *PointBatcher) Start() { } var timer *time.Timer - var batch []Point + var batch []models.Point var timerCh <-chan time.Time emit := func() { @@ -76,7 +81,7 @@ func (b *PointBatcher) Start() { case p := <-b.in: atomic.AddUint64(&b.stats.PointTotal, 1) if batch == nil { - batch = make([]Point, 0, b.size) + batch = make([]models.Point, 0, b.size) if b.duration > 0 { timer = time.NewTimer(b.duration) timerCh = timer.C @@ -115,12 +120,12 @@ func (b *PointBatcher) Stop() { } // In returns the channel to which points should be written. -func (b *PointBatcher) In() chan<- Point { +func (b *PointBatcher) In() chan<- models.Point { return b.in } // Out returns the channel from which batches should be read. -func (b *PointBatcher) Out() <-chan []Point { +func (b *PointBatcher) Out() <-chan []models.Point { return b.out } diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/batcher_test.go b/_third_party/github.com/influxdb/influxdb/tsdb/batcher_test.go index 637ce8b047..52d5478830 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/batcher_test.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/batcher_test.go @@ -4,20 +4,44 @@ import ( "testing" "time" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" "bosun.org/_third_party/github.com/influxdb/influxdb/tsdb" ) // TestBatch_Size ensures that a batcher generates a batch when the size threshold is reached. func TestBatch_Size(t *testing.T) { batchSize := 5 - batcher := tsdb.NewPointBatcher(batchSize, time.Hour) + batcher := tsdb.NewPointBatcher(batchSize, 0, time.Hour) if batcher == nil { t.Fatal("failed to create batcher for size test") } batcher.Start() - var p tsdb.Point + var p models.Point + go func() { + for i := 0; i < batchSize; i++ { + batcher.In() <- p + } + }() + batch := <-batcher.Out() + if len(batch) != batchSize { + t.Errorf("received batch has incorrect length exp %d, got %d", batchSize, len(batch)) + } + checkPointBatcherStats(t, batcher, -1, batchSize, 1, 0) +} + +// TestBatch_Size ensures that a buffered batcher generates a batch when the size threshold is reached. 
+func TestBatch_SizeBuffered(t *testing.T) { + batchSize := 5 + batcher := tsdb.NewPointBatcher(batchSize, 5, time.Hour) + if batcher == nil { + t.Fatal("failed to create batcher for size test") + } + + batcher.Start() + + var p models.Point go func() { for i := 0; i < batchSize; i++ { batcher.In() <- p @@ -33,14 +57,14 @@ func TestBatch_Size(t *testing.T) { // TestBatch_Size ensures that a batcher generates a batch when the timeout triggers. func TestBatch_Timeout(t *testing.T) { batchSize := 5 - batcher := tsdb.NewPointBatcher(batchSize+1, 100*time.Millisecond) + batcher := tsdb.NewPointBatcher(batchSize+1, 0, 100*time.Millisecond) if batcher == nil { t.Fatal("failed to create batcher for timeout test") } batcher.Start() - var p tsdb.Point + var p models.Point go func() { for i := 0; i < batchSize; i++ { batcher.In() <- p @@ -56,14 +80,14 @@ func TestBatch_Timeout(t *testing.T) { // TestBatch_Flush ensures that a batcher generates a batch when flushed func TestBatch_Flush(t *testing.T) { batchSize := 2 - batcher := tsdb.NewPointBatcher(batchSize, time.Hour) + batcher := tsdb.NewPointBatcher(batchSize, 0, time.Hour) if batcher == nil { t.Fatal("failed to create batcher for flush test") } batcher.Start() - var p tsdb.Point + var p models.Point go func() { batcher.In() <- p batcher.Flush() @@ -78,15 +102,15 @@ func TestBatch_Flush(t *testing.T) { // TestBatch_MultipleBatches ensures that a batcher correctly processes multiple batches. func TestBatch_MultipleBatches(t *testing.T) { batchSize := 2 - batcher := tsdb.NewPointBatcher(batchSize, 100*time.Millisecond) + batcher := tsdb.NewPointBatcher(batchSize, 0, 100*time.Millisecond) if batcher == nil { t.Fatal("failed to create batcher for size test") } batcher.Start() - var p tsdb.Point - var b []tsdb.Point + var p models.Point + var b []models.Point batcher.In() <- p batcher.In() <- p diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/config.go b/_third_party/github.com/influxdb/influxdb/tsdb/config.go index e3f439caf3..9c03ebddd8 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/config.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/config.go @@ -7,6 +7,9 @@ import ( ) const ( + // DefaultEngine is the default engine for new shards + DefaultEngine = "bz1" + // DefaultMaxWALSize is the default size of the WAL before it is flushed. DefaultMaxWALSize = 100 * 1024 * 1024 // 100MB @@ -30,7 +33,7 @@ const ( // DefaultFlushColdInterval specifies how long after a partition has been cold // for writes that a full flush and compaction are forced - DefaultFlushColdInterval = 5 * time.Minute + DefaultFlushColdInterval = 5 * time.Second // DefaultParititionSizeThreshold specifies when a partition gets to this size in // memory, we should slow down writes until it gets a chance to compact. 
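The NewPointBatcher change above adds a middle bp argument that pre-sizes the input channel to bp*sz points, so producers can run ahead of a slow consumer. A minimal sketch of the updated call pattern, assuming only the PointBatcher methods visible in this diff (Start, In, Out, Stop):

package main

import (
	"fmt"
	"time"

	"bosun.org/_third_party/github.com/influxdb/influxdb/models"
	"bosun.org/_third_party/github.com/influxdb/influxdb/tsdb"
)

func main() {
	// Batches of 3 points, buffer space for 2 pending batches, and a 50ms
	// timeout that flushes a partial batch after its first point arrives.
	b := tsdb.NewPointBatcher(3, 2, 50*time.Millisecond)
	b.Start()

	var p models.Point // the batcher never inspects points; a nil interface is fine
	go func() {
		for i := 0; i < 3; i++ {
			b.In() <- p
		}
	}()

	batch := <-b.Out()      // emitted once the size threshold is reached
	fmt.Println(len(batch)) // 3
	b.Stop()
}
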
@@ -43,7 +46,8 @@ const ( ) type Config struct { - Dir string `toml:"dir"` + Dir string `toml:"dir"` + Engine string `toml:"engine"` // WAL config options for b1 (introduced in 0.9.2) MaxWALSize int `toml:"max-wal-size"` @@ -52,25 +56,31 @@ type Config struct { // WAL configuration options for bz1 (introduced in 0.9.3) WALDir string `toml:"wal-dir"` - WALEnableLogging bool `toml:"wal-enable-logging"` + WALLoggingEnabled bool `toml:"wal-logging-enabled"` WALReadySeriesSize int `toml:"wal-ready-series-size"` WALCompactionThreshold float64 `toml:"wal-compaction-threshold"` WALMaxSeriesSize int `toml:"wal-max-series-size"` WALFlushColdInterval toml.Duration `toml:"wal-flush-cold-interval"` WALPartitionSizeThreshold uint64 `toml:"wal-partition-size-threshold"` + + // Query logging + QueryLogEnabled bool `toml:"query-log-enabled"` } func NewConfig() Config { return Config{ + Engine: DefaultEngine, MaxWALSize: DefaultMaxWALSize, WALFlushInterval: toml.Duration(DefaultWALFlushInterval), WALPartitionFlushDelay: toml.Duration(DefaultWALPartitionFlushDelay), - WALEnableLogging: true, + WALLoggingEnabled: true, WALReadySeriesSize: DefaultReadySeriesSize, WALCompactionThreshold: DefaultCompactionThreshold, WALMaxSeriesSize: DefaultMaxSeriesSize, WALFlushColdInterval: toml.Duration(DefaultFlushColdInterval), WALPartitionSizeThreshold: DefaultPartitionSizeThreshold, + + QueryLogEnabled: true, } } diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/cursor.go b/_third_party/github.com/influxdb/influxdb/tsdb/cursor.go index e5c42ff1dc..bb197410ed 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/cursor.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/cursor.go @@ -1,34 +1,50 @@ package tsdb import ( - "bytes" "container/heap" + "encoding/binary" + "sort" + "strings" + + "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" ) +// EOF represents a "not found" key returned by a Cursor. +const EOF = int64(-1) + +// Cursor represents an iterator over a series. +type Cursor interface { + SeekTo(seek int64) (key int64, value interface{}) + Next() (key int64, value interface{}) + Ascending() bool +} + // MultiCursor returns a single cursor that combines the results of all cursors in order. // // If the same key is returned from multiple cursors then the first cursor // specified will take precendence. A key will only be returned once from the // returned cursor. func MultiCursor(cursors ...Cursor) Cursor { - return &multiCursor{cursors: cursors} + return &multiCursor{ + cursors: cursors, + } } // multiCursor represents a cursor that combines multiple cursors into one. type multiCursor struct { cursors []Cursor heap cursorHeap - prev []byte + prev int64 // previously read key } // Seek moves the cursor to a given key. -func (mc *multiCursor) Seek(seek []byte) (key, value []byte) { +func (mc *multiCursor) SeekTo(seek int64) (int64, interface{}) { // Initialize heap. h := make(cursorHeap, 0, len(mc.cursors)) for i, c := range mc.cursors { // Move cursor to position. Skip if it's empty. - k, v := c.Seek(seek) - if k == nil { + k, v := c.SeekTo(seek) + if k == EOF { continue } @@ -43,24 +59,32 @@ func (mc *multiCursor) Seek(seek []byte) (key, value []byte) { heap.Init(&h) mc.heap = h - mc.prev = nil + mc.prev = EOF return mc.pop() } +// Ascending returns the direction of the first cursor. +func (mc *multiCursor) Ascending() bool { + if len(mc.cursors) == 0 { + return true + } + return mc.cursors[0].Ascending() +} + // Next returns the next key/value from the cursor. 
-func (mc *multiCursor) Next() (key, value []byte) { return mc.pop() } +func (mc *multiCursor) Next() (int64, interface{}) { return mc.pop() } // pop returns the next item from the heap. // Reads the next key/value from item's cursor and puts it back on the heap. -func (mc *multiCursor) pop() (key, value []byte) { +func (mc *multiCursor) pop() (key int64, value interface{}) { // Read items until we have a key that doesn't match the previously read one. // This is to perform deduplication when there's multiple items with the same key. // The highest priority cursor will be read first and then remaining keys will be dropped. for { - // Return nil if there are no more items left. + // Return EOF marker if there are no more items left. if len(mc.heap) == 0 { - return nil, nil + return EOF, nil } // Read the next item from the heap. @@ -70,12 +94,12 @@ func (mc *multiCursor) pop() (key, value []byte) { key, value = item.key, item.value // Read the next item from the cursor. Push back to heap if one exists. - if item.key, item.value = item.cursor.Next(); item.key != nil { + if item.key, item.value = item.cursor.Next(); item.key != EOF { heap.Push(&mc.heap, item) } // Skip if this key matches the previously returned one. - if bytes.Equal(mc.prev, key) { + if key == mc.prev { continue } @@ -90,12 +114,16 @@ type cursorHeap []*cursorHeapItem func (h cursorHeap) Len() int { return len(h) } func (h cursorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } func (h cursorHeap) Less(i, j int) bool { - if cmp := bytes.Compare(h[i].key, h[j].key); cmp == -1 { - return true - } else if cmp == 0 { + // Use priority if the keys are the same. + if h[i].key == h[j].key { return h[i].priority > h[j].priority } - return false + + // Otherwise compare based on cursor direction. + if h[i].cursor.Ascending() { + return h[i].key < h[j].key + } + return h[i].key > h[j].key } func (h *cursorHeap) Push(x interface{}) { @@ -112,8 +140,261 @@ func (h *cursorHeap) Pop() interface{} { // cursorHeapItem is something we manage in a priority queue. type cursorHeapItem struct { - key []byte - value []byte + key int64 + value interface{} cursor Cursor priority int } + +// TagSetCursor is virtual cursor that iterates over multiple TagsCursors. +type TagSetCursor struct { + measurement string // Measurement name + currentFields interface{} // the current decoded and selected fields for the cursor in play + tags map[string]string // Tag key-value pairs + cursors []*TagsCursor // Underlying tags cursors. + currentTags map[string]string // the current tags for the underlying series cursor in play + + SelectFields []string // fields to be selected + SelectWhereFields []string // fields in both the select and where clause to be returned or filtered on + + // Min-heap of cursors ordered by timestamp. + heap *pointHeap + + // Memoize the cursor's tagset-based key. + memokey string +} + +// NewTagSetCursor returns a instance of TagSetCursor. +func NewTagSetCursor(m string, t map[string]string, c []*TagsCursor) *TagSetCursor { + return &TagSetCursor{ + measurement: m, + tags: t, + cursors: c, + heap: newPointHeap(), + } +} + +func (tsc *TagSetCursor) key() string { + if tsc.memokey == "" { + if len(tsc.tags) == 0 { + tsc.memokey = tsc.measurement + } else { + tsc.memokey = strings.Join([]string{tsc.measurement, string(MarshalTags(tsc.tags))}, "|") + } + } + return tsc.memokey +} + +func (tsc *TagSetCursor) Init(seek int64) { + tsc.heap = newPointHeap() + + // Prime the buffers. 
+ for i := 0; i < len(tsc.cursors); i++ { + k, v := tsc.cursors[i].SeekTo(seek) + if k == EOF { + k, v = tsc.cursors[i].Next() + } + if k == EOF { + continue + } + + heap.Push(tsc.heap, &pointHeapItem{ + timestamp: k, + value: v, + cursor: tsc.cursors[i], + }) + } +} + +// Next returns the next matching series-key, timestamp byte slice and meta tags for the tagset. Filtering +// is enforced on the values. If there is no matching value, then a nil result is returned. +func (tsc *TagSetCursor) Next(tmin, tmax int64) (int64, interface{}) { + for { + // If we're out of points, we're done. + if tsc.heap.Len() == 0 { + return -1, nil + } + + // Grab the next point with the lowest timestamp. + p := heap.Pop(tsc.heap).(*pointHeapItem) + + // We're done if the point is outside the query's time range [tmin:tmax). + if p.timestamp != tmin && (p.timestamp < tmin || p.timestamp >= tmax) { + return -1, nil + } + + // Save timestamp & value. + timestamp, value := p.timestamp, p.value + + // Keep track of all fields for series cursor so we can + // respond with them if asked + tsc.currentFields = value + + // Keep track of the current tags for the series cursor so we can + // respond with them if asked + tsc.currentTags = p.cursor.tags + + // Advance the cursor. + if nextKey, nextVal := p.cursor.Next(); nextKey != -1 { + *p = pointHeapItem{ + timestamp: nextKey, + value: nextVal, + cursor: p.cursor, + } + heap.Push(tsc.heap, p) + } + + // Value didn't match, look for the next one. + if value == nil { + continue + } + + // Filter value. + if p.cursor.filter != nil { + // Convert value to a map for filter evaluation. + m, ok := value.(map[string]interface{}) + if !ok { + m = map[string]interface{}{tsc.SelectFields[0]: value} + } + + // If filter fails then skip to the next value. + if !influxql.EvalBool(p.cursor.filter, m) { + continue + } + } + + // Filter out single field, if specified. + if len(tsc.SelectFields) == 1 { + if m, ok := value.(map[string]interface{}); ok { + value = m[tsc.SelectFields[0]] + } + if value == nil { + continue + } + } + + return timestamp, value + } +} + +// Fields returns the current fields of the current cursor +func (tsc *TagSetCursor) Fields() map[string]interface{} { + switch v := tsc.currentFields.(type) { + case map[string]interface{}: + return v + default: + return map[string]interface{}{"": v} + } +} + +// Tags returns the current tags of the current cursor +// if there is no current currsor, it returns nil +func (tsc *TagSetCursor) Tags() map[string]string { return tsc.currentTags } + +type pointHeapItem struct { + timestamp int64 + value interface{} + cursor *TagsCursor // cursor whence pointHeapItem came +} + +type pointHeap []*pointHeapItem + +func newPointHeap() *pointHeap { + q := make(pointHeap, 0) + heap.Init(&q) + return &q +} + +func (pq pointHeap) Len() int { return len(pq) } + +func (pq pointHeap) Less(i, j int) bool { + // We want a min-heap (points in chronological order), so use less than. + return pq[i].timestamp < pq[j].timestamp +} + +func (pq pointHeap) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] } + +func (pq *pointHeap) Push(x interface{}) { + item := x.(*pointHeapItem) + *pq = append(*pq, item) +} + +func (pq *pointHeap) Pop() interface{} { + old := *pq + n := len(old) + item := old[n-1] + *pq = old[0 : n-1] + return item +} + +// TagsCursor is a cursor with attached tags and filter. 
+type TagsCursor struct { + cursor Cursor + filter influxql.Expr + tags map[string]string + + seek int64 + buf struct { + key int64 + value interface{} + } +} + +// NewTagsCursor returns a new instance of a series cursor. +func NewTagsCursor(c Cursor, filter influxql.Expr, tags map[string]string) *TagsCursor { + return &TagsCursor{ + cursor: c, + filter: filter, + tags: tags, + seek: EOF, + } +} + +// Seek positions returning the key and value at that key. +func (c *TagsCursor) SeekTo(seek int64) (int64, interface{}) { + // We've seeked on this cursor. This seek is after that previous cached seek + // and the result it gave was after the key for this seek. + // + // In this case, any seek would just return what we got before, so there's + // no point in reseeking. + if c.seek != -1 && c.seek < seek && (c.buf.key == EOF || c.buf.key >= seek) { + return c.buf.key, c.buf.value + } + + // Seek to key/value in underlying cursor. + key, value := c.cursor.SeekTo(seek) + + // Save the seek to the buffer. + c.seek = seek + c.buf.key, c.buf.value = key, value + return key, value +} + +// Next returns the next timestamp and value from the cursor. +func (c *TagsCursor) Next() (int64, interface{}) { + // Invalidate the seek. + c.seek = -1 + c.buf.key, c.buf.value = 0, nil + + // Return next key/value. + return c.cursor.Next() +} + +// TagSetCursors represents a sortable slice of TagSetCursors. +type TagSetCursors []*TagSetCursor + +func (a TagSetCursors) Len() int { return len(a) } +func (a TagSetCursors) Less(i, j int) bool { return a[i].key() < a[j].key() } +func (a TagSetCursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (a TagSetCursors) Keys() []string { + keys := []string{} + for i := range a { + keys = append(keys, a[i].key()) + } + sort.Strings(keys) + return keys +} + +// btou64 converts an 8-byte slice into an uint64. +func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/cursor_test.go b/_third_party/github.com/influxdb/influxdb/tsdb/cursor_test.go index 012f1a32ce..56e6a671e7 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/cursor_test.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/cursor_test.go @@ -14,50 +14,96 @@ import ( // Ensure the multi-cursor can correctly iterate across a single subcursor. func TestMultiCursor_Single(t *testing.T) { + mc := tsdb.MultiCursor(NewCursor([]CursorItem{ + {Key: 0, Value: 0}, + {Key: 1, Value: 10}, + {Key: 2, Value: 20}, + }, true)) + + if k, v := mc.SeekTo(0); k != 0 || v.(int) != 0 { + t.Fatalf("unexpected key/value: %x / %x", k, v) + } else if k, v = mc.Next(); k != 1 || v.(int) != 10 { + t.Fatalf("unexpected key/value: %x / %x", k, v) + } else if k, v = mc.Next(); k != 2 || v.(int) != 20 { + t.Fatalf("unexpected key/value: %x / %x", k, v) + } else if k, v = mc.Next(); k != tsdb.EOF { + t.Fatalf("expected eof, got: %x / %x", k, v) + } +} + +// Ensure the multi-cursor can correctly iterate across a single subcursor in reverse order. 
+func TestMultiCursor_Single_Reverse(t *testing.T) { + mc := tsdb.MultiCursor(NewCursor([]CursorItem{ + {Key: 0, Value: 0}, + {Key: 1, Value: 10}, + {Key: 2, Value: 20}, + }, false)) + + if k, v := mc.SeekTo(2); k != 2 || v.(int) != 20 { + t.Fatalf("unexpected key/value: %x / %x", k, v) + } else if k, v = mc.Next(); k != 1 || v.(int) != 10 { + t.Fatalf("unexpected key/value: %x / %x", k, v) + } else if k, v = mc.Next(); k != 0 || v.(int) != 0 { + t.Fatalf("unexpected key/value: %x / %x", k, v) + } else if k, v = mc.Next(); k != tsdb.EOF { + t.Fatalf("expected eof, got: %x / %x", k, v) + } +} + +// Ensure the multi-cursor can correctly iterate across multiple non-overlapping subcursors. +func TestMultiCursor_Multiple_NonOverlapping(t *testing.T) { mc := tsdb.MultiCursor( NewCursor([]CursorItem{ - {Key: []byte{0x00}, Value: []byte{0x00}}, - {Key: []byte{0x01}, Value: []byte{0x10}}, - {Key: []byte{0x02}, Value: []byte{0x20}}, - }), + {Key: 0, Value: 0}, + {Key: 3, Value: 30}, + {Key: 4, Value: 40}, + }, true), + NewCursor([]CursorItem{ + {Key: 1, Value: 10}, + {Key: 2, Value: 20}, + }, true), ) - if k, v := mc.Seek([]byte{0x00}); !bytes.Equal(k, []byte{0x00}) || !bytes.Equal(v, []byte{0x00}) { + if k, v := mc.SeekTo(0); k != 0 || v.(int) != 0 { + t.Fatalf("unexpected key/value: %x / %x", k, v) + } else if k, v = mc.Next(); k != 1 || v.(int) != 10 { + t.Fatalf("unexpected key/value: %x / %x", k, v) + } else if k, v = mc.Next(); k != 2 || v.(int) != 20 { t.Fatalf("unexpected key/value: %x / %x", k, v) - } else if k, v = mc.Next(); !bytes.Equal(k, []byte{0x01}) || !bytes.Equal(v, []byte{0x10}) { + } else if k, v = mc.Next(); k != 3 || v.(int) != 30 { t.Fatalf("unexpected key/value: %x / %x", k, v) - } else if k, v = mc.Next(); !bytes.Equal(k, []byte{0x02}) || !bytes.Equal(v, []byte{0x20}) { + } else if k, v = mc.Next(); k != 4 || v.(int) != 40 { t.Fatalf("unexpected key/value: %x / %x", k, v) - } else if k, v = mc.Next(); k != nil { + } else if k, v = mc.Next(); k != tsdb.EOF { t.Fatalf("expected eof, got: %x / %x", k, v) } } // Ensure the multi-cursor can correctly iterate across multiple non-overlapping subcursors. 
-func TestMultiCursor_Multiple_NonOverlapping(t *testing.T) { +func TestMultiCursor_Multiple_NonOverlapping_Reverse(t *testing.T) { mc := tsdb.MultiCursor( NewCursor([]CursorItem{ - {Key: []byte{0x00}, Value: []byte{0x00}}, - {Key: []byte{0x03}, Value: []byte{0x30}}, - {Key: []byte{0x04}, Value: []byte{0x40}}, - }), + {Key: 0, Value: 0}, + {Key: 3, Value: 30}, + {Key: 4, Value: 40}, + }, false), NewCursor([]CursorItem{ - {Key: []byte{0x01}, Value: []byte{0x10}}, - {Key: []byte{0x02}, Value: []byte{0x20}}, - }), + {Key: 1, Value: 10}, + {Key: 2, Value: 20}, + }, false), ) - if k, v := mc.Seek([]byte{0x00}); !bytes.Equal(k, []byte{0x00}) || !bytes.Equal(v, []byte{0x00}) { + if k, v := mc.SeekTo(4); k != 4 || v.(int) != 40 { t.Fatalf("unexpected key/value: %x / %x", k, v) - } else if k, v = mc.Next(); !bytes.Equal(k, []byte{0x01}) || !bytes.Equal(v, []byte{0x10}) { + } else if k, v = mc.Next(); k != 3 || v.(int) != 30 { t.Fatalf("unexpected key/value: %x / %x", k, v) - } else if k, v = mc.Next(); !bytes.Equal(k, []byte{0x02}) || !bytes.Equal(v, []byte{0x20}) { + } else if k, v = mc.Next(); k != 2 || v.(int) != 20 { t.Fatalf("unexpected key/value: %x / %x", k, v) - } else if k, v = mc.Next(); !bytes.Equal(k, []byte{0x03}) || !bytes.Equal(v, []byte{0x30}) { + } else if k, v = mc.Next(); k != 1 || v.(int) != 10 { t.Fatalf("unexpected key/value: %x / %x", k, v) - } else if k, v = mc.Next(); !bytes.Equal(k, []byte{0x04}) || !bytes.Equal(v, []byte{0x40}) { + } else if k, v = mc.Next(); k != 0 || v.(int) != 00 { t.Fatalf("unexpected key/value: %x / %x", k, v) - } else if k, v = mc.Next(); k != nil { + } else if k, v = mc.Next(); k != tsdb.EOF { t.Fatalf("expected eof, got: %x / %x", k, v) } } @@ -66,61 +112,89 @@ func TestMultiCursor_Multiple_NonOverlapping(t *testing.T) { func TestMultiCursor_Multiple_Overlapping(t *testing.T) { mc := tsdb.MultiCursor( NewCursor([]CursorItem{ - {Key: []byte{0x00}, Value: []byte{0x00}}, - {Key: []byte{0x03}, Value: []byte{0x03}}, - {Key: []byte{0x04}, Value: []byte{0x04}}, - }), + {Key: 0, Value: 0}, + {Key: 3, Value: 3}, + {Key: 4, Value: 4}, + }, true), + NewCursor([]CursorItem{ + {Key: 0, Value: 0xF0}, + {Key: 2, Value: 0xF2}, + {Key: 4, Value: 0xF4}, + }, true), + ) + + if k, v := mc.SeekTo(0); k != 0 || v.(int) != 0 { + t.Fatalf("unexpected key/value: %x / %x", k, v) + } else if k, v = mc.Next(); k != 2 || v.(int) != 0xF2 { + t.Fatalf("unexpected key/value: %x / %x", k, v) + } else if k, v = mc.Next(); k != 3 || v.(int) != 3 { + t.Fatalf("unexpected key/value: %x / %x", k, v) + } else if k, v = mc.Next(); k != 4 || v.(int) != 4 { + t.Fatalf("unexpected key/value: %x / %x", k, v) + } else if k, v = mc.Next(); k != tsdb.EOF { + t.Fatalf("expected eof, got: %x / %x", k, v) + } +} + +// Ensure the multi-cursor can correctly iterate across multiple overlapping subcursors. 
+func TestMultiCursor_Multiple_Overlapping_Reverse(t *testing.T) { + mc := tsdb.MultiCursor( + NewCursor([]CursorItem{ + {Key: 0, Value: 0}, + {Key: 3, Value: 3}, + {Key: 4, Value: 4}, + }, false), NewCursor([]CursorItem{ - {Key: []byte{0x00}, Value: []byte{0xF0}}, - {Key: []byte{0x02}, Value: []byte{0xF2}}, - {Key: []byte{0x04}, Value: []byte{0xF4}}, - }), + {Key: 0, Value: 0xF0}, + {Key: 2, Value: 0xF2}, + {Key: 4, Value: 0xF4}, + }, false), ) - if k, v := mc.Seek([]byte{0x00}); !bytes.Equal(k, []byte{0x00}) || !bytes.Equal(v, []byte{0x00}) { + if k, v := mc.SeekTo(4); k != 4 || v.(int) != 4 { t.Fatalf("unexpected key/value: %x / %x", k, v) - } else if k, v = mc.Next(); !bytes.Equal(k, []byte{0x02}) || !bytes.Equal(v, []byte{0xF2}) { + } else if k, v = mc.Next(); k != 3 || v.(int) != 3 { t.Fatalf("unexpected key/value: %x / %x", k, v) - } else if k, v = mc.Next(); !bytes.Equal(k, []byte{0x03}) || !bytes.Equal(v, []byte{0x03}) { + } else if k, v = mc.Next(); k != 2 || v.(int) != 0xF2 { t.Fatalf("unexpected key/value: %x / %x", k, v) - } else if k, v = mc.Next(); !bytes.Equal(k, []byte{0x04}) || !bytes.Equal(v, []byte{0x04}) { + } else if k, v = mc.Next(); k != 0 || v.(int) != 0 { t.Fatalf("unexpected key/value: %x / %x", k, v) - } else if k, v = mc.Next(); k != nil { + } else if k, v = mc.Next(); k != tsdb.EOF { t.Fatalf("expected eof, got: %x / %x", k, v) } } // Ensure the multi-cursor can handle randomly generated data. func TestMultiCursor_Quick(t *testing.T) { - quick.Check(func(seek uint64, cursors []Cursor) bool { - var got, exp [][]byte - seek %= 100 + quick.Check(func(useek uint64, cursors []Cursor) bool { + var got, exp []CursorItem + seek := int64(useek) % 100 // Merge all cursor data to determine expected output. // First seen key overrides all other items with the same key. - m := make(map[string][]byte) + m := make(map[int64]CursorItem) for _, c := range cursors { for _, item := range c.items { - if bytes.Compare(item.Key, u64tob(seek)) == -1 { + if item.Key < seek { continue } - if _, ok := m[string(item.Key)]; ok { + if _, ok := m[item.Key]; ok { continue } - m[string(item.Key)] = item.Value + m[item.Key] = item } } // Convert map back to single item list. - for k, v := range m { - exp = append(exp, append([]byte(k), v...)) + for _, item := range m { + exp = append(exp, item) } - sort.Sort(byteSlices(exp)) + sort.Sort(CursorItems(exp)) // Create multi-cursor and iterate over all items. mc := tsdb.MultiCursor(tsdbCursorSlice(cursors)...) - for k, v := mc.Seek(u64tob(seek)); k != nil; k, v = mc.Next() { - got = append(got, append(k, v...)) + for k, v := mc.SeekTo(seek); k != tsdb.EOF; k, v = mc.Next() { + got = append(got, CursorItem{k, v.(int)}) } // Verify results. @@ -134,48 +208,86 @@ func TestMultiCursor_Quick(t *testing.T) { // Cursor represents an in-memory test cursor. type Cursor struct { - items []CursorItem - index int + items []CursorItem + index int + ascending bool } // NewCursor returns a new instance of Cursor. -func NewCursor(items []CursorItem) *Cursor { +func NewCursor(items []CursorItem, ascending bool) *Cursor { + index := 0 sort.Sort(CursorItems(items)) - return &Cursor{items: items} + + if !ascending { + index = len(items) + } + return &Cursor{ + items: items, + index: index, + ascending: ascending, + } } +func (c *Cursor) Ascending() bool { return c.ascending } + // Seek seeks to an item by key. 
-func (c *Cursor) Seek(seek []byte) (key, value []byte) { +func (c *Cursor) SeekTo(seek int64) (key int64, value interface{}) { + if c.ascending { + return c.seekForward(seek) + } + return c.seekReverse(seek) +} + +func (c *Cursor) seekForward(seek int64) (key int64, value interface{}) { for c.index = 0; c.index < len(c.items); c.index++ { - if bytes.Compare(c.items[c.index].Key, seek) == -1 { // skip keys less than seek + if c.items[c.index].Key < seek { // skip keys less than seek continue } return c.items[c.index].Key, c.items[c.index].Value } - return nil, nil + return tsdb.EOF, nil +} + +func (c *Cursor) seekReverse(seek int64) (key int64, value interface{}) { + for c.index = len(c.items) - 1; c.index >= 0; c.index-- { + if c.items[c.index].Key > seek { // skip keys greater than seek + continue + } + return c.items[c.index].Key, c.items[c.index].Value + } + return tsdb.EOF, nil } // Next returns the next key/value pair. -func (c *Cursor) Next() (key, value []byte) { - if c.index >= len(c.items)-1 { - return nil, nil +func (c *Cursor) Next() (key int64, value interface{}) { + if !c.ascending && c.index < 0 { + return tsdb.EOF, nil + } + + if c.ascending && c.index >= len(c.items) { + return tsdb.EOF, nil } - c.index++ - return c.items[c.index].Key, c.items[c.index].Value + k, v := c.items[c.index].Key, c.items[c.index].Value + + if c.ascending { + c.index++ + } else { + c.index-- + } + return k, v } // Generate returns a randomly generated cursor. Implements quick.Generator. func (c Cursor) Generate(rand *rand.Rand, size int) reflect.Value { c.index = 0 + c.ascending = true c.items = make([]CursorItem, rand.Intn(size)) for i := range c.items { - value, _ := quick.Value(reflect.TypeOf([]byte(nil)), rand) - c.items[i] = CursorItem{ - Key: u64tob(uint64(rand.Intn(size))), - Value: value.Interface().([]byte), + Key: rand.Int63n(int64(size)), + Value: rand.Int(), } } @@ -196,15 +308,15 @@ func tsdbCursorSlice(a []Cursor) []tsdb.Cursor { // CursorItem represents a key/value pair in a cursor. type CursorItem struct { - Key []byte - Value []byte + Key int64 + Value int } type CursorItems []CursorItem func (a CursorItems) Len() int { return len(a) } func (a CursorItems) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a CursorItems) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 } +func (a CursorItems) Less(i, j int) bool { return a[i].Key < a[j].Key } // byteSlices represents a sortable slice of byte slices. type byteSlices [][]byte diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/engine.go b/_third_party/github.com/influxdb/influxdb/tsdb/engine.go index 3de8c66d16..ee19a49472 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/engine.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/engine.go @@ -10,6 +10,7 @@ import ( "time" "bosun.org/_third_party/github.com/boltdb/bolt" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" ) var ( @@ -17,9 +18,6 @@ var ( ErrFormatNotFound = errors.New("format not found") ) -// DefaultEngine is the default engine used by the shard when initializing. -const DefaultEngine = "bz1" - // Engine represents a swappable storage engine for the shard. 
type Engine interface { Open() error @@ -29,10 +27,12 @@ type Engine interface { LoadMetadataIndex(index *DatabaseIndex, measurementFields map[string]*MeasurementFields) error Begin(writable bool) (Tx, error) - WritePoints(points []Point, measurementFieldsToSave map[string]*MeasurementFields, seriesToCreate []*SeriesCreate) error + WritePoints(points []models.Point, measurementFieldsToSave map[string]*MeasurementFields, seriesToCreate []*SeriesCreate) error DeleteSeries(keys []string) error DeleteMeasurement(name string, seriesKeys []string) error SeriesCount() (n int, err error) + + io.WriterTo } // NewEngineFunc creates a new engine. @@ -121,16 +121,11 @@ func NewEngineOptions() EngineOptions { type Tx interface { io.WriterTo - Cursor(series string) Cursor Size() int64 Commit() error Rollback() error -} -// Cursor represents an iterator over a series. -type Cursor interface { - Seek(seek []byte) (key, value []byte) - Next() (key, value []byte) + Cursor(series string, fields []string, dec *FieldCodec, ascending bool) Cursor } // DedupeEntries returns slices with unique keys (the first 8 bytes). diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/executor.go b/_third_party/github.com/influxdb/influxdb/tsdb/executor.go index 92295740aa..be96bd92ce 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/executor.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/executor.go @@ -7,6 +7,7 @@ import ( "time" "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" ) const ( @@ -23,40 +24,7 @@ const ( // Executor is an interface for a query executor. type Executor interface { - Execute() <-chan *influxql.Row -} - -// Mapper is the interface all Mapper types must implement. -type Mapper interface { - Open() error - SetRemote(m Mapper) error - TagSets() []string - Fields() []string - NextChunk() (interface{}, error) - Close() -} - -// StatefulMapper encapsulates a Mapper and some state that the executor needs to -// track for that mapper. -type StatefulMapper struct { - Mapper - bufferedChunk *MapperOutput // Last read chunk. - drained bool -} - -// NextChunk wraps a RawMapper and some state. -func (sm *StatefulMapper) NextChunk() (*MapperOutput, error) { - c, err := sm.Mapper.NextChunk() - if err != nil { - return nil, err - } - chunk, ok := c.(*MapperOutput) - if !ok { - if chunk == interface{}(nil) { - return nil, nil - } - } - return chunk, nil + Execute() <-chan *models.Row } type SelectExecutor struct { @@ -81,9 +49,9 @@ func NewSelectExecutor(stmt *influxql.SelectStatement, mappers []Mapper, chunkSi } // Execute begins execution of the query and returns a channel to receive rows. -func (e *SelectExecutor) Execute() <-chan *influxql.Row { +func (e *SelectExecutor) Execute() <-chan *models.Row { // Create output channel and stream data in a separate goroutine. - out := make(chan *influxql.Row, 0) + out := make(chan *models.Row, 0) // Certain operations on the SELECT statement can be performed by the SelectExecutor without // assistance from the Mappers. This allows the SelectExecutor to prepare aggregation functions @@ -140,6 +108,23 @@ func (e *SelectExecutor) nextMapperLowestTime(tagset string) int64 { return minTime } +// nextMapperHighestTime returns the highest time across all Mappers, for the given tagset. 
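
Cursor construction now hangs off Tx and takes an explicit direction flag. A hypothetical usage sketch follows; the tx and codec values, the series key, and the field name are illustrative assumptions, while the Cursor signature, SeekTo/Next, and the tsdb.EOF sentinel come from this patch.

// scanNewestFirst walks one series in descending time order.
// Assumes a tsdb.Tx and a *tsdb.FieldCodec obtained elsewhere; needs
// "fmt", "math", and the vendored tsdb package imported.
func scanNewestFirst(tx tsdb.Tx, codec *tsdb.FieldCodec) {
	cur := tx.Cursor("cpu,host=serverA", []string{"value"}, codec, false) // ascending=false
	for k, v := cur.SeekTo(math.MaxInt64); k != tsdb.EOF; k, v = cur.Next() {
		fmt.Printf("%d %v\n", k, v)
	}
}
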
+func (e *SelectExecutor) nextMapperHighestTime(tagset string) int64 { + maxTime := int64(math.MinInt64) + for _, m := range e.mappers { + if !m.drained && m.bufferedChunk != nil { + if m.bufferedChunk.key() != tagset { + continue + } + t := m.bufferedChunk.Values[0].Time + if t > maxTime { + maxTime = t + } + } + } + return maxTime +} + // tagSetIsLimited returns whether data for the given tagset has been LIMITed. func (e *SelectExecutor) tagSetIsLimited(tagset string) bool { _, ok := e.limitedTagSets[tagset] @@ -151,14 +136,14 @@ func (e *SelectExecutor) limitTagSet(tagset string) { e.limitedTagSets[tagset] = struct{}{} } -func (e *SelectExecutor) executeRaw(out chan *influxql.Row) { +func (e *SelectExecutor) executeRaw(out chan *models.Row) { // It's important that all resources are released when execution completes. defer e.close() // Open the mappers. for _, m := range e.mappers { if err := m.Open(); err != nil { - out <- &influxql.Row{Err: err} + out <- &models.Row{Err: err} return } } @@ -195,7 +180,7 @@ func (e *SelectExecutor) executeRaw(out chan *influxql.Row) { if m.bufferedChunk == nil { m.bufferedChunk, err = m.NextChunk() if err != nil { - out <- &influxql.Row{Err: err} + out <- &models.Row{Err: err} return } if m.bufferedChunk == nil { @@ -246,9 +231,20 @@ func (e *SelectExecutor) executeRaw(out chan *influxql.Row) { rowWriter = nil } - // Process the mapper outputs. We can send out everything up to the min of the last time - // of the chunks for the next tagset. - minTime := e.nextMapperLowestTime(tagset) + ascending := true + if len(e.stmt.SortFields) > 0 { + ascending = e.stmt.SortFields[0].Ascending + } + + var timeBoundary int64 + + if ascending { + // Process the mapper outputs. We can send out everything up to the min of the last time + // of the chunks for the next tagset. + timeBoundary = e.nextMapperLowestTime(tagset) + } else { + timeBoundary = e.nextMapperHighestTime(tagset) + } // Now empty out all the chunks up to the min time. Create new output struct for this data. var chunkedOutput *MapperOutput @@ -257,19 +253,30 @@ func (e *SelectExecutor) executeRaw(out chan *influxql.Row) { continue } + chunkBoundary := false + if ascending { + chunkBoundary = m.bufferedChunk.Values[0].Time > timeBoundary + } else { + chunkBoundary = m.bufferedChunk.Values[0].Time < timeBoundary + } + // This mapper's next chunk is not for the next tagset, or the very first value of // the chunk is at a higher acceptable timestamp. Skip it. - if m.bufferedChunk.key() != tagset || m.bufferedChunk.Values[0].Time > minTime { + if m.bufferedChunk.key() != tagset || chunkBoundary { continue } // Find the index of the point up to the min. ind := len(m.bufferedChunk.Values) for i, mo := range m.bufferedChunk.Values { - if mo.Time > minTime { + if ascending && mo.Time > timeBoundary { + ind = i + break + } else if !ascending && mo.Time < timeBoundary { ind = i break } + } // Add up to the index to the values @@ -293,8 +300,12 @@ func (e *SelectExecutor) executeRaw(out chan *influxql.Row) { } } - // Sort the values by time first so we can then handle offset and limit - sort.Sort(MapperValues(chunkedOutput.Values)) + if ascending { + // Sort the values by time first so we can then handle offset and limit + sort.Sort(MapperValues(chunkedOutput.Values)) + } else { + sort.Sort(sort.Reverse(MapperValues(chunkedOutput.Values))) + } // Now that we have full name and tag details, initialize the rowWriter. // The Name and Tags will be the same for all mappers. 
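
The ascending and descending paths in executeRaw pick a time boundary across all buffered mapper chunks: emit up to the lowest time when ascending, down to the highest when descending, so no mapper can later produce a point inside the range already sent. A small standalone restatement of that rule, with plain int64 timestamps standing in for buffered chunk values:

package main

import (
	"fmt"
	"math"
)

// timeBoundary mirrors the boundary rule (simplified): ascending queries
// emit up to the minimum buffered time across mappers, descending queries
// down to the maximum.
func timeBoundary(bufferedTimes []int64, ascending bool) int64 {
	boundary := int64(math.MaxInt64)
	if !ascending {
		boundary = int64(math.MinInt64)
	}
	for _, t := range bufferedTimes {
		if ascending && t < boundary {
			boundary = t
		} else if !ascending && t > boundary {
			boundary = t
		}
	}
	return boundary
}

func main() {
	fmt.Println(timeBoundary([]int64{30, 10, 20}, true))  // 10
	fmt.Println(timeBoundary([]int64{30, 10, 20}, false)) // 30
}
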
@@ -314,7 +325,7 @@ func (e *SelectExecutor) executeRaw(out chan *influxql.Row) { if e.stmt.HasDerivative() { interval, err := derivativeInterval(e.stmt) if err != nil { - out <- &influxql.Row{Err: err} + out <- &models.Row{Err: err} return } rowWriter.transformer = &RawQueryDerivativeProcessor{ @@ -334,7 +345,7 @@ func (e *SelectExecutor) executeRaw(out chan *influxql.Row) { close(out) } -func (e *SelectExecutor) executeAggregate(out chan *influxql.Row) { +func (e *SelectExecutor) executeAggregate(out chan *models.Row) { // It's important to close all resources when execution completes. defer e.close() @@ -343,27 +354,23 @@ func (e *SelectExecutor) executeAggregate(out chan *influxql.Row) { // the offsets within the value slices that are returned by the // mapper. aggregates := e.stmt.FunctionCalls() - reduceFuncs := make([]influxql.ReduceFunc, len(aggregates)) + reduceFuncs := make([]reduceFunc, len(aggregates)) for i, c := range aggregates { - reduceFunc, err := influxql.InitializeReduceFunc(c) + reduceFunc, err := initializeReduceFunc(c) if err != nil { - out <- &influxql.Row{Err: err} + out <- &models.Row{Err: err} return } reduceFuncs[i] = reduceFunc } // Put together the rows to return, starting with columns. - columnNames := make([]string, len(e.stmt.Fields)+1) - columnNames[0] = "time" - for i, f := range e.stmt.Fields { - columnNames[i+1] = f.Name() - } + columnNames := e.stmt.ColumnNames() // Open the mappers. for _, m := range e.mappers { if err := m.Open(); err != nil { - out <- &influxql.Row{Err: err} + out <- &models.Row{Err: err} return } } @@ -382,7 +389,7 @@ func (e *SelectExecutor) executeAggregate(out chan *influxql.Row) { for _, m := range e.mappers { m.bufferedChunk, err = m.NextChunk() if err != nil { - out <- &influxql.Row{Err: err} + out <- &models.Row{Err: err} return } if m.bufferedChunk == nil { @@ -390,6 +397,11 @@ func (e *SelectExecutor) executeAggregate(out chan *influxql.Row) { } } + ascending := true + if len(e.stmt.SortFields) > 0 { + ascending = e.stmt.SortFields[0].Ascending + } + // Keep looping until all mappers drained. for !e.mappersDrained() { // Send out data for the next alphabetically-lowest tagset. All Mappers send out in this order @@ -408,7 +420,7 @@ func (e *SelectExecutor) executeAggregate(out chan *influxql.Row) { if m.bufferedChunk == nil { m.bufferedChunk, err = m.NextChunk() if err != nil { - out <- &influxql.Row{Err: err} + out <- &models.Row{Err: err} return } if m.bufferedChunk == nil { @@ -429,14 +441,14 @@ func (e *SelectExecutor) executeAggregate(out chan *influxql.Row) { } // Prep a row, ready for kicking out. - var row *influxql.Row + var row *models.Row // Prep for bucketing data by start time of the interval. buckets := map[int64][][]interface{}{} for _, chunk := range chunks { if row == nil { - row = &influxql.Row{ + row = &models.Row{ Name: chunk.Name, Tags: chunk.Tags, Columns: columnNames, @@ -462,7 +474,12 @@ func (e *SelectExecutor) executeAggregate(out chan *influxql.Row) { for k, _ := range buckets { tMins = append(tMins, k) } - sort.Sort(tMins) + + if ascending { + sort.Sort(tMins) + } else { + sort.Sort(sort.Reverse(tMins)) + } values := make([][]interface{}, len(tMins)) for i, t := range tMins { @@ -475,6 +492,12 @@ func (e *SelectExecutor) executeAggregate(out chan *influxql.Row) { } } + // Perform aggregate unwraps + values, err = e.processFunctions(values, columnNames) + if err != nil { + out <- &models.Row{Err: err} + } + // Perform any mathematics. 
values = processForMath(e.stmt.Fields, values) @@ -569,6 +592,135 @@ func (e *SelectExecutor) close() { } } +func (e *SelectExecutor) processFunctions(results [][]interface{}, columnNames []string) ([][]interface{}, error) { + callInPosition := e.stmt.FunctionCallsByPosition() + hasTimeField := e.stmt.HasTimeFieldSpecified() + + var err error + for i, calls := range callInPosition { + // We can only support expanding fields if a single selector call was specified + // i.e. select tx, max(rx) from foo + // If you have multiple selectors or aggregates, there is no way of knowing who gets to insert the values, so we don't + // i.e. select tx, max(rx), min(rx) from foo + if len(calls) == 1 { + var c *influxql.Call + c = calls[0] + + switch c.Name { + case "top", "bottom": + results, err = e.processAggregates(results, columnNames, c) + if err != nil { + return results, err + } + case "first", "last", "min", "max": + results, err = e.processSelectors(results, i, hasTimeField, columnNames) + if err != nil { + return results, err + } + } + } + } + + return results, nil +} + +func (e *SelectExecutor) processSelectors(results [][]interface{}, callPosition int, hasTimeField bool, columnNames []string) ([][]interface{}, error) { + for i, vals := range results { + for j := 1; j < len(vals); j++ { + switch v := vals[j].(type) { + case PositionPoint: + tMin := vals[0].(time.Time) + results[i] = e.selectorPointToQueryResult(vals, hasTimeField, callPosition, v, tMin, columnNames) + } + } + } + return results, nil +} + +func (e *SelectExecutor) selectorPointToQueryResult(row []interface{}, hasTimeField bool, columnIndex int, p PositionPoint, tMin time.Time, columnNames []string) []interface{} { + // if the row doesn't have enough columns, expand it + if len(row) != len(columnNames) { + row = append(row, make([]interface{}, len(columnNames)-len(row))...) 
+	}
+	callCount := len(e.stmt.FunctionCalls())
+	if callCount == 1 {
+		tm := time.Unix(0, p.Time).UTC().Format(time.RFC3339Nano)
+		// If we didn't explicitly ask for time, and we have a group by, then use TMIN for the time returned
+		if len(e.stmt.Dimensions) > 0 && !hasTimeField {
+			tm = tMin.UTC().Format(time.RFC3339Nano)
+		}
+		row[0] = tm
+	}
+	for i, c := range columnNames {
+		// skip over time, we already handled that above
+		if i == 0 {
+			continue
+		}
+		if (i == columnIndex && hasTimeField) || (i == columnIndex+1 && !hasTimeField) {
+			row[i] = p.Value
+			continue
+		}
+
+		if callCount == 1 {
+			// Always favor fields over tags if there is a name collision
+			if t, ok := p.Fields[c]; ok {
+				row[i] = t
+			} else if t, ok := p.Tags[c]; ok {
+				// look in the tags for a value
+				row[i] = t
+			}
+		}
+	}
+	return row
+}
+
+func (e *SelectExecutor) processAggregates(results [][]interface{}, columnNames []string, call *influxql.Call) ([][]interface{}, error) {
+	var values [][]interface{}
+
+	// Check if we have a group by, if not, rewrite the entire result by flattening it out
+	for _, vals := range results {
+		// start at 1 because the first value is always time
+		for j := 1; j < len(vals); j++ {
+			switch v := vals[j].(type) {
+			case PositionPoints:
+				tMin := vals[0].(time.Time)
+				for _, p := range v {
+					result := e.aggregatePointToQueryResult(p, tMin, call, columnNames)
+					values = append(values, result)
+				}
+			case nil:
+				continue
+			default:
+				return nil, fmt.Errorf("unreachable code - processAggregates for type %T %v", v, v)
+			}
+		}
+	}
+	return values, nil
+}
+
+func (e *SelectExecutor) aggregatePointToQueryResult(p PositionPoint, tMin time.Time, call *influxql.Call, columnNames []string) []interface{} {
+	tm := time.Unix(0, p.Time).UTC().Format(time.RFC3339Nano)
+	// If we didn't explicitly ask for time, and we have a group by, then use TMIN for the time returned
+	if len(e.stmt.Dimensions) > 0 && !e.stmt.HasTimeFieldSpecified() {
+		tm = tMin.UTC().Format(time.RFC3339Nano)
+	}
+	vals := []interface{}{tm}
+	for _, c := range columnNames {
+		if c == call.Name {
+			vals = append(vals, p.Value)
+			continue
+		}
+		// TODO in the future fields will also be available to us.
+		// we should always favor fields over tags if there is a name collision
+
+		// look in the tags for a value
+		if t, ok := p.Tags[c]; ok {
+			vals = append(vals, t)
+		}
+	}
+	return vals
+}
+
 // limitedRowWriter accepts raw mapper values, and will emit those values as rows in chunks
 // of the given size. If the chunk size is 0, no chunking will be performed. In addition if
 // limit is reached, outstanding values will be emitted. If limit is zero, no limit is enforced.
@@ -581,7 +733,7 @@ type limitedRowWriter struct {
 	fields      influxql.Fields
 	selectNames []string
 	aliasNames  []string
-	c           chan *influxql.Row
+	c           chan *models.Row
 
 	currValues  []*MapperValue
 	totalOffSet int
@@ -669,7 +821,7 @@ func (r *limitedRowWriter) Flush() {
 }
 
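The chunking contract in the comment above, restated as a runnable sketch with plain ints standing in for *MapperValue (a simplification, not the writer's actual code path): enforce the limit first, then emit fixed-size chunks, with chunk size 0 meaning one unchunked batch.

package main

import "fmt"

// emitChunks sends values through send in chunks of chunkSize, stopping
// once limit values have been sent (limit 0 means no limit).
func emitChunks(values []int, chunkSize, limit int, send func([]int)) {
	if limit > 0 && len(values) > limit {
		values = values[:limit] // enforce the row limit
	}
	if chunkSize <= 0 {
		send(values) // chunk size 0: no chunking
		return
	}
	for len(values) > 0 {
		n := chunkSize
		if n > len(values) {
			n = len(values)
		}
		send(values[:n])
		values = values[n:]
	}
}

func main() {
	emitChunks([]int{1, 2, 3, 4, 5}, 2, 4, func(c []int) { fmt.Println(c) })
	// [1 2]
	// [3 4]
}

 // processValues emits the given values in a single row.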
-func (r *limitedRowWriter) processValues(values []*MapperValue) *influxql.Row {
+func (r *limitedRowWriter) processValues(values []*MapperValue) *models.Row {
 	defer func() {
 		r.totalSent += len(values)
 	}()
@@ -711,7 +863,7 @@ func (r *limitedRowWriter) processValues(values []*MapperValue) *influxql.Row {
 		}
 	}
 
-	row := &influxql.Row{
+	row := &models.Row{
 		Name:    r.name,
 		Tags:    r.tags,
 		Columns: aliasFields,
@@ -774,16 +926,15 @@ type RawQueryDerivativeProcessor struct {
 	DerivativeInterval time.Duration
 }
 
-func (rqdp *RawQueryDerivativeProcessor) canProcess(input []*MapperValue) bool {
-	// If we only have 1 value, then the value did not change, so return
-	// a single row with 0.0
-	if len(input) == 1 {
+func (rqdp *RawQueryDerivativeProcessor) canProcess(input *MapperValue) bool {
+	// Cannot process a nil value
+	if input == nil {
 		return false
 	}
 
 	// See if the field value is numeric, if it's not, we can't process the derivative
 	validType := false
-	switch input[0].Value.(type) {
+	switch input.Value.(type) {
 	case int64:
 		validType = true
 	case float64:
@@ -798,7 +949,7 @@ func (rqdp *RawQueryDerivativeProcessor) Process(input []*MapperValue) []*Mapper
 		return input
 	}
 
-	if !rqdp.canProcess(input) {
+	if len(input) == 1 {
 		return []*MapperValue{
 			&MapperValue{
 				Time:  input[0].Time,
@@ -815,6 +966,16 @@ func (rqdp *RawQueryDerivativeProcessor) Process(input []*MapperValue) []*Mapper
 	for i := 1; i < len(input); i++ {
 		v := input[i]
 
+		// If we can't use the current or prev value (wrong time, nil), just append
+		// nil
+		if !rqdp.canProcess(v) || !rqdp.canProcess(rqdp.LastValueFromPreviousChunk) {
+			derivativeValues = append(derivativeValues, &MapperValue{
+				Time:  v.Time,
+				Value: nil,
+			})
+			continue
+		}
+
 		// Calculate the derivative of successive points by dividing the difference
 		// of each value by the elapsed time normalized to the interval
 		diff := int64toFloat64(v.Value) - int64toFloat64(rqdp.LastValueFromPreviousChunk.Value)
@@ -892,22 +1053,6 @@ func ProcessAggregateDerivative(results [][]interface{}, isNonNegative bool, int
 		}
 	}
 
-	// Check the value's type to ensure it's an numeric, if not, return a 0 result. We only check the first value
-	// because derivatives cannot be combined with other aggregates currently.
-	validType := false
-	switch results[0][1].(type) {
-	case int64:
-		validType = true
-	case float64:
-		validType = true
-	}
-
-	if !validType {
-		return [][]interface{}{
-			[]interface{}{results[0][0], 0.0},
-		}
-	}
-
 	// Otherwise calculate the derivatives as the difference between consecutive
 	// points divided by the elapsed time. Then normalize to the requested
 	// interval.
@@ -916,7 +1061,28 @@ func ProcessAggregateDerivative(results [][]interface{}, isNonNegative bool, int
 		prev := results[i-1]
 		cur := results[i]
 
-		if cur[1] == nil || prev[1] == nil {
+		// If the current or previous value is nil, append nil for the value
+		if prev[1] == nil || cur[1] == nil {
+			derivatives = append(derivatives, []interface{}{
+				cur[0], nil,
+			})
+			continue
+		}
+
+		// Check the value's type to ensure it's numeric, if not, return a nil result. We only check the first value
+		// because derivatives cannot be combined with other aggregates currently.
+ validType := false + switch cur[1].(type) { + case int64: + validType = true + case float64: + validType = true + } + + if !validType { + derivatives = append(derivatives, []interface{}{ + cur[0], nil, + }) continue } diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/executor_test.go b/_third_party/github.com/influxdb/influxdb/tsdb/executor_test.go index b6b6cdab63..2cf3d2c62a 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/executor_test.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/executor_test.go @@ -11,6 +11,7 @@ import ( "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" "bosun.org/_third_party/github.com/influxdb/influxdb/meta" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" "bosun.org/_third_party/github.com/influxdb/influxdb/tsdb" ) @@ -34,8 +35,8 @@ func TestWritePointsAndExecuteTwoShards(t *testing.T) { EndTime: time.Now().Add(time.Hour), Shards: []meta.ShardInfo{ { - ID: uint64(sID0), - OwnerIDs: []uint64{nID}, + ID: uint64(sID0), + Owners: []meta.ShardOwner{{NodeID: nID}}, }, }, }, @@ -45,8 +46,8 @@ func TestWritePointsAndExecuteTwoShards(t *testing.T) { EndTime: time.Now().Add(-time.Hour), Shards: []meta.ShardInfo{ { - ID: uint64(sID1), - OwnerIDs: []uint64{nID}, + ID: uint64(sID1), + Owners: []meta.ShardOwner{{NodeID: nID}}, }, }, }, @@ -56,7 +57,7 @@ func TestWritePointsAndExecuteTwoShards(t *testing.T) { // Write two points across shards. pt1time := time.Unix(1, 0).UTC() - if err := store.WriteToShard(sID0, []tsdb.Point{tsdb.NewPoint( + if err := store.WriteToShard(sID0, []models.Point{models.NewPoint( "cpu", map[string]string{"host": "serverA", "region": "us-east"}, map[string]interface{}{"value": 100}, @@ -65,7 +66,7 @@ func TestWritePointsAndExecuteTwoShards(t *testing.T) { t.Fatalf(err.Error()) } pt2time := time.Unix(2, 0).UTC() - if err := store.WriteToShard(sID1, []tsdb.Point{tsdb.NewPoint( + if err := store.WriteToShard(sID1, []models.Point{models.NewPoint( "cpu", map[string]string{"host": "serverB", "region": "us-east"}, map[string]interface{}{"value": 200}, @@ -164,8 +165,8 @@ func TestWritePointsAndExecuteTwoShardsAlign(t *testing.T) { EndTime: time.Now().Add(-time.Hour), Shards: []meta.ShardInfo{ { - ID: uint64(sID1), - OwnerIDs: []uint64{nID}, + ID: uint64(sID1), + Owners: []meta.ShardOwner{{NodeID: nID}}, }, }, }, @@ -175,8 +176,8 @@ func TestWritePointsAndExecuteTwoShardsAlign(t *testing.T) { EndTime: time.Now().Add(time.Hour), Shards: []meta.ShardInfo{ { - ID: uint64(sID0), - OwnerIDs: []uint64{nID}, + ID: uint64(sID0), + Owners: []meta.ShardOwner{{NodeID: nID}}, }, }, }, @@ -185,7 +186,7 @@ func TestWritePointsAndExecuteTwoShardsAlign(t *testing.T) { } // Write interleaving, by time, chunks to the shards. 
- if err := store.WriteToShard(sID0, []tsdb.Point{tsdb.NewPoint( + if err := store.WriteToShard(sID0, []models.Point{models.NewPoint( "cpu", map[string]string{"host": "serverA"}, map[string]interface{}{"value": 100}, @@ -193,7 +194,7 @@ func TestWritePointsAndExecuteTwoShardsAlign(t *testing.T) { )}); err != nil { t.Fatalf(err.Error()) } - if err := store.WriteToShard(sID1, []tsdb.Point{tsdb.NewPoint( + if err := store.WriteToShard(sID1, []models.Point{models.NewPoint( "cpu", map[string]string{"host": "serverB"}, map[string]interface{}{"value": 200}, @@ -201,7 +202,7 @@ func TestWritePointsAndExecuteTwoShardsAlign(t *testing.T) { )}); err != nil { t.Fatalf(err.Error()) } - if err := store.WriteToShard(sID1, []tsdb.Point{tsdb.NewPoint( + if err := store.WriteToShard(sID1, []models.Point{models.NewPoint( "cpu", map[string]string{"host": "serverA"}, map[string]interface{}{"value": 300}, @@ -251,7 +252,7 @@ func TestWritePointsAndExecuteTwoShardsAlign(t *testing.T) { // Test to ensure the engine handles query re-writing across stores. func TestWritePointsAndExecuteTwoShardsQueryRewrite(t *testing.T) { - // Create two distinct stores, ensuring shard mappers will shard nothing. + // Create two distinct stores, ensuring shard mappers will share nothing. store0 := testStore() defer os.RemoveAll(store0.Path()) store1 := testStore() @@ -265,7 +266,7 @@ func TestWritePointsAndExecuteTwoShardsQueryRewrite(t *testing.T) { // Write two points across shards. pt1time := time.Unix(1, 0).UTC() - if err := store0.WriteToShard(sID0, []tsdb.Point{tsdb.NewPoint( + if err := store0.WriteToShard(sID0, []models.Point{models.NewPoint( "cpu", map[string]string{"host": "serverA"}, map[string]interface{}{"value1": 100}, @@ -274,7 +275,7 @@ func TestWritePointsAndExecuteTwoShardsQueryRewrite(t *testing.T) { t.Fatalf(err.Error()) } pt2time := time.Unix(2, 0).UTC() - if err := store1.WriteToShard(sID1, []tsdb.Point{tsdb.NewPoint( + if err := store1.WriteToShard(sID1, []models.Point{models.NewPoint( "cpu", map[string]string{"host": "serverB"}, map[string]interface{}{"value2": 200}, @@ -338,8 +339,8 @@ func TestWritePointsAndExecuteTwoShardsTagSetOrdering(t *testing.T) { ID: sgID, Shards: []meta.ShardInfo{ { - ID: uint64(sID0), - OwnerIDs: []uint64{nID}, + ID: uint64(sID0), + Owners: []meta.ShardOwner{{NodeID: nID}}, }, }, }, @@ -347,8 +348,8 @@ func TestWritePointsAndExecuteTwoShardsTagSetOrdering(t *testing.T) { ID: sgID, Shards: []meta.ShardInfo{ { - ID: uint64(sID1), - OwnerIDs: []uint64{nID}, + ID: uint64(sID1), + Owners: []meta.ShardOwner{{NodeID: nID}}, }, }, }, @@ -357,7 +358,7 @@ func TestWritePointsAndExecuteTwoShardsTagSetOrdering(t *testing.T) { } // Write tagsets "y" and "z" to first shard. - if err := store.WriteToShard(sID0, []tsdb.Point{tsdb.NewPoint( + if err := store.WriteToShard(sID0, []models.Point{models.NewPoint( "cpu", map[string]string{"host": "y"}, map[string]interface{}{"value": 100}, @@ -365,7 +366,7 @@ func TestWritePointsAndExecuteTwoShardsTagSetOrdering(t *testing.T) { )}); err != nil { t.Fatalf(err.Error()) } - if err := store.WriteToShard(sID0, []tsdb.Point{tsdb.NewPoint( + if err := store.WriteToShard(sID0, []models.Point{models.NewPoint( "cpu", map[string]string{"host": "z"}, map[string]interface{}{"value": 200}, @@ -375,7 +376,7 @@ func TestWritePointsAndExecuteTwoShardsTagSetOrdering(t *testing.T) { } // Write tagsets "x", y" and "z" to second shard. 
- if err := store.WriteToShard(sID1, []tsdb.Point{tsdb.NewPoint( + if err := store.WriteToShard(sID1, []models.Point{models.NewPoint( "cpu", map[string]string{"host": "x"}, map[string]interface{}{"value": 300}, @@ -383,7 +384,7 @@ func TestWritePointsAndExecuteTwoShardsTagSetOrdering(t *testing.T) { )}); err != nil { t.Fatalf(err.Error()) } - if err := store.WriteToShard(sID1, []tsdb.Point{tsdb.NewPoint( + if err := store.WriteToShard(sID1, []models.Point{models.NewPoint( "cpu", map[string]string{"host": "y"}, map[string]interface{}{"value": 400}, @@ -391,7 +392,7 @@ func TestWritePointsAndExecuteTwoShardsTagSetOrdering(t *testing.T) { )}); err != nil { t.Fatalf(err.Error()) } - if err := store.WriteToShard(sID1, []tsdb.Point{tsdb.NewPoint( + if err := store.WriteToShard(sID1, []models.Point{models.NewPoint( "cpu", map[string]string{"host": "z"}, map[string]interface{}{"value": 500}, @@ -433,8 +434,8 @@ func TestWritePointsAndExecuteTwoShardsTagSetOrdering(t *testing.T) { } // Test to ensure the engine handles measurements across stores. -func TestWritePointsAndExecuteTwoShardsShowMeasurements(t *testing.T) { - // Create two distinct stores, ensuring shard mappers will shard nothing. +func TestShowMeasurementsMultipleShards(t *testing.T) { + // Create two distinct stores, ensuring shard mappers will share nothing. store0 := testStore() defer os.RemoveAll(store0.Path()) store1 := testStore() @@ -448,21 +449,36 @@ func TestWritePointsAndExecuteTwoShardsShowMeasurements(t *testing.T) { // Write two points across shards. pt1time := time.Unix(1, 0).UTC() - if err := store0.WriteToShard(sID0, []tsdb.Point{tsdb.NewPoint( - "cpu", - map[string]string{"host": "serverA"}, - map[string]interface{}{"value1": 100}, - pt1time, - )}); err != nil { + if err := store0.WriteToShard(sID0, []models.Point{ + models.NewPoint( + "cpu_user", + map[string]string{"host": "serverA", "region": "east", "cpuid": "cpu0"}, + map[string]interface{}{"value1": 100}, + pt1time, + ), + models.NewPoint( + "mem_free", + map[string]string{"host": "serverA", "region": "east"}, + map[string]interface{}{"value2": 200}, + pt1time, + ), + }); err != nil { t.Fatalf(err.Error()) } pt2time := time.Unix(2, 0).UTC() - if err := store1.WriteToShard(sID1, []tsdb.Point{tsdb.NewPoint( - "mem", - map[string]string{"host": "serverB"}, - map[string]interface{}{"value2": 200}, + if err := store1.WriteToShard(sID1, []models.Point{models.NewPoint( + "mem_used", + map[string]string{"host": "serverB", "region": "west"}, + map[string]interface{}{"value3": 300}, pt2time, - )}); err != nil { + ), + models.NewPoint( + "cpu_sys", + map[string]string{"host": "serverB", "region": "west", "cpuid": "cpu0"}, + map[string]interface{}{"value4": 400}, + pt2time, + ), + }); err != nil { t.Fatalf(err.Error()) } var tests = []struct { @@ -473,11 +489,15 @@ func TestWritePointsAndExecuteTwoShardsShowMeasurements(t *testing.T) { }{ { stmt: `SHOW MEASUREMENTS`, - expected: `[{"name":"measurements","columns":["name"],"values":[["cpu"],["mem"]]}]`, + expected: `[{"name":"measurements","columns":["name"],"values":[["cpu_sys"],["cpu_user"],["mem_free"],["mem_used"]]}]`, }, { stmt: `SHOW MEASUREMENTS WHERE host='serverB'`, - expected: `[{"name":"measurements","columns":["name"],"values":[["mem"]]}]`, + expected: `[{"name":"measurements","columns":["name"],"values":[["cpu_sys"],["mem_used"]]}]`, + }, + { + stmt: `SHOW MEASUREMENTS WHERE cpuid != '' AND region != ''`, + expected: `[{"name":"measurements","columns":["name"],"values":[["cpu_sys"],["cpu_user"]]}]`, }, { stmt: 
`SHOW MEASUREMENTS WHERE host='serverX'`, @@ -512,6 +532,133 @@ func TestWritePointsAndExecuteTwoShardsShowMeasurements(t *testing.T) { } } +// Test to ensure the engine handles tag keys across stores. +func TestShowShowTagKeysMultipleShards(t *testing.T) { + // Create two distinct stores, ensuring shard mappers will share nothing. + store0 := testStore() + defer os.RemoveAll(store0.Path()) + store1 := testStore() + defer os.RemoveAll(store1.Path()) + + // Create a shard in each store. + database := "foo" + retentionPolicy := "bar" + store0.CreateShard(database, retentionPolicy, sID0) + store1.CreateShard(database, retentionPolicy, sID1) + + // Write two points across shards. + pt1time := time.Unix(1, 0).UTC() + if err := store0.WriteToShard(sID0, []models.Point{ + models.NewPoint( + "cpu", + map[string]string{"host": "serverA", "region": "uswest"}, + map[string]interface{}{"value1": 100}, + pt1time, + ), + models.NewPoint( + "cpu", + map[string]string{"host": "serverB", "region": "useast"}, + map[string]interface{}{"value1": 100}, + pt1time, + ), + }); err != nil { + t.Fatalf(err.Error()) + } + pt2time := time.Unix(2, 0).UTC() + if err := store1.WriteToShard(sID1, []models.Point{ + models.NewPoint( + "cpu", + map[string]string{"host": "serverB", "region": "useast", "rack": "12"}, + map[string]interface{}{"value1": 100}, + pt1time, + ), + models.NewPoint( + "mem", + map[string]string{"host": "serverB"}, + map[string]interface{}{"value2": 200}, + pt2time, + )}); err != nil { + t.Fatalf(err.Error()) + } + var tests = []struct { + skip bool // Skip test + stmt string // Query statement + chunkSize int // Chunk size for driving the executor + expected string // Expected results, rendered as a string + }{ + { + stmt: `SHOW TAG KEYS`, + expected: `[{"name":"cpu","columns":["tagKey"],"values":[["host"],["rack"],["region"]]},{"name":"mem","columns":["tagKey"],"values":[["host"]]}]`, + }, + { + stmt: `SHOW TAG KEYS SLIMIT 1`, + expected: `[{"name":"cpu","columns":["tagKey"],"values":[["host"],["rack"],["region"]]}]`, + }, + { + stmt: `SHOW TAG KEYS SLIMIT 1 SOFFSET 1`, + expected: `[{"name":"mem","columns":["tagKey"],"values":[["host"]]}]`, + }, + { + stmt: `SHOW TAG KEYS SOFFSET 1`, + expected: `[{"name":"mem","columns":["tagKey"],"values":[["host"]]}]`, + }, + { + stmt: `SHOW TAG KEYS LIMIT 1`, + expected: `[{"name":"cpu","columns":["tagKey"],"values":[["host"]]},{"name":"mem","columns":["tagKey"],"values":[["host"]]}]`, + }, + { + stmt: `SHOW TAG KEYS LIMIT 1 OFFSET 1`, + expected: `[{"name":"cpu","columns":["tagKey"],"values":[["rack"]]},{"name":"mem","columns":["tagKey"]}]`, + }, + { + stmt: `SHOW TAG KEYS OFFSET 1`, + expected: `[{"name":"cpu","columns":["tagKey"],"values":[["rack"],["region"]]},{"name":"mem","columns":["tagKey"]}]`, + }, + { + stmt: `SHOW TAG KEYS FROM cpu`, + expected: `[{"name":"cpu","columns":["tagKey"],"values":[["host"],["rack"],["region"]]}]`, + }, + { + stmt: `SHOW TAG KEYS FROM cpu WHERE region = 'uswest'`, + expected: `[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]}]`, + }, + { + stmt: `SHOW TAG KEYS FROM doesntexist`, + expected: `null`, + }, + { + stmt: `SHOW TAG KEYS FROM cpu WHERE region = 'doesntexist'`, + expected: `null`, + }, + } + for _, tt := range tests { + if tt.skip { + t.Logf("Skipping test %s", tt.stmt) + continue + } + + parsedStmt := mustParseStatement(tt.stmt).(*influxql.ShowTagKeysStatement) + + // Create Mappers and Executor. 
+ mapper0, err := store0.CreateMapper(sID0, parsedStmt, tt.chunkSize) + if err != nil { + t.Fatalf("failed to create mapper0: %s", err.Error()) + } + mapper1, err := store1.CreateMapper(sID1, parsedStmt, tt.chunkSize) + if err != nil { + t.Fatalf("failed to create mapper1: %s", err.Error()) + } + executor := tsdb.NewShowTagKeysExecutor(parsedStmt, []tsdb.Mapper{mapper0, mapper1}, tt.chunkSize) + + // Check the results. + got := executeAndGetResults(executor) + if got != tt.expected { + t.Fatalf("Test %s\nexp: %s\ngot: %s\n", tt.stmt, tt.expected, got) + } + + } +} + // TestProccessAggregateDerivative tests the RawQueryDerivativeProcessor transformation function on the engine. // The is called for a query with a GROUP BY. func TestProcessAggregateDerivative(t *testing.T) { @@ -713,7 +860,43 @@ func TestProcessAggregateDerivative(t *testing.T) { }, exp: [][]interface{}{ []interface{}{ - time.Unix(0, 0), 0.0, + time.Unix(0, 0).Add(24 * time.Hour), nil, + }, + []interface{}{ + time.Unix(0, 0).Add(48 * time.Hour), nil, + }, + []interface{}{ + time.Unix(0, 0).Add(72 * time.Hour), nil, + }, + }, + }, + { + name: "bool derivatives", + fn: "derivative", + interval: 24 * time.Hour, + in: [][]interface{}{ + []interface{}{ + time.Unix(0, 0), "1.0", + }, + []interface{}{ + time.Unix(0, 0).Add(24 * time.Hour), true, + }, + []interface{}{ + time.Unix(0, 0).Add(48 * time.Hour), true, + }, + []interface{}{ + time.Unix(0, 0).Add(72 * time.Hour), true, + }, + }, + exp: [][]interface{}{ + []interface{}{ + time.Unix(0, 0).Add(24 * time.Hour), nil, + }, + []interface{}{ + time.Unix(0, 0).Add(48 * time.Hour), nil, + }, + []interface{}{ + time.Unix(0, 0).Add(72 * time.Hour), nil, }, }, }, @@ -975,9 +1158,54 @@ func TestProcessRawQueryDerivative(t *testing.T) { }, }, exp: []*tsdb.MapperValue{ + { + Time: time.Unix(0, 0).Add(24 * time.Hour).UnixNano(), + Value: nil, + }, + { + Time: time.Unix(0, 0).Add(48 * time.Hour).UnixNano(), + Value: nil, + }, + { + Time: time.Unix(0, 0).Add(72 * time.Hour).UnixNano(), + Value: nil, + }, + }, + }, + { + name: "bool derivatives", + fn: "derivative", + interval: 24 * time.Hour, + in: []*tsdb.MapperValue{ { Time: time.Unix(0, 0).Unix(), - Value: 0.0, + Value: true, + }, + { + Time: time.Unix(0, 0).Add(24 * time.Hour).UnixNano(), + Value: true, + }, + { + Time: time.Unix(0, 0).Add(48 * time.Hour).UnixNano(), + Value: false, + }, + { + Time: time.Unix(0, 0).Add(72 * time.Hour).UnixNano(), + Value: false, + }, + }, + exp: []*tsdb.MapperValue{ + { + Time: time.Unix(0, 0).Add(24 * time.Hour).UnixNano(), + Value: nil, + }, + { + Time: time.Unix(0, 0).Add(48 * time.Hour).UnixNano(), + Value: nil, + }, + { + Time: time.Unix(0, 0).Add(72 * time.Hour).UnixNano(), + Value: nil, }, }, }, @@ -995,8 +1223,14 @@ func TestProcessRawQueryDerivative(t *testing.T) { } for i := 0; i < len(test.exp); i++ { - if test.exp[i].Time != got[i].Time || math.Abs((test.exp[i].Value.(float64)-got[i].Value.(float64))) > 0.0000001 { - t.Fatalf("RawQueryDerivativeProcessor - %s results mismatch:\ngot %v\nexp %v", test.name, got, test.exp) + if v, ok := test.exp[i].Value.(float64); ok { + if test.exp[i].Time != got[i].Time || math.Abs((v-got[i].Value.(float64))) > 0.0000001 { + t.Fatalf("RawQueryDerivativeProcessor - %s results mismatch:\ngot %v\nexp %v", test.name, got, test.exp) + } + } else { + if test.exp[i].Time != got[i].Time || test.exp[i].Value != got[i].Value { + t.Fatalf("RawQueryDerivativeProcessor - %s results mismatch:\ngot %v\nexp %v", test.name, got, test.exp) + } } } } @@ -1061,7 +1295,7 @@ func (t 
*testQEShardMapper) CreateMapper(shard meta.ShardInfo, stmt influxql.Sta
 func executeAndGetResults(executor tsdb.Executor) string {
 	ch := executor.Execute()
 
-	var rows []*influxql.Row
+	var rows []*models.Row
 	for r := range ch {
 		rows = append(rows, r)
 	}
diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/functions.go b/_third_party/github.com/influxdb/influxdb/tsdb/functions.go
new file mode 100644
index 0000000000..9af5adbfbe
--- /dev/null
+++ b/_third_party/github.com/influxdb/influxdb/tsdb/functions.go
@@ -0,0 +1,1722 @@
+package tsdb
+
+// All aggregate and query functions are defined in this file along with any intermediate data objects they need to process.
+// Query functions are represented as two discrete functions: Map and Reduce. These roughly follow the MapReduce
+// paradigm popularized by Google and Hadoop.
+//
+// When adding an aggregate function, define a mapper, a reducer, and add them to the switch statements in the
+// initializeMapFunc and initializeReduceFunc functions below.
+
+import (
+	"container/heap"
+	"encoding/json"
+	"fmt"
+	"math"
+	"math/rand"
+	"reflect"
+	"sort"
+	"strings"
+
+	"bosun.org/_third_party/github.com/influxdb/influxdb/influxql"
+)
+
+// Iterator represents a forward-only iterator over a set of points.
+// These are used by the mapFunctions in this file.
+type Iterator interface {
+	Next() (time int64, value interface{})
+	Fields() map[string]interface{}
+	Tags() map[string]string
+	TMin() int64
+}
+
+type MapInput struct {
+	TMin  int64
+	Items []MapItem
+}
+
+type MapItem struct {
+	Timestamp int64
+	Value     interface{}
+
+	// TODO(benbjohnson):
+	// Move fields and tags up to MapInput. Currently the engine combines
+	// multiple series together during processing. This needs to be fixed so
+	// that each map function only operates on a single series at a time instead.
+	Fields map[string]interface{}
+	Tags   map[string]string
+}
+
+// mapFunc represents a function used for mapping over a sequential series of data.
+// The iterator represents a single group by interval.
+type mapFunc func(*MapInput) interface{}
+
+// reduceFunc represents a function used for reducing mapper output.
+type reduceFunc func([]interface{}) interface{}
+
+// UnmarshalFunc represents a function that can take bytes from a mapper on a remote
+// server and unmarshal them into an interface the reducer can use.
+type UnmarshalFunc func([]byte) (interface{}, error)
+
+// initializeMapFunc takes an aggregate call from the query and returns the mapFunc.
+func initializeMapFunc(c *influxql.Call) (mapFunc, error) {
+	// see if it's a query for raw data
+	if c == nil {
+		return MapRawQuery, nil
+	}
+
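+	// Each case below pairs with a case of the same name in
+	// initializeReduceFunc: for example, mean is computed by MapMean emitting
+	// a {count, running mean} partial per interval, which ReduceMean then
+	// combines weighted by each partial's count.
+
+	// Retrieve map function by name.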
+ switch c.Name { + case "count": + if _, ok := c.Args[0].(*influxql.Distinct); ok { + return MapCountDistinct, nil + } + if c, ok := c.Args[0].(*influxql.Call); ok { + if c.Name == "distinct" { + return MapCountDistinct, nil + } + } + return MapCount, nil + case "distinct": + return MapDistinct, nil + case "sum": + return MapSum, nil + case "mean": + return MapMean, nil + case "median": + return MapStddev, nil + case "min": + return func(input *MapInput) interface{} { + return MapMin(input, c.Fields()[0]) + }, nil + case "max": + return func(input *MapInput) interface{} { + return MapMax(input, c.Fields()[0]) + }, nil + case "spread": + return MapSpread, nil + case "stddev": + return MapStddev, nil + case "first": + return func(input *MapInput) interface{} { + return MapFirst(input, c.Fields()[0]) + }, nil + case "last": + return func(input *MapInput) interface{} { + return MapLast(input, c.Fields()[0]) + }, nil + + case "top", "bottom": + // Capture information from the call that the Map function will require + lit, _ := c.Args[len(c.Args)-1].(*influxql.NumberLiteral) + limit := int(lit.Val) + fields := topCallArgs(c) + + return func(input *MapInput) interface{} { + return MapTopBottom(input, limit, fields, len(c.Args), c.Name) + }, nil + case "percentile": + return MapEcho, nil + case "derivative", "non_negative_derivative": + // If the arg is another aggregate e.g. derivative(mean(value)), then + // use the map func for that nested aggregate + if fn, ok := c.Args[0].(*influxql.Call); ok { + return initializeMapFunc(fn) + } + return MapRawQuery, nil + default: + return nil, fmt.Errorf("function not found: %q", c.Name) + } +} + +// InitializereduceFunc takes an aggregate call from the query and returns the reduceFunc +func initializeReduceFunc(c *influxql.Call) (reduceFunc, error) { + // Retrieve reduce function by name. + switch c.Name { + case "count": + if _, ok := c.Args[0].(*influxql.Distinct); ok { + return ReduceCountDistinct, nil + } + if c, ok := c.Args[0].(*influxql.Call); ok { + if c.Name == "distinct" { + return ReduceCountDistinct, nil + } + } + return ReduceSum, nil + case "distinct": + return ReduceDistinct, nil + case "sum": + return ReduceSum, nil + case "mean": + return ReduceMean, nil + case "median": + return ReduceMedian, nil + case "min": + return ReduceMin, nil + case "max": + return ReduceMax, nil + case "spread": + return ReduceSpread, nil + case "stddev": + return ReduceStddev, nil + case "first": + return ReduceFirst, nil + case "last": + return ReduceLast, nil + case "top", "bottom": + return func(values []interface{}) interface{} { + return ReduceTopBottom(values, c) + }, nil + case "percentile": + return func(values []interface{}) interface{} { + return ReducePercentile(values, c) + }, nil + case "derivative", "non_negative_derivative": + // If the arg is another aggregate e.g. 
derivative(mean(value)), then + // use the map func for that nested aggregate + if fn, ok := c.Args[0].(*influxql.Call); ok { + return initializeReduceFunc(fn) + } + return nil, fmt.Errorf("expected function argument to %s", c.Name) + default: + return nil, fmt.Errorf("function not found: %q", c.Name) + } +} + +func InitializeUnmarshaller(c *influxql.Call) (UnmarshalFunc, error) { + // if c is nil it's a raw data query + if c == nil { + return func(b []byte) (interface{}, error) { + a := make([]*rawQueryMapOutput, 0) + err := json.Unmarshal(b, &a) + return a, err + }, nil + } + + // Retrieve marshal function by name + switch c.Name { + case "mean": + return func(b []byte) (interface{}, error) { + var o meanMapOutput + err := json.Unmarshal(b, &o) + return &o, err + }, nil + case "spread": + return func(b []byte) (interface{}, error) { + var o spreadMapOutput + err := json.Unmarshal(b, &o) + return &o, err + }, nil + case "distinct": + return func(b []byte) (interface{}, error) { + var val interfaceValues + err := json.Unmarshal(b, &val) + return val, err + }, nil + case "first": + return func(b []byte) (interface{}, error) { + var o firstLastMapOutput + err := json.Unmarshal(b, &o) + return &o, err + }, nil + case "last": + return func(b []byte) (interface{}, error) { + var o firstLastMapOutput + err := json.Unmarshal(b, &o) + return &o, err + }, nil + case "stddev": + return func(b []byte) (interface{}, error) { + val := make([]float64, 0) + err := json.Unmarshal(b, &val) + return val, err + }, nil + case "median": + return func(b []byte) (interface{}, error) { + a := make([]float64, 0) + err := json.Unmarshal(b, &a) + return a, err + }, nil + default: + return func(b []byte) (interface{}, error) { + var val interface{} + err := json.Unmarshal(b, &val) + return val, err + }, nil + } +} + +// MapCount computes the number of values in an iterator. +func MapCount(input *MapInput) interface{} { + n := float64(0) + for range input.Items { + n++ + } + if n > 0 { + return n + } + return nil +} + +type interfaceValues []interface{} + +func (d interfaceValues) Len() int { return len(d) } +func (d interfaceValues) Swap(i, j int) { d[i], d[j] = d[j], d[i] } +func (d interfaceValues) Less(i, j int) bool { + cmpt, a, b := typeCompare(d[i], d[j]) + cmpv := valueCompare(a, b) + if cmpv == 0 { + return cmpt < 0 + } + return cmpv < 0 +} + +// MapDistinct computes the unique values in an iterator. +func MapDistinct(input *MapInput) interface{} { + m := make(map[interface{}]struct{}) + for _, item := range input.Items { + m[item.Value] = struct{}{} + } + + if len(m) == 0 { + return nil + } + + results := make(interfaceValues, len(m)) + var i int + for value, _ := range m { + results[i] = value + i++ + } + return results +} + +// ReduceDistinct finds the unique values for each key. +func ReduceDistinct(values []interface{}) interface{} { + var index = make(map[interface{}]struct{}) + + // index distinct values from each mapper + for _, v := range values { + if v == nil { + continue + } + d, ok := v.(interfaceValues) + if !ok { + msg := fmt.Sprintf("expected distinctValues, got: %T", v) + panic(msg) + } + for _, distinctValue := range d { + index[distinctValue] = struct{}{} + } + } + + // convert map keys to an array + results := make(interfaceValues, len(index)) + var i int + for k, _ := range index { + results[i] = k + i++ + } + if len(results) > 0 { + sort.Sort(results) + return results + } + return nil +} + +// MapCountDistinct computes the unique count of values in an iterator. 
+func MapCountDistinct(input *MapInput) interface{} { + var index = make(map[interface{}]struct{}) + + for _, item := range input.Items { + index[item.Value] = struct{}{} + } + + if len(index) == 0 { + return nil + } + + return index +} + +// ReduceCountDistinct finds the unique counts of values. +func ReduceCountDistinct(values []interface{}) interface{} { + var index = make(map[interface{}]struct{}) + + // index distinct values from each mapper + for _, v := range values { + if v == nil { + continue + } + d, ok := v.(map[interface{}]struct{}) + if !ok { + msg := fmt.Sprintf("expected map[interface{}]struct{}, got: %T", v) + panic(msg) + } + for distinctCountValue, _ := range d { + index[distinctCountValue] = struct{}{} + } + } + + return len(index) +} + +type NumberType int8 + +const ( + Float64Type NumberType = iota + Int64Type +) + +// MapSum computes the summation of values in an iterator. +func MapSum(input *MapInput) interface{} { + if len(input.Items) == 0 { + return nil + } + + n := float64(0) + var resultType NumberType + for _, item := range input.Items { + switch v := item.Value.(type) { + case float64: + n += v + case int64: + n += float64(v) + resultType = Int64Type + } + } + + switch resultType { + case Float64Type: + return n + case Int64Type: + return int64(n) + default: + return nil + } +} + +// ReduceSum computes the sum of values for each key. +func ReduceSum(values []interface{}) interface{} { + var n float64 + count := 0 + var resultType NumberType + for _, v := range values { + if v == nil { + continue + } + count++ + switch n1 := v.(type) { + case float64: + n += n1 + case int64: + n += float64(n1) + resultType = Int64Type + } + } + if count > 0 { + switch resultType { + case Float64Type: + return n + case Int64Type: + return int64(n) + } + } + return nil +} + +// MapMean computes the count and sum of values in an iterator to be combined by the reducer. +func MapMean(input *MapInput) interface{} { + if len(input.Items) == 0 { + return nil + } + + out := &meanMapOutput{} + for _, item := range input.Items { + out.Count++ + switch v := item.Value.(type) { + case float64: + out.Mean += (v - out.Mean) / float64(out.Count) + case int64: + out.Mean += (float64(v) - out.Mean) / float64(out.Count) + out.ResultType = Int64Type + } + } + return out +} + +type meanMapOutput struct { + Count int + Mean float64 + ResultType NumberType +} + +// ReduceMean computes the mean of values for each key. +func ReduceMean(values []interface{}) interface{} { + out := &meanMapOutput{} + var countSum int + for _, v := range values { + if v == nil { + continue + } + val := v.(*meanMapOutput) + countSum = out.Count + val.Count + out.Mean = val.Mean*(float64(val.Count)/float64(countSum)) + out.Mean*(float64(out.Count)/float64(countSum)) + out.Count = countSum + } + if out.Count > 0 { + return out.Mean + } + return nil +} + +// ReduceMedian computes the median of values +func ReduceMedian(values []interface{}) interface{} { + var data []float64 + // Collect all the data points + for _, value := range values { + if value == nil { + continue + } + data = append(data, value.([]float64)...) 
+ } + + length := len(data) + if length < 2 { + if length == 0 { + return nil + } + return data[0] + } + middle := length / 2 + var sortedRange []float64 + if length%2 == 0 { + sortedRange = getSortedRange(data, middle-1, 2) + var low, high = sortedRange[0], sortedRange[1] + return low + (high-low)/2 + } + sortedRange = getSortedRange(data, middle, 1) + return sortedRange[0] +} + +// getSortedRange returns a sorted subset of data. By using discardLowerRange and discardUpperRange to get the target +// subset (unsorted) and then just sorting that subset, the work can be reduced from O(N lg N), where N is len(data), to +// O(N + count lg count) for the average case +// - O(N) to discard the unwanted items +// - O(count lg count) to sort the count number of extracted items +// This can be useful for: +// - finding the median: getSortedRange(data, middle, 1) +// - finding the top N: getSortedRange(data, len(data) - N, N) +// - finding the bottom N: getSortedRange(data, 0, N) +func getSortedRange(data []float64, start int, count int) []float64 { + out := discardLowerRange(data, start) + k := len(out) - count + if k > 0 { + out = discardUpperRange(out, k) + } + sort.Float64s(out) + + return out +} + +// discardLowerRange discards the lower k elements of the sorted data set without sorting all the data. Sorting all of +// the data would take O(NlgN), where N is len(data), but partitioning to find the kth largest number is O(N) in the +// average case. The remaining N-k unsorted elements are returned - no kind of ordering is guaranteed on these elements. +func discardLowerRange(data []float64, k int) []float64 { + out := make([]float64, len(data)-k) + i := 0 + + // discard values lower than the desired range + for k > 0 { + lows, pivotValue, highs := partition(data) + + lowLength := len(lows) + if lowLength > k { + // keep all the highs and the pivot + out[i] = pivotValue + i++ + copy(out[i:], highs) + i += len(highs) + // iterate over the lows again + data = lows + } else { + // discard all the lows + data = highs + k -= lowLength + if k == 0 { + // if discarded enough lows, keep the pivot + out[i] = pivotValue + i++ + } else { + // able to discard the pivot too + k-- + } + } + } + copy(out[i:], data) + return out +} + +// discardUpperRange discards the upper k elements of the sorted data set without sorting all the data. Sorting all of +// the data would take O(NlgN), where N is len(data), but partitioning to find the kth largest number is O(N) in the +// average case. The remaining N-k unsorted elements are returned - no kind of ordering is guaranteed on these elements. +func discardUpperRange(data []float64, k int) []float64 { + out := make([]float64, len(data)-k) + i := 0 + + // discard values higher than the desired range + for k > 0 { + lows, pivotValue, highs := partition(data) + + highLength := len(highs) + if highLength > k { + // keep all the lows and the pivot + out[i] = pivotValue + i++ + copy(out[i:], lows) + i += len(lows) + // iterate over the highs again + data = highs + } else { + // discard all the highs + data = lows + k -= highLength + if k == 0 { + // if discarded enough highs, keep the pivot + out[i] = pivotValue + i++ + } else { + // able to discard the pivot too + k-- + } + } + } + copy(out[i:], data) + return out +} + +// partition takes a list of data, chooses a random pivot index and returns a list of elements lower than the +// pivotValue, the pivotValue, and a list of elements higher than the pivotValue. partition mutates data. 
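+// For example, partition([]float64{5, 1, 9, 3}) that happens to pick pivot 5
+// returns lows [1 3], pivotValue 5, and highs [9]; repeatedly partitioning and
+// discarding one side is what lets getSortedRange drop k elements in O(N)
+// average time instead of sorting the whole slice.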
+func partition(data []float64) (lows []float64, pivotValue float64, highs []float64) { + length := len(data) + // there are better (more complex) ways to calculate pivotIndex (e.g. median of 3, median of 3 medians) if this + // proves to be inadequate. + pivotIndex := rand.Int() % length + pivotValue = data[pivotIndex] + low, high := 1, length-1 + + // put the pivot in the first position + data[pivotIndex], data[0] = data[0], data[pivotIndex] + + // partition the data around the pivot + for low <= high { + for low <= high && data[low] <= pivotValue { + low++ + } + for high >= low && data[high] >= pivotValue { + high-- + } + if low < high { + data[low], data[high] = data[high], data[low] + } + } + + return data[1:low], pivotValue, data[high+1:] +} + +type minMaxMapOut struct { + Time int64 + Val float64 + Type NumberType + Fields map[string]interface{} + Tags map[string]string +} + +// MapMin collects the values to pass to the reducer +func MapMin(input *MapInput, fieldName string) interface{} { + min := &minMaxMapOut{} + + pointsYielded := false + var val float64 + + for _, item := range input.Items { + switch v := item.Value.(type) { + case float64: + val = v + case int64: + val = float64(v) + min.Type = Int64Type + case map[string]interface{}: + if d, t, ok := decodeValueAndNumberType(v[fieldName]); ok { + val, min.Type = d, t + } else { + continue + } + } + + // Initialize min + if !pointsYielded { + min.Time = item.Timestamp + min.Val = val + min.Fields = item.Fields + min.Tags = item.Tags + pointsYielded = true + } + current := min.Val + min.Val = math.Min(min.Val, val) + + // Check to see if the value changed, if so, update the fields/tags + if current != min.Val { + min.Time = item.Timestamp + min.Fields = item.Fields + min.Tags = item.Tags + } + } + if pointsYielded { + return min + } + return nil +} + +// ReduceMin computes the min of value. 
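+//
+// Each element of values is expected to be a *minMaxMapOut produced by MapMin;
+// nil entries and values of any other type are skipped. As a hypothetical
+// illustration, reducing two mapper outputs with Val 2 and Val 1 yields a
+// PositionPoint whose Value is 1.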
+func ReduceMin(values []interface{}) interface{} {
+	min := &minMaxMapOut{}
+	pointsYielded := false
+
+	for _, value := range values {
+		if value == nil {
+			continue
+		}
+
+		v, ok := value.(*minMaxMapOut)
+		if !ok {
+			continue
+		}
+
+		// Initialize min
+		if !pointsYielded {
+			min.Time = v.Time
+			min.Val = v.Val
+			min.Type = v.Type
+			min.Fields = v.Fields
+			min.Tags = v.Tags
+			pointsYielded = true
+		}
+		current := min.Val
+		min.Val = math.Min(min.Val, v.Val)
+		if current != min.Val {
+			min.Time = v.Time
+			min.Fields = v.Fields
+			min.Tags = v.Tags
+		}
+	}
+	if pointsYielded {
+		switch min.Type {
+		case Float64Type:
+			return PositionPoint{
+				Time:   min.Time,
+				Value:  min.Val,
+				Fields: min.Fields,
+				Tags:   min.Tags,
+			}
+		case Int64Type:
+			return PositionPoint{
+				Time:   min.Time,
+				Value:  int64(min.Val),
+				Fields: min.Fields,
+				Tags:   min.Tags,
+			}
+		}
+	}
+	return nil
+}
+
+func decodeValueAndNumberType(v interface{}) (float64, NumberType, bool) {
+	switch n := v.(type) {
+	case float64:
+		return n, Float64Type, true
+	case int64:
+		return float64(n), Int64Type, true
+	default:
+		return 0, Float64Type, false
+	}
+}
+
+// MapMax collects the values to pass to the reducer
+func MapMax(input *MapInput, fieldName string) interface{} {
+	max := &minMaxMapOut{}
+
+	pointsYielded := false
+	var val float64
+
+	for _, item := range input.Items {
+		switch v := item.Value.(type) {
+		case float64:
+			val = v
+		case int64:
+			val = float64(v)
+			max.Type = Int64Type
+		case map[string]interface{}:
+			if d, t, ok := decodeValueAndNumberType(v[fieldName]); ok {
+				val, max.Type = d, t
+			} else {
+				continue
+			}
+		}
+
+		// Initialize max
+		if !pointsYielded {
+			max.Time = item.Timestamp
+			max.Val = val
+			max.Fields = item.Fields
+			max.Tags = item.Tags
+			pointsYielded = true
+		}
+		current := max.Val
+		max.Val = math.Max(max.Val, val)
+
+		// Check to see if the value changed; if so, update the fields/tags
+		if current != max.Val {
+			max.Time = item.Timestamp
+			max.Fields = item.Fields
+			max.Tags = item.Tags
+		}
+	}
+	if pointsYielded {
+		return max
+	}
+	return nil
+}
+
+// ReduceMax computes the max of value.
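+//
+// Note that, as in ReduceMin, the running extremum is tracked as a float64 and
+// only converted back to int64 at the end when Type is Int64Type, so int64
+// inputs with magnitudes above 2^53 can lose precision.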
+func ReduceMax(values []interface{}) interface{} { + max := &minMaxMapOut{} + pointsYielded := false + + for _, value := range values { + if value == nil { + continue + } + + v, ok := value.(*minMaxMapOut) + if !ok { + continue + } + + // Initialize max + if !pointsYielded { + max.Time = v.Time + max.Val = v.Val + max.Type = v.Type + max.Fields = v.Fields + max.Tags = v.Tags + pointsYielded = true + } + current := max.Val + max.Val = math.Max(max.Val, v.Val) + if current != max.Val { + max.Time = v.Time + max.Fields = v.Fields + max.Tags = v.Tags + } + } + if pointsYielded { + switch max.Type { + case Float64Type: + return PositionPoint{ + Time: max.Time, + Value: max.Val, + Fields: max.Fields, + Tags: max.Tags, + } + case Int64Type: + return PositionPoint{ + Time: max.Time, + Value: int64(max.Val), + Fields: max.Fields, + Tags: max.Tags, + } + } + } + return nil +} + +type spreadMapOutput struct { + Min, Max float64 + Type NumberType +} + +// MapSpread collects the values to pass to the reducer +func MapSpread(input *MapInput) interface{} { + out := &spreadMapOutput{} + pointsYielded := false + var val float64 + + for _, item := range input.Items { + switch v := item.Value.(type) { + case float64: + val = v + case int64: + val = float64(v) + out.Type = Int64Type + } + + // Initialize + if !pointsYielded { + out.Max = val + out.Min = val + pointsYielded = true + } + out.Max = math.Max(out.Max, val) + out.Min = math.Min(out.Min, val) + } + if pointsYielded { + return out + } + return nil +} + +// ReduceSpread computes the spread of values. +func ReduceSpread(values []interface{}) interface{} { + result := &spreadMapOutput{} + pointsYielded := false + + for _, v := range values { + if v == nil { + continue + } + val := v.(*spreadMapOutput) + // Initialize + if !pointsYielded { + result.Max = val.Max + result.Min = val.Min + result.Type = val.Type + pointsYielded = true + } + result.Max = math.Max(result.Max, val.Max) + result.Min = math.Min(result.Min, val.Min) + } + if pointsYielded { + switch result.Type { + case Float64Type: + return result.Max - result.Min + case Int64Type: + return int64(result.Max - result.Min) + } + } + return nil +} + +// MapStddev collects the values to pass to the reducer +func MapStddev(input *MapInput) interface{} { + var a []float64 + for _, item := range input.Items { + switch v := item.Value.(type) { + case float64: + a = append(a, v) + case int64: + a = append(a, float64(v)) + } + } + return a +} + +// ReduceStddev computes the stddev of values. +func ReduceStddev(values []interface{}) interface{} { + var data []float64 + // Collect all the data points + for _, value := range values { + if value == nil { + continue + } + data = append(data, value.([]float64)...) 
+ } + + // If no data or we only have one point, it's nil or undefined + if len(data) < 2 { + return nil + } + + // Get the mean + var mean float64 + var count int + for _, v := range data { + count++ + mean += (v - mean) / float64(count) + } + // Get the variance + var variance float64 + for _, v := range data { + dif := v - mean + sq := math.Pow(dif, 2) + variance += sq + } + variance = variance / float64(count-1) + stddev := math.Sqrt(variance) + + return stddev +} + +type firstLastMapOutput struct { + Time int64 + Value interface{} + Fields map[string]interface{} + Tags map[string]string +} + +// MapFirst collects the values to pass to the reducer +// This function assumes time ordered input +func MapFirst(input *MapInput, fieldName string) interface{} { + if len(input.Items) == 0 { + return nil + } + + k, v := input.Items[0].Timestamp, input.Items[0].Value + tags := input.Items[0].Tags + fields := input.Items[0].Fields + if n, ok := v.(map[string]interface{}); ok { + v = n[fieldName] + } + + // Find greatest value at same timestamp. + for _, item := range input.Items[1:] { + nextk, nextv := item.Timestamp, item.Value + if nextk != k { + break + } + if n, ok := nextv.(map[string]interface{}); ok { + nextv = n[fieldName] + } + + if greaterThan(nextv, v) { + fields = item.Fields + tags = item.Tags + v = nextv + } + } + return &firstLastMapOutput{Time: k, Value: v, Fields: fields, Tags: tags} +} + +// ReduceFirst computes the first of value. +func ReduceFirst(values []interface{}) interface{} { + out := &firstLastMapOutput{} + pointsYielded := false + + for _, v := range values { + if v == nil { + continue + } + val := v.(*firstLastMapOutput) + // Initialize first + if !pointsYielded { + out.Time = val.Time + out.Value = val.Value + out.Fields = val.Fields + out.Tags = val.Tags + pointsYielded = true + } + if val.Time < out.Time { + out.Time = val.Time + out.Value = val.Value + out.Fields = val.Fields + out.Tags = val.Tags + } else if val.Time == out.Time && greaterThan(val.Value, out.Value) { + out.Value = val.Value + out.Fields = val.Fields + out.Tags = val.Tags + } + } + if pointsYielded { + return PositionPoint{ + Time: out.Time, + Value: out.Value, + Fields: out.Fields, + Tags: out.Tags, + } + } + return nil +} + +// MapLast collects the values to pass to the reducer +func MapLast(input *MapInput, fieldName string) interface{} { + out := &firstLastMapOutput{} + pointsYielded := false + + for _, item := range input.Items { + k, v := item.Timestamp, item.Value + if m, ok := v.(map[string]interface{}); ok { + v = m[fieldName] + } + + // Initialize last + if !pointsYielded { + out.Time = k + out.Value = v + out.Fields = item.Fields + out.Tags = item.Tags + pointsYielded = true + } + if k > out.Time { + out.Time = k + out.Value = v + out.Fields = item.Fields + out.Tags = item.Tags + } else if k == out.Time && greaterThan(v, out.Value) { + out.Value = v + out.Fields = item.Fields + out.Tags = item.Tags + } + } + if pointsYielded { + return out + } + return nil +} + +// ReduceLast computes the last of value. 
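+//
+// Inputs are expected to be *firstLastMapOutput values produced by MapLast; nil
+// entries are skipped. Ties on Time are broken by greaterThan on the value, so,
+// illustratively, two outputs at the same timestamp carrying int64(1) and
+// int64(2) reduce to the one with int64(2).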
+func ReduceLast(values []interface{}) interface{} {
+	out := &firstLastMapOutput{}
+	pointsYielded := false
+
+	for _, v := range values {
+		if v == nil {
+			continue
+		}
+
+		val := v.(*firstLastMapOutput)
+		// Initialize last
+		if !pointsYielded {
+			out.Time = val.Time
+			out.Value = val.Value
+			out.Fields = val.Fields
+			out.Tags = val.Tags
+			pointsYielded = true
+		}
+		if val.Time > out.Time {
+			out.Time = val.Time
+			out.Value = val.Value
+			out.Fields = val.Fields
+			out.Tags = val.Tags
+		} else if val.Time == out.Time && greaterThan(val.Value, out.Value) {
+			out.Value = val.Value
+			out.Fields = val.Fields
+			out.Tags = val.Tags
+		}
+	}
+	if pointsYielded {
+		return PositionPoint{
+			Time:   out.Time,
+			Value:  out.Value,
+			Fields: out.Fields,
+			Tags:   out.Tags,
+		}
+	}
+	return nil
+}
+
+type positionOut struct {
+	points   PositionPoints
+	callArgs []string // ordered args in the call
+}
+
+func (p *positionOut) lessKey(a, b *PositionPoint) bool {
+	t1, t2 := a.Tags, b.Tags
+	for _, k := range p.callArgs {
+		if t1[k] != t2[k] {
+			return t1[k] < t2[k]
+		}
+	}
+	return false
+}
+
+// typeCompare compares the types of a and b and returns an arbitrary ordering.
+// It returns -1 if type(a) < type(b), 0 if type(a) == type(b), or 1 if type(a) > type(b), following the strcmp convention
+// from C.
+//
+// If the types are not equal, then it will attempt to coerce them to floating point and return them in the last 2 arguments.
+// If the type cannot be coerced to floating point, it is returned unaltered.
+func typeCompare(a, b interface{}) (int, interface{}, interface{}) {
+	va := reflect.ValueOf(a)
+	vb := reflect.ValueOf(b)
+
+	vakind := va.Type().Kind()
+	vbkind := vb.Type().Kind()
+
+	// same kind. Ordering is dependent on value
+	if vakind == vbkind {
+		return 0, a, b
+	}
+	wa, a := inferFloat(va)
+	wb, b := inferFloat(vb)
+	if wa < wb {
+		return -1, a, b
+	} else if wa == wb {
+		return 0, a, b
+	}
+	return 1, a, b
+}
+
+// inferFloat returns a type weighting and, if applicable, the value coerced to a float64.
+func inferFloat(v reflect.Value) (weight int, value interface{}) {
+	const (
+		stringWeight = iota
+		boolWeight
+		intWeight
+		floatWeight
+	)
+	kind := v.Kind()
+	switch kind {
+	case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8:
+		return intWeight, float64(v.Uint())
+	case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8:
+		return intWeight, float64(v.Int())
+	case reflect.Float64, reflect.Float32:
+		return floatWeight, v.Float()
+	case reflect.Bool:
+		return boolWeight, v.Interface()
+	case reflect.String:
+		return stringWeight, v.Interface()
+	}
+	panic(fmt.Sprintf("inferFloat: unreachable code; type was %T", v.Interface()))
+}
+
+func cmpFloat(a, b float64) int {
+	if a == b {
+		return 0
+	} else if a < b {
+		return -1
+	}
+	return 1
+}
+
+func cmpInt(a, b int64) int {
+	if a == b {
+		return 0
+	} else if a < b {
+		return -1
+	}
+	return 1
+}
+
+func cmpUint(a, b uint64) int {
+	if a == b {
+		return 0
+	} else if a < b {
+		return -1
+	}
+	return 1
+}
+
+// valueCompare returns -1 if a < b, 0 if a == b, or 1 if a > b.
+// If the interfaces are two different types, then 0 is returned.
+func valueCompare(a, b interface{}) int {
+	if reflect.TypeOf(a).Kind() != reflect.TypeOf(b).Kind() {
+		return 0
+	}
+	// compare by float64/int64 first as that is the most likely match
+	{
+		d1, ok1 := a.(float64)
+		d2, ok2 := b.(float64)
+		if ok1 && ok2 {
+			return cmpFloat(d1, d2)
+		}
+	}
+
+	{
+		d1, ok1 := a.(int64)
+		d2, ok2 := b.(int64)
+		if ok1 && ok2 {
+			return cmpInt(d1, d2)
+		}
+	}
+
+	// compare by every numeric type left
+	{
+		d1, ok1 := a.(float32)
+		d2, ok2 := b.(float32)
+		if ok1 && ok2 {
+			return cmpFloat(float64(d1), float64(d2))
+		}
+	}
+
+	{
+		d1, ok1 := a.(uint64)
+		d2, ok2 := b.(uint64)
+		if ok1 && ok2 {
+			return cmpUint(d1, d2)
+		}
+	}
+
+	{
+		d1, ok1 := a.(uint32)
+		d2, ok2 := b.(uint32)
+		if ok1 && ok2 {
+			return cmpUint(uint64(d1), uint64(d2))
+		}
+	}
+
+	{
+		d1, ok1 := a.(uint16)
+		d2, ok2 := b.(uint16)
+		if ok1 && ok2 {
+			return cmpUint(uint64(d1), uint64(d2))
+		}
+	}
+
+	{
+		d1, ok1 := a.(uint8)
+		d2, ok2 := b.(uint8)
+		if ok1 && ok2 {
+			return cmpUint(uint64(d1), uint64(d2))
+		}
+	}
+
+	{
+		d1, ok1 := a.(int32)
+		d2, ok2 := b.(int32)
+		if ok1 && ok2 {
+			return cmpInt(int64(d1), int64(d2))
+		}
+	}
+
+	{
+		d1, ok1 := a.(int16)
+		d2, ok2 := b.(int16)
+		if ok1 && ok2 {
+			return cmpInt(int64(d1), int64(d2))
+		}
+	}
+
+	{
+		d1, ok1 := a.(int8)
+		d2, ok2 := b.(int8)
+		if ok1 && ok2 {
+			return cmpInt(int64(d1), int64(d2))
+		}
+	}
+
+	{
+		d1, ok1 := a.(bool)
+		d2, ok2 := b.(bool)
+		if ok1 && ok2 {
+			if d1 == d2 {
+				return 0
+			} else if d1 && !d2 {
+				return 1
+			}
+			return -1
+		}
+	}
+
+	{
+		d1, ok1 := a.(string)
+		d2, ok2 := b.(string)
+		if ok1 && ok2 {
+			return strings.Compare(d1, d2)
+		}
+	}
+	panic(fmt.Sprintf("unreachable code; types were %T, %T", a, b))
+}
+
+// PositionPoints is a slice of PositionPoint values used to return richer data from a reduce func
+type PositionPoints []PositionPoint
+
+// PositionPoint holds all the data from a written point that was selected in the query;
+// it is used in the post-processing phase of the query executor to fill in additional
+// tag and field values
+type PositionPoint struct {
+	Time   int64
+	Value  interface{}
+	Fields map[string]interface{}
+	Tags   map[string]string
+}
+
+type topBottomMapOut struct {
+	*positionOut
+	bottom bool
+}
+
+func (t *topBottomMapOut) Len() int      { return len(t.points) }
+func (t *topBottomMapOut) Swap(i, j int) { t.points[i], t.points[j] = t.points[j], t.points[i] }
+func (t *topBottomMapOut) Less(i, j int) bool {
+	return t.positionPointLess(&t.points[i], &t.points[j])
+}
+
+func (t *topBottomMapOut) positionPointLess(pa, pb *PositionPoint) bool {
+	// old C trick makes this code easier to read. Imagine
+	// that the OP in "cmp(i, j) OP 0" is the comparison you want
+	// between i and j
+	cmpt, a, b := typeCompare(pa.Value, pb.Value)
+	cmpv := valueCompare(a, b)
+	if cmpv != 0 {
+		if t.bottom {
+			return cmpv > 0
+		}
+		return cmpv < 0
+	}
+	if cmpt != 0 {
+		return cmpt < 0
+	}
+	k1, k2 := pa.Time, pb.Time
+	if k1 != k2 {
+		return k1 > k2
+	}
+	return !t.lessKey(pa, pb)
+}
+
+// Push is required by heap.Interface but is never used; it panics to make any accidental use obvious.
+func (t *topBottomMapOut) Push(i interface{}) {
+	panic("topBottomMapOut.Push should never be called")
+}
+
+// Pop doesn't return anything meaningful: we never look at the
+// return value, and we don't want to allocate just to generate an interface.
+func (t *topBottomMapOut) Pop() interface{} {
+	t.points = t.points[:len(t.points)-1]
+	return nil
+}
+
+func (t *topBottomMapOut) insert(p PositionPoint) {
+	t.points[0] = p
+	heap.Fix(t, 0)
+}
+
+type topBottomReduceOut struct {
+	positionOut
+	bottom bool
+}
+
+func (t topBottomReduceOut) Len() int      { return len(t.points) }
+func (t topBottomReduceOut) Swap(i, j int) { t.points[i], t.points[j] = t.points[j], t.points[i] }
+func (t topBottomReduceOut) Less(i, j int) bool {
+	// Now sort by time first, not value
+
+	k1, k2 := t.points[i].Time, t.points[j].Time
+	if k1 != k2 {
+		return k1 < k2
+	}
+	cmpt, a, b := typeCompare(t.points[i].Value, t.points[j].Value)
+	cmpv := valueCompare(a, b)
+	if cmpv != 0 {
+		if t.bottom {
+			return cmpv < 0
+		}
+		return cmpv > 0
+	}
+	if cmpt != 0 {
+		return cmpt < 0
+	}
+	return t.lessKey(&t.points[i], &t.points[j])
+}
+
+// topCallArgs returns any additional field/tag names that may be needed to sort with.
+// It is important to maintain the order in which they were asked for in the call,
+// since that order is significant for sorting.
+func topCallArgs(c *influxql.Call) []string {
+	var names []string
+	for _, v := range c.Args[1 : len(c.Args)-1] {
+		if f, ok := v.(*influxql.VarRef); ok {
+			names = append(names, f.Val)
+		}
+	}
+	return names
+}
+
+func tagkeytop(args []string, fields map[string]interface{}, keys map[string]string) string {
+	key := ""
+	for _, a := range args {
+		if v, ok := fields[a]; ok {
+			key += a + ":" + fmt.Sprintf("%v", v) + ","
+			continue
+		}
+		if v, ok := keys[a]; ok {
+			key += a + ":" + v + ","
+			continue
+		}
+	}
+	return key
+}
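+
+// A hypothetical illustration of the key format: tagkeytop([]string{"host"}, nil,
+// map[string]string{"host": "a"}) builds the key "host:a,".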
+
+// mapIter is a map-backed iterator. We need it for the top
+// query, but luckily that doesn't require ordered
+// iteration, so we can fake it.
+type mapIter struct {
+	m          map[string]PositionPoint
+	currTags   map[string]string
+	currFields map[string]interface{}
+	tmin       int64
+}
+
+func (m *mapIter) TMin() int64 {
+	return m.tmin
+}
+
+func (m *mapIter) Fields() map[string]interface{} {
+	return m.currFields
+}
+
+func (m *mapIter) Tags() map[string]string {
+	return m.currTags
+}
+
+func (m *mapIter) Next() (time int64, value interface{}) {
+	// this is a bit ugly, but we can't think of any other way that doesn't involve dumping
+	// the entire map to an array
+	for key, p := range m.m {
+		m.currFields = p.Fields
+		m.currTags = p.Tags
+		time = p.Time
+		value = p.Value
+		delete(m.m, key)
+		return
+	}
+	return -1, nil
+}
+
+// MapTopBottom emits the top/bottom data points for each group by interval
+func MapTopBottom(input *MapInput, limit int, fields []string, argCount int, callName string) interface{} {
+	out := positionOut{callArgs: fields}
+	out.points = make([]PositionPoint, 0, limit)
+	minheap := topBottomMapOut{
+		&out,
+		callName == "bottom",
+	}
+	tagmap := make(map[string]PositionPoint)
+
+	// Throughout this function we refer to max and top. This is by the ordering specified by
+	// minheap, not the ordering based on value. Since this function handles both top and bottom,
+	// max can be the lowest-valued entry.
+
+	// buffer so we don't allocate every time through
+	var pp PositionPoint
+	if argCount > 2 {
+		// this is a tag aggregating query.
+		// For each unique combination of the tags given,
+		// select the max and then fall through to select top of those
+		// points
+		for _, item := range input.Items {
+			pp = PositionPoint{
+				Time:   item.Timestamp,
+				Value:  item.Value,
+				Fields: item.Fields,
+				Tags:   item.Tags,
+			}
+			tags := item.Tags
+
+			// TODO in the future we need to send in fields as well
+			// this will allow a user to query on both fields and tags
+			// fields will take the priority over tags if there is a name collision
+			key := tagkeytop(fields, nil, tags)
+			p, ok := tagmap[key]
+			if !ok || minheap.positionPointLess(&p, &pp) {
+				tagmap[key] = pp
+			}
+		}
+
+		items := make([]MapItem, 0, len(tagmap))
+		for _, p := range tagmap {
+			items = append(items, MapItem{Timestamp: p.Time, Value: p.Value, Fields: p.Fields, Tags: p.Tags})
+		}
+		input = &MapInput{
+			TMin:  input.TMin,
+			Items: items,
+		}
+	}
+
+	for _, item := range input.Items {
+		t := item.Timestamp
+		if input.TMin > -1 {
+			t = input.TMin
+		}
+		if len(out.points) < limit {
+			out.points = append(out.points, PositionPoint{t, item.Value, item.Fields, item.Tags})
+			if len(out.points) == limit {
+				heap.Init(&minheap)
+			}
+		} else {
+			// we're over the limit, so find out if we're bigger than the
+			// smallest point in the set and eject it if we are
+			minval := &out.points[0]
+			pp = PositionPoint{t, item.Value, item.Fields, item.Tags}
+			if minheap.positionPointLess(minval, &pp) {
+				minheap.insert(pp)
+			}
+		}
+	}
+
+	// should only happen on empty iterator.
+	if len(out.points) == 0 {
+		return nil
+	} else if len(out.points) < limit {
+		// it would be as fast to just sort regularly here,
+		// but falling down to the heapsort means we can get
+		// rid of another sort order.
+		heap.Init(&minheap)
+	}
+
+	// minheap should now contain the largest/smallest values that were encountered
+	// during iteration.
+	//
+	// we want these values in ascending sorted order. We can achieve this by iteratively
+	// removing the lowest element and putting it at the end of the array. This is analogous
+	// to a heap sort.
+	//
+	// computer science is fun!
+	result := out.points
+	for len(out.points) > 0 {
+		p := out.points[0]
+		heap.Pop(&minheap)
+
+		// reslice so that we can get to the element just after the heap
+		endslice := out.points[:len(out.points)+1]
+		endslice[len(endslice)-1] = p
+	}
+
+	// the ascending order is now in the result slice
+	return result
+}
+
+// ReduceTopBottom computes the top or bottom values for each key.
+// This function assumes that its inputs are in sorted ascending order.
+func ReduceTopBottom(values []interface{}, c *influxql.Call) interface{} {
+	lit, _ := c.Args[len(c.Args)-1].(*influxql.NumberLiteral)
+	limit := int(lit.Val)
+
+	out := positionOut{callArgs: topCallArgs(c)}
+	minheap := topBottomMapOut{&out, c.Name == "bottom"}
+	results := make([]PositionPoints, 0, len(values))
+	out.points = make([]PositionPoint, 0, limit)
+	for _, v := range values {
+		if v == nil {
+			continue
+		}
+		o, ok := v.(PositionPoints)
+		if ok {
+			results = append(results, o)
+		}
+	}
+	// These ranges are all in sorted ascending order
+	// so we can grab the top value out of all of them
+	// to figure out the top X ones.
+	for i := 0; i < limit; i++ {
+		var max *PositionPoint
+		whichselected := -1
+		for iter, v := range results {
+			if len(v) > 0 && (max == nil || minheap.positionPointLess(max, &v[0])) {
+				max = &v[0]
+				whichselected = iter
+			}
+		}
+		if whichselected == -1 {
+			// none of the points have any values
+			// so we can return what we have now
+			sort.Sort(topBottomReduceOut{out, c.Name == "bottom"})
+			return out.points
+		}
+		v := results[whichselected]
+		out.points = append(out.points, v[0])
+		results[whichselected] = v[1:]
+	}
+
+	// now we need to re-sort the tops by time
+	sort.Sort(topBottomReduceOut{out, c.Name == "bottom"})
+	return out.points
+}
+
+// MapEcho emits the data points for each group by interval
+func MapEcho(input *MapInput) interface{} {
+	var values []interface{}
+	for _, item := range input.Items {
+		values = append(values, item.Value)
+	}
+	return values
+}
+
+// ReducePercentile computes the percentile of values for each key.
+func ReducePercentile(values []interface{}, c *influxql.Call) interface{} {
+	// Checks that this arg exists and is of a valid type are done in the parsing validation
+	// and have test coverage there.
+	lit, _ := c.Args[1].(*influxql.NumberLiteral)
+	percentile := lit.Val
+
+	var allValues []float64
+
+	for _, v := range values {
+		if v == nil {
+			continue
+		}
+
+		vals := v.([]interface{})
+		for _, v := range vals {
+			switch v := v.(type) {
+			case int64:
+				allValues = append(allValues, float64(v))
+			case float64:
+				allValues = append(allValues, v)
+			}
+		}
+	}
+
+	sort.Float64s(allValues)
+	length := len(allValues)
+	index := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1
+
+	if index < 0 || index >= len(allValues) {
+		return nil
+	}
+
+	return allValues[index]
+}
+
+// IsNumeric returns whether a given aggregate can only be run on numeric fields.
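+//
+// For example, IsNumeric(&influxql.Call{Name: "count"}) is false, since COUNT can
+// run on any field type, while a call named "mean" reports true.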
+func IsNumeric(c *influxql.Call) bool { + switch c.Name { + case "count", "first", "last", "distinct": + return false + default: + return true + } +} + +// MapRawQuery is for queries without aggregates +func MapRawQuery(input *MapInput) interface{} { + var values []*rawQueryMapOutput + for _, item := range input.Items { + values = append(values, &rawQueryMapOutput{item.Timestamp, item.Value}) + } + return values +} + +type rawQueryMapOutput struct { + Time int64 + Values interface{} +} + +func (r *rawQueryMapOutput) String() string { + return fmt.Sprintf("{%#v %#v}", r.Time, r.Values) +} + +type rawOutputs []*rawQueryMapOutput + +func (a rawOutputs) Len() int { return len(a) } +func (a rawOutputs) Less(i, j int) bool { return a[i].Time < a[j].Time } +func (a rawOutputs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func greaterThan(a, b interface{}) bool { + switch t := a.(type) { + case int64: + return t > b.(int64) + case float64: + return t > b.(float64) + case string: + return t > b.(string) + case bool: + return t == true + } + return false +} diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/functions_test.go b/_third_party/github.com/influxdb/influxdb/tsdb/functions_test.go new file mode 100644 index 0000000000..6b09b60673 --- /dev/null +++ b/_third_party/github.com/influxdb/influxdb/tsdb/functions_test.go @@ -0,0 +1,862 @@ +package tsdb + +import ( + "reflect" + "testing" + "time" + + "bosun.org/_third_party/github.com/davecgh/go-spew/spew" + "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" +) + +import "sort" + +// type testPoint struct { +// time int64 +// value interface{} +// fields map[string]interface{} +// tags map[string]string +// } + +func TestMapMeanNoValues(t *testing.T) { + if got := MapMean(&MapInput{}); got != nil { + t.Errorf("output mismatch: exp nil got %v", got) + } +} + +func TestMapMean(t *testing.T) { + + tests := []struct { + input *MapInput + output *meanMapOutput + }{ + { // Single point + input: &MapInput{ + Items: []MapItem{ + {Timestamp: 1, Value: 1.0}, + }, + }, + output: &meanMapOutput{1, 1, Float64Type}, + }, + { // Two points + input: &MapInput{ + Items: []MapItem{ + {Timestamp: 1, Value: float64(2.0)}, + {Timestamp: 2, Value: float64(8.0)}, + }, + }, + output: &meanMapOutput{2, 5.0, Float64Type}, + }, + } + + for _, test := range tests { + got := MapMean(test.input) + if got == nil { + t.Fatalf("MapMean(%v): output mismatch: exp %v got %v", test.input, test.output, got) + } + + if got.(*meanMapOutput).Count != test.output.Count || got.(*meanMapOutput).Mean != test.output.Mean { + t.Errorf("output mismatch: exp %v got %v", test.output, got) + } + } +} + +func TestInitializeMapFuncDerivative(t *testing.T) { + + for _, fn := range []string{"derivative", "non_negative_derivative"} { + // Single field arg should return MapEcho + c := &influxql.Call{ + Name: fn, + Args: []influxql.Expr{ + &influxql.VarRef{Val: " field1"}, + &influxql.DurationLiteral{Val: time.Hour}, + }, + } + + _, err := initializeMapFunc(c) + if err != nil { + t.Errorf("InitializeMapFunc(%v) unexpected error. got %v", c, err) + } + + // Nested Aggregate func should return the map func for the nested aggregate + c = &influxql.Call{ + Name: fn, + Args: []influxql.Expr{ + &influxql.Call{Name: "mean", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}, + &influxql.DurationLiteral{Val: time.Hour}, + }, + } + + _, err = initializeMapFunc(c) + if err != nil { + t.Errorf("InitializeMapFunc(%v) unexpected error. 
got %v", c, err) + } + } +} + +func TestReducePercentileNil(t *testing.T) { + + input := []interface{}{ + nil, + } + + // ReducePercentile should ignore nil values when calculating the percentile + got := ReducePercentile(input, &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 100}}}) + if got != nil { + t.Fatalf("ReducePercentile(100) returned wrong type. exp nil got %v", got) + } +} + +func TestMapDistinct(t *testing.T) { + const ( // prove that we're ignoring time + timeId1 = iota + 1 + timeId2 + timeId3 + timeId4 + timeId5 + timeId6 + ) + + input := &MapInput{ + Items: []MapItem{ + {Timestamp: timeId1, Value: uint64(1)}, + {Timestamp: timeId2, Value: uint64(1)}, + {Timestamp: timeId3, Value: "1"}, + {Timestamp: timeId4, Value: uint64(1)}, + {Timestamp: timeId5, Value: float64(1.0)}, + {Timestamp: timeId6, Value: "1"}, + }, + } + + values := MapDistinct(input).(interfaceValues) + + if exp, got := 3, len(values); exp != got { + t.Errorf("Wrong number of values. exp %v got %v", exp, got) + } + + sort.Sort(values) + + exp := interfaceValues{ + "1", + uint64(1), + float64(1), + } + + if !reflect.DeepEqual(values, exp) { + t.Errorf("Wrong values. exp %v got %v", spew.Sdump(exp), spew.Sdump(values)) + } +} + +func TestMapDistinctNil(t *testing.T) { + values := MapDistinct(&MapInput{}) + + if values != nil { + t.Errorf("Wrong values. exp nil got %v", spew.Sdump(values)) + } +} + +func TestReduceDistinct(t *testing.T) { + v1 := interfaceValues{ + "2", + "1", + float64(2.0), + float64(1), + uint64(2), + uint64(1), + true, + false, + } + + expect := interfaceValues{ + "1", + "2", + false, + true, + uint64(1), + float64(1), + uint64(2), + float64(2), + } + + got := ReduceDistinct([]interface{}{v1, v1, expect}) + + if !reflect.DeepEqual(got, expect) { + t.Errorf("Wrong values. exp %v got %v", spew.Sdump(expect), spew.Sdump(got)) + } +} + +func TestReduceDistinctNil(t *testing.T) { + tests := []struct { + name string + values []interface{} + }{ + { + name: "nil values", + values: nil, + }, + { + name: "nil mapper", + values: []interface{}{nil}, + }, + { + name: "no mappers", + values: []interface{}{}, + }, + { + name: "empty mappper (len 1)", + values: []interface{}{interfaceValues{}}, + }, + { + name: "empty mappper (len 2)", + values: []interface{}{interfaceValues{}, interfaceValues{}}, + }, + } + + for _, test := range tests { + t.Log(test.name) + got := ReduceDistinct(test.values) + if got != nil { + t.Errorf("Wrong values. exp nil got %v", spew.Sdump(got)) + } + } +} + +func Test_distinctValues_Sort(t *testing.T) { + values := interfaceValues{ + "2", + "1", + float64(2.0), + float64(1), + uint64(2), + uint64(1), + true, + false, + } + + expect := interfaceValues{ + "1", + "2", + false, + true, + uint64(1), + float64(1), + uint64(2), + float64(2), + } + + sort.Sort(values) + + if !reflect.DeepEqual(values, expect) { + t.Errorf("Wrong values. 
exp %v got %v", spew.Sdump(expect), spew.Sdump(values)) + } +} + +func TestMapCountDistinct(t *testing.T) { + const ( // prove that we're ignoring time + timeId1 = iota + 1 + timeId2 + timeId3 + timeId4 + timeId5 + timeId6 + timeId7 + ) + + input := &MapInput{ + Items: []MapItem{ + {Timestamp: timeId1, Value: uint64(1)}, + {Timestamp: timeId2, Value: uint64(1)}, + {Timestamp: timeId3, Value: "1"}, + {Timestamp: timeId4, Value: uint64(1)}, + {Timestamp: timeId5, Value: float64(1.0)}, + {Timestamp: timeId6, Value: "1"}, + {Timestamp: timeId7, Value: true}, + }, + } + + values := MapCountDistinct(input).(map[interface{}]struct{}) + + if exp, got := 4, len(values); exp != got { + t.Errorf("Wrong number of values. exp %v got %v", exp, got) + } + + exp := map[interface{}]struct{}{ + uint64(1): struct{}{}, + float64(1): struct{}{}, + "1": struct{}{}, + true: struct{}{}, + } + + if !reflect.DeepEqual(values, exp) { + t.Errorf("Wrong values. exp %v got %v", spew.Sdump(exp), spew.Sdump(values)) + } +} + +func TestMapCountDistinctNil(t *testing.T) { + if values := MapCountDistinct(&MapInput{}); values != nil { + t.Errorf("Wrong values. exp nil got %v", spew.Sdump(values)) + } +} + +func TestReduceCountDistinct(t *testing.T) { + v1 := map[interface{}]struct{}{ + "2": struct{}{}, + "1": struct{}{}, + float64(2.0): struct{}{}, + float64(1): struct{}{}, + uint64(2): struct{}{}, + uint64(1): struct{}{}, + true: struct{}{}, + false: struct{}{}, + } + + v2 := map[interface{}]struct{}{ + uint64(1): struct{}{}, + float64(1): struct{}{}, + uint64(2): struct{}{}, + float64(2): struct{}{}, + false: struct{}{}, + true: struct{}{}, + "1": struct{}{}, + "2": struct{}{}, + } + + exp := 8 + got := ReduceCountDistinct([]interface{}{v1, v1, v2}) + + if !reflect.DeepEqual(got, exp) { + t.Errorf("Wrong values. exp %v got %v", spew.Sdump(exp), spew.Sdump(got)) + } +} + +func TestReduceCountDistinctNil(t *testing.T) { + emptyResults := make(map[interface{}]struct{}) + tests := []struct { + name string + values []interface{} + }{ + { + name: "nil values", + values: nil, + }, + { + name: "nil mapper", + values: []interface{}{nil}, + }, + { + name: "no mappers", + values: []interface{}{}, + }, + { + name: "empty mappper (len 1)", + values: []interface{}{emptyResults}, + }, + { + name: "empty mappper (len 2)", + values: []interface{}{emptyResults, emptyResults}, + }, + } + + for _, test := range tests { + t.Log(test.name) + got := ReduceCountDistinct(test.values) + if got != 0 { + t.Errorf("Wrong values. exp nil got %v", spew.Sdump(got)) + } + } +} + +var getSortedRangeData = []float64{ + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, +} + +var getSortedRangeTests = []struct { + name string + data []float64 + start int + count int + expected []float64 +}{ + {"first 5", getSortedRangeData, 0, 5, []float64{0, 1, 2, 3, 4}}, + {"0 length", getSortedRangeData, 8, 0, []float64{}}, + {"past end of data", getSortedRangeData, len(getSortedRangeData) - 3, 5, []float64{67, 68, 69}}, +} + +func TestGetSortedRange(t *testing.T) { + for _, tt := range getSortedRangeTests { + results := getSortedRange(tt.data, tt.start, tt.count) + if len(results) != len(tt.expected) { + t.Errorf("Test %s error. 
Expected getSortedRange to return %v but got %v", tt.name, tt.expected, results) + } + for i, testPoint := range tt.expected { + if testPoint != results[i] { + t.Errorf("Test %s error. getSortedRange returned wrong result for index %v. Expected %v but got %v", tt.name, i, testPoint, results[i]) + } + } + } +} + +var benchGetSortedRangeResults []float64 + +func BenchmarkGetSortedRangeByPivot(b *testing.B) { + data := make([]float64, len(getSortedRangeData)) + var results []float64 + for i := 0; i < b.N; i++ { + copy(data, getSortedRangeData) + results = getSortedRange(data, 8, 15) + } + benchGetSortedRangeResults = results +} + +func BenchmarkGetSortedRangeBySort(b *testing.B) { + data := make([]float64, len(getSortedRangeData)) + var results []float64 + for i := 0; i < b.N; i++ { + copy(data, getSortedRangeData) + sort.Float64s(data) + results = data[8:23] + } + benchGetSortedRangeResults = results +} + +func TestMapTopBottom(t *testing.T) { + tests := []struct { + name string + skip bool + input *MapInput + exp positionOut + call *influxql.Call + }{ + { + name: "top int64 - basic", + input: &MapInput{ + TMin: -1, + Items: []MapItem{ + {Timestamp: 10, Value: int64(53), Tags: map[string]string{"host": "a"}}, + {Timestamp: 20, Value: int64(88), Tags: map[string]string{"host": "a"}}, + }, + }, + exp: positionOut{ + points: PositionPoints{ + {20, int64(88), nil, map[string]string{"host": "a"}}, + {10, int64(53), nil, map[string]string{"host": "a"}}, + }, + }, + call: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "top int64 - tie on value, resolve based on time", + input: &MapInput{ + TMin: -1, + Items: []MapItem{ + {Timestamp: 20, Value: int64(99), Tags: map[string]string{"host": "a"}}, + {Timestamp: 10, Value: int64(53), Tags: map[string]string{"host": "a"}}, + {Timestamp: 10, Value: int64(99), Tags: map[string]string{"host": "a"}}, + }, + }, + exp: positionOut{ + callArgs: []string{"host"}, + points: PositionPoints{ + {10, int64(99), nil, map[string]string{"host": "a"}}, + {20, int64(99), nil, map[string]string{"host": "a"}}, + }, + }, + call: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "top mixed numerics - ints", + input: &MapInput{ + TMin: -1, + Items: []MapItem{ + {Timestamp: 10, Value: int64(99), Tags: map[string]string{"host": "a"}}, + {Timestamp: 10, Value: int64(53), Tags: map[string]string{"host": "a"}}, + {Timestamp: 20, Value: uint64(88), Tags: map[string]string{"host": "a"}}, + }, + }, + exp: positionOut{ + points: PositionPoints{ + {10, int64(99), nil, map[string]string{"host": "a"}}, + {20, uint64(88), nil, map[string]string{"host": "a"}}, + }, + }, + call: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "top mixed numerics - ints & floats", + input: &MapInput{ + TMin: -1, + Items: []MapItem{ + {Timestamp: 10, Value: float64(99), Tags: map[string]string{"host": "a"}}, + {Timestamp: 10, Value: int64(53), Tags: map[string]string{"host": "a"}}, + {Timestamp: 20, Value: uint64(88), Tags: map[string]string{"host": "a"}}, + }, + }, + exp: positionOut{ + points: PositionPoints{ + {10, float64(99), nil, map[string]string{"host": "a"}}, + {20, uint64(88), nil, map[string]string{"host": "a"}}, + }, + }, + call: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 
2}}}, + }, + { + name: "top mixed numerics - ints, floats, & strings", + input: &MapInput{ + TMin: -1, + Items: []MapItem{ + {Timestamp: 10, Value: float64(99), Tags: map[string]string{"host": "a"}}, + {Timestamp: 10, Value: int64(53), Tags: map[string]string{"host": "a"}}, + {Timestamp: 20, Value: "88", Tags: map[string]string{"host": "a"}}, + }, + }, + exp: positionOut{ + points: PositionPoints{ + {10, float64(99), nil, map[string]string{"host": "a"}}, + {10, int64(53), nil, map[string]string{"host": "a"}}, + }, + }, + call: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "top bools", + input: &MapInput{ + TMin: -1, + Items: []MapItem{ + {Timestamp: 10, Value: true, Tags: map[string]string{"host": "a"}}, + {Timestamp: 10, Value: true, Tags: map[string]string{"host": "a"}}, + {Timestamp: 20, Value: false, Tags: map[string]string{"host": "a"}}, + }, + }, + exp: positionOut{ + points: PositionPoints{ + {10, true, nil, map[string]string{"host": "a"}}, + {10, true, nil, map[string]string{"host": "a"}}, + }, + }, + call: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "bottom int64 - basic", + input: &MapInput{ + TMin: -1, + Items: []MapItem{ + {Timestamp: 10, Value: int64(99), Tags: map[string]string{"host": "a"}}, + {Timestamp: 10, Value: int64(53), Tags: map[string]string{"host": "a"}}, + {Timestamp: 20, Value: int64(88), Tags: map[string]string{"host": "a"}}, + }, + }, + exp: positionOut{ + points: PositionPoints{ + {10, int64(53), nil, map[string]string{"host": "a"}}, + {20, int64(88), nil, map[string]string{"host": "a"}}, + }, + }, + call: &influxql.Call{Name: "bottom", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "bottom int64 - tie on value, resolve based on time", + input: &MapInput{ + TMin: -1, + Items: []MapItem{ + {Timestamp: 10, Value: int64(53), Tags: map[string]string{"host": "a"}}, + {Timestamp: 20, Value: int64(53), Tags: map[string]string{"host": "a"}}, + {Timestamp: 20, Value: int64(53), Tags: map[string]string{"host": "a"}}, + }, + }, + exp: positionOut{ + callArgs: []string{"host"}, + points: PositionPoints{ + {10, int64(53), nil, map[string]string{"host": "a"}}, + {20, int64(53), nil, map[string]string{"host": "a"}}, + }, + }, + call: &influxql.Call{Name: "bottom", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "bottom mixed numerics - ints", + input: &MapInput{ + TMin: -1, + Items: []MapItem{ + {Timestamp: 10, Value: int64(99), Tags: map[string]string{"host": "a"}}, + {Timestamp: 10, Value: int64(53), Tags: map[string]string{"host": "a"}}, + {Timestamp: 20, Value: uint64(88), Tags: map[string]string{"host": "a"}}, + }, + }, + exp: positionOut{ + points: PositionPoints{ + {10, int64(53), nil, map[string]string{"host": "a"}}, + {20, uint64(88), nil, map[string]string{"host": "a"}}, + }, + }, + call: &influxql.Call{Name: "bottom", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "bottom mixed numerics - ints & floats", + input: &MapInput{ + TMin: -1, + Items: []MapItem{ + {Timestamp: 10, Value: int64(99), Tags: map[string]string{"host": "a"}}, + {Timestamp: 10, Value: float64(53), Tags: map[string]string{"host": "a"}}, + {Timestamp: 20, Value: uint64(88), Tags: map[string]string{"host": "a"}}, + }, + }, + exp: positionOut{ + 
points: PositionPoints{ + {10, float64(53), nil, map[string]string{"host": "a"}}, + {20, uint64(88), nil, map[string]string{"host": "a"}}, + }, + }, + call: &influxql.Call{Name: "bottom", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "bottom mixed numerics - ints, floats, & strings", + input: &MapInput{ + TMin: -1, + Items: []MapItem{ + {Timestamp: 10, Value: float64(99), Tags: map[string]string{"host": "a"}}, + {Timestamp: 10, Value: int64(53), Tags: map[string]string{"host": "a"}}, + {Timestamp: 20, Value: "88", Tags: map[string]string{"host": "a"}}, + }, + }, + exp: positionOut{ + points: PositionPoints{ + {10, int64(53), nil, map[string]string{"host": "a"}}, + {10, float64(99), nil, map[string]string{"host": "a"}}, + }, + }, + call: &influxql.Call{Name: "bottom", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "bottom bools", + input: &MapInput{ + TMin: -1, + Items: []MapItem{ + {Timestamp: 10, Value: true, Tags: map[string]string{"host": "a"}}, + {Timestamp: 10, Value: true, Tags: map[string]string{"host": "a"}}, + {Timestamp: 20, Value: false, Tags: map[string]string{"host": "a"}}, + }, + }, + exp: positionOut{ + points: PositionPoints{ + {20, false, nil, map[string]string{"host": "a"}}, + {10, true, nil, map[string]string{"host": "a"}}, + }, + }, + call: &influxql.Call{Name: "bottom", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + } + + for _, test := range tests { + if test.skip { + continue + } + lit, _ := test.call.Args[len(test.call.Args)-1].(*influxql.NumberLiteral) + limit := int(lit.Val) + fields := topCallArgs(test.call) + + values := MapTopBottom(test.input, limit, fields, len(test.call.Args), test.call.Name).(PositionPoints) + t.Logf("Test: %s", test.name) + if exp, got := len(test.exp.points), len(values); exp != got { + t.Errorf("Wrong number of values. exp %v got %v", exp, got) + } + if !reflect.DeepEqual(values, test.exp.points) { + t.Errorf("Wrong values. 
\nexp\n %v\ngot\n %v", spew.Sdump(test.exp.points), spew.Sdump(values)) + } + } +} + +func TestReduceTopBottom(t *testing.T) { + tests := []struct { + name string + skip bool + values []interface{} + exp PositionPoints + call *influxql.Call + }{ + { + name: "top int64 - single map", + values: []interface{}{ + PositionPoints{ + {10, int64(99), nil, map[string]string{"host": "a"}}, + {20, int64(88), nil, map[string]string{"host": "a"}}, + {10, int64(53), nil, map[string]string{"host": "b"}}, + }, + }, + exp: PositionPoints{ + PositionPoint{10, int64(99), nil, map[string]string{"host": "a"}}, + PositionPoint{20, int64(88), nil, map[string]string{"host": "a"}}, + }, + call: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "top int64 - double map", + values: []interface{}{ + PositionPoints{ + {10, int64(99), nil, map[string]string{"host": "a"}}, + }, + PositionPoints{ + {20, int64(88), nil, map[string]string{"host": "a"}}, + {10, int64(53), nil, map[string]string{"host": "b"}}, + }, + }, + exp: PositionPoints{ + PositionPoint{10, int64(99), nil, map[string]string{"host": "a"}}, + PositionPoint{20, int64(88), nil, map[string]string{"host": "a"}}, + }, + call: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "top int64 - double map with nil", + values: []interface{}{ + PositionPoints{ + {10, int64(99), nil, map[string]string{"host": "a"}}, + {20, int64(88), nil, map[string]string{"host": "a"}}, + {10, int64(53), nil, map[string]string{"host": "b"}}, + }, + nil, + }, + exp: PositionPoints{ + PositionPoint{10, int64(99), nil, map[string]string{"host": "a"}}, + PositionPoint{20, int64(88), nil, map[string]string{"host": "a"}}, + }, + call: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "top int64 - double map with non-matching tags and tag selected", + values: []interface{}{ + PositionPoints{ + {10, int64(99), nil, map[string]string{"host": "a"}}, + {20, int64(88), nil, map[string]string{}}, + {10, int64(53), nil, map[string]string{"host": "b"}}, + }, + nil, + }, + exp: PositionPoints{ + PositionPoint{10, int64(99), nil, map[string]string{"host": "a"}}, + PositionPoint{20, int64(88), nil, map[string]string{}}, + }, + call: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.VarRef{Val: "host"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + skip: true, + name: "top int64 - double map with non-matching tags", + values: []interface{}{ + PositionPoints{ + {10, int64(99), nil, map[string]string{"host": "a"}}, + {20, int64(88), nil, map[string]string{}}, + {10, int64(53), nil, map[string]string{"host": "b"}}, + }, + nil, + }, + exp: PositionPoints{ + PositionPoint{10, int64(99), nil, map[string]string{"host": "a"}}, + PositionPoint{20, int64(55), nil, map[string]string{"host": "b"}}, + }, + call: &influxql.Call{Name: "bottom", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "bottom int64 - single map", + values: []interface{}{ + PositionPoints{ + {10, int64(53), nil, map[string]string{"host": "b"}}, + {20, int64(88), nil, map[string]string{"host": "a"}}, + {10, int64(99), nil, map[string]string{"host": "a"}}, + }, + }, + exp: PositionPoints{ + PositionPoint{10, int64(53), nil, map[string]string{"host": "b"}}, + PositionPoint{20, int64(88), nil, 
map[string]string{"host": "a"}}, + }, + call: &influxql.Call{Name: "bottom", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "bottom int64 - double map", + values: []interface{}{ + PositionPoints{ + {10, int64(99), nil, map[string]string{"host": "a"}}, + }, + PositionPoints{ + {10, int64(53), nil, map[string]string{"host": "b"}}, + {20, int64(88), nil, map[string]string{"host": "a"}}, + }, + }, + exp: PositionPoints{ + PositionPoint{10, int64(53), nil, map[string]string{"host": "b"}}, + PositionPoint{20, int64(88), nil, map[string]string{"host": "a"}}, + }, + call: &influxql.Call{Name: "bottom", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "bottom int64 - double map with nil", + values: []interface{}{ + PositionPoints{ + {10, int64(53), nil, map[string]string{"host": "b"}}, + {20, int64(88), nil, map[string]string{"host": "a"}}, + {10, int64(99), nil, map[string]string{"host": "a"}}, + }, + nil, + }, + exp: PositionPoints{ + PositionPoint{10, int64(53), nil, map[string]string{"host": "b"}}, + PositionPoint{20, int64(88), nil, map[string]string{"host": "a"}}, + }, + call: &influxql.Call{Name: "bottom", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + name: "bottom int64 - double map with non-matching tags and tag selected", + values: []interface{}{ + PositionPoints{ + {10, int64(53), nil, map[string]string{"host": "b"}}, + {20, int64(88), nil, map[string]string{}}, + {10, int64(99), nil, map[string]string{"host": "a"}}, + }, + nil, + }, + exp: PositionPoints{ + PositionPoint{10, int64(53), nil, map[string]string{"host": "b"}}, + PositionPoint{20, int64(88), nil, map[string]string{}}, + }, + call: &influxql.Call{Name: "bottom", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.VarRef{Val: "host"}, &influxql.NumberLiteral{Val: 2}}}, + }, + { + skip: true, + name: "bottom int64 - double map with non-matching tags", + values: []interface{}{ + PositionPoints{ + {10, int64(53), nil, map[string]string{"host": "b"}}, + {20, int64(88), nil, map[string]string{}}, + {10, int64(99), nil, map[string]string{"host": "a"}}, + }, + nil, + }, + exp: PositionPoints{ + PositionPoint{10, int64(99), nil, map[string]string{"host": "a"}}, + PositionPoint{20, int64(55), nil, map[string]string{"host": "b"}}, + }, + call: &influxql.Call{Name: "bottom", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}, + }, + } + + for _, test := range tests { + if test.skip { + continue + } + values := ReduceTopBottom(test.values, test.call) + t.Logf("Test: %s", test.name) + if values != nil { + v, _ := values.(PositionPoints) + if exp, got := len(test.exp), len(v); exp != got { + t.Errorf("Wrong number of values. exp %v got %v", exp, got) + } + } + if !reflect.DeepEqual(values, test.exp) { + t.Errorf("Wrong values. 
\nexp\n %v\ngot\n %v", spew.Sdump(test.exp), spew.Sdump(values))
+		}
+	}
+}
diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/mapper.go b/_third_party/github.com/influxdb/influxdb/tsdb/mapper.go
index ef76830f5e..5ea37dd30d 100644
--- a/_third_party/github.com/influxdb/influxdb/tsdb/mapper.go
+++ b/_third_party/github.com/influxdb/influxdb/tsdb/mapper.go
@@ -2,16 +2,47 @@ package tsdb
 
 import (
 	"container/heap"
-	"encoding/binary"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"sort"
-	"strings"
 
 	"bosun.org/_third_party/github.com/influxdb/influxdb/influxql"
+	"bosun.org/_third_party/github.com/influxdb/influxdb/pkg/slices"
 )
 
+// Mapper is the interface all Mapper types must implement.
+type Mapper interface {
+	Open() error
+	TagSets() []string
+	Fields() []string
+	NextChunk() (interface{}, error)
+	Close()
+}
+
+// StatefulMapper encapsulates a Mapper and some state that the executor needs to
+// track for that mapper.
+type StatefulMapper struct {
+	Mapper
+	bufferedChunk *MapperOutput // Last read chunk.
+	drained       bool
+}
+
+// NextChunk returns the next chunk read from the wrapped Mapper, asserted to a *MapperOutput.
+func (sm *StatefulMapper) NextChunk() (*MapperOutput, error) {
+	c, err := sm.Mapper.NextChunk()
+	if err != nil {
+		return nil, err
+	}
+	chunk, ok := c.(*MapperOutput)
+	if !ok {
+		if chunk == interface{}(nil) {
+			return nil, nil
+		}
+	}
+	return chunk, nil
+}
+
 // MapperValue is a complex type, which can encapsulate data from both raw and aggregate
 // mappers. This currently allows marshalling and network system to remain simpler. For
 // aggregate output Time is ignored, and actual Time-Value pairs are contained solely
@@ -22,6 +53,47 @@ type MapperValue struct {
 	Tags  map[string]string `json:"tags,omitempty"` // Meta tags for results
 }
 
+// MapperValueJSON is the JSON-encoded representation of MapperValue. Because MapperValue is
+// a complex type, custom JSON encoding is required so that none of the types contained within
+// a MapperValue are "lost", and so the data are encoded as byte slices where necessary.
+type MapperValueJSON struct {
+	Time    int64             `json:"time,omitempty"`
+	RawData []byte            `json:"rdata,omitempty"`
+	AggData [][]byte          `json:"adata,omitempty"`
+	Tags    map[string]string `json:"tags,omitempty"`
+}
+
+// MarshalJSON returns the JSON-encoded representation of a MapperValue.
+func (mv *MapperValue) MarshalJSON() ([]byte, error) {
+	o := &MapperValueJSON{
+		Time:    mv.Time,
+		AggData: make([][]byte, 0),
+		Tags:    mv.Tags,
+	}
+
+	o.Time = mv.Time
+	o.Tags = mv.Tags
+	if values, ok := mv.Value.([]interface{}); ok {
+		// Value contains a slice of more values. This happens only with
+		// aggregate output.
+		for _, v := range values {
+			b, err := json.Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			o.AggData = append(o.AggData, b)
+		}
+	} else {
+		// It must be raw output, so just marshal the single value.
+		b, err := json.Marshal(mv.Value)
+		if err != nil {
+			return nil, err
+		}
+		o.RawData = b
+	}
+	return json.Marshal(o)
+}
+
 type MapperValues []*MapperValue
 
 func (a MapperValues) Len() int      { return len(a) }
@@ -36,281 +108,191 @@ type MapperOutput struct {
 	cursorKey string // Tagset-based key for the source cursor. Cached for performance reasons.
 }
 
+// MapperOutputJSON is the JSON-encoded representation of MapperOutput. The query data is represented
+// as a raw JSON message, so decode is delayed, and can proceed in a custom manner.
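+//
+// Illustratively (hypothetical values, abridged), an encoded MapperOutput might look like
+//   {"name":"cpu","tags":{"host":"a"},"fields":["value"],"values":[...]}
+// where "values" remains a raw message so the receiver can decode it with knowledge of
+// whether the query was raw or aggregate.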
+type MapperOutputJSON struct { + Name string `json:"name,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Fields []string `json:"fields,omitempty"` // Field names of returned data. + Values json.RawMessage `json:"values,omitempty"` +} + +// MarshalJSON returns the JSON-encoded representation of a MapperOutput. +func (mo *MapperOutput) MarshalJSON() ([]byte, error) { + o := &MapperOutputJSON{ + Name: mo.Name, + Tags: mo.Tags, + Fields: mo.Fields, + } + data, err := json.Marshal(mo.Values) + if err != nil { + return nil, err + } + o.Values = data + + return json.Marshal(o) +} + func (mo *MapperOutput) key() string { return mo.cursorKey } -// SelectMapper is for retrieving data for a query, from a given shard. -type SelectMapper struct { - shard *Shard - remote Mapper - stmt influxql.Statement - selectStmt *influxql.SelectStatement - rawMode bool - chunkSize int - tx Tx // Read transaction for this shard. - queryTMin int64 // Minimum time of the query. - queryTMax int64 // Maximum time of the query. - whereFields []string // field names that occur in the where clause - selectFields []string // field names that occur in the select clause - selectTags []string // tag keys that occur in the select clause - cursors []*tagSetCursor // Cursors per tag sets. - currCursorIndex int // Current tagset cursor being drained. - - // The following attributes are only used when mappers are for aggregate queries. - - queryTMinWindow int64 // Minimum time of the query floored to start of interval. - intervalSize int64 // Size of each interval. - numIntervals int // Maximum number of intervals to return. - currInterval int // Current interval for which data is being fetched. - mapFuncs []influxql.MapFunc // The mapping functions. - fieldNames []string // the field name being read for mapping. +// RawMapper runs the map phase for non-aggregate, raw SELECT queries. +type RawMapper struct { + shard *Shard + stmt *influxql.SelectStatement + qmin, qmax int64 // query time range + + tx Tx + cursors []*TagSetCursor + cursorIndex int + + selectFields []string + selectTags []string + whereFields []string + + ChunkSize int } -// NewSelectMapper returns a mapper for the given shard, which will return data for the SELECT statement. -func NewSelectMapper(shard *Shard, stmt influxql.Statement, chunkSize int) *SelectMapper { - return &SelectMapper{ - shard: shard, - stmt: stmt, - chunkSize: chunkSize, - cursors: make([]*tagSetCursor, 0), +// NewRawMapper returns a new instance of RawMapper. +func NewRawMapper(sh *Shard, stmt *influxql.SelectStatement) *RawMapper { + return &RawMapper{ + shard: sh, + stmt: stmt, } } -// openMeta opens the mapper for a meta query. -func (lm *SelectMapper) openMeta() error { - return errors.New("not implemented") -} +// Open opens and initializes the mapper. +func (m *RawMapper) Open() error { + // Ignore if node has the shard but hasn't written to it yet. + if m.shard == nil { + return nil + } -// Open opens the local mapper. -func (lm *SelectMapper) Open() error { - if lm.remote != nil { - return lm.remote.Open() + // Rewrite statement. + stmt, err := m.shard.index.RewriteSelectStatement(m.stmt) + if err != nil { + return err } + m.stmt = stmt - var err error + // Set all time-related parameters on the mapper. + m.qmin, m.qmax = influxql.TimeRangeAsEpochNano(m.stmt.Condition) // Get a read-only transaction. 
- tx, err := lm.shard.engine.Begin(false)
+ tx, err := m.shard.engine.Begin(false)
 if err != nil {
 return err
 }
- lm.tx = tx
+ m.tx = tx
 
- if s, ok := lm.stmt.(*influxql.SelectStatement); ok {
- stmt, err := lm.rewriteSelectStatement(s)
- if err != nil {
- return err
- }
- lm.selectStmt = stmt
- lm.rawMode = (s.IsRawQuery && !s.HasDistinct()) || s.IsSimpleDerivative()
- } else {
- return lm.openMeta()
- }
+ // Collect measurements.
+ mms := Measurements(m.shard.index.MeasurementsByName(m.stmt.SourceNames()))
+ m.selectFields = mms.SelectFields(m.stmt)
+ m.selectTags = mms.SelectTags(m.stmt)
+ m.whereFields = mms.WhereFields(m.stmt)
 
- // Set all time-related parameters on the mapper.
- lm.queryTMin, lm.queryTMax = influxql.TimeRangeAsEpochNano(lm.selectStmt.Condition)
-
- if !lm.rawMode {
- if err := lm.initializeMapFunctions(); err != nil {
- return err
- }
-
- // For GROUP BY time queries, limit the number of data points returned by the limit and offset
- d, err := lm.selectStmt.GroupByInterval()
- if err != nil {
+ // Open cursors for each measurement.
+ for _, mm := range mms {
+ if err := m.openMeasurement(mm); err != nil {
 return err
 }
- lm.intervalSize = d.Nanoseconds()
- if lm.queryTMin == 0 || lm.intervalSize == 0 {
- lm.numIntervals = 1
- lm.intervalSize = lm.queryTMax - lm.queryTMin
- } else {
- intervalTop := lm.queryTMax/lm.intervalSize*lm.intervalSize + lm.intervalSize
- intervalBottom := lm.queryTMin / lm.intervalSize * lm.intervalSize
- lm.numIntervals = int((intervalTop - intervalBottom) / lm.intervalSize)
- }
-
- if lm.selectStmt.Limit > 0 || lm.selectStmt.Offset > 0 {
- // ensure that the offset isn't higher than the number of points we'd get
- if lm.selectStmt.Offset > lm.numIntervals {
- return nil
- }
-
- // Take the lesser of either the pre computed number of GROUP BY buckets that
- // will be in the result or the limit passed in by the user
- if lm.selectStmt.Limit < lm.numIntervals {
- lm.numIntervals = lm.selectStmt.Limit
- }
- }
-
- // If we are exceeding our MaxGroupByPoints error out
- if lm.numIntervals > MaxGroupByPoints {
- return errors.New("too many points in the group by interval. maybe you forgot to specify a where time clause?")
- }
-
- // Ensure that the start time for the results is on the start of the window.
- lm.queryTMinWindow = lm.queryTMin
- if lm.intervalSize > 0 && lm.numIntervals > 1 {
- lm.queryTMinWindow = lm.queryTMinWindow / lm.intervalSize * lm.intervalSize
- }
 }
 
- selectFields := newStringSet()
- selectTags := newStringSet()
- whereFields := newStringSet()
-
- // Create the TagSet cursors for the Mapper.
- for _, src := range lm.selectStmt.Sources {
- mm, ok := src.(*influxql.Measurement)
- if !ok {
- return fmt.Errorf("invalid source type: %#v", src)
- }
+ // Remove cursors if there are no SELECT fields.
+ if len(m.selectFields) == 0 {
+ m.cursors = nil
+ }
 
- m := lm.shard.index.Measurement(mm.Name)
- if m == nil {
- // This shard have never received data for the measurement. No Mapper
- // required.
- return nil
- }
+ return nil
+}
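Once Open succeeds, driving the mapper is a short loop. A sketch of a consumer using only the exported API added here, mirroring what the test helpers later in this patch do (the shard and parsed statement are assumed to come from the surrounding store code):

package tsdbexample

import (
	"fmt"

	"bosun.org/_third_party/github.com/influxdb/influxdb/influxql"
	"bosun.org/_third_party/github.com/influxdb/influxdb/tsdb"
)

// drainRaw drains a RawMapper chunk by chunk until all tagset cursors are
// exhausted. Each chunk holds at most ChunkSize values for a single tagset.
func drainRaw(shard *tsdb.Shard, stmt *influxql.SelectStatement) error {
	m := tsdb.NewRawMapper(shard, stmt)
	m.ChunkSize = 100
	if err := m.Open(); err != nil {
		return err
	}
	defer m.Close()

	for {
		c, err := m.NextChunk()
		if err != nil {
			return err
		} else if c == nil {
			return nil // all tagset cursors drained
		}
		out := c.(*tsdb.MapperOutput)
		fmt.Println(out.Name, len(out.Values))
	}
}

 
- // Validate that ANY GROUP BY is not a field for thie measurement.
- if err := m.ValidateGroupBy(lm.selectStmt); err != nil {
- return err
- }
+func (m *RawMapper) openMeasurement(mm *Measurement) error {
+ // Validate that ANY GROUP BY is not a field for the measurement.
+ if err := mm.ValidateGroupBy(m.stmt); err != nil {
+ return err
+ }
 
- // Create tagset cursors and determine various field types within SELECT statement.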
- tsf, err := createTagSetsAndFields(m, lm.selectStmt) - if err != nil { - return err - } - tagSets := tsf.tagSets - selectFields.add(tsf.selectFields...) - selectTags.add(tsf.selectTags...) - whereFields.add(tsf.whereFields...) - - // If we only have tags in our select clause we just return - if len(selectFields) == 0 && len(selectTags) > 0 { - return fmt.Errorf("statement must have at least one field in select clause") - } + // Validate the fields and tags asked for exist and keep track of which are in the select vs the where + selectFields := mm.SelectFields(m.stmt) + selectTags := mm.SelectTags(m.stmt) + fields := uniqueStrings(m.selectFields, m.whereFields) - // Validate that any GROUP BY is not on a field - if err := m.ValidateGroupBy(lm.selectStmt); err != nil { - return err - } + // If we only have tags in our select clause we just return + if len(selectFields) == 0 && len(selectTags) > 0 { + return fmt.Errorf("statement must have at least one field in select clause") + } - // SLIMIT and SOFFSET the unique series - if lm.selectStmt.SLimit > 0 || lm.selectStmt.SOffset > 0 { - if lm.selectStmt.SOffset > len(tagSets) { - tagSets = nil - } else { - if lm.selectStmt.SOffset+lm.selectStmt.SLimit > len(tagSets) { - lm.selectStmt.SLimit = len(tagSets) - lm.selectStmt.SOffset - } + // Calculate tag sets and apply SLIMIT/SOFFSET. + tagSets, err := mm.DimensionTagSets(m.stmt) + if err != nil { + return err + } + tagSets = m.stmt.LimitTagSets(tagSets) - tagSets = tagSets[lm.selectStmt.SOffset : lm.selectStmt.SOffset+lm.selectStmt.SLimit] - } - } + // Create all cursors for reading the data from this shard. + ascending := m.stmt.TimeAscending() + for _, t := range tagSets { + cursors := []*TagsCursor{} - // Create all cursors for reading the data from this shard. - for _, t := range tagSets { - cursors := []*seriesCursor{} - - for i, key := range t.SeriesKeys { - c := lm.tx.Cursor(key) - if c == nil { - // No data exists for this key. - continue - } - seriesTags := lm.shard.index.TagsForSeries(key) - cm := newSeriesCursor(c, t.Filters[i], seriesTags) - cursors = append(cursors, cm) + for i, key := range t.SeriesKeys { + c := m.tx.Cursor(key, fields, m.shard.FieldCodec(mm.Name), ascending) + if c == nil { + continue } - tsc := newTagSetCursor(m.Name, t.Tags, cursors, lm.shard.FieldCodec(m.Name)) - if lm.rawMode { - tsc.pointHeap = newPointHeap() - //Prime the buffers. - for i := 0; i < len(tsc.cursors); i++ { - k, v := tsc.cursors[i].SeekTo(lm.queryTMin) - if k == -1 { - continue - } - p := &pointHeapItem{ - timestamp: k, - value: v, - cursor: tsc.cursors[i], - } - heap.Push(tsc.pointHeap, p) - } - } - lm.cursors = append(lm.cursors, tsc) + seriesTags := m.shard.index.TagsForSeries(key) + cm := NewTagsCursor(c, t.Filters[i], seriesTags) + cursors = append(cursors, cm) } - sort.Sort(tagSetCursors(lm.cursors)) - } - lm.selectFields = selectFields.list() - lm.selectTags = selectTags.list() - lm.whereFields = whereFields.list() + tsc := NewTagSetCursor(mm.Name, t.Tags, cursors) + tsc.SelectFields = m.selectFields + tsc.SelectWhereFields = fields + if ascending { + tsc.Init(m.qmin) + } else { + tsc.Init(m.qmax) + } - // If the query does not aggregate, then at least 1 SELECT field should be present. - if lm.rawMode && len(lm.selectFields) == 0 { - // None of the SELECT fields exist in this data. Wipe out all tagset cursors. 
- lm.cursors = nil + m.cursors = append(m.cursors, tsc) } - return nil -} + sort.Sort(TagSetCursors(m.cursors)) -func (lm *SelectMapper) SetRemote(m Mapper) error { - lm.remote = m return nil } -func (lm *SelectMapper) NextChunk() (interface{}, error) { - // If set, use remote mapper. - if lm.remote != nil { - b, err := lm.remote.NextChunk() - if err != nil { - return nil, err - } else if b == nil { - return nil, nil - } - - mo := &MapperOutput{} - if err := json.Unmarshal(b.([]byte), mo); err != nil { - return nil, err - } else if len(mo.Values) == 0 { - // Mapper on other node sent 0 values so it's done. - return nil, nil - } - return mo, nil +// Close closes the mapper. +func (m *RawMapper) Close() { + if m != nil && m.tx != nil { + m.tx.Rollback() } +} - // Remote mapper not set so get values from local shard. - if lm.rawMode { - return lm.nextChunkRaw() - } +// TagSets returns the list of tag sets for which this mapper has data. +func (m *RawMapper) TagSets() []string { return TagSetCursors(m.cursors).Keys() } - return lm.nextChunkAgg() -} +// Fields returns all SELECT fields. +func (m *RawMapper) Fields() []string { return append(m.selectFields, m.selectTags...) } -// nextChunkRaw returns the next chunk of data. Data comes in the same order as the -// tags return by TagSets. A chunk never contains data for more than 1 tagset. +// NextChunk returns the next chunk of data. +// Data is ordered the same as TagSets. Each chunk contains one tag set. // If there is no more data for any tagset, nil will be returned. -func (lm *SelectMapper) nextChunkRaw() (interface{}, error) { +func (m *RawMapper) NextChunk() (interface{}, error) { var output *MapperOutput for { - if lm.currCursorIndex == len(lm.cursors) { - // All tagset cursors processed. NextChunk'ing complete. + // All tagset cursors processed. NextChunk'ing complete. + if m.cursorIndex == len(m.cursors) { return nil, nil } - cursor := lm.cursors[lm.currCursorIndex] - k, v, t := cursor.Next(lm.queryTMin, lm.queryTMax, lm.selectFields, lm.whereFields) + cursor := m.cursors[m.cursorIndex] + + k, v := cursor.Next(m.qmin, m.qmax) if v == nil { // Tagset cursor is empty, move to next one. - lm.currCursorIndex++ + m.cursorIndex++ if output != nil { // There is data, so return it and continue when next called. return output, nil @@ -324,627 +306,359 @@ func (lm *SelectMapper) nextChunkRaw() (interface{}, error) { output = &MapperOutput{ Name: cursor.measurement, Tags: cursor.tags, - Fields: lm.selectFields, + Fields: m.selectFields, cursorKey: cursor.key(), } } - value := &MapperValue{Time: k, Value: v, Tags: t} - output.Values = append(output.Values, value) - if len(output.Values) == lm.chunkSize { + + output.Values = append(output.Values, &MapperValue{ + Time: k, + Value: v, + Tags: cursor.Tags(), + }) + + if len(output.Values) == m.ChunkSize { return output, nil } } } -// nextChunkAgg returns the next chunk of data, which is the next interval of data -// for the current tagset. Tagsets are always processed in the same order as that -// returned by AvailTagsSets(). When there is no more data for any tagset nil -// is returned. -func (lm *SelectMapper) nextChunkAgg() (interface{}, error) { - var output *MapperOutput - for { - if lm.currCursorIndex == len(lm.cursors) { - // All tagset cursors processed. NextChunk'ing complete. - return nil, nil - } - tsc := lm.cursors[lm.currCursorIndex] - tmin, tmax := lm.nextInterval() +// AggregateMapper runs the map phase for aggregate SELECT queries. 
+type AggregateMapper struct { + shard *Shard + stmt *influxql.SelectStatement + qmin, qmax int64 // query time range - if tmin < 0 { - // All intervals complete for this tagset. Move to the next tagset. - lm.currInterval = 0 - lm.currCursorIndex++ - continue - } + tx Tx + cursors []*TagSetCursor + cursorIndex int - // Prep the return data for this tagset. This will hold data for a single interval - // for a single tagset. - if output == nil { - output = &MapperOutput{ - Name: tsc.measurement, - Tags: tsc.tags, - Fields: lm.selectFields, - Values: make([]*MapperValue, 1), - cursorKey: tsc.key(), - } - // Aggregate values only use the first entry in the Values field. Set the time - // to the start of the interval. - output.Values[0] = &MapperValue{ - Time: tmin, - Value: make([]interface{}, 0)} - } + interval int // Current interval for which data is being fetched. + intervalN int // Maximum number of intervals to return. + intervalSize int64 // Size of each interval. + qminWindow int64 // Minimum time of the query floored to start of interval. - // Always clamp tmin. This can happen as bucket-times are bucketed to the nearest - // interval, and this can be less than the times in the query. - qmin := tmin - if qmin < lm.queryTMin { - qmin = lm.queryTMin - } + mapFuncs []mapFunc // The mapping functions. + fieldNames []string // the field name being read for mapping. - tsc.pointHeap = newPointHeap() - for i := range lm.mapFuncs { - // Prime the tagset cursor for the start of the interval. This is not ideal, as - // it should really calculate the values all in 1 pass, but that would require - // changes to the mapper functions, which can come later. - // Prime the buffers. - for i := 0; i < len(tsc.cursors); i++ { - k, v := tsc.cursors[i].SeekTo(tmin) - if k == -1 || k > tmax { - continue - } - p := &pointHeapItem{ - timestamp: k, - value: v, - cursor: tsc.cursors[i], - } - heap.Push(tsc.pointHeap, p) - } - // Wrap the tagset cursor so it implements the mapping functions interface. - f := func() (time int64, value interface{}) { - k, v, _ := tsc.Next(qmin, tmax, []string{lm.fieldNames[i]}, lm.whereFields) - return k, v - } - - tagSetCursor := &aggTagSetCursor{ - nextFunc: f, - } - - // Execute the map function which walks the entire interval, and aggregates - // the result. - values := output.Values[0].Value.([]interface{}) - output.Values[0].Value = append(values, lm.mapFuncs[i](tagSetCursor)) - } - return output, nil - } + selectFields []string + selectTags []string + whereFields []string } -// nextInterval returns the next interval for which to return data. If start is less than 0 -// there are no more intervals. -func (lm *SelectMapper) nextInterval() (start, end int64) { - t := lm.queryTMinWindow + int64(lm.currInterval+lm.selectStmt.Offset)*lm.intervalSize - - // Onto next interval. - lm.currInterval++ - if t > lm.queryTMax || lm.currInterval > lm.numIntervals { - start, end = -1, 1 - } else { - start, end = t, t+lm.intervalSize +// NewAggregateMapper returns a new instance of AggregateMapper. +func NewAggregateMapper(sh *Shard, stmt *influxql.SelectStatement) *AggregateMapper { + return &AggregateMapper{ + shard: sh, + stmt: stmt, } - return } -// initializeMapFunctions initialize the mapping functions for the mapper. This only applies -// to aggregate queries. -func (lm *SelectMapper) initializeMapFunctions() error { - var err error - // Set up each mapping function for this statement. 
- aggregates := lm.selectStmt.FunctionCalls() - lm.mapFuncs = make([]influxql.MapFunc, len(aggregates)) - lm.fieldNames = make([]string, len(lm.mapFuncs)) - for i, c := range aggregates { - lm.mapFuncs[i], err = influxql.InitializeMapFunc(c) - if err != nil { - return err - } +// Open opens and initializes the mapper. +func (m *AggregateMapper) Open() error { + // Ignore if node has the shard but hasn't written to it yet. + if m.shard == nil { + return nil + } - // Check for calls like `derivative(lmean(value), 1d)` - var nested *influxql.Call = c - if fn, ok := c.Args[0].(*influxql.Call); ok { - nested = fn - } - switch lit := nested.Args[0].(type) { - case *influxql.VarRef: - lm.fieldNames[i] = lit.Val - case *influxql.Distinct: - if c.Name != "count" { - return fmt.Errorf("aggregate call didn't contain a field %s", c.String()) - } - lm.fieldNames[i] = lit.Val - default: - return fmt.Errorf("aggregate call didn't contain a field %s", c.String()) - } + // Rewrite statement. + stmt, err := m.shard.index.RewriteSelectStatement(m.stmt) + if err != nil { + return err } + m.stmt = stmt - return nil -} + // Set all time-related parameters on the mapper. + m.qmin, m.qmax = influxql.TimeRangeAsEpochNano(m.stmt.Condition) -// rewriteSelectStatement performs any necessary query re-writing. -func (lm *SelectMapper) rewriteSelectStatement(stmt *influxql.SelectStatement) (*influxql.SelectStatement, error) { - var err error - // Expand regex expressions in the FROM clause. - sources, err := expandSources(stmt.Sources, lm.shard.index) - if err != nil { - return nil, err + if err := m.initializeMapFunctions(); err != nil { + return err } - stmt.Sources = sources - // Expand wildcards in the fields or GROUP BY. - stmt, err = lm.expandWildcards(stmt) + + // For GROUP BY time queries, limit the number of data points returned by the limit and offset + d, err := m.stmt.GroupByInterval() if err != nil { - return nil, err + return err } - stmt.RewriteDistinct() - return stmt, nil -} -// expandWildcards returns a new SelectStatement with wildcards expanded -// If only a `SELECT *` is present, without a `GROUP BY *`, both tags and fields expand in the SELECT -// If a `SELECT *` and a `GROUP BY *` are both present, then only fiels are expanded in the `SELECT` and only -// tags are expanded in the `GROUP BY` -func (lm *SelectMapper) expandWildcards(stmt *influxql.SelectStatement) (*influxql.SelectStatement, error) { - // If there are no wildcards in the statement, return it as-is. - if !stmt.HasWildcard() { - return stmt, nil - } - // Use sets to avoid duplicate field names. - fieldSet := map[string]struct{}{} - dimensionSet := map[string]struct{}{} - var fields influxql.Fields - var dimensions influxql.Dimensions - - // keep track of where the wildcards are in the select statement - hasFieldWildcard := stmt.HasFieldWildcard() - hasDimensionWildcard := stmt.HasDimensionWildcard() - - // Iterate measurements in the FROM clause getting the fields & dimensions for each. - for _, src := range stmt.Sources { - if m, ok := src.(*influxql.Measurement); ok { - // Lookup the measurement in the database. - mm := lm.shard.index.Measurement(m.Name) - if mm == nil { - // This shard have never received data for the measurement. No Mapper - // required. - return stmt, nil - } - // Get the fields for this measurement. 
- for _, name := range mm.FieldNames() { - if _, ok := fieldSet[name]; ok { - continue - } - fieldSet[name] = struct{}{} - fields = append(fields, &influxql.Field{Expr: &influxql.VarRef{Val: name}}) - } + m.intervalSize = d.Nanoseconds() + if m.qmin == 0 || m.intervalSize == 0 { + m.intervalN = 1 + m.intervalSize = m.qmax - m.qmin + } else { + intervalTop := m.qmax/m.intervalSize*m.intervalSize + m.intervalSize + intervalBottom := m.qmin / m.intervalSize * m.intervalSize + m.intervalN = int((intervalTop - intervalBottom) / m.intervalSize) + } - // Add tags to fields if a field wildcard was provided and a dimension wildcard was not. - if hasFieldWildcard && !hasDimensionWildcard { - for _, t := range mm.TagKeys() { - if _, ok := fieldSet[t]; ok { - continue - } - fieldSet[t] = struct{}{} - fields = append(fields, &influxql.Field{Expr: &influxql.VarRef{Val: t}}) - } - } + if m.stmt.Limit > 0 || m.stmt.Offset > 0 { + // ensure that the offset isn't higher than the number of points we'd get + if m.stmt.Offset > m.intervalN { + return nil + } - // Get the dimensions for this measurement. - if hasDimensionWildcard { - for _, t := range mm.TagKeys() { - if _, ok := dimensionSet[t]; ok { - continue - } - dimensionSet[t] = struct{}{} - dimensions = append(dimensions, &influxql.Dimension{Expr: &influxql.VarRef{Val: t}}) - } - } + // Take the lesser of either the pre computed number of GROUP BY buckets that + // will be in the result or the limit passed in by the user + if m.stmt.Limit < m.intervalN { + m.intervalN = m.stmt.Limit } } - // Return a new SelectStatement with the wild cards rewritten. - return stmt.RewriteWildcards(fields, dimensions), nil -} - -// TagSets returns the list of TagSets for which this mapper has data. -func (lm *SelectMapper) TagSets() []string { - if lm.remote != nil { - return lm.remote.TagSets() + // If we are exceeding our MaxGroupByPoints error out + if m.intervalN > MaxGroupByPoints { + return errors.New("too many points in the group by interval. maybe you forgot to specify a where time clause?") } - return tagSetCursors(lm.cursors).Keys() -} -// Fields returns any SELECT fields. If this Mapper is not processing a SELECT query -// then an empty slice is returned. -func (lm *SelectMapper) Fields() []string { - if lm.remote != nil { - return lm.remote.Fields() + // Ensure that the start time for the results is on the start of the window. + m.qminWindow = m.qmin + if m.intervalSize > 0 && m.intervalN > 1 { + m.qminWindow = m.qminWindow / m.intervalSize * m.intervalSize } - return append(lm.selectFields, lm.selectTags...) -} -// Close closes the mapper. -func (lm *SelectMapper) Close() { - if lm.remote != nil { - lm.remote.Close() - return - } - if lm != nil && lm.tx != nil { - _ = lm.tx.Rollback() + // Get a read-only transaction. + tx, err := m.shard.engine.Begin(false) + if err != nil { + return err } -} + m.tx = tx -// aggTagSetCursor wraps a standard tagSetCursor, such that the values it emits are aggregated -// by intervals. -type aggTagSetCursor struct { - nextFunc func() (time int64, value interface{}) -} + // Collect measurements. + mms := Measurements(m.shard.index.MeasurementsByName(m.stmt.SourceNames())) + m.selectFields = mms.SelectFields(m.stmt) + m.selectTags = mms.SelectTags(m.stmt) + m.whereFields = mms.WhereFields(m.stmt) -// Next returns the next value for the aggTagSetCursor. It implements the interface expected -// by the mapping functions. 
-func (a *aggTagSetCursor) Next() (time int64, value interface{}) { - return a.nextFunc() -} - -type pointHeapItem struct { - timestamp int64 - value []byte - cursor *seriesCursor // cursor whence pointHeapItem came -} - -type pointHeap []*pointHeapItem + // Open cursors for each measurement. + for _, mm := range mms { + if err := m.openMeasurement(mm); err != nil { + return err + } + } -func newPointHeap() *pointHeap { - q := make(pointHeap, 0) - heap.Init(&q) - return &q + return nil } -func (pq pointHeap) Len() int { return len(pq) } +func (m *AggregateMapper) openMeasurement(mm *Measurement) error { + // Validate that ANY GROUP BY is not a field for the measurement. + if err := mm.ValidateGroupBy(m.stmt); err != nil { + return err + } -func (pq pointHeap) Less(i, j int) bool { - // We want a min-heap (points in chronological order), so use less than. - return pq[i].timestamp < pq[j].timestamp -} + // Validate the fields and tags asked for exist and keep track of which are in the select vs the where + selectFields := mm.SelectFields(m.stmt) + selectTags := mm.SelectTags(m.stmt) -func (pq pointHeap) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] } + // If we only have tags in our select clause we just return + if len(selectFields) == 0 && len(selectTags) > 0 { + return fmt.Errorf("statement must have at least one field in select clause") + } -func (pq *pointHeap) Push(x interface{}) { - item := x.(*pointHeapItem) - *pq = append(*pq, item) -} + // Calculate tag sets and apply SLIMIT/SOFFSET. + tagSets, err := mm.DimensionTagSets(m.stmt) + if err != nil { + return err + } + tagSets = m.stmt.LimitTagSets(tagSets) -func (pq *pointHeap) Pop() interface{} { - old := *pq - n := len(old) - item := old[n-1] - *pq = old[0 : n-1] - return item -} + // Create all cursors for reading the data from this shard. + for _, t := range tagSets { + cursors := []*TagsCursor{} -// tagSetCursor is virtual cursor that iterates over mutiple series cursors, as though it were -// a single series. -type tagSetCursor struct { - measurement string // Measurement name - tags map[string]string // Tag key-value pairs - cursors []*seriesCursor // Underlying series cursors. - decoder *FieldCodec // decoder for the raw data bytes - - // pointHeap is a min-heap, ordered by timestamp, that contains the next - // point from each seriesCursor. Queries sometimes pull points from - // thousands of series. This makes it reasonably efficient to find the - // point with the next lowest timestamp among the thousands of series that - // the query is pulling points from. - // Performance profiling shows that this lookahead needs to be part - // of the tagSetCursor type and not part of the the cursors type. - pointHeap *pointHeap - - // Memomize the cursor's tagset-based key. Profiling shows that calculating this - // is significant CPU cost, and it only needs to be done once. - memokey string -} - -// tagSetCursors represents a sortable slice of tagSetCursors. 
-type tagSetCursors []*tagSetCursor
+ for i, key := range t.SeriesKeys {
+ fields := slices.Union(selectFields, m.fieldNames, false)
+ c := m.tx.Cursor(key, fields, m.shard.FieldCodec(mm.Name), true)
+ if c == nil {
+ continue
+ }
 
-func (a tagSetCursors) Len() int { return len(a) }
-func (a tagSetCursors) Less(i, j int) bool { return a[i].key() < a[j].key() }
-func (a tagSetCursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+ seriesTags := m.shard.index.TagsForSeries(key)
+ cursors = append(cursors, NewTagsCursor(c, t.Filters[i], seriesTags))
+ }
 
-func (a tagSetCursors) Keys() []string {
- keys := []string{}
- for i := range a {
- keys = append(keys, a[i].key())
+ tsc := NewTagSetCursor(mm.Name, t.Tags, cursors)
+ tsc.Init(m.qmin)
+ m.cursors = append(m.cursors, tsc)
 	}
- sort.Strings(keys)
- return keys
-}
+	sort.Sort(TagSetCursors(m.cursors))
 
-// newTagSetCursor returns a tagSetCursor
-func newTagSetCursor(m string, t map[string]string, c []*seriesCursor, d *FieldCodec) *tagSetCursor {
- tsc := &tagSetCursor{
- measurement: m,
- tags: t,
- cursors: c,
- decoder: d,
- pointHeap: newPointHeap(),
- }
+ return nil
+}
 
- return tsc
-}
-
-func (tsc *tagSetCursor) key() string {
- if tsc.memokey == "" {
- tsc.memokey = formMeasurementTagSetKey(tsc.measurement, tsc.tags)
- }
- return tsc.memokey
-}
+// initializeMapFunctions initializes the mapping functions for the mapper.
+func (m *AggregateMapper) initializeMapFunctions() error {
+	// Set up each mapping function for this statement.
+	aggregates := m.stmt.FunctionCalls()
+	m.mapFuncs = make([]mapFunc, len(aggregates))
+	m.fieldNames = make([]string, len(m.mapFuncs))
 
-// Next returns the next matching series-key, timestamp byte slice and meta tags for the tagset. Filtering
-// is enforced on the values. If there is no matching value, then a nil result is returned.
-func (tsc *tagSetCursor) Next(tmin, tmax int64, selectFields, whereFields []string) (int64, interface{}, map[string]string) {
-	for {
-		// If we're out of points, we're done.
-		if tsc.pointHeap.Len() == 0 {
-			return -1, nil, nil
+	for i, c := range aggregates {
+		mfn, err := initializeMapFunc(c)
+		if err != nil {
+			return err
 		}
+		m.mapFuncs[i] = mfn
 
-		// Grab the next point with the lowest timestamp.
-		p := heap.Pop(tsc.pointHeap).(*pointHeapItem)
-
-		// We're done if the point is outside the query's time range [tmin:tmax).
-		if p.timestamp != tmin && (tmin > p.timestamp || p.timestamp >= tmax) {
-			return -1, nil, nil
+		// Check for calls like `derivative(lmean(value), 1d)`
+		var nested *influxql.Call = c
+		if fn, ok := c.Args[0].(*influxql.Call); ok {
+			nested = fn
 		}
-
-		// Decode the raw point.
-		value := tsc.decodeRawPoint(p, selectFields, whereFields)
-		timestamp := p.timestamp
-		tags := p.cursor.tags
-
-		// Advance the cursor
-		nextKey, nextVal := p.cursor.Next()
-		if nextKey != -1 {
-			*p = pointHeapItem{
-				timestamp: nextKey,
-				value:     nextVal,
-				cursor:    p.cursor,
+		switch lit := nested.Args[0].(type) {
+		case *influxql.VarRef:
+			m.fieldNames[i] = lit.Val
+		case *influxql.Distinct:
+			if c.Name != "count" {
+				return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
 			}
-			heap.Push(tsc.pointHeap, p)
+			m.fieldNames[i] = lit.Val
+		default:
+			return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
 		}
+	}
 
-		// Value didn't match, look for the next one.
-		if value == nil {
-			continue
-		}
+	return nil
+}
 
-		return timestamp, value, tags
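The nested-call handling above is easiest to see on a concrete statement. A minimal sketch, assuming the vendored influxql package's ParseStatement entry point and the AST types already used in this file (the query string is illustrative):

package main

import (
	"fmt"

	"bosun.org/_third_party/github.com/influxdb/influxdb/influxql"
)

func main() {
	// For derivative(mean(value), 1d) the mapper must read the inner call's field.
	stmt, err := influxql.ParseStatement(`SELECT derivative(mean(value), 1d) FROM cpu`)
	if err != nil {
		panic(err)
	}
	sel := stmt.(*influxql.SelectStatement)

	for _, c := range sel.FunctionCalls() {
		// Unwrap one level of nesting, as initializeMapFunctions does.
		nested := c
		if fn, ok := c.Args[0].(*influxql.Call); ok {
			nested = fn
		}
		if ref, ok := nested.Args[0].(*influxql.VarRef); ok {
			fmt.Printf("%s reads field %q\n", c.Name, ref.Val) // derivative reads field "value"
		}
	}
}

+
+// Close closes the mapper.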
+func (m *AggregateMapper) Close() {
+	if m != nil && m.tx != nil {
+		m.tx.Rollback()
+	}
+}
 
-// decodeRawPoint decodes raw point data into field names & values and does WHERE filtering.
-func (tsc *tagSetCursor) decodeRawPoint(p *pointHeapItem, selectFields, whereFields []string) interface{} {
-	if len(selectFields) > 1 {
-		if fieldsWithNames, err := tsc.decoder.DecodeFieldsWithNames(p.value); err == nil {
-			// if there's a where clause, make sure we don't need to filter this value
-			if p.cursor.filter != nil && !matchesWhere(p.cursor.filter, fieldsWithNames) {
-				return nil
-			}
+// TagSets returns the list of tag sets for which this mapper has data.
+func (m *AggregateMapper) TagSets() []string { return TagSetCursors(m.cursors).Keys() }
 
-			return fieldsWithNames
-		}
-	}
+// Fields returns all SELECT fields.
+func (m *AggregateMapper) Fields() []string { return append(m.selectFields, m.selectTags...) }
 
-	// With only 1 field SELECTed, decoding all fields may be avoidable, which is faster.
-	value, err := tsc.decoder.DecodeByName(selectFields[0], p.value)
-	if err != nil {
-		return nil
-	}
+// NextChunk returns the next interval of data.
+// Tagsets are always processed in the same order as TagSets().
+// When there is no more data for any tagset nil is returned.
+func (m *AggregateMapper) NextChunk() (interface{}, error) {
+	var tmin, tmax int64
+	for {
+		// All tagset cursors processed. NextChunk'ing complete.
+		if m.cursorIndex == len(m.cursors) {
+			return nil, nil
+		}
 
-	// If there's a WHERE clase, see if we need to filter
-	if p.cursor.filter != nil {
-		// See if the WHERE is only on this field or on one or more other fields.
-		// If the latter, we'll have to decode everything
-		if len(whereFields) == 1 && whereFields[0] == selectFields[0] {
-			if !matchesWhere(p.cursor.filter, map[string]interface{}{selectFields[0]: value}) {
-				value = nil
-			}
-		} else { // Decode everything
-			fieldsWithNames, err := tsc.decoder.DecodeFieldsWithNames(p.value)
-			if err != nil || !matchesWhere(p.cursor.filter, fieldsWithNames) {
-				value = nil
-			}
+		// All intervals complete for this tagset. Move to the next tagset.
+		tmin, tmax = m.nextInterval()
+		if tmin < 0 {
+			m.interval = 0
+			m.cursorIndex++
+			continue
 		}
+		break
 	}
 
-	return value
-}
-
-// seriesCursor is a cursor that walks a single series. It provides lookahead functionality.
-type seriesCursor struct {
-	cursor Cursor // BoltDB cursor for a series
-	filter influxql.Expr
-	tags   map[string]string
-	seekto int64
-	seekResult struct {
-		k int64
-		v []byte
+	// Prep the return data for this tagset.
+	// This will hold data for a single interval for a single tagset.
+	tsc := m.cursors[m.cursorIndex]
+	output := &MapperOutput{
+		Name:      tsc.measurement,
+		Tags:      tsc.tags,
+		Fields:    m.selectFields,
+		Values:    make([]*MapperValue, 1),
+		cursorKey: tsc.key(),
 	}
-}
 
-// newSeriesCursor returns a new instance of a series cursor.
-func newSeriesCursor(cur Cursor, filter influxql.Expr, tags map[string]string) *seriesCursor {
-	return &seriesCursor{
-		cursor: cur,
-		filter: filter,
-		tags:   tags,
-		seekto: -1,
+	// Aggregate values only use the first entry in the Values field.
+	// Set the time to the start of the interval.
+	output.Values[0] = &MapperValue{
+		Time:  tmin,
+		Value: make([]interface{}, 0),
 	}
-}
 
-// Seek positions returning the timestamp and value at that key.
-func (sc *seriesCursor) SeekTo(key int64) (timestamp int64, value []byte) { - if sc.seekto != -1 && sc.seekto < key && (sc.seekResult.k == -1 || sc.seekResult.k >= key) { - // we've seeked on this cursor. This seek is after that previous cached seek - // and the result it gave was after the key for this seek. - // - // In this case, any seek would just return what we got before, so there's - // no point in reseeking. - return sc.seekResult.k, sc.seekResult.v - } - k, v := sc.cursor.Seek(u64tob(uint64(key))) - if k == nil { - timestamp = -1 - } else { - timestamp, value = int64(btou64(k)), v + // Always clamp tmin and tmax. This can happen as bucket-times are bucketed to the nearest + // interval. This is necessary to grab the "partial" buckets at the beginning and end of the time range + qmin, qmax := tmin, tmax + if qmin < m.qmin { + qmin = m.qmin } - sc.seekto = key - sc.seekResult.k = timestamp - sc.seekResult.v = v - return -} - -// Next returns the next timestamp and value from the cursor. -func (sc *seriesCursor) Next() (key int64, value []byte) { - // calling next on this cursor means that we need to invalidate the seek - sc.seekto = -1 - sc.seekResult.k = 0 - sc.seekResult.v = nil - k, v := sc.cursor.Next() - if k == nil { - key = -1 - } else { - key, value = int64(btou64(k)), v + if qmax > m.qmax { + qmax = m.qmax + 1 } - return -} - -type tagSetsAndFields struct { - tagSets []*influxql.TagSet - selectFields []string - selectTags []string - whereFields []string -} -// expandSources expands regex sources and removes duplicates. -// NOTE: sources must be normalized (db and rp set) before calling this function. -func expandSources(sources influxql.Sources, di *DatabaseIndex) (influxql.Sources, error) { - // Use a map as a set to prevent duplicates. Two regexes might produce - // duplicates when expanded. - set := map[string]influxql.Source{} - names := []string{} - // Iterate all sources, expanding regexes when they're found. - for _, source := range sources { - switch src := source.(type) { - case *influxql.Measurement: - if src.Regex == nil { - name := src.String() - set[name] = src - names = append(names, name) + tsc.heap = newPointHeap() + for i := range m.mapFuncs { + // Prime the tagset cursor for the start of the interval. This is not ideal, as + // it should really calculate the values all in 1 pass, but that would require + // changes to the mapper functions, which can come later. + // Prime the buffers. + for i := 0; i < len(tsc.cursors); i++ { + k, v := tsc.cursors[i].SeekTo(qmin) + if k == -1 || k > tmax { continue } - // Get measurements from the database that match the regex. - measurements := di.measurementsByRegex(src.Regex.Val) - // Add those measurements to the set. - for _, m := range measurements { - m2 := &influxql.Measurement{ - Database: src.Database, - RetentionPolicy: src.RetentionPolicy, - Name: m.Name, - } - name := m2.String() - if _, ok := set[name]; !ok { - set[name] = m2 - names = append(names, name) - } - } - default: - return nil, fmt.Errorf("expandSources: unsuported source type: %T", source) - } - } - // Sort the list of source names. - sort.Strings(names) - // Convert set to a list of Sources. - expanded := make(influxql.Sources, 0, len(set)) - for _, name := range names { - expanded = append(expanded, set[name]) - } - return expanded, nil -} -// createTagSetsAndFields returns the tagsets and various fields given a measurement and -// SELECT statement. 
-func createTagSetsAndFields(m *Measurement, stmt *influxql.SelectStatement) (*tagSetsAndFields, error) { - _, tagKeys, err := stmt.Dimensions.Normalize() - if err != nil { - return nil, err - } + heap.Push(tsc.heap, &pointHeapItem{ + timestamp: k, + value: v, + cursor: tsc.cursors[i], + }) + } - sfs := newStringSet() - sts := newStringSet() - wfs := newStringSet() + tsc.SelectFields = []string{m.fieldNames[i]} + tsc.SelectWhereFields = uniqueStrings([]string{m.fieldNames[i]}, m.whereFields) - // Validate the fields and tags asked for exist and keep track of which are in the select vs the where - for _, n := range stmt.NamesInSelect() { - if m.HasField(n) { - sfs.add(n) - continue + // Build a map input from the cursor. + input := &MapInput{ + TMin: -1, } - if m.HasTagKey(n) { - sts.add(n) + if len(m.stmt.Dimensions) > 0 && !m.stmt.HasTimeFieldSpecified() { + input.TMin = tmin } - } - for _, n := range stmt.NamesInDimension() { - if m.HasTagKey(n) { - tagKeys = append(tagKeys, n) + for k, v := tsc.Next(qmin, qmax); k != -1; k, v = tsc.Next(qmin, qmax) { + input.Items = append(input.Items, MapItem{ + Timestamp: k, + Value: v, + Fields: tsc.Fields(), + Tags: tsc.Tags(), + }) } - } - for _, n := range stmt.NamesInWhere() { - if n == "time" { - continue - } - if m.HasField(n) { - wfs.add(n) - continue - } - } - - // Get the sorted unique tag sets for this statement. - tagSets, err := m.TagSets(stmt, tagKeys) - if err != nil { - return nil, err + // Execute the map function which walks the entire interval, and aggregates the result. + mapValue := m.mapFuncs[i](input) + output.Values[0].Value = append(output.Values[0].Value.([]interface{}), mapValue) } - return &tagSetsAndFields{ - tagSets: tagSets, - selectFields: sfs.list(), - selectTags: sts.list(), - whereFields: wfs.list(), - }, nil + return output, nil } -// matchesFilter returns true if the value matches the where clause -func matchesWhere(f influxql.Expr, fields map[string]interface{}) bool { - if ok, _ := influxql.Eval(f, fields).(bool); !ok { - return false +// nextInterval returns the next interval for which to return data. +// If start is less than 0 there are no more intervals. +func (m *AggregateMapper) nextInterval() (start, end int64) { + t := m.qminWindow + int64(m.interval+m.stmt.Offset)*m.intervalSize + + // On to next interval. + m.interval++ + if t > m.qmax || m.interval > m.intervalN { + start, end = -1, 1 + } else { + start, end = t, t+m.intervalSize } - return true + return } -func formMeasurementTagSetKey(name string, tags map[string]string) string { - if len(tags) == 0 { - return name +// uniqueStrings returns a slice of unique strings from all lists in a. +func uniqueStrings(a ...[]string) []string { + // Calculate unique set of strings. + m := make(map[string]struct{}) + for _, strs := range a { + for _, str := range strs { + m[str] = struct{}{} + } } - return strings.Join([]string{name, string(MarshalTags(tags))}, "|") -} -// btou64 converts an 8-byte slice into an uint64. -func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } + // Convert back to slice. 
+ result := make([]string, 0, len(m)) + for k := range m { + result = append(result, k) + } + return result +} diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/mapper_test.go b/_third_party/github.com/influxdb/influxdb/tsdb/mapper_test.go index 4e96608550..81d50b24fc 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/mapper_test.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/mapper_test.go @@ -13,6 +13,7 @@ import ( "time" "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" "bosun.org/_third_party/github.com/influxdb/influxdb/tsdb" ) @@ -22,20 +23,20 @@ func TestShardMapper_RawMapperTagSetsFields(t *testing.T) { shard := mustCreateShard(tmpDir) pt1time := time.Unix(1, 0).UTC() - pt1 := tsdb.NewPoint( + pt1 := models.NewPoint( "cpu", map[string]string{"host": "serverA", "region": "us-east"}, map[string]interface{}{"idle": 60}, pt1time, ) pt2time := time.Unix(2, 0).UTC() - pt2 := tsdb.NewPoint( + pt2 := models.NewPoint( "cpu", map[string]string{"host": "serverB", "region": "us-east"}, map[string]interface{}{"load": 60}, pt2time, ) - err := shard.WritePoints([]tsdb.Point{pt1, pt2}) + err := shard.WritePoints([]models.Point{pt1, pt2}) if err != nil { t.Fatalf(err.Error()) } @@ -112,20 +113,20 @@ func TestShardMapper_WriteAndSingleMapperRawQuerySingleValue(t *testing.T) { shard := mustCreateShard(tmpDir) pt1time := time.Unix(1, 0).UTC() - pt1 := tsdb.NewPoint( + pt1 := models.NewPoint( "cpu", map[string]string{"host": "serverA", "region": "us-east"}, map[string]interface{}{"load": 42}, pt1time, ) pt2time := time.Unix(2, 0).UTC() - pt2 := tsdb.NewPoint( + pt2 := models.NewPoint( "cpu", map[string]string{"host": "serverB", "region": "us-east"}, map[string]interface{}{"load": 60}, pt2time, ) - err := shard.WritePoints([]tsdb.Point{pt1, pt2}) + err := shard.WritePoints([]models.Point{pt1, pt2}) if err != nil { t.Fatalf(err.Error()) } @@ -219,20 +220,20 @@ func TestShardMapper_WriteAndSingleMapperRawQueryMultiValue(t *testing.T) { shard := mustCreateShard(tmpDir) pt1time := time.Unix(1, 0).UTC() - pt1 := tsdb.NewPoint( + pt1 := models.NewPoint( "cpu", map[string]string{"host": "serverA", "region": "us-east"}, map[string]interface{}{"foo": 42, "bar": 43}, pt1time, ) pt2time := time.Unix(2, 0).UTC() - pt2 := tsdb.NewPoint( + pt2 := models.NewPoint( "cpu", map[string]string{"host": "serverB", "region": "us-east"}, map[string]interface{}{"foo": 60, "bar": 61}, pt2time, ) - err := shard.WritePoints([]tsdb.Point{pt1, pt2}) + err := shard.WritePoints([]models.Point{pt1, pt2}) if err != nil { t.Fatalf(err.Error()) } @@ -272,20 +273,20 @@ func TestShardMapper_WriteAndSingleMapperRawQueryMultiSource(t *testing.T) { shard := mustCreateShard(tmpDir) pt1time := time.Unix(1, 0).UTC() - pt1 := tsdb.NewPoint( + pt1 := models.NewPoint( "cpu0", map[string]string{"host": "serverA", "region": "us-east"}, map[string]interface{}{"foo": 42}, pt1time, ) pt2time := time.Unix(2, 0).UTC() - pt2 := tsdb.NewPoint( + pt2 := models.NewPoint( "cpu1", map[string]string{"host": "serverB", "region": "us-east"}, map[string]interface{}{"bar": 60}, pt2time, ) - err := shard.WritePoints([]tsdb.Point{pt1, pt2}) + err := shard.WritePoints([]models.Point{pt1, pt2}) if err != nil { t.Fatalf(err.Error()) } @@ -337,20 +338,20 @@ func TestShardMapper_WriteAndSingleMapperAggregateQuery(t *testing.T) { shard := mustCreateShard(tmpDir) pt1time := time.Unix(10, 0).UTC() - pt1 := tsdb.NewPoint( + pt1 := models.NewPoint( "cpu", 
map[string]string{"host": "serverA", "region": "us-east"}, map[string]interface{}{"value": 1}, pt1time, ) pt2time := time.Unix(20, 0).UTC() - pt2 := tsdb.NewPoint( + pt2 := models.NewPoint( "cpu", map[string]string{"host": "serverB", "region": "us-east"}, map[string]interface{}{"value": 60}, pt2time, ) - err := shard.WritePoints([]tsdb.Point{pt1, pt2}) + err := shard.WritePoints([]models.Point{pt1, pt2}) if err != nil { t.Fatalf(err.Error()) } @@ -415,7 +416,7 @@ func TestShardMapper_WriteAndSingleMapperAggregateQuery(t *testing.T) { for _, tt := range tests { stmt := mustParseSelectStatement(tt.stmt) - mapper := openSelectMapperOrFail(t, shard, stmt) + mapper := openAggregateMapperOrFail(t, shard, stmt) for i := range tt.expected { got := aggIntervalAsJson(t, mapper) @@ -433,20 +434,20 @@ func TestShardMapper_SelectMapperTagSetsFields(t *testing.T) { shard := mustCreateShard(tmpDir) pt1time := time.Unix(1, 0).UTC() - pt1 := tsdb.NewPoint( + pt1 := models.NewPoint( "cpu", map[string]string{"host": "serverA", "region": "us-east"}, map[string]interface{}{"value": 42}, pt1time, ) pt2time := time.Unix(2, 0).UTC() - pt2 := tsdb.NewPoint( + pt2 := models.NewPoint( "cpu", map[string]string{"host": "serverB", "region": "us-east"}, map[string]interface{}{"value": 60}, pt2time, ) - err := shard.WritePoints([]tsdb.Point{pt1, pt2}) + err := shard.WritePoints([]models.Point{pt1, pt2}) if err != nil { t.Fatalf(err.Error()) } @@ -490,7 +491,7 @@ func TestShardMapper_SelectMapperTagSetsFields(t *testing.T) { for _, tt := range tests { stmt := mustParseSelectStatement(tt.stmt) - mapper := openSelectMapperOrFail(t, shard, stmt) + mapper := openAggregateMapperOrFail(t, shard, stmt) fields := mapper.Fields() if !reflect.DeepEqual(fields, tt.expectedFields) { @@ -536,12 +537,12 @@ func mustParseStatement(s string) influxql.Statement { } func openRawMapperOrFail(t *testing.T, shard *tsdb.Shard, stmt *influxql.SelectStatement, chunkSize int) tsdb.Mapper { - mapper := tsdb.NewSelectMapper(shard, stmt, chunkSize) - - if err := mapper.Open(); err != nil { + m := tsdb.NewRawMapper(shard, stmt) + m.ChunkSize = chunkSize + if err := m.Open(); err != nil { t.Fatalf("failed to open raw mapper: %s", err.Error()) } - return mapper + return m } func nextRawChunkAsJson(t *testing.T, mapper tsdb.Mapper) string { @@ -549,30 +550,67 @@ func nextRawChunkAsJson(t *testing.T, mapper tsdb.Mapper) string { if err != nil { t.Fatalf("failed to get next chunk from mapper: %s", err.Error()) } - b, err := json.Marshal(r) - if err != nil { - t.Fatalf("failed to marshal chunk as JSON: %s", err.Error()) - } - return string(b) + return mustMarshalMapperOutput(r) } -func openSelectMapperOrFail(t *testing.T, shard *tsdb.Shard, stmt *influxql.SelectStatement) *tsdb.SelectMapper { - mapper := tsdb.NewSelectMapper(shard, stmt, 0) - - if err := mapper.Open(); err != nil { +func openAggregateMapperOrFail(t *testing.T, shard *tsdb.Shard, stmt *influxql.SelectStatement) *tsdb.AggregateMapper { + m := tsdb.NewAggregateMapper(shard, stmt) + if err := m.Open(); err != nil { t.Fatalf("failed to open aggregate mapper: %s", err.Error()) } - return mapper + return m } -func aggIntervalAsJson(t *testing.T, mapper *tsdb.SelectMapper) string { +func aggIntervalAsJson(t *testing.T, mapper *tsdb.AggregateMapper) string { r, err := mapper.NextChunk() if err != nil { - t.Fatalf("failed to get chunk from aggregate mapper: %s", err.Error()) + t.Fatalf("failed to get next chunk from aggregate mapper: %s", err.Error()) + } + return mustMarshalMapperOutput(r) +} + +// 
mustMarshalMapperOutput manually converts a mapper output to JSON, to avoid the +// built-in encoding. +func mustMarshalMapperOutput(r interface{}) string { + if r == nil { + b, err := json.Marshal(nil) + if err != nil { + panic("failed to marshal nil chunk as JSON") + } + return string(b) } - b, err := json.Marshal(r) + mo := r.(*tsdb.MapperOutput) + + type v struct { + Time int64 `json:"time,omitempty"` + Value interface{} `json:"value,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + } + + values := make([]*v, len(mo.Values)) + for i, value := range mo.Values { + values[i] = &v{ + Time: value.Time, + Value: value.Value, + Tags: value.Tags, + } + } + + var o struct { + Name string `json:"name,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Fields []string `json:"fields,omitempty"` + Values []*v `json:"values,omitempty"` + } + + o.Name = mo.Name + o.Tags = mo.Tags + o.Fields = mo.Fields + o.Values = values + + b, err := json.Marshal(o) if err != nil { - t.Fatalf("failed to marshal chunk as JSON: %s", err.Error()) + panic("failed to marshal MapperOutput") } return string(b) } diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/meta.go b/_third_party/github.com/influxdb/influxdb/tsdb/meta.go index 6bbe3a6932..edb0c741c2 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/meta.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/meta.go @@ -9,6 +9,7 @@ import ( "time" "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" + "bosun.org/_third_party/github.com/influxdb/influxdb/pkg/escape" "bosun.org/_third_party/github.com/influxdb/influxdb/tsdb/internal" "bosun.org/_third_party/github.com/gogo/protobuf/proto" @@ -58,6 +59,20 @@ func (d *DatabaseIndex) Measurement(name string) *Measurement { return d.measurements[name] } +// MeasurementsByName returns a list of measurements. +func (d *DatabaseIndex) MeasurementsByName(names []string) []*Measurement { + d.mu.RLock() + defer d.mu.RUnlock() + + a := make([]*Measurement, 0, len(names)) + for _, name := range names { + if m := d.measurements[name]; m != nil { + a = append(a, m) + } + } + return a +} + // MeasurementSeriesCounts returns the number of measurements and series currently indexed by the database. // Useful for reporting and monitoring. func (d *DatabaseIndex) MeasurementSeriesCounts() (nMeasurements int, nSeries int) { @@ -92,7 +107,7 @@ func (s *DatabaseIndex) CreateSeriesIndexIfNotExists(measurementName string, ser // CreateMeasurementIndexIfNotExists creates or retrieves an in memory index object for the measurement func (s *DatabaseIndex) CreateMeasurementIndexIfNotExists(name string) *Measurement { - name = unescapeString(name) + name = escape.UnescapeString(name) m := s.measurements[name] if m == nil { m = NewMeasurement(name, s) @@ -169,7 +184,7 @@ func (db *DatabaseIndex) measurementsByExpr(expr influxql.Expr) (Measurements, e return nil, fmt.Errorf("%#v", expr) } -// measurementsByTagFilters returns the measurements matching the filters on tag values. +// measurementsByTagFilters returns the sorted measurements matching the filters on tag values. func (db *DatabaseIndex) measurementsByTagFilters(filters []*TagFilter) Measurements { // If no filters, then return all measurements. 
if len(filters) == 0 {
@@ -227,6 +242,7 @@ func (db *DatabaseIndex) measurementsByTagFilters(filters []*TagFilter) Measurem
 		}
 	}
 
+	sort.Sort(measurements)
 	return measurements
 }
 
@@ -280,6 +296,142 @@ func (db *DatabaseIndex) DropSeries(keys []string) {
 	}
 }
 
+// RewriteSelectStatement performs any necessary query re-writing.
+func (db *DatabaseIndex) RewriteSelectStatement(stmt *influxql.SelectStatement) (*influxql.SelectStatement, error) {
+	// Expand regex expressions in the FROM clause.
+	sources, err := db.ExpandSources(stmt.Sources)
+	if err != nil {
+		return nil, err
+	}
+	stmt.Sources = sources
+
+	// Expand wildcards in the fields or GROUP BY.
+	stmt, err = db.ExpandWildcards(stmt)
+	if err != nil {
+		return nil, err
+	}
+
+	stmt.RewriteDistinct()
+
+	return stmt, nil
+}
+
+// ExpandWildcards returns a new SelectStatement with wildcards expanded.
+// If only a `SELECT *` is present, without a `GROUP BY *`, both tags and fields expand in the SELECT.
+// If a `SELECT *` and a `GROUP BY *` are both present, then only fields are expanded in the `SELECT` and only
+// tags are expanded in the `GROUP BY`.
+func (db *DatabaseIndex) ExpandWildcards(stmt *influxql.SelectStatement) (*influxql.SelectStatement, error) {
+	// If there are no wildcards in the statement, return it as-is.
+	if !stmt.HasWildcard() {
+		return stmt, nil
+	}
+	// Use sets to avoid duplicate field names.
+	fieldSet := map[string]struct{}{}
+	dimensionSet := map[string]struct{}{}
+
+	// keep track of where the wildcards are in the select statement
+	hasFieldWildcard := stmt.HasFieldWildcard()
+	hasDimensionWildcard := stmt.HasDimensionWildcard()
+
+	// Iterate measurements in the FROM clause getting the fields & dimensions for each.
+	var fields influxql.Fields
+	var dimensions influxql.Dimensions
+	for _, src := range stmt.Sources {
+		if m, ok := src.(*influxql.Measurement); ok {
+			// Lookup the measurement in the database.
+			mm := db.Measurement(m.Name)
+			if mm == nil {
+				// This shard has never received data for the measurement. No Mapper
+				// required.
+				return stmt, nil
+			}
+
+			// Get the fields for this measurement.
+			for _, name := range mm.FieldNames() {
+				if _, ok := fieldSet[name]; ok {
+					continue
+				}
+				fieldSet[name] = struct{}{}
+				fields = append(fields, &influxql.Field{Expr: &influxql.VarRef{Val: name}})
+			}
+
+			// Add tags to fields if a field wildcard was provided and a dimension wildcard was not.
+			if hasFieldWildcard && !hasDimensionWildcard {
+				for _, t := range mm.TagKeys() {
+					if _, ok := fieldSet[t]; ok {
+						continue
+					}
+					fieldSet[t] = struct{}{}
+					fields = append(fields, &influxql.Field{Expr: &influxql.VarRef{Val: t}})
+				}
+			}
+
+			// Get the dimensions for this measurement.
+			if hasDimensionWildcard {
+				for _, t := range mm.TagKeys() {
+					if _, ok := dimensionSet[t]; ok {
+						continue
+					}
+					dimensionSet[t] = struct{}{}
+					dimensions = append(dimensions, &influxql.Dimension{Expr: &influxql.VarRef{Val: t}})
+				}
+			}
+		}
+	}
+
+	// Return a new SelectStatement with the wildcards rewritten.
+	return stmt.RewriteWildcards(fields, dimensions), nil
+}
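A hedged sketch of the rewrite this enables: given the field and tag names that ExpandWildcards collects from the index, the influxql RewriteWildcards call used above fills in the wildcards. The literal names here are illustrative:

package main

import (
	"fmt"

	"bosun.org/_third_party/github.com/influxdb/influxdb/influxql"
)

func main() {
	stmt, err := influxql.ParseStatement(`SELECT * FROM cpu GROUP BY *`)
	if err != nil {
		panic(err)
	}
	sel := stmt.(*influxql.SelectStatement)

	// Field and tag names as ExpandWildcards would gather them from the index.
	fields := influxql.Fields{
		{Expr: &influxql.VarRef{Val: "idle"}},
		{Expr: &influxql.VarRef{Val: "user"}},
	}
	dimensions := influxql.Dimensions{
		{Expr: &influxql.VarRef{Val: "host"}},
	}

	// Prints roughly: SELECT idle, user FROM cpu GROUP BY host
	fmt.Println(sel.RewriteWildcards(fields, dimensions).String())
}

+
+// ExpandSources expands regex sources and removes duplicates.
+// NOTE: sources must be normalized (db and rp set) before calling this function.
+func (di *DatabaseIndex) ExpandSources(sources influxql.Sources) (influxql.Sources, error) {
+	// Use a map as a set to prevent duplicates. Two regexes might produce
+	// duplicates when expanded.
+	set := map[string]influxql.Source{}
+	names := []string{}
+
+	// Iterate all sources, expanding regexes when they're found.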
+	for _, source := range sources {
+		switch src := source.(type) {
+		case *influxql.Measurement:
+			if src.Regex == nil {
+				name := src.String()
+				set[name] = src
+				names = append(names, name)
+				continue
+			}
+			// Get measurements from the database that match the regex.
+			measurements := di.measurementsByRegex(src.Regex.Val)
+			// Add those measurements to the set.
+			for _, m := range measurements {
+				m2 := &influxql.Measurement{
+					Database:        src.Database,
+					RetentionPolicy: src.RetentionPolicy,
+					Name:            m.Name,
+				}
+				name := m2.String()
+				if _, ok := set[name]; !ok {
+					set[name] = m2
+					names = append(names, name)
+				}
+			}
+		default:
+			return nil, fmt.Errorf("expandSources: unsupported source type: %T", source)
+		}
+	}
+
+	// Sort the list of source names.
+	sort.Strings(names)
+
+	// Convert set to a list of Sources.
+	expanded := make(influxql.Sources, 0, len(set))
+	for _, name := range names {
+		expanded = append(expanded, set[name])
+	}
+	return expanded, nil
+}
+
 // Measurement represents a collection of time series in a database. It also contains in memory
 // structures for indexing tags. Exported functions are goroutine safe while un-exported functions
 // assume the caller will use the appropriate locks
@@ -901,6 +1053,64 @@ func (m *Measurement) uniqueTagValues(expr influxql.Expr) map[string][]string {
 	return out
 }
 
+// SelectFields returns a list of fields in the SELECT section of stmt.
+func (m *Measurement) SelectFields(stmt *influxql.SelectStatement) []string {
+	set := newStringSet()
+	for _, name := range stmt.NamesInSelect() {
+		if m.HasField(name) {
+			set.add(name)
+			continue
+		}
+	}
+	return set.list()
+}
+
+// SelectTags returns a list of non-field tags in the SELECT section of stmt.
+func (m *Measurement) SelectTags(stmt *influxql.SelectStatement) []string {
+	set := newStringSet()
+	for _, name := range stmt.NamesInSelect() {
+		if !m.HasField(name) && m.HasTagKey(name) {
+			set.add(name)
+		}
+	}
+	return set.list()
+}
+
+// WhereFields returns a list of non-"time" fields in the WHERE section of stmt.
+func (m *Measurement) WhereFields(stmt *influxql.SelectStatement) []string {
+	set := newStringSet()
+	for _, name := range stmt.NamesInWhere() {
+		if name != "time" && m.HasField(name) {
+			set.add(name)
+		}
+	}
+	return set.list()
+}
+
+// DimensionTagSets returns the list of tag sets from the GROUP BY section of stmt.
+func (m *Measurement) DimensionTagSets(stmt *influxql.SelectStatement) ([]*influxql.TagSet, error) {
+	_, tagKeys := stmt.Dimensions.Normalize()
+
+	for _, n := range stmt.NamesInDimension() {
+		if m.HasTagKey(n) {
+			tagKeys = append(tagKeys, n)
+		}
+	}
+
+	// Get the sorted unique tag sets for this statement.
+	tagSets, err := m.TagSets(stmt, tagKeys)
+	if err != nil {
+		return nil, err
+	}
+	return tagSets, nil
+}
+
+type SelectInfo struct {
+	SelectFields []string
+	SelectTags   []string
+	WhereFields  []string
+}
+
 // Measurements represents a list of *Measurement.
 type Measurements []*Measurement
 
@@ -908,6 +1118,45 @@ func (a Measurements) Len() int           { return len(a) }
 func (a Measurements) Less(i, j int) bool { return a[i].Name < a[j].Name }
 func (a Measurements) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
 
+// SelectFields returns a list of fields in the SELECT section of stmt.
+func (a Measurements) SelectFields(stmt *influxql.SelectStatement) []string { + set := newStringSet() + for _, name := range stmt.NamesInSelect() { + for _, m := range a { + if m.HasField(name) { + set.add(name) + } + } + } + return set.list() +} + +// SelectTags returns a list of non-field tags in the SELECT section of stmt. +func (a Measurements) SelectTags(stmt *influxql.SelectStatement) []string { + set := newStringSet() + for _, name := range stmt.NamesInSelect() { + for _, m := range a { + if !m.HasField(name) && m.HasTagKey(name) { + set.add(name) + } + } + } + return set.list() +} + +// WhereFields returns a list of non-"time" fields in the WHERE section of stmt. +func (a Measurements) WhereFields(stmt *influxql.SelectStatement) []string { + set := newStringSet() + for _, name := range stmt.NamesInWhere() { + for _, m := range a { + if name != "time" && m.HasField(name) { + set.add(name) + } + } + } + return set.list() +} + func (a Measurements) intersect(other Measurements) Measurements { l := a r := other @@ -1196,6 +1445,17 @@ func (m *Measurement) TagKeys() []string { return keys } +// TagValues returns all the values for the given tag key +func (m *Measurement) TagValues(key string) []string { + m.mu.RLock() + defer m.mu.RUnlock() + values := []string{} + for v := range m.seriesByTagKeyValue[key] { + values = append(values, v) + } + return values +} + // SetFieldName adds the field name to the measurement. func (m *Measurement) SetFieldName(name string) { m.mu.Lock() diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/query_executor.go b/_third_party/github.com/influxdb/influxdb/tsdb/query_executor.go index 63f9b0b983..955d916dec 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/query_executor.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/query_executor.go @@ -6,11 +6,11 @@ import ( "log" "os" "sort" - "strings" "time" "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" "bosun.org/_third_party/github.com/influxdb/influxdb/meta" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" ) // QueryExecutor executes every statement in an influxdb Query. It is responsible for @@ -36,12 +36,18 @@ type QueryExecutor struct { ExecuteStatement(stmt influxql.Statement) *influxql.Result } + // Execute statements relating to statistics and diagnostics. + MonitorStatementExecutor interface { + ExecuteStatement(stmt influxql.Statement) *influxql.Result + } + // Maps shards for queries. ShardMapper interface { CreateMapper(shard meta.ShardInfo, stmt influxql.Statement, chunkSize int) (Mapper, error) } - Logger *log.Logger + Logger *log.Logger + QueryLogEnabled bool // the local data store Store *Store @@ -55,6 +61,11 @@ func NewQueryExecutor(store *Store) *QueryExecutor { } } +// SetLogger sets the internal logger to the logger passed in. +func (q *QueryExecutor) SetLogger(l *log.Logger) { + q.Logger = l +} + // Authorize user u to execute query q on database. // database can be "" for queries that do not require a database. // If no user is provided it will return an error unless the query's first statement is to create @@ -140,10 +151,15 @@ func (q *QueryExecutor) ExecuteQuery(query *influxql.Query, database string, chu break } + // Log each normalized statement. 
+ if q.QueryLogEnabled { + q.Logger.Println(stmt.String()) + } + var res *influxql.Result switch stmt := stmt.(type) { case *influxql.SelectStatement: - if err := q.executeSelectStatement(i, stmt, results, chunkSize); err != nil { + if err := q.executeStatement(i, stmt, database, results, chunkSize); err != nil { results <- &influxql.Result{Err: err} break } @@ -156,23 +172,27 @@ func (q *QueryExecutor) ExecuteQuery(query *influxql.Query, database string, chu // TODO: handle this in a cluster res = q.executeDropMeasurementStatement(stmt, database) case *influxql.ShowMeasurementsStatement: - if err := q.executeShowMeasurementsStatement(i, stmt, database, results, chunkSize); err != nil { + if err := q.executeStatement(i, stmt, database, results, chunkSize); err != nil { results <- &influxql.Result{Err: err} break } case *influxql.ShowTagKeysStatement: - res = q.executeShowTagKeysStatement(stmt, database) + if err := q.executeStatement(i, stmt, database, results, chunkSize); err != nil { + results <- &influxql.Result{Err: err} + break + } case *influxql.ShowTagValuesStatement: res = q.executeShowTagValuesStatement(stmt, database) case *influxql.ShowFieldKeysStatement: res = q.executeShowFieldKeysStatement(stmt, database) - case *influxql.ShowDiagnosticsStatement: - res = q.executeShowDiagnosticsStatement(stmt) case *influxql.DeleteStatement: res = &influxql.Result{Err: ErrInvalidQuery} case *influxql.DropDatabaseStatement: // TODO: handle this in a cluster res = q.executeDropDatabaseStatement(stmt) + case *influxql.ShowStatsStatement, *influxql.ShowDiagnosticsStatement: + // Send monitor-related queries to the monitor service. + res = q.MonitorStatementExecutor.ExecuteStatement(stmt) default: // Delegate all other meta statements to a separate executor. They don't hit tsdb storage. res = q.MetaStatementExecutor.ExecuteStatement(stmt) @@ -205,11 +225,14 @@ func (q *QueryExecutor) ExecuteQuery(query *influxql.Query, database string, chu func (q *QueryExecutor) PlanSelect(stmt *influxql.SelectStatement, chunkSize int) (Executor, error) { shards := map[uint64]meta.ShardInfo{} // Shards requiring mappers. + // It is important to "stamp" this time so that everywhere we evaluate `now()` in the statement is EXACTLY the same `now` + now := time.Now().UTC() + // Replace instances of "now()" with the current time, and check the resultant times. - stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: time.Now().UTC()}) + stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: now}) tmin, tmax := influxql.TimeRange(stmt.Condition) if tmax.IsZero() { - tmax = time.Now() + tmax = now } if tmin.IsZero() { tmin = time.Unix(0, 0) @@ -270,11 +293,11 @@ func (q *QueryExecutor) executeSelectStatement(statementID int, stmt *influxql.S return row.Err } resultSent = true - results <- &influxql.Result{StatementID: statementID, Series: []*influxql.Row{row}} + results <- &influxql.Result{StatementID: statementID, Series: []*models.Row{row}} } if !resultSent { - results <- &influxql.Result{StatementID: statementID, Series: make([]*influxql.Row, 0)} + results <- &influxql.Result{StatementID: statementID, Series: make([]*models.Row, 0)} } return nil @@ -460,7 +483,7 @@ func (q *QueryExecutor) executeShowSeriesStatement(stmt *influxql.ShowSeriesStat // Create result struct that will be populated and returned. 
result := &influxql.Result{ - Series: make(influxql.Rows, 0, len(measurements)), + Series: make(models.Rows, 0, len(measurements)), } // Loop through measurements to build result. One result row / measurement. @@ -486,7 +509,7 @@ func (q *QueryExecutor) executeShowSeriesStatement(stmt *influxql.ShowSeriesStat } // Make a new row for this measurement. - r := &influxql.Row{ + r := &models.Row{ Name: m.Name, Columns: m.TagKeys(), } @@ -524,8 +547,8 @@ func (q *QueryExecutor) executeShowSeriesStatement(stmt *influxql.ShowSeriesStat // filterShowSeriesResult will limit the number of series returned based on the limit and the offset. // Unlike limit and offset on SELECT statements, the limit and offset don't apply to the number of Rows, but // to the number of total Values returned, since each Value represents a unique series. -func (q *QueryExecutor) filterShowSeriesResult(limit, offset int, rows influxql.Rows) influxql.Rows { - var filteredSeries influxql.Rows +func (q *QueryExecutor) filterShowSeriesResult(limit, offset int, rows models.Rows) models.Rows { + var filteredSeries models.Rows seriesCount := 0 for _, r := range rows { var currentSeries [][]interface{} @@ -550,7 +573,20 @@ func (q *QueryExecutor) filterShowSeriesResult(limit, offset int, rows influxql. return filteredSeries } -// PlanShowMeasurements creates an execution plan for the given SelectStatement and returns an Executor. +func (q *QueryExecutor) planStatement(stmt influxql.Statement, database string, chunkSize int) (Executor, error) { + switch stmt := stmt.(type) { + case *influxql.SelectStatement: + return q.PlanSelect(stmt, chunkSize) + case *influxql.ShowMeasurementsStatement: + return q.PlanShowMeasurements(stmt, database, chunkSize) + case *influxql.ShowTagKeysStatement: + return q.PlanShowTagKeys(stmt, database, chunkSize) + default: + return nil, fmt.Errorf("can't plan statement type: %T", stmt) + } +} + +// PlanShowMeasurements creates an execution plan for a SHOW MEASUREMENTS statement and returns an Executor. func (q *QueryExecutor) PlanShowMeasurements(stmt *influxql.ShowMeasurementsStatement, database string, chunkSize int) (Executor, error) { // Get the database info. di, err := q.MetaStore.Database(database) @@ -581,9 +617,40 @@ func (q *QueryExecutor) PlanShowMeasurements(stmt *influxql.ShowMeasurementsStat return executor, nil } -func (q *QueryExecutor) executeShowMeasurementsStatement(statementID int, stmt *influxql.ShowMeasurementsStatement, database string, results chan *influxql.Result, chunkSize int) error { +// PlanShowTagKeys creates an execution plan for a SHOW TAG KEYS statement and returns an Executor. +func (q *QueryExecutor) PlanShowTagKeys(stmt *influxql.ShowTagKeysStatement, database string, chunkSize int) (Executor, error) { + // Get the database info. + di, err := q.MetaStore.Database(database) + if err != nil { + return nil, err + } else if di == nil { + return nil, ErrDatabaseNotFound(database) + } + + // Get info for all shards in the database. + shards := di.ShardInfos() + + // Build the Mappers, one per shard. + mappers := []Mapper{} + for _, sh := range shards { + m, err := q.ShardMapper.CreateMapper(sh, stmt, chunkSize) + if err != nil { + return nil, err + } + if m == nil { + // No data for this shard, skip it. 
+ continue + } + mappers = append(mappers, m) + } + + executor := NewShowTagKeysExecutor(stmt, mappers, chunkSize) + return executor, nil +} + +func (q *QueryExecutor) executeStatement(statementID int, stmt influxql.Statement, database string, results chan *influxql.Result, chunkSize int) error { // Plan statement execution. - e, err := q.PlanShowMeasurements(stmt, database, chunkSize) + e, err := q.planStatement(stmt, database, chunkSize) if err != nil { return err } @@ -598,67 +665,40 @@ func (q *QueryExecutor) executeShowMeasurementsStatement(statementID int, stmt * return row.Err } resultSent = true - results <- &influxql.Result{StatementID: statementID, Series: []*influxql.Row{row}} + results <- &influxql.Result{StatementID: statementID, Series: []*models.Row{row}} } if !resultSent { - results <- &influxql.Result{StatementID: statementID, Series: make([]*influxql.Row, 0)} + results <- &influxql.Result{StatementID: statementID, Series: make([]*models.Row, 0)} } return nil } -func (q *QueryExecutor) executeShowTagKeysStatement(stmt *influxql.ShowTagKeysStatement, database string) *influxql.Result { - // Find the database. - db := q.Store.DatabaseIndex(database) - if db == nil { - return &influxql.Result{} - } - - // Expand regex expressions in the FROM clause. - sources, err := q.expandSources(stmt.Sources) - if err != nil { - return &influxql.Result{Err: err} - } - - // Get the list of measurements we're interested in. - measurements, err := measurementsFromSourcesOrDB(db, sources...) +func (q *QueryExecutor) executeShowMeasurementsStatement(statementID int, stmt *influxql.ShowMeasurementsStatement, database string, results chan *influxql.Result, chunkSize int) error { // Plan statement execution. + e, err := q.PlanShowMeasurements(stmt, database, chunkSize) if err != nil { - return &influxql.Result{Err: err} - } - - // Make result. - result := &influxql.Result{ - Series: make(influxql.Rows, 0, len(measurements)), + return err } - // Add one row per measurement to the result. - for _, m := range measurements { - // TODO: filter tag keys by stmt.Condition - - // Get the tag keys in sorted order. - keys := m.TagKeys() - - // Convert keys to an [][]interface{}. - values := make([][]interface{}, 0, len(m.seriesByTagKeyValue)) - for _, k := range keys { - v := interface{}(k) - values = append(values, []interface{}{v}) - } + // Execute plan. + ch := e.Execute() - // Make a result row for the measurement. - r := &influxql.Row{ - Name: m.Name, - Columns: []string{"tagKey"}, - Values: values, + // Stream results from the channel. We should send an empty result if nothing comes through. + resultSent := false + for row := range ch { + if row.Err != nil { + return row.Err } - - result.Series = append(result.Series, r) + resultSent = true + results <- &influxql.Result{StatementID: statementID, Series: []*models.Row{row}} } - // TODO: LIMIT & OFFSET + if !resultSent { + results <- &influxql.Result{StatementID: statementID, Series: make([]*models.Row, 0)} + } - return result + return nil } func (q *QueryExecutor) executeShowTagValuesStatement(stmt *influxql.ShowTagValuesStatement, database string) *influxql.Result { @@ -682,7 +722,7 @@ func (q *QueryExecutor) executeShowTagValuesStatement(stmt *influxql.ShowTagValu // Make result. 
result := &influxql.Result{ - Series: make(influxql.Rows, 0), + Series: make(models.Rows, 0), } tagValues := make(map[string]stringSet) @@ -717,7 +757,7 @@ func (q *QueryExecutor) executeShowTagValuesStatement(stmt *influxql.ShowTagValu } for k, v := range tagValues { - r := &influxql.Row{ + r := &models.Row{ Name: k + "TagValues", Columns: []string{k}, } @@ -759,13 +799,13 @@ func (q *QueryExecutor) executeShowFieldKeysStatement(stmt *influxql.ShowFieldKe // Make result. result := &influxql.Result{ - Series: make(influxql.Rows, 0, len(measurements)), + Series: make(models.Rows, 0, len(measurements)), } // Loop through measurements, adding a result row for each. for _, m := range measurements { // Create a new row. - r := &influxql.Row{ + r := &models.Row{ Name: m.Name, Columns: []string{"fieldKey"}, } @@ -797,7 +837,7 @@ func measurementsFromSourcesOrDB(db *DatabaseIndex, sources ...influxql.Source) if m, ok := source.(*influxql.Measurement); ok { measurement := db.measurements[m.Name] if measurement == nil { - return nil, ErrMeasurementNotFound(m.Name) + continue } measurements = append(measurements, measurement) @@ -838,29 +878,15 @@ func (q *QueryExecutor) normalizeStatement(stmt influxql.Statement, defaultDatab prefixes[n.Name] = n.Name } }) - if err != nil { - return err - } - - // Replace all variable references that used measurement prefixes. - influxql.WalkFunc(stmt, func(n influxql.Node) { - switch n := n.(type) { - case *influxql.VarRef: - for k, v := range prefixes { - if strings.HasPrefix(n.Val, k+".") { - n.Val = v + "." + influxql.QuoteIdent(n.Val[len(k)+1:]) - } - } - } - }) - return } // normalizeMeasurement inserts the default database or policy into all measurement names, // if required. func (q *QueryExecutor) normalizeMeasurement(m *influxql.Measurement, defaultDatabase string) error { - if m.Name == "" && m.Regex == nil { + // Targets (measurements in an INTO clause) can have blank names, which means it will be + // the same as the measurement name it came from in the FROM clause. + if !m.IsTarget && m.Name == "" && m.Regex == nil { return errors.New("invalid measurement") } @@ -893,10 +919,6 @@ func (q *QueryExecutor) normalizeMeasurement(m *influxql.Measurement, defaultDat return nil } -func (q *QueryExecutor) executeShowDiagnosticsStatement(stmt *influxql.ShowDiagnosticsStatement) *influxql.Result { - return &influxql.Result{Err: fmt.Errorf("SHOW DIAGNOSTICS is not implemented yet")} -} - // ErrAuthorize represents an authorization error. type ErrAuthorize struct { q *QueryExecutor diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/query_executor_test.go b/_third_party/github.com/influxdb/influxdb/tsdb/query_executor_test.go index 863f705583..a842762702 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/query_executor_test.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/query_executor_test.go @@ -11,6 +11,7 @@ import ( "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" "bosun.org/_third_party/github.com/influxdb/influxdb/meta" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" "bosun.org/_third_party/github.com/influxdb/influxdb/tsdb" ) @@ -22,7 +23,7 @@ func TestWritePointsAndExecuteQuery(t *testing.T) { defer os.RemoveAll(store.Path()) // Write first point. 
- if err := store.WriteToShard(shardID, []tsdb.Point{tsdb.NewPoint( + if err := store.WriteToShard(shardID, []models.Point{models.NewPoint( "cpu", map[string]string{"host": "server"}, map[string]interface{}{"value": 1.0}, @@ -32,7 +33,7 @@ func TestWritePointsAndExecuteQuery(t *testing.T) { } // Write second point. - if err := store.WriteToShard(shardID, []tsdb.Point{tsdb.NewPoint( + if err := store.WriteToShard(shardID, []models.Point{models.NewPoint( "cpu", map[string]string{"host": "server"}, map[string]interface{}{"value": 1.0}, @@ -75,7 +76,7 @@ func TestWritePointsAndExecuteQuery_Update(t *testing.T) { defer os.RemoveAll(store.Path()) // Write original point. - if err := store.WriteToShard(1, []tsdb.Point{tsdb.NewPoint( + if err := store.WriteToShard(1, []models.Point{models.NewPoint( "temperature", map[string]string{}, map[string]interface{}{"value": 100.0}, @@ -96,7 +97,7 @@ func TestWritePointsAndExecuteQuery_Update(t *testing.T) { executor.ShardMapper = &testShardMapper{store: store} // Rewrite point with new value. - if err := store.WriteToShard(1, []tsdb.Point{tsdb.NewPoint( + if err := store.WriteToShard(1, []models.Point{models.NewPoint( "temperature", map[string]string{}, map[string]interface{}{"value": 200.0}, @@ -116,14 +117,14 @@ func TestDropSeriesStatement(t *testing.T) { store, executor := testStoreAndExecutor("") defer os.RemoveAll(store.Path()) - pt := tsdb.NewPoint( + pt := models.NewPoint( "cpu", map[string]string{"host": "server"}, map[string]interface{}{"value": 1.0}, time.Unix(1, 2), ) - err := store.WriteToShard(shardID, []tsdb.Point{pt}) + err := store.WriteToShard(shardID, []models.Point{pt}) if err != nil { t.Fatalf(err.Error()) } @@ -143,7 +144,7 @@ func TestDropSeriesStatement(t *testing.T) { } got = executeAndGetJSON("show tag keys from cpu", executor) - exepected = `[{"series":[{"name":"cpu","columns":["tagKey"]}]}]` + exepected = `[{}]` if exepected != got { t.Fatalf("exp: %s\ngot: %s", exepected, got) } @@ -162,7 +163,7 @@ func TestDropSeriesStatement(t *testing.T) { } got = executeAndGetJSON("show tag keys from cpu", executor) - exepected = `[{"series":[{"name":"cpu","columns":["tagKey"]}]}]` + exepected = `[{}]` if exepected != got { t.Fatalf("exp: %s\ngot: %s", exepected, got) } @@ -172,20 +173,20 @@ func TestDropMeasurementStatement(t *testing.T) { store, executor := testStoreAndExecutor("") defer os.RemoveAll(store.Path()) - pt := tsdb.NewPoint( + pt := models.NewPoint( "cpu", map[string]string{"host": "server"}, map[string]interface{}{"value": 1.0}, time.Unix(1, 2), ) - pt2 := tsdb.NewPoint( + pt2 := models.NewPoint( "memory", map[string]string{"host": "server"}, map[string]interface{}{"value": 1.0}, time.Unix(1, 2), ) - if err := store.WriteToShard(shardID, []tsdb.Point{pt, pt2}); err != nil { + if err := store.WriteToShard(shardID, []models.Point{pt, pt2}); err != nil { t.Fatal(err) } @@ -238,14 +239,14 @@ func TestDropDatabase(t *testing.T) { store, executor := testStoreAndExecutor("") defer os.RemoveAll(store.Path()) - pt := tsdb.NewPoint( + pt := models.NewPoint( "cpu", map[string]string{"host": "server"}, map[string]interface{}{"value": 1.0}, time.Unix(1, 2), ) - if err := store.WriteToShard(shardID, []tsdb.Point{pt}); err != nil { + if err := store.WriteToShard(shardID, []models.Point{pt}); err != nil { t.Fatal(err) } @@ -290,7 +291,7 @@ func TestDropDatabase(t *testing.T) { executor.Store = store executor.ShardMapper = &testShardMapper{store: store} - if err := store.WriteToShard(shardID, []tsdb.Point{pt}); err == nil || err.Error() != 
"shard not found" { + if err := store.WriteToShard(shardID, []models.Point{pt}); err == nil || err.Error() != "shard not found" { t.Fatalf("expected shard to not be found") } } @@ -406,8 +407,8 @@ func (t *testMetastore) Database(name string) (*meta.DatabaseInfo, error) { EndTime: time.Now().Add(time.Hour), Shards: []meta.ShardInfo{ { - ID: uint64(1), - OwnerIDs: []uint64{1}, + ID: uint64(1), + Owners: []meta.ShardOwner{{NodeID: 1}}, }, }, }, @@ -440,8 +441,8 @@ func (t *testMetastore) RetentionPolicy(database, name string) (rpi *meta.Retent EndTime: time.Now().Add(time.Hour), Shards: []meta.ShardInfo{ { - ID: uint64(1), - OwnerIDs: []uint64{1}, + ID: uint64(1), + Owners: []meta.ShardOwner{{NodeID: 1}}, }, }, }, @@ -461,8 +462,8 @@ func (t *testMetastore) ShardGroupsByTimeRange(database, policy string, min, max EndTime: time.Now().Add(time.Hour), Shards: []meta.ShardInfo{ { - ID: uint64(1), - OwnerIDs: []uint64{1}, + ID: uint64(1), + Owners: []meta.ShardOwner{{NodeID: 1}}, }, }, }, diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/shard.go b/_third_party/github.com/influxdb/influxdb/tsdb/shard.go index f2cb3c7b2d..be55a3915c 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/shard.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/shard.go @@ -4,19 +4,31 @@ import ( "encoding/binary" "encoding/json" "errors" + "expvar" "fmt" "io" "math" "os" "sync" + "bosun.org/_third_party/github.com/influxdb/influxdb" "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" "bosun.org/_third_party/github.com/influxdb/influxdb/tsdb/internal" "bosun.org/_third_party/github.com/boltdb/bolt" "bosun.org/_third_party/github.com/gogo/protobuf/proto" ) +const ( + statWriteReq = "write_req" + statSeriesCreate = "series_create" + statFieldsCreate = "fields_create" + statWritePointsFail = "write_points_fail" + statWritePointsOK = "write_points_ok" + statWriteBytes = "write_bytes" +) + var ( // ErrFieldOverflow is returned when too many fields are created on a measurement. ErrFieldOverflow = errors.New("field overflow") @@ -49,12 +61,20 @@ type Shard struct { mu sync.RWMutex measurementFields map[string]*MeasurementFields // measurement name to their fields + // expvar-based stats. + statMap *expvar.Map + // The writer used by the logger. LogOutput io.Writer } // NewShard returns a new initialized Shard. walPath doesn't apply to the b1 type index func NewShard(id uint64, index *DatabaseIndex, path string, walPath string, options EngineOptions) *Shard { + // Configure statistics collection. + key := fmt.Sprintf("shard:%s:%d", path, id) + tags := map[string]string{"path": path, "id": fmt.Sprintf("%d", id), "engine": options.EngineVersion} + statMap := influxdb.NewStatistics(key, "shard", tags) + return &Shard{ index: index, path: path, @@ -63,6 +83,7 @@ func NewShard(id uint64, index *DatabaseIndex, path string, walPath string, opti options: options, measurementFields: make(map[string]*MeasurementFields), + statMap: statMap, LogOutput: os.Stderr, } } @@ -127,6 +148,25 @@ func (s *Shard) close() error { return nil } +// DiskSize returns the size on disk of this shard +func (s *Shard) DiskSize() (int64, error) { + s.mu.RLock() + defer s.mu.RUnlock() + stats, err := os.Stat(s.path) + var size int64 + if err != nil { + return 0, err + } + size += stats.Size() + return size, nil +} + +// ReadOnlyTx returns a read-only transaction for the shard. The transaction must be rolled back to +// release resources. 
+func (s *Shard) ReadOnlyTx() (Tx, error) { + return s.engine.Begin(false) +} + // TODO: this is temporarily exported to make tx.go work. When the query engine gets refactored // into the tsdb package this should be removed. No one outside tsdb should know the underlying field encoding scheme. func (s *Shard) FieldCodec(measurementName string) *FieldCodec { @@ -152,11 +192,15 @@ type SeriesCreate struct { } // WritePoints will write the raw data points and any new metadata to the index in the shard -func (s *Shard) WritePoints(points []Point) error { +func (s *Shard) WritePoints(points []models.Point) error { + s.statMap.Add(statWriteReq, 1) + seriesToCreate, fieldsToCreate, seriesToAddShardTo, err := s.validateSeriesAndFields(points) if err != nil { return err } + s.statMap.Add(statSeriesCreate, int64(len(seriesToCreate))) + s.statMap.Add(statFieldsCreate, int64(len(fieldsToCreate))) // add any new series to the in-memory index if len(seriesToCreate) > 0 { @@ -210,8 +254,10 @@ func (s *Shard) WritePoints(points []Point) error { // Write to the engine. if err := s.engine.WritePoints(points, measurementFieldsToSave, seriesToCreate); err != nil { + s.statMap.Add(statWritePointsFail, 1) return fmt.Errorf("engine: %s", err) } + s.statMap.Add(statWritePointsOK, int64(len(points))) return nil } @@ -244,7 +290,7 @@ func (s *Shard) ValidateAggregateFieldsInStatement(measurementName string, stmt switch lit := nested.Args[0].(type) { case *influxql.VarRef: - if influxql.IsNumeric(nested) { + if IsNumeric(nested) { f := m.Fields[lit.Val] if err := validateType(a.Name, f.Name, f.Type); err != nil { return err @@ -254,7 +300,7 @@ func (s *Shard) ValidateAggregateFieldsInStatement(measurementName string, stmt if nested.Name != "count" { return fmt.Errorf("aggregate call didn't contain a field %s", a.String()) } - if influxql.IsNumeric(nested) { + if IsNumeric(nested) { f := m.Fields[lit.Val] if err := validateType(a.Name, f.Name, f.Type); err != nil { return err @@ -327,7 +373,7 @@ func (s *Shard) createFieldsAndMeasurements(fieldsToCreate []*FieldCreate) (map[ } // validateSeriesAndFields checks which series and fields are new and whose metadata should be saved and indexed -func (s *Shard) validateSeriesAndFields(points []Point) ([]*SeriesCreate, []*FieldCreate, []string, error) { +func (s *Shard) validateSeriesAndFields(points []models.Point) ([]*SeriesCreate, []*FieldCreate, []string, error) { var seriesToCreate []*SeriesCreate var fieldsToCreate []*FieldCreate var seriesToAddShardTo []string @@ -382,6 +428,13 @@ func (s *Shard) validateSeriesAndFields(points []Point) ([]*SeriesCreate, []*Fie // SeriesCount returns the number of series buckets on the shard. func (s *Shard) SeriesCount() (int, error) { return s.engine.SeriesCount() } +// WriteTo writes the shard's data to w. 
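To make the counters concrete: a sketch of a write that bumps them, assuming an open shard sh and that influxdb.NewStatistics publishes the map built in NewShard above (the WriteTo method below likewise records write_bytes):

    pt := models.NewPoint(
        "cpu",
        map[string]string{"host": "server01"},
        map[string]interface{}{"value": 0.64},
        time.Now(),
    )
    if err := sh.WritePoints([]models.Point{pt}); err != nil {
        log.Fatal(err)
    }
    // The shard's expvar map now shows write_req=1 and write_points_ok=1.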
+func (s *Shard) WriteTo(w io.Writer) (int64, error) { + n, err := s.engine.WriteTo(w) + s.statMap.Add(statWriteBytes, int64(n)) + return n, err +} + type MeasurementFields struct { Fields map[string]*Field `json:"fields"` Codec *FieldCodec diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/shard_test.go b/_third_party/github.com/influxdb/influxdb/tsdb/shard_test.go index 776f216ffb..22791d68b7 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/shard_test.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/shard_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" "bosun.org/_third_party/github.com/influxdb/influxdb/tsdb" "bosun.org/_third_party/github.com/influxdb/influxdb/tsdb/engine/b1" ) @@ -29,20 +30,20 @@ func TestShardWriteAndIndex(t *testing.T) { t.Fatalf("error openeing shard: %s", err.Error()) } - pt := tsdb.NewPoint( + pt := models.NewPoint( "cpu", map[string]string{"host": "server"}, map[string]interface{}{"value": 1.0}, time.Unix(1, 2), ) - err := sh.WritePoints([]tsdb.Point{pt}) + err := sh.WritePoints([]models.Point{pt}) if err != nil { t.Fatalf(err.Error()) } pt.SetTime(time.Unix(2, 3)) - err = sh.WritePoints([]tsdb.Point{pt}) + err = sh.WritePoints([]models.Point{pt}) if err != nil { t.Fatalf(err.Error()) } @@ -76,7 +77,7 @@ func TestShardWriteAndIndex(t *testing.T) { // and ensure that we can still write data pt.SetTime(time.Unix(2, 6)) - err = sh.WritePoints([]tsdb.Point{pt}) + err = sh.WritePoints([]models.Point{pt}) if err != nil { t.Fatalf(err.Error()) } @@ -98,26 +99,26 @@ func TestShardWriteAddNewField(t *testing.T) { } defer sh.Close() - pt := tsdb.NewPoint( + pt := models.NewPoint( "cpu", map[string]string{"host": "server"}, map[string]interface{}{"value": 1.0}, time.Unix(1, 2), ) - err := sh.WritePoints([]tsdb.Point{pt}) + err := sh.WritePoints([]models.Point{pt}) if err != nil { t.Fatalf(err.Error()) } - pt = tsdb.NewPoint( + pt = models.NewPoint( "cpu", map[string]string{"host": "server"}, map[string]interface{}{"value": 1.0, "value2": 2.0}, time.Unix(1, 2), ) - err = sh.WritePoints([]tsdb.Point{pt}) + err = sh.WritePoints([]models.Point{pt}) if err != nil { t.Fatalf(err.Error()) } @@ -158,7 +159,7 @@ func TestShard_Autoflush(t *testing.T) { // Write a bunch of points. for i := 0; i < 100; i++ { - if err := sh.WritePoints([]tsdb.Point{tsdb.NewPoint( + if err := sh.WritePoints([]models.Point{models.NewPoint( fmt.Sprintf("cpu%d", i), map[string]string{"host": "server"}, map[string]interface{}{"value": 1.0}, @@ -198,7 +199,7 @@ func TestShard_Autoflush_FlushInterval(t *testing.T) { // Write some points. for i := 0; i < 100; i++ { - if err := sh.WritePoints([]tsdb.Point{tsdb.NewPoint( + if err := sh.WritePoints([]models.Point{models.NewPoint( fmt.Sprintf("cpu%d", i), map[string]string{"host": "server"}, map[string]interface{}{"value": 1.0}, @@ -252,10 +253,10 @@ func benchmarkWritePoints(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) { // Create index for the shard to use. index := tsdb.NewDatabaseIndex() // Generate point data to write to the shard. 
- points := []tsdb.Point{} + points := []models.Point{} for _, s := range series { for val := 0.0; val < float64(pntCnt); val++ { - p := tsdb.NewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now()) + p := models.NewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now()) points = append(points, p) } } @@ -293,10 +294,10 @@ func benchmarkWritePointsExistingSeries(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt // Create index for the shard to use. index := tsdb.NewDatabaseIndex() // Generate point data to write to the shard. - points := []tsdb.Point{} + points := []models.Point{} for _, s := range series { for val := 0.0; val < float64(pntCnt); val++ { - p := tsdb.NewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now()) + p := models.NewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now()) points = append(points, p) } } @@ -326,7 +327,7 @@ func benchmarkWritePointsExistingSeries(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt } } -func chunkedWrite(shard *tsdb.Shard, points []tsdb.Point) { +func chunkedWrite(shard *tsdb.Shard, points []models.Point) { nPts := len(points) chunkSz := 10000 start := 0 diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/show_measurements.go b/_third_party/github.com/influxdb/influxdb/tsdb/show_measurements.go index 244b6fb9b5..3ddf36fffc 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/show_measurements.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/show_measurements.go @@ -6,6 +6,7 @@ import ( "sort" "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" ) // ShowMeasurementsExecutor implements the Executor interface for a SHOW MEASUREMENTS statement. @@ -25,18 +26,15 @@ func NewShowMeasurementsExecutor(stmt *influxql.ShowMeasurementsStatement, mappe } // Execute begins execution of the query and returns a channel to receive rows. -func (e *ShowMeasurementsExecutor) Execute() <-chan *influxql.Row { +func (e *ShowMeasurementsExecutor) Execute() <-chan *models.Row { // Create output channel and stream data in a separate goroutine. - out := make(chan *influxql.Row, 0) - - // It's important that all resources are released when execution completes. - defer e.close() + out := make(chan *models.Row, 0) go func() { // Open the mappers. for _, m := range e.mappers { if err := m.Open(); err != nil { - out <- &influxql.Row{Err: err} + out <- &models.Row{Err: err} return } } @@ -48,7 +46,7 @@ func (e *ShowMeasurementsExecutor) Execute() <-chan *influxql.Row { // Get the data from the mapper. c, err := m.NextChunk() if err != nil { - out <- &influxql.Row{Err: err} + out <- &models.Row{Err: err} return } else if c == nil { // Mapper had no data. @@ -58,7 +56,7 @@ func (e *ShowMeasurementsExecutor) Execute() <-chan *influxql.Row { // Convert the mapper chunk to a string array of measurement names. mms, ok := c.([]string) if !ok { - out <- &influxql.Row{Err: fmt.Errorf("show measurements mapper returned invalid type: %T", c)} + out <- &models.Row{Err: fmt.Errorf("show measurements mapper returned invalid type: %T", c)} return } @@ -88,7 +86,7 @@ func (e *ShowMeasurementsExecutor) Execute() <-chan *influxql.Row { } // Put the results in a row and send it. 
- row := &influxql.Row{ + row := &models.Row{ Name: "measurements", Columns: []string{"name"}, Values: make([][]interface{}, 0, len(measurements)), @@ -104,6 +102,8 @@ func (e *ShowMeasurementsExecutor) Execute() <-chan *influxql.Row { } close(out) + // It's important that all resources are released when execution completes. + e.close() }() return out } @@ -120,19 +120,19 @@ func (e *ShowMeasurementsExecutor) close() { // ShowMeasurementsMapper is a mapper for collecting measurement names from a shard. type ShowMeasurementsMapper struct { - remote Mapper - shard *Shard - stmt *influxql.ShowMeasurementsStatement - chunkSize int - state interface{} + remote Mapper + shard *Shard + stmt *influxql.ShowMeasurementsStatement + state interface{} + + ChunkSize int } // NewShowMeasurementsMapper returns a mapper for the given shard, which will return data for the meta statement. -func NewShowMeasurementsMapper(shard *Shard, stmt *influxql.ShowMeasurementsStatement, chunkSize int) *ShowMeasurementsMapper { +func NewShowMeasurementsMapper(shard *Shard, stmt *influxql.ShowMeasurementsStatement) *ShowMeasurementsMapper { return &ShowMeasurementsMapper{ - shard: shard, - stmt: stmt, - chunkSize: chunkSize, + shard: shard, + stmt: stmt, } } @@ -144,18 +144,20 @@ func (m *ShowMeasurementsMapper) Open() error { var measurements Measurements - // If a WHERE clause was specified, filter the measurements. - if m.stmt.Condition != nil { - var err error - measurements, err = m.shard.index.measurementsByExpr(m.stmt.Condition) - if err != nil { - return err + if m.shard != nil { + // If a WHERE clause was specified, filter the measurements. + if m.stmt.Condition != nil { + var err error + measurements, err = m.shard.index.measurementsByExpr(m.stmt.Condition) + if err != nil { + return err + } + } else { + // Otherwise, get all measurements from the database. + measurements = m.shard.index.Measurements() } - } else { - // Otherwise, get all measurements from the database. - measurements = m.shard.index.Measurements() + sort.Sort(measurements) } - sort.Sort(measurements) // Create a channel to send measurement names on. ch := make(chan string) @@ -174,10 +176,7 @@ func (m *ShowMeasurementsMapper) Open() error { } // SetRemote sets the remote mapper to use. -func (m *ShowMeasurementsMapper) SetRemote(remote Mapper) error { - m.remote = remote - return nil -} +func (m *ShowMeasurementsMapper) SetRemote(remote Mapper) { m.remote = remote } // TagSets is only implemented on this mapper to satisfy the Mapper interface. func (m *ShowMeasurementsMapper) TagSets() []string { return nil } @@ -210,13 +209,15 @@ func (m *ShowMeasurementsMapper) NextChunk() (interface{}, error) { // nextChunk implements next chunk logic for a local shard. func (m *ShowMeasurementsMapper) nextChunk() (interface{}, error) { // Allocate array to hold measurement names. - names := make([]string, 0, m.chunkSize) + names := make([]string, 0, m.ChunkSize) + // Get the channel of measurement names from the state. measurementNames := m.state.(chan string) + // Get the next chunk of names. 
for n := range measurementNames { names = append(names, n) - if len(names) == m.chunkSize { + if len(names) == m.ChunkSize { break } } diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/show_tag_keys.go b/_third_party/github.com/influxdb/influxdb/tsdb/show_tag_keys.go new file mode 100644 index 0000000000..6a9674b17b --- /dev/null +++ b/_third_party/github.com/influxdb/influxdb/tsdb/show_tag_keys.go @@ -0,0 +1,315 @@ +package tsdb + +import ( + "encoding/json" + "fmt" + "sort" + + "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" +) + +// ShowTagKeysExecutor implements the Executor interface for a SHOW TAG KEYS statement. +type ShowTagKeysExecutor struct { + stmt *influxql.ShowTagKeysStatement + mappers []Mapper + chunkSize int +} + +// NewShowTagKeysExecutor returns a new ShowTagKeysExecutor. +func NewShowTagKeysExecutor(stmt *influxql.ShowTagKeysStatement, mappers []Mapper, chunkSize int) *ShowTagKeysExecutor { + return &ShowTagKeysExecutor{ + stmt: stmt, + mappers: mappers, + chunkSize: chunkSize, + } +} + +// Execute begins execution of the query and returns a channel to receive rows. +func (e *ShowTagKeysExecutor) Execute() <-chan *models.Row { + // Create output channel and stream data in a separate goroutine. + out := make(chan *models.Row, 0) + + go func() { + // It's important that all resources are released when execution completes. + defer e.close() + defer close(out) + // Open the mappers. + for _, m := range e.mappers { + if err := m.Open(); err != nil { + out <- &models.Row{Err: err} + return + } + } + + // Create a map of measurement to tag keys. + set := map[string]map[string]struct{}{} + // Iterate through mappers collecting tag keys for each measurement. + for _, m := range e.mappers { + // Read all data from the mapper. + for { + c, err := m.NextChunk() + if err != nil { + out <- &models.Row{Err: err} + return + } else if c == nil { + // Mapper has been drained. + break + } + + // Convert the mapper chunk to an array of measurements with tag keys. + mtks, ok := c.(MeasurementsTagKeys) + if !ok { + out <- &models.Row{Err: fmt.Errorf("show tag keys mapper returned invalid type: %T", c)} + return + } + + // Merge mapper chunk with previous mapper outputs. + for _, mm := range mtks { + for _, key := range mm.TagKeys { + if set[mm.Measurement] == nil { + set[mm.Measurement] = map[string]struct{}{} + } + set[mm.Measurement][key] = struct{}{} + } + } + } + } + + // All mappers are drained. + + // Convert the set into an array of measurements and their tag keys. + mstks := make(MeasurementsTagKeys, 0) + for mm, tks := range set { + mtks := &MeasurementTagKeys{Measurement: mm} + for tk := range tks { + mtks.TagKeys = append(mtks.TagKeys, tk) + } + sort.Strings(mtks.TagKeys) + mstks = append(mstks, mtks) + } + // Sort by measurement name. + sort.Sort(mstks) + + slim, soff := limitAndOffset(e.stmt.SLimit, e.stmt.SOffset, len(mstks)) + + // Send results. + for _, mtks := range mstks[soff:slim] { + lim, off := limitAndOffset(e.stmt.Limit, e.stmt.Offset, len(mtks.TagKeys)) + + row := &models.Row{ + Name: mtks.Measurement, + Columns: []string{"tagKey"}, + Values: make([][]interface{}, 0, lim-off), + } + + for _, tk := range mtks.TagKeys[off:lim] { + v := []interface{}{tk} + row.Values = append(row.Values, v) + } + + out <- row + } + }() + return out +} + +// limitAndOffset calculates the limit and offset indexes for n things. 
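Concretely, for the helper defined next: the first return value is the end index and the second the start, so callers slice as s[start:end]. Two illustrative calls:

    end, start := limitAndOffset(3, 2, 10)  // end == 5, start == 2: keys[2:5]
    end, start = limitAndOffset(3, 12, 10)  // end == 0, start == 0: offset past the end yields nothing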
+func limitAndOffset(lim, off, n int) (int, int) { + if off >= n { + return 0, 0 + } + + o := off + l := n + + if lim > 0 && o+lim < l { + l = o + lim + } + + if o > l { + return 0, 0 + } + + return l, o +} + +// Close closes the executor such that all resources are released. Once closed, +// an executor may not be re-used. +func (e *ShowTagKeysExecutor) close() { + if e != nil { + for _, m := range e.mappers { + m.Close() + } + } +} + +// ShowTagKeysMapper is a mapper for collecting measurement tag keys from a shard. +type ShowTagKeysMapper struct { + remote Mapper + shard *Shard + stmt *influxql.ShowTagKeysStatement + chunkSize int + state interface{} +} + +// NewShowTagKeysMapper returns a mapper for the given shard, which will return data for the meta statement. +func NewShowTagKeysMapper(shard *Shard, stmt *influxql.ShowTagKeysStatement, chunkSize int) *ShowTagKeysMapper { + return &ShowTagKeysMapper{ + shard: shard, + stmt: stmt, + chunkSize: chunkSize, + } +} + +// MeasurementTagKeys represents measurement tag keys. +type MeasurementTagKeys struct { + Measurement string `json:"measurement"` + TagKeys []string `json:"tagkeys"` +} + +// MeasurementsTagKeys represents tag keys for multiple measurements. +type MeasurementsTagKeys []*MeasurementTagKeys + +func (a MeasurementsTagKeys) Len() int { return len(a) } func (a MeasurementsTagKeys) Less(i, j int) bool { return a[i].Measurement < a[j].Measurement } func (a MeasurementsTagKeys) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Size returns the total string length of measurement names & tag keys. +func (a MeasurementsTagKeys) Size() int { + n := 0 + for _, m := range a { + n += len(m.Measurement) + for _, k := range m.TagKeys { + n += len(k) + } + } + return n +} + +// Open opens the mapper for use. +func (m *ShowTagKeysMapper) Open() error { + if m.remote != nil { + return m.remote.Open() + } + + // This can happen when a shard has been assigned to this node but we have not + // written to it so it may not exist yet. + if m.shard == nil { + return nil + } + + sources := influxql.Sources{} + + // Expand regex expressions in the FROM clause. + if m.stmt.Sources != nil { + var err error + sources, err = m.shard.index.ExpandSources(m.stmt.Sources) + if err != nil { + return err + } + } + + // Get measurements from sources in the statement if provided or database if not. + measurements, err := measurementsFromSourcesOrDB(m.shard.index, sources...) + if err != nil { + return err + } + + // If a WHERE clause was specified, filter the measurements. + if m.stmt.Condition != nil { + var err error + whereMs, err := m.shard.index.measurementsByExpr(m.stmt.Condition) + if err != nil { + return err + } + + sort.Sort(whereMs) + + measurements = measurements.intersect(whereMs) + } + + // Create a channel to send measurement tag keys on. + ch := make(chan *MeasurementTagKeys) + // Start a goroutine to send the tag keys over the channel as needed. + go func() { + for _, mm := range measurements { + ch <- &MeasurementTagKeys{ + Measurement: mm.Name, + TagKeys: mm.TagKeys(), + } + } + close(ch) + }() + + // Store the channel as the state of the mapper. + m.state = ch + + return nil +} + +// SetRemote sets the remote mapper to use. +func (m *ShowTagKeysMapper) SetRemote(remote Mapper) error { + m.remote = remote + return nil +} + +// TagSets is only implemented on this mapper to satisfy the Mapper interface. +func (m *ShowTagKeysMapper) TagSets() []string { return nil } + +// Fields returns a list of field names for this mapper. 
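For reference, a chunk arriving from a remote mapper decodes into the MeasurementsTagKeys shape defined above; a sketch with an illustrative payload (NextChunk below does the equivalent with b.([]byte)):

    payload := []byte(`[{"measurement":"cpu","tagkeys":["host","region"]}]`)
    var mtks tsdb.MeasurementsTagKeys
    if err := json.Unmarshal(payload, &mtks); err != nil {
        log.Fatal(err)
    }
    fmt.Println(mtks[0].Measurement, mtks[0].TagKeys) // cpu [host region]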
+func (m *ShowTagKeysMapper) Fields() []string { return []string{"tagKey"} } + +// NextChunk returns the next chunk of measurements and tag keys. +func (m *ShowTagKeysMapper) NextChunk() (interface{}, error) { + if m.remote != nil { + b, err := m.remote.NextChunk() + if err != nil { + return nil, err + } else if b == nil { + return nil, nil + } + + mtks := []*MeasurementTagKeys{} + if err := json.Unmarshal(b.([]byte), &mtks); err != nil { + return nil, err + } else if len(mtks) == 0 { + // Mapper on other node sent 0 values so it's done. + return nil, nil + } + return mtks, nil + } + return m.nextChunk() +} + +// nextChunk implements next chunk logic for a local shard. +func (m *ShowTagKeysMapper) nextChunk() (interface{}, error) { + // Get the channel of measurement tag keys from the state. + ch, ok := m.state.(chan *MeasurementTagKeys) + if !ok { + return nil, nil + } + // Allocate array to hold measurement tag keys. + mtks := make(MeasurementsTagKeys, 0) + // Get the next chunk of tag keys. + for n := range ch { + mtks = append(mtks, n) + if mtks.Size() >= m.chunkSize { + break + } + } + // See if we've read all the tag keys. + if len(mtks) == 0 { + return nil, nil + } + + return mtks, nil +} + +// Close closes the mapper. +func (m *ShowTagKeysMapper) Close() { + if m.remote != nil { + m.remote.Close() + } +} diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/store.go b/_third_party/github.com/influxdb/influxdb/tsdb/store.go index efda01b9d2..d5945cfe9b 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/store.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/store.go @@ -11,6 +11,7 @@ import ( "sync" "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" ) func NewStore(path string) *Store { @@ -37,6 +38,7 @@ type Store struct { EngineOptions EngineOptions Logger *log.Logger + closing chan struct{} } // Path returns the store's root path. @@ -67,6 +69,12 @@ func (s *Store) CreateShard(database, retentionPolicy string, shardID uint64) er s.mu.Lock() defer s.mu.Unlock() + select { + case <-s.closing: + return fmt.Errorf("closing") + default: + } + // shard already exists if _, ok := s.shards[shardID]; ok { return nil @@ -174,6 +182,17 @@ func (s *Store) DatabaseIndex(name string) *DatabaseIndex { return s.databaseIndexes[name] } +// Databases returns all the databases in the indexes. +func (s *Store) Databases() []string { + s.mu.RLock() + defer s.mu.RUnlock() + databases := []string{} + for db := range s.databaseIndexes { + databases = append(databases, db) + } + return databases +} + func (s *Store) Measurement(database, name string) *Measurement { s.mu.RLock() db := s.databaseIndexes[database] @@ -184,6 +203,22 @@ func (s *Store) Measurement(database, name string) *Measurement { return db.Measurement(name) } +// DiskSize returns the size of all the shard files in bytes. This size does not include the WAL size. 
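A caller-side sketch combining the Databases helper above with DiskSize, whose body follows; store is assumed to be an opened *tsdb.Store:

    size, err := store.DiskSize()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%d bytes on disk across databases %v\n", size, store.Databases())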
+func (s *Store) DiskSize() (int64, error) { + s.mu.RLock() + defer s.mu.RUnlock() + var size int64 + for _, shardID := range s.ShardIDs() { + shard := s.Shard(shardID) + sz, err := shard.DiskSize() + if err != nil { + return 0, err + } + size += sz + } + return size, nil +} + // deleteSeries loops through the local shards and deletes the series data and metadata for the passed in series keys func (s *Store) deleteSeries(keys []string) error { s.mu.RLock() @@ -270,6 +305,8 @@ func (s *Store) Open() error { s.mu.Lock() defer s.mu.Unlock() + s.closing = make(chan struct{}) + s.shards = map[uint64]*Shard{} s.databaseIndexes = map[string]*DatabaseIndex{} @@ -292,7 +329,7 @@ func (s *Store) Open() error { return nil } -func (s *Store) WriteToShard(shardID uint64, points []Point) error { +func (s *Store) WriteToShard(shardID uint64, points []models.Point) error { s.mu.RLock() defer s.mu.RUnlock() sh, ok := s.shards[shardID] @@ -305,18 +342,24 @@ func (s *Store) WriteToShard(shardID uint64, points []Point) error { func (s *Store) CreateMapper(shardID uint64, stmt influxql.Statement, chunkSize int) (Mapper, error) { shard := s.Shard(shardID) - if shard == nil { - // This can happen if the shard has been assigned, but hasn't actually been created yet. - return nil, nil - } - switch st := stmt.(type) { + switch stmt := stmt.(type) { case *influxql.SelectStatement: - return NewSelectMapper(shard, st, chunkSize), nil + if (stmt.IsRawQuery && !stmt.HasDistinct()) || stmt.IsSimpleDerivative() { + m := NewRawMapper(shard, stmt) + m.ChunkSize = chunkSize + return m, nil + } + return NewAggregateMapper(shard, stmt), nil + case *influxql.ShowMeasurementsStatement: - return NewShowMeasurementsMapper(shard, st, chunkSize), nil + m := NewShowMeasurementsMapper(shard, stmt) + m.ChunkSize = chunkSize + return m, nil + case *influxql.ShowTagKeysStatement: + return NewShowTagKeysMapper(shard, stmt, chunkSize), nil default: - return nil, fmt.Errorf("can't create mapper for statement type: %v", st) + return nil, fmt.Errorf("can't create mapper for statement type: %T", stmt) } } @@ -329,6 +372,10 @@ func (s *Store) Close() error { return err } } + if s.closing != nil { + close(s.closing) + } + s.closing = nil s.shards = nil s.databaseIndexes = nil diff --git a/_third_party/github.com/influxdb/influxdb/tsdb/store_test.go b/_third_party/github.com/influxdb/influxdb/tsdb/store_test.go index e8ea6f79d3..90423ab219 100644 --- a/_third_party/github.com/influxdb/influxdb/tsdb/store_test.go +++ b/_third_party/github.com/influxdb/influxdb/tsdb/store_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" "bosun.org/_third_party/github.com/influxdb/influxdb/tsdb" ) @@ -236,7 +237,7 @@ func TestStoreEnsureSeriesPersistedInNewShards(t *testing.T) { t.Fatalf("error creating shard: %v", err) } - p, _ := tsdb.ParsePoints([]byte("cpu val=1")) + p, _ := models.ParsePoints([]byte("cpu val=1")) if err := s.WriteToShard(1, p); err != nil { t.Fatalf("error writing to shard: %v", err) } @@ -282,10 +283,10 @@ func benchmarkStoreOpen(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt, shardCnt int) // Generate test series (measurements + unique tag sets). series := genTestSeries(mCnt, tkCnt, tvCnt) // Generate point data to write to the shards. 
- points := []tsdb.Point{} + points := []models.Point{} for _, s := range series { for val := 0.0; val < float64(pntCnt); val++ { - p := tsdb.NewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now()) + p := models.NewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now()) points = append(points, p) } } @@ -324,7 +325,7 @@ func benchmarkStoreOpen(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt, shardCnt int) } } -func chunkedWriteStoreShard(store *tsdb.Store, shardID int, points []tsdb.Point) { +func chunkedWriteStoreShard(store *tsdb.Store, shardID int, points []models.Point) { nPts := len(points) chunkSz := 10000 start := 0 diff --git a/_third_party/github.com/jordan-wright/email/email.go b/_third_party/github.com/jordan-wright/email/email.go index 3c5b2651e0..9b4085eaac 100644 --- a/_third_party/github.com/jordan-wright/email/email.go +++ b/_third_party/github.com/jordan-wright/email/email.go @@ -3,6 +3,7 @@ package email import ( + "bufio" "bytes" "encoding/base64" "errors" @@ -10,6 +11,7 @@ import ( "io" "mime" "mime/multipart" + "mime/quotedprintable" "net/mail" "net/smtp" "net/textproto" @@ -24,6 +26,12 @@ const ( MaxLineLength = 76 ) +// ErrMissingBoundary is returned when there is no boundary given for a multipart entity +var ErrMissingBoundary = errors.New("No boundary found for multipart entity") + +// ErrMissingContentType is returned when there is no "Content-Type" header for a MIME entity +var ErrMissingContentType = errors.New("No Content-Type found for MIME entity") + // Email is the type used for email messages type Email struct { From string @@ -38,11 +46,117 @@ type Email struct { ReadReceipt []string } +// part is a copyable representation of a multipart.Part +type part struct { + header textproto.MIMEHeader + body []byte +} + // NewEmail creates an Email, and returns the pointer to it. func NewEmail() *Email { return &Email{Headers: textproto.MIMEHeader{}} } +// NewEmailFromReader reads a stream of bytes from an io.Reader, r, +// and returns an email struct containing the parsed data. +// This function expects the data in RFC 5322 format. +func NewEmailFromReader(r io.Reader) (*Email, error) { + e := NewEmail() + tp := textproto.NewReader(bufio.NewReader(r)) + // Parse the main headers + hdrs, err := tp.ReadMIMEHeader() + if err != nil { + return e, err + } + // Set the subject, to, cc, bcc, and from + for h, v := range hdrs { + switch { + case h == "Subject": + e.Subject = v[0] + delete(hdrs, h) + case h == "To": + e.To = v + delete(hdrs, h) + case h == "Cc": + e.Cc = v + delete(hdrs, h) + case h == "Bcc": + e.Bcc = v + delete(hdrs, h) + case h == "From": + e.From = v[0] + delete(hdrs, h) + } + } + e.Headers = hdrs + body := tp.R + // Recursively parse the MIME parts + ps, err := parseMIMEParts(e.Headers, body) + if err != nil { + return e, err + } + for _, p := range ps { + if ct := p.header.Get("Content-Type"); ct == "" { + return e, ErrMissingContentType + } + ct, _, err := mime.ParseMediaType(p.header.Get("Content-Type")) + if err != nil { + return e, err + } + switch { + case ct == "text/plain": + e.Text = p.body + case ct == "text/html": + e.HTML = p.body + } + } + return e, nil +} + +// parseMIMEParts will recursively walk a MIME entity and return a []*part containing +// each (flattened) part found. +// It is important to note that there are no limits to the number of recursions, so be +// careful when parsing unknown MIME structures! 
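Before the implementation, a usage sketch for NewEmailFromReader above; the raw message and the names raw and e are illustrative, and the usual imports (strings, fmt, log) are assumed. Note that in this version only multipart bodies populate Text and HTML, since parseMIMEParts below has no branch for a non-multipart top level:

    raw := strings.NewReader("From: a@example.com\r\n" +
        "To: b@example.com\r\n" +
        "Subject: hi\r\n" +
        "Content-Type: multipart/alternative; boundary=xyz\r\n" +
        "\r\n" +
        "--xyz\r\nContent-Type: text/plain\r\n\r\nhello\r\n--xyz--\r\n")
    e, err := email.NewEmailFromReader(raw)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s: %s\n", e.Subject, e.Text) // prints "hi: hello"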
+func parseMIMEParts(hs textproto.MIMEHeader, b io.Reader) ([]*part, error) { + var ps []*part + ct, params, err := mime.ParseMediaType(hs.Get("Content-Type")) + if err != nil { + return ps, err + } + if strings.HasPrefix(ct, "multipart/") { + if _, ok := params["boundary"]; !ok { + return ps, ErrMissingBoundary + } + mr := multipart.NewReader(b, params["boundary"]) + for { + var buf bytes.Buffer + p, err := mr.NextPart() + if err == io.EOF { + break + } + if err != nil { + return ps, err + } + subct, _, err := mime.ParseMediaType(p.Header.Get("Content-Type")) + if strings.HasPrefix(subct, "multipart/") { + sps, err := parseMIMEParts(p.Header, p) + if err != nil { + return ps, err + } + ps = append(ps, sps...) + } else { + // Otherwise, just append the part to the list + // Copy the part data into the buffer + if _, err := io.Copy(&buf, p); err != nil { + return ps, err + } + ps = append(ps, &part{body: buf.Bytes(), header: p.Header}) + } + } + } + return ps, nil +} + // Attach is used to attach content from an io.Reader to the email. // Required parameters include an io.Reader, the desired filename for the attachment, and the Content-Type // The function will return the created Attachment for reference, as well as nil for the error, if successful. @@ -156,8 +270,12 @@ func (e *Email) Bytes() ([]byte, error) { if _, err := subWriter.CreatePart(header); err != nil { return nil, err } + qp := quotedprintable.NewWriter(buff) // Write the text - if err := quotePrintEncode(buff, e.Text); err != nil { + if _, err := qp.Write(e.Text); err != nil { + return nil, err + } + if err := qp.Close(); err != nil { return nil, err } } @@ -167,8 +285,12 @@ func (e *Email) Bytes() ([]byte, error) { if _, err := subWriter.CreatePart(header); err != nil { return nil, err } - // Write the text - if err := quotePrintEncode(buff, e.HTML); err != nil { + qp := quotedprintable.NewWriter(buff) + // Write the HTML + if _, err := qp.Write(e.HTML); err != nil { + return nil, err + } + if err := qp.Close(); err != nil { return nil, err } } @@ -227,74 +349,6 @@ type Attachment struct { Content []byte } -// quotePrintEncode writes the quoted-printable text to the IO Writer (according to RFC 2045) -func quotePrintEncode(w io.Writer, body []byte) error { - var buf [3]byte - mc := 0 - for _, c := range body { - // We're assuming Unix style text formats as input (LF line break), and - // quoted-printable uses CRLF line breaks. (Literal CRs will become - // "=0D", but probably shouldn't be there to begin with!) - if c == '\n' { - io.WriteString(w, "\r\n") - mc = 0 - continue - } - - var nextOut []byte - if isPrintable[c] { - buf[0] = c - nextOut = buf[:1] - } else { - nextOut = buf[:] - qpEscape(nextOut, c) - } - - // Add a soft line break if the next (encoded) byte would push this line - // to or past the limit. - if mc+len(nextOut) >= MaxLineLength { - if _, err := io.WriteString(w, "=\r\n"); err != nil { - return err - } - mc = 0 - } - - if _, err := w.Write(nextOut); err != nil { - return err - } - mc += len(nextOut) - } - // No trailing end-of-line?? Soft line break, then. TODO: is this sane? 
- if mc > 0 { - io.WriteString(w, "=\r\n") - } - return nil -} - -// isPrintable holds true if the byte given is "printable" according to RFC 2045, false otherwise -var isPrintable [256]bool - -func init() { - for c := '!'; c <= '<'; c++ { - isPrintable[c] = true - } - for c := '>'; c <= '~'; c++ { - isPrintable[c] = true - } - isPrintable[' '] = true - isPrintable['\n'] = true - isPrintable['\t'] = true -} - -// qpEscape is a helper function for quotePrintEncode which escapes a -// non-printable byte. Expects len(dest) == 3. -func qpEscape(dest []byte, c byte) { - const nums = "0123456789ABCDEF" - dest[0] = '=' - dest[1] = nums[(c&0xf0)>>4] - dest[2] = nums[(c & 0xf)] -} - // base64Wrap encodes the attachment content, and wraps it according to RFC 2045 standards (every 76 chars) // The output is then written to the specified io.Writer func base64Wrap(w io.Writer, b []byte) { @@ -326,7 +380,13 @@ func headerToBytes(buff *bytes.Buffer, header textproto.MIMEHeader) { // bytes.Buffer.Write() never returns an error. io.WriteString(buff, field) io.WriteString(buff, ": ") - io.WriteString(buff, subval) + // Write the encoded header if needed + switch { + case field == "Content-Type" || field == "Content-Disposition": + buff.Write([]byte(subval)) + default: + buff.Write([]byte(mime.QEncoding.Encode("UTF-8", subval))) + } io.WriteString(buff, "\r\n") } } diff --git a/_third_party/github.com/llgcode/draw2d/draw2dimg/ftgc.go b/_third_party/github.com/llgcode/draw2d/draw2dimg/ftgc.go index ba3b9bb4ef..6fcb237924 100644 --- a/_third_party/github.com/llgcode/draw2d/draw2dimg/ftgc.go +++ b/_third_party/github.com/llgcode/draw2d/draw2dimg/ftgc.go @@ -17,8 +17,8 @@ import ( "bosun.org/_third_party/github.com/golang/freetype/raster" "bosun.org/_third_party/github.com/golang/freetype/truetype" - "golang.org/x/image/font" - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/font" + "bosun.org/_third_party/golang.org/x/image/math/fixed" ) // Painter implements the freetype raster.Painter and has a SetColor method like the RGBAPainter diff --git a/_third_party/github.com/llgcode/draw2d/draw2dimg/ftpath.go b/_third_party/github.com/llgcode/draw2d/draw2dimg/ftpath.go index 1e9ff92e29..fa1f88e3d0 100644 --- a/_third_party/github.com/llgcode/draw2d/draw2dimg/ftpath.go +++ b/_third_party/github.com/llgcode/draw2d/draw2dimg/ftpath.go @@ -5,7 +5,7 @@ package draw2dimg import ( "bosun.org/_third_party/github.com/golang/freetype/raster" - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/math/fixed" ) type FtLineBuilder struct { diff --git a/_third_party/github.com/llgcode/draw2d/draw2dimg/text.go b/_third_party/github.com/llgcode/draw2d/draw2dimg/text.go index 153b433aeb..e9cbc4a687 100644 --- a/_third_party/github.com/llgcode/draw2d/draw2dimg/text.go +++ b/_third_party/github.com/llgcode/draw2d/draw2dimg/text.go @@ -4,7 +4,7 @@ import ( "bosun.org/_third_party/github.com/golang/freetype/truetype" "bosun.org/_third_party/github.com/llgcode/draw2d" - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/math/fixed" ) // DrawContour draws the given closed contour at the given sub-pixel offset. diff --git a/_third_party/github.com/siddontang/go/bson/bson_test.go b/_third_party/github.com/siddontang/go/bson/bson_test.go index 3d97998503..8ead8216ca 100644 --- a/_third_party/github.com/siddontang/go/bson/bson_test.go +++ b/_third_party/github.com/siddontang/go/bson/bson_test.go @@ -36,8 +36,8 @@ import ( "testing" "time" - . 
"gopkg.in/check.v1" - "gopkg.in/mgo.v2/bson" + . "bosun.org/_third_party/gopkg.in/check.v1" + "bosun.org/_third_party/gopkg.in/mgo.v2/bson" ) func TestAll(t *testing.T) { diff --git a/_third_party/github.com/tatsushid/go-fastping/fastping.go b/_third_party/github.com/tatsushid/go-fastping/fastping.go index 96950ca012..0670760fd0 100644 --- a/_third_party/github.com/tatsushid/go-fastping/fastping.go +++ b/_third_party/github.com/tatsushid/go-fastping/fastping.go @@ -47,9 +47,9 @@ import ( "syscall" "time" - "golang.org/x/net/icmp" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/icmp" + "bosun.org/_third_party/golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) const ( diff --git a/_third_party/github.com/vdobler/chart/imgg/image.go b/_third_party/github.com/vdobler/chart/imgg/image.go index a47a768e27..21a2c8b8bf 100644 --- a/_third_party/github.com/vdobler/chart/imgg/image.go +++ b/_third_party/github.com/vdobler/chart/imgg/image.go @@ -11,9 +11,9 @@ import ( "bosun.org/_third_party/github.com/llgcode/draw2d" "bosun.org/_third_party/github.com/llgcode/draw2d/draw2dimg" "bosun.org/_third_party/github.com/vdobler/chart" - "golang.org/x/image/draw" - "golang.org/x/image/math/f64" - "golang.org/x/image/math/fixed" + "bosun.org/_third_party/golang.org/x/image/draw" + "bosun.org/_third_party/golang.org/x/image/math/f64" + "bosun.org/_third_party/golang.org/x/image/math/fixed" ) var ( diff --git a/_third_party/golang.org/x/crypto/bcrypt/bcrypt.go b/_third_party/golang.org/x/crypto/bcrypt/bcrypt.go index 2bb0445e31..235585a0dc 100644 --- a/_third_party/golang.org/x/crypto/bcrypt/bcrypt.go +++ b/_third_party/golang.org/x/crypto/bcrypt/bcrypt.go @@ -8,11 +8,11 @@ package bcrypt // import "bosun.org/_third_party/golang.org/x/crypto/bcrypt" // The code is a port of Provos and Mazières's C implementation. import ( + "bosun.org/_third_party/golang.org/x/crypto/blowfish" "crypto/rand" "crypto/subtle" "errors" "fmt" - "golang.org/x/crypto/blowfish" "io" "strconv" ) diff --git a/_third_party/golang.org/x/image/draw/draw.go b/_third_party/golang.org/x/image/draw/draw.go new file mode 100644 index 0000000000..b92e3c7f96 --- /dev/null +++ b/_third_party/golang.org/x/image/draw/draw.go @@ -0,0 +1,79 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package draw provides image composition functions. +// +// See "The Go image/draw package" for an introduction to this package: +// http://golang.org/doc/articles/image_draw.html +// +// This package is a superset of and a drop-in replacement for the image/draw +// package in the standard library. +package draw + +// This file just contains the API exported by the image/draw package in the +// standard library. Other files in this package provide additional features. + +import ( + "image" + "image/color" + "image/draw" +) + +// Draw calls DrawMask with a nil mask. +func Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point, op Op) { + draw.Draw(dst, r, src, sp, draw.Op(op)) +} + +// DrawMask aligns r.Min in dst with sp in src and mp in mask and then +// replaces the rectangle r in dst with the result of a Porter-Duff +// composition. A nil mask is treated as opaque. 
+func DrawMask(dst Image, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op) { + draw.DrawMask(dst, r, src, sp, mask, mp, draw.Op(op)) +} + +// Drawer contains the Draw method. +type Drawer interface { + // Draw aligns r.Min in dst with sp in src and then replaces the + // rectangle r in dst with the result of drawing src on dst. + Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point) +} + +// FloydSteinberg is a Drawer that is the Src Op with Floyd-Steinberg error +// diffusion. +var FloydSteinberg Drawer = floydSteinberg{} + +type floydSteinberg struct{} + +func (floydSteinberg) Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point) { + draw.FloydSteinberg.Draw(dst, r, src, sp) +} + +// Image is an image.Image with a Set method to change a single pixel. +type Image interface { + image.Image + Set(x, y int, c color.Color) +} + +// Op is a Porter-Duff compositing operator. +type Op int + +const ( + // Over specifies ``(src in mask) over dst''. + Over Op = Op(draw.Over) + // Src specifies ``src in mask''. + Src Op = Op(draw.Src) +) + +// Draw implements the Drawer interface by calling the Draw function with +// this Op. +func (op Op) Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point) { + (draw.Op(op)).Draw(dst, r, src, sp) +} + +// Quantizer produces a palette for an image. +type Quantizer interface { + // Quantize appends up to cap(p) - len(p) colors to p and returns the + // updated palette suitable for converting m to a paletted image. + Quantize(p color.Palette, m image.Image) color.Palette +} diff --git a/_third_party/golang.org/x/image/draw/example_test.go b/_third_party/golang.org/x/image/draw/example_test.go new file mode 100644 index 0000000000..978282cfff --- /dev/null +++ b/_third_party/golang.org/x/image/draw/example_test.go @@ -0,0 +1,118 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
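[Editor's note: a minimal, self-contained sketch of the Over/Src distinction that the Op type above forwards to the standard image/draw package. Standard library only; the printed values follow from the Porter-Duff arithmetic.]

    package main

    import (
        "fmt"
        "image"
        "image/color"
        "image/draw"
    )

    func main() {
        dst := image.NewRGBA(image.Rect(0, 0, 1, 1))
        dst.Set(0, 0, color.RGBA{R: 0xff, A: 0xff})           // opaque red
        src := image.NewUniform(color.RGBA{G: 0x80, A: 0x80}) // half-opaque green, premultiplied

        // Over blends: dst' = src + dst*(1 - srcAlpha).
        draw.Draw(dst, dst.Bounds(), src, image.Point{}, draw.Over)
        fmt.Println(dst.RGBAAt(0, 0)) // {127 128 0 255}: red halved, green added

        // Src replaces outright, alpha included.
        draw.Draw(dst, dst.Bounds(), src, image.Point{}, draw.Src)
        fmt.Println(dst.RGBAAt(0, 0)) // {0 128 0 128}
    }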
+ +package draw_test + +import ( + "fmt" + "image" + "image/color" + "image/png" + "log" + "math" + "os" + + "bosun.org/_third_party/golang.org/x/image/draw" + "bosun.org/_third_party/golang.org/x/image/math/f64" +) + +func ExampleDraw() { + fSrc, err := os.Open("../testdata/blue-purple-pink.png") + if err != nil { + log.Fatal(err) + } + defer fSrc.Close() + src, err := png.Decode(fSrc) + if err != nil { + log.Fatal(err) + } + + dst := image.NewRGBA(image.Rect(0, 0, 400, 300)) + green := image.NewUniform(color.RGBA{0x00, 0x1f, 0x00, 0xff}) + draw.Copy(dst, image.Point{}, green, dst.Bounds(), draw.Src, nil) + qs := []draw.Interpolator{ + draw.NearestNeighbor, + draw.ApproxBiLinear, + draw.CatmullRom, + } + const cos60, sin60 = 0.5, 0.866025404 + t := f64.Aff3{ + +2 * cos60, -2 * sin60, 100, + +2 * sin60, +2 * cos60, 100, + } + + draw.Copy(dst, image.Point{20, 30}, src, src.Bounds(), draw.Over, nil) + for i, q := range qs { + q.Scale(dst, image.Rect(200+10*i, 100*i, 600+10*i, 150+100*i), src, src.Bounds(), draw.Over, nil) + } + draw.NearestNeighbor.Transform(dst, t, src, src.Bounds(), draw.Over, nil) + + red := image.NewNRGBA(image.Rect(0, 0, 16, 16)) + for y := 0; y < 16; y++ { + for x := 0; x < 16; x++ { + red.SetNRGBA(x, y, color.NRGBA{ + R: uint8(x * 0x11), + A: uint8(y * 0x11), + }) + } + } + red.SetNRGBA(0, 0, color.NRGBA{0xff, 0xff, 0x00, 0xff}) + red.SetNRGBA(15, 15, color.NRGBA{0xff, 0xff, 0x00, 0xff}) + + ops := []draw.Op{ + draw.Over, + draw.Src, + } + for i, op := range ops { + dr := image.Rect(120+10*i, 150+60*i, 170+10*i, 200+60*i) + draw.NearestNeighbor.Scale(dst, dr, red, red.Bounds(), op, nil) + t := f64.Aff3{ + +cos60, -sin60, float64(190 + 10*i), + +sin60, +cos60, float64(140 + 50*i), + } + draw.NearestNeighbor.Transform(dst, t, red, red.Bounds(), op, nil) + } + + dr := image.Rect(0, 0, 128, 128) + checkerboard := image.NewAlpha(dr) + for y := dr.Min.Y; y < dr.Max.Y; y++ { + for x := dr.Min.X; x < dr.Max.X; x++ { + if (x/20)%2 == (y/20)%2 { + checkerboard.SetAlpha(x, y, color.Alpha{0xff}) + } + } + } + sr := image.Rect(0, 0, 16, 16) + circle := image.NewAlpha(sr) + for y := sr.Min.Y; y < sr.Max.Y; y++ { + for x := sr.Min.X; x < sr.Max.X; x++ { + dx, dy := x-10, y-8 + if d := 32 * math.Sqrt(float64(dx*dx)+float64(dy*dy)); d < 0xff { + circle.SetAlpha(x, y, color.Alpha{0xff - uint8(d)}) + } + } + } + cyan := image.NewUniform(color.RGBA{0x00, 0xff, 0xff, 0xff}) + draw.NearestNeighbor.Scale(dst, dr, cyan, sr, draw.Over, &draw.Options{ + DstMask: checkerboard, + SrcMask: circle, + }) + + // Change false to true to write the resultant image to disk. + if false { + fDst, err := os.Create("out.png") + if err != nil { + log.Fatal(err) + } + defer fDst.Close() + err = png.Encode(fDst, dst) + if err != nil { + log.Fatal(err) + } + } + + fmt.Printf("dst has bounds %v.\n", dst.Bounds()) + // Output: + // dst has bounds (0,0)-(400,300). +} diff --git a/_third_party/golang.org/x/image/draw/gen.go b/_third_party/golang.org/x/image/draw/gen.go new file mode 100644 index 0000000000..0fed47437f --- /dev/null +++ b/_third_party/golang.org/x/image/draw/gen.go @@ -0,0 +1,1403 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
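[Editor's note: the f64.Aff3 literal in ExampleDraw above is the package's affine-transform type, a row-major 2x3 matrix mapping source to destination coordinates: dstX = m[0]*srcX + m[1]*srcY + m[2] and dstY = m[3]*srcX + m[4]*srcY + m[5]. A sketch follows; rotateTranslate is a hypothetical helper, not part of the package, that builds the same rotate-scale-translate shape as the example's literal.]

    package main

    import (
        "fmt"
        "math"

        "bosun.org/_third_party/golang.org/x/image/math/f64"
    )

    // rotateTranslate builds a source-to-destination matrix that rotates by
    // theta (radians), scales by s, then translates by (tx, ty).
    func rotateTranslate(theta, s, tx, ty float64) f64.Aff3 {
        c, n := s*math.Cos(theta), s*math.Sin(theta)
        return f64.Aff3{
            c, -n, tx, // dstX = c*srcX - n*srcY + tx
            n, c, ty, // dstY = n*srcX + c*srcY + ty
        }
    }

    func main() {
        // The literal in ExampleDraw uses theta = 60 degrees, s = 2, t = (100, 100).
        fmt.Println(rotateTranslate(math.Pi/3, 2, 100, 100))
    }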
+ +// +build ignore + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "log" + "os" + "strings" +) + +var debug = flag.Bool("debug", false, "") + +func main() { + flag.Parse() + + w := new(bytes.Buffer) + w.WriteString("// generated by \"go run gen.go\". DO NOT EDIT.\n\n" + + "package draw\n\nimport (\n" + + "\"image\"\n" + + "\"image/color\"\n" + + "\"math\"\n" + + "\n" + + "\"golang.org/x/image/math/f64\"\n" + + ")\n") + + gen(w, "nnInterpolator", codeNNScaleLeaf, codeNNTransformLeaf) + gen(w, "ablInterpolator", codeABLScaleLeaf, codeABLTransformLeaf) + genKernel(w) + + if *debug { + os.Stdout.Write(w.Bytes()) + return + } + out, err := format.Source(w.Bytes()) + if err != nil { + log.Fatal(err) + } + if err := ioutil.WriteFile("impl.go", out, 0660); err != nil { + log.Fatal(err) + } +} + +var ( + // dsTypes are the (dst image type, src image type) pairs to generate + // scale_DType_SType implementations for. The last element in the slice + // should be the fallback pair ("Image", "image.Image"). + // + // TODO: add *image.CMYK src type after Go 1.5 is released. + // An *image.CMYK is also alwaysOpaque. + dsTypes = []struct{ dType, sType string }{ + {"*image.RGBA", "*image.Gray"}, + {"*image.RGBA", "*image.NRGBA"}, + {"*image.RGBA", "*image.RGBA"}, + {"*image.RGBA", "*image.YCbCr"}, + {"*image.RGBA", "image.Image"}, + {"Image", "image.Image"}, + } + dTypes, sTypes []string + sTypesForDType = map[string][]string{} + subsampleRatios = []string{ + "444", + "422", + "420", + "440", + } + ops = []string{"Over", "Src"} + // alwaysOpaque are those image.Image implementations that are always + // opaque. For these types, Over is equivalent to the faster Src, in the + // absence of a source mask. + alwaysOpaque = map[string]bool{ + "*image.Gray": true, + "*image.YCbCr": true, + } +) + +func init() { + dTypesSeen := map[string]bool{} + sTypesSeen := map[string]bool{} + for _, t := range dsTypes { + if !sTypesSeen[t.sType] { + sTypesSeen[t.sType] = true + sTypes = append(sTypes, t.sType) + } + if !dTypesSeen[t.dType] { + dTypesSeen[t.dType] = true + dTypes = append(dTypes, t.dType) + } + sTypesForDType[t.dType] = append(sTypesForDType[t.dType], t.sType) + } + sTypesForDType["anyDType"] = sTypes +} + +type data struct { + dType string + sType string + sratio string + receiver string + op string +} + +func gen(w *bytes.Buffer, receiver string, codes ...string) { + expn(w, codeRoot, &data{receiver: receiver}) + for _, code := range codes { + for _, t := range dsTypes { + for _, op := range ops { + if op == "Over" && alwaysOpaque[t.sType] { + continue + } + expn(w, code, &data{ + dType: t.dType, + sType: t.sType, + receiver: receiver, + op: op, + }) + } + } + } +} + +func genKernel(w *bytes.Buffer) { + expn(w, codeKernelRoot, &data{}) + for _, sType := range sTypes { + expn(w, codeKernelScaleLeafX, &data{ + sType: sType, + }) + } + for _, dType := range dTypes { + for _, op := range ops { + expn(w, codeKernelScaleLeafY, &data{ + dType: dType, + op: op, + }) + } + } + for _, t := range dsTypes { + for _, op := range ops { + if op == "Over" && alwaysOpaque[t.sType] { + continue + } + expn(w, codeKernelTransformLeaf, &data{ + dType: t.dType, + sType: t.sType, + op: op, + }) + } + } +} + +func expn(w *bytes.Buffer, code string, d *data) { + if d.sType == "*image.YCbCr" && d.sratio == "" { + for _, sratio := range subsampleRatios { + e := *d + e.sratio = sratio + expn(w, code, &e) + } + return + } + + for _, line := range strings.Split(code, "\n") { + line = expnLine(line, 
d) + if line == ";" { + continue + } + fmt.Fprintln(w, line) + } +} + +func expnLine(line string, d *data) string { + for { + i := strings.IndexByte(line, '$') + if i < 0 { + break + } + prefix, s := line[:i], line[i+1:] + + i = len(s) + for j, c := range s { + if !('A' <= c && c <= 'Z' || 'a' <= c && c <= 'z') { + i = j + break + } + } + dollar, suffix := s[:i], s[i:] + + e := expnDollar(prefix, dollar, suffix, d) + if e == "" { + log.Fatalf("couldn't expand %q", line) + } + line = e + } + return line +} + +// expnDollar expands a "$foo" fragment in a line of generated code. It returns +// the empty string if there was a problem. It returns ";" if the generated +// code is a no-op. +func expnDollar(prefix, dollar, suffix string, d *data) string { + switch dollar { + case "dType": + return prefix + d.dType + suffix + case "dTypeRN": + return prefix + relName(d.dType) + suffix + case "sratio": + return prefix + d.sratio + suffix + case "sType": + return prefix + d.sType + suffix + case "sTypeRN": + return prefix + relName(d.sType) + suffix + case "receiver": + return prefix + d.receiver + suffix + case "op": + return prefix + d.op + suffix + + case "switch": + return expnSwitch("", "", true, suffix) + case "switchD": + return expnSwitch("", "", false, suffix) + case "switchS": + return expnSwitch("", "anyDType", false, suffix) + + case "preOuter": + switch d.dType { + default: + return ";" + case "Image": + s := "" + if d.sType == "image.Image" { + s = "srcMask, smp := opts.SrcMask, opts.SrcMaskP\n" + } + return s + + "dstMask, dmp := opts.DstMask, opts.DstMaskP\n" + + "dstColorRGBA64 := &color.RGBA64{}\n" + + "dstColor := color.Color(dstColorRGBA64)" + } + + case "preInner": + switch d.dType { + default: + return ";" + case "*image.RGBA": + return "d := " + pixOffset("dst", "dr.Min.X+adr.Min.X", "dr.Min.Y+int(dy)", "*4", "*dst.Stride") + } + + case "preKernelOuter": + switch d.sType { + default: + return ";" + case "image.Image": + return "srcMask, smp := opts.SrcMask, opts.SrcMaskP" + } + + case "preKernelInner": + switch d.dType { + default: + return ";" + case "*image.RGBA": + return "d := " + pixOffset("dst", "dr.Min.X+int(dx)", "dr.Min.Y+adr.Min.Y", "*4", "*dst.Stride") + } + + case "blend": + args, _ := splitArgs(suffix) + if len(args) != 4 { + return "" + } + switch d.sType { + default: + return argf(args, ""+ + "$3r = $0*$1r + $2*$3r\n"+ + "$3g = $0*$1g + $2*$3g\n"+ + "$3b = $0*$1b + $2*$3b\n"+ + "$3a = $0*$1a + $2*$3a", + ) + case "*image.Gray": + return argf(args, ""+ + "$3r = $0*$1r + $2*$3r", + ) + case "*image.YCbCr": + return argf(args, ""+ + "$3r = $0*$1r + $2*$3r\n"+ + "$3g = $0*$1g + $2*$3g\n"+ + "$3b = $0*$1b + $2*$3b", + ) + } + + case "clampToAlpha": + if alwaysOpaque[d.sType] { + return ";" + } + // Go uses alpha-premultiplied color. The naive computation can lead to + // invalid colors, e.g. red > alpha, when some weights are negative. 
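[Editor's note: a sketch of what $clampToAlpha expands to, i.e. the string returned just below. With alpha-premultiplied color every channel must satisfy c <= a, and kernels with negative lobes, such as Catmull-Rom, can overshoot that bound.]

    // clampToAlpha mirrors the generated snippet: pin each premultiplied
    // channel back to the interpolated alpha.
    func clampToAlpha(pr, pg, pb, pa float64) (float64, float64, float64) {
        if pr > pa {
            pr = pa
        }
        if pg > pa {
            pg = pa
        }
        if pb > pa {
            pb = pa
        }
        return pr, pg, pb
    }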
+ return ` + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + ` + + case "convFtou": + args, _ := splitArgs(suffix) + if len(args) != 2 { + return "" + } + + switch d.sType { + default: + return argf(args, ""+ + "$0r := uint32($1r)\n"+ + "$0g := uint32($1g)\n"+ + "$0b := uint32($1b)\n"+ + "$0a := uint32($1a)", + ) + case "*image.Gray": + return argf(args, ""+ + "$0r := uint32($1r)", + ) + case "*image.YCbCr": + return argf(args, ""+ + "$0r := uint32($1r)\n"+ + "$0g := uint32($1g)\n"+ + "$0b := uint32($1b)", + ) + } + + case "outputu": + args, _ := splitArgs(suffix) + if len(args) != 3 { + return "" + } + + switch d.op { + case "Over": + switch d.dType { + default: + log.Fatalf("bad dType %q", d.dType) + case "Image": + return argf(args, ""+ + "qr, qg, qb, qa := dst.At($0, $1).RGBA()\n"+ + "if dstMask != nil {\n"+ + " _, _, _, ma := dstMask.At(dmp.X + $0, dmp.Y + $1).RGBA()\n"+ + " $2r = $2r * ma / 0xffff\n"+ + " $2g = $2g * ma / 0xffff\n"+ + " $2b = $2b * ma / 0xffff\n"+ + " $2a = $2a * ma / 0xffff\n"+ + "}\n"+ + "$2a1 := 0xffff - $2a\n"+ + "dstColorRGBA64.R = uint16(qr*$2a1/0xffff + $2r)\n"+ + "dstColorRGBA64.G = uint16(qg*$2a1/0xffff + $2g)\n"+ + "dstColorRGBA64.B = uint16(qb*$2a1/0xffff + $2b)\n"+ + "dstColorRGBA64.A = uint16(qa*$2a1/0xffff + $2a)\n"+ + "dst.Set($0, $1, dstColor)", + ) + case "*image.RGBA": + return argf(args, ""+ + "$2a1 := (0xffff - $2a) * 0x101\n"+ + "dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*$2a1/0xffff + $2r) >> 8)\n"+ + "dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*$2a1/0xffff + $2g) >> 8)\n"+ + "dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*$2a1/0xffff + $2b) >> 8)\n"+ + "dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*$2a1/0xffff + $2a) >> 8)", + ) + } + + case "Src": + switch d.dType { + default: + log.Fatalf("bad dType %q", d.dType) + case "Image": + return argf(args, ""+ + "if dstMask != nil {\n"+ + " qr, qg, qb, qa := dst.At($0, $1).RGBA()\n"+ + " _, _, _, ma := dstMask.At(dmp.X + $0, dmp.Y + $1).RGBA()\n"+ + " pr = pr * ma / 0xffff\n"+ + " pg = pg * ma / 0xffff\n"+ + " pb = pb * ma / 0xffff\n"+ + " pa = pa * ma / 0xffff\n"+ + " $2a1 := 0xffff - ma\n"+ // Note that this is ma, not $2a. 
+ " dstColorRGBA64.R = uint16(qr*$2a1/0xffff + $2r)\n"+ + " dstColorRGBA64.G = uint16(qg*$2a1/0xffff + $2g)\n"+ + " dstColorRGBA64.B = uint16(qb*$2a1/0xffff + $2b)\n"+ + " dstColorRGBA64.A = uint16(qa*$2a1/0xffff + $2a)\n"+ + " dst.Set($0, $1, dstColor)\n"+ + "} else {\n"+ + " dstColorRGBA64.R = uint16($2r)\n"+ + " dstColorRGBA64.G = uint16($2g)\n"+ + " dstColorRGBA64.B = uint16($2b)\n"+ + " dstColorRGBA64.A = uint16($2a)\n"+ + " dst.Set($0, $1, dstColor)\n"+ + "}", + ) + case "*image.RGBA": + switch d.sType { + default: + return argf(args, ""+ + "dst.Pix[d+0] = uint8($2r >> 8)\n"+ + "dst.Pix[d+1] = uint8($2g >> 8)\n"+ + "dst.Pix[d+2] = uint8($2b >> 8)\n"+ + "dst.Pix[d+3] = uint8($2a >> 8)", + ) + case "*image.Gray": + return argf(args, ""+ + "out := uint8($2r >> 8)\n"+ + "dst.Pix[d+0] = out\n"+ + "dst.Pix[d+1] = out\n"+ + "dst.Pix[d+2] = out\n"+ + "dst.Pix[d+3] = 0xff", + ) + case "*image.YCbCr": + return argf(args, ""+ + "dst.Pix[d+0] = uint8($2r >> 8)\n"+ + "dst.Pix[d+1] = uint8($2g >> 8)\n"+ + "dst.Pix[d+2] = uint8($2b >> 8)\n"+ + "dst.Pix[d+3] = 0xff", + ) + } + } + } + + case "outputf": + args, _ := splitArgs(suffix) + if len(args) != 5 { + return "" + } + ret := "" + + switch d.op { + case "Over": + switch d.dType { + default: + log.Fatalf("bad dType %q", d.dType) + case "Image": + ret = argf(args, ""+ + "qr, qg, qb, qa := dst.At($0, $1).RGBA()\n"+ + "$3r0 := uint32($2($3r * $4))\n"+ + "$3g0 := uint32($2($3g * $4))\n"+ + "$3b0 := uint32($2($3b * $4))\n"+ + "$3a0 := uint32($2($3a * $4))\n"+ + "if dstMask != nil {\n"+ + " _, _, _, ma := dstMask.At(dmp.X + $0, dmp.Y + $1).RGBA()\n"+ + " $3r0 = $3r0 * ma / 0xffff\n"+ + " $3g0 = $3g0 * ma / 0xffff\n"+ + " $3b0 = $3b0 * ma / 0xffff\n"+ + " $3a0 = $3a0 * ma / 0xffff\n"+ + "}\n"+ + "$3a1 := 0xffff - $3a0\n"+ + "dstColorRGBA64.R = uint16(qr*$3a1/0xffff + $3r0)\n"+ + "dstColorRGBA64.G = uint16(qg*$3a1/0xffff + $3g0)\n"+ + "dstColorRGBA64.B = uint16(qb*$3a1/0xffff + $3b0)\n"+ + "dstColorRGBA64.A = uint16(qa*$3a1/0xffff + $3a0)\n"+ + "dst.Set($0, $1, dstColor)", + ) + case "*image.RGBA": + ret = argf(args, ""+ + "$3r0 := uint32($2($3r * $4))\n"+ + "$3g0 := uint32($2($3g * $4))\n"+ + "$3b0 := uint32($2($3b * $4))\n"+ + "$3a0 := uint32($2($3a * $4))\n"+ + "$3a1 := (0xffff - uint32($3a0)) * 0x101\n"+ + "dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*$3a1/0xffff + $3r0) >> 8)\n"+ + "dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*$3a1/0xffff + $3g0) >> 8)\n"+ + "dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*$3a1/0xffff + $3b0) >> 8)\n"+ + "dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*$3a1/0xffff + $3a0) >> 8)", + ) + } + + case "Src": + switch d.dType { + default: + log.Fatalf("bad dType %q", d.dType) + case "Image": + ret = argf(args, ""+ + "if dstMask != nil {\n"+ + " qr, qg, qb, qa := dst.At($0, $1).RGBA()\n"+ + " _, _, _, ma := dstMask.At(dmp.X + $0, dmp.Y + $1).RGBA()\n"+ + " pr := uint32($2($3r * $4)) * ma / 0xffff\n"+ + " pg := uint32($2($3g * $4)) * ma / 0xffff\n"+ + " pb := uint32($2($3b * $4)) * ma / 0xffff\n"+ + " pa := uint32($2($3a * $4)) * ma / 0xffff\n"+ + " pa1 := 0xffff - ma\n"+ // Note that this is ma, not pa. 
+ " dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr)\n"+ + " dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg)\n"+ + " dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb)\n"+ + " dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa)\n"+ + " dst.Set($0, $1, dstColor)\n"+ + "} else {\n"+ + " dstColorRGBA64.R = $2($3r * $4)\n"+ + " dstColorRGBA64.G = $2($3g * $4)\n"+ + " dstColorRGBA64.B = $2($3b * $4)\n"+ + " dstColorRGBA64.A = $2($3a * $4)\n"+ + " dst.Set($0, $1, dstColor)\n"+ + "}", + ) + case "*image.RGBA": + switch d.sType { + default: + ret = argf(args, ""+ + "dst.Pix[d+0] = uint8($2($3r * $4) >> 8)\n"+ + "dst.Pix[d+1] = uint8($2($3g * $4) >> 8)\n"+ + "dst.Pix[d+2] = uint8($2($3b * $4) >> 8)\n"+ + "dst.Pix[d+3] = uint8($2($3a * $4) >> 8)", + ) + case "*image.Gray": + ret = argf(args, ""+ + "out := uint8($2($3r * $4) >> 8)\n"+ + "dst.Pix[d+0] = out\n"+ + "dst.Pix[d+1] = out\n"+ + "dst.Pix[d+2] = out\n"+ + "dst.Pix[d+3] = 0xff", + ) + case "*image.YCbCr": + ret = argf(args, ""+ + "dst.Pix[d+0] = uint8($2($3r * $4) >> 8)\n"+ + "dst.Pix[d+1] = uint8($2($3g * $4) >> 8)\n"+ + "dst.Pix[d+2] = uint8($2($3b * $4) >> 8)\n"+ + "dst.Pix[d+3] = 0xff", + ) + } + } + } + + return strings.Replace(ret, " * 1)", ")", -1) + + case "srcf", "srcu": + lhs, eqOp := splitEq(prefix) + if lhs == "" { + return "" + } + args, extra := splitArgs(suffix) + if len(args) != 2 { + return "" + } + + tmp := "" + if dollar == "srcf" { + tmp = "u" + } + + // TODO: there's no need to multiply by 0x101 in the switch below if + // the next thing we're going to do is shift right by 8. + + buf := new(bytes.Buffer) + switch d.sType { + default: + log.Fatalf("bad sType %q", d.sType) + case "image.Image": + fmt.Fprintf(buf, ""+ + "%sr%s, %sg%s, %sb%s, %sa%s := src.At(%s, %s).RGBA()\n", + lhs, tmp, lhs, tmp, lhs, tmp, lhs, tmp, args[0], args[1], + ) + if d.dType == "" || d.dType == "Image" { + fmt.Fprintf(buf, ""+ + "if srcMask != nil {\n"+ + " _, _, _, ma := srcMask.At(smp.X+%s, smp.Y+%s).RGBA()\n"+ + " %sr%s = %sr%s * ma / 0xffff\n"+ + " %sg%s = %sg%s * ma / 0xffff\n"+ + " %sb%s = %sb%s * ma / 0xffff\n"+ + " %sa%s = %sa%s * ma / 0xffff\n"+ + "}\n", + args[0], args[1], + lhs, tmp, lhs, tmp, + lhs, tmp, lhs, tmp, + lhs, tmp, lhs, tmp, + lhs, tmp, lhs, tmp, + ) + } + case "*image.Gray": + fmt.Fprintf(buf, ""+ + "%si := %s\n"+ + "%sr%s := uint32(src.Pix[%si]) * 0x101\n", + lhs, pixOffset("src", args[0], args[1], "", "*src.Stride"), + lhs, tmp, lhs, + ) + case "*image.NRGBA": + fmt.Fprintf(buf, ""+ + "%si := %s\n"+ + "%sa%s := uint32(src.Pix[%si+3]) * 0x101\n"+ + "%sr%s := uint32(src.Pix[%si+0]) * %sa%s / 0xff\n"+ + "%sg%s := uint32(src.Pix[%si+1]) * %sa%s / 0xff\n"+ + "%sb%s := uint32(src.Pix[%si+2]) * %sa%s / 0xff\n", + lhs, pixOffset("src", args[0], args[1], "*4", "*src.Stride"), + lhs, tmp, lhs, + lhs, tmp, lhs, lhs, tmp, + lhs, tmp, lhs, lhs, tmp, + lhs, tmp, lhs, lhs, tmp, + ) + case "*image.RGBA": + fmt.Fprintf(buf, ""+ + "%si := %s\n"+ + "%sr%s := uint32(src.Pix[%si+0]) * 0x101\n"+ + "%sg%s := uint32(src.Pix[%si+1]) * 0x101\n"+ + "%sb%s := uint32(src.Pix[%si+2]) * 0x101\n"+ + "%sa%s := uint32(src.Pix[%si+3]) * 0x101\n", + lhs, pixOffset("src", args[0], args[1], "*4", "*src.Stride"), + lhs, tmp, lhs, + lhs, tmp, lhs, + lhs, tmp, lhs, + lhs, tmp, lhs, + ) + case "*image.YCbCr": + fmt.Fprintf(buf, ""+ + "%si := %s\n"+ + "%sj := %s\n"+ + "%s\n", + lhs, pixOffset("src", args[0], args[1], "", "*src.YStride"), + lhs, cOffset(args[0], args[1], d.sratio), + ycbcrToRGB(lhs, tmp), + ) + } + + if dollar == "srcf" { + switch d.sType { + default: + 
fmt.Fprintf(buf, ""+ + "%sr %s float64(%sru)%s\n"+ + "%sg %s float64(%sgu)%s\n"+ + "%sb %s float64(%sbu)%s\n"+ + "%sa %s float64(%sau)%s\n", + lhs, eqOp, lhs, extra, + lhs, eqOp, lhs, extra, + lhs, eqOp, lhs, extra, + lhs, eqOp, lhs, extra, + ) + case "*image.Gray": + fmt.Fprintf(buf, ""+ + "%sr %s float64(%sru)%s\n", + lhs, eqOp, lhs, extra, + ) + case "*image.YCbCr": + fmt.Fprintf(buf, ""+ + "%sr %s float64(%sru)%s\n"+ + "%sg %s float64(%sgu)%s\n"+ + "%sb %s float64(%sbu)%s\n", + lhs, eqOp, lhs, extra, + lhs, eqOp, lhs, extra, + lhs, eqOp, lhs, extra, + ) + } + } + + return strings.TrimSpace(buf.String()) + + case "tweakD": + if d.dType == "*image.RGBA" { + return "d += dst.Stride" + } + return ";" + + case "tweakDx": + if d.dType == "*image.RGBA" { + return strings.Replace(prefix, "dx++", "dx, d = dx+1, d+4", 1) + } + return prefix + + case "tweakDy": + if d.dType == "*image.RGBA" { + return strings.Replace(prefix, "for dy, s", "for _, s", 1) + } + return prefix + + case "tweakP": + switch d.sType { + case "*image.Gray": + if strings.HasPrefix(strings.TrimSpace(prefix), "pa * ") { + return "1," + } + return "pr," + case "*image.YCbCr": + if strings.HasPrefix(strings.TrimSpace(prefix), "pa * ") { + return "1," + } + } + return prefix + + case "tweakPr": + if d.sType == "*image.Gray" { + return "pr *= s.invTotalWeightFFFF" + } + return ";" + + case "tweakVarP": + switch d.sType { + case "*image.Gray": + return strings.Replace(prefix, "var pr, pg, pb, pa", "var pr", 1) + case "*image.YCbCr": + return strings.Replace(prefix, "var pr, pg, pb, pa", "var pr, pg, pb", 1) + } + return prefix + } + return "" +} + +func expnSwitch(op, dType string, expandBoth bool, template string) string { + if op == "" && dType != "anyDType" { + lines := []string{"switch op {"} + for _, op = range ops { + lines = append(lines, + fmt.Sprintf("case %s:", op), + expnSwitch(op, dType, expandBoth, template), + ) + } + lines = append(lines, "}") + return strings.Join(lines, "\n") + } + + switchVar := "dst" + if dType != "" { + switchVar = "src" + } + lines := []string{fmt.Sprintf("switch %s := %s.(type) {", switchVar, switchVar)} + + fallback, values := "Image", dTypes + if dType != "" { + fallback, values = "image.Image", sTypesForDType[dType] + } + for _, v := range values { + if dType != "" { + // v is the sType. Skip those always-opaque sTypes, where Over is + // equivalent to Src. 
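[Editor's note: the skip is sound because Over composes dst' = src + dst*(0xffff - srcAlpha)/0xffff per 16-bit channel; an always-opaque source has srcAlpha == 0xffff, the dst term vanishes, and the result is exactly Src. A one-function sketch of that arithmetic:]

    // over composes one 16-bit channel; with sa == 0xffff the dst term is
    // zero, so Over degenerates to Src and the generator can skip the leaf.
    func over(src, dst, sa uint32) uint32 {
        return src + dst*(0xffff-sa)/0xffff
    }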
+ if op == "Over" && alwaysOpaque[v] { + continue + } + } + + if v == fallback { + lines = append(lines, "default:") + } else { + lines = append(lines, fmt.Sprintf("case %s:", v)) + } + + if dType != "" { + if v == "*image.YCbCr" { + lines = append(lines, expnSwitchYCbCr(op, dType, template)) + } else { + lines = append(lines, expnLine(template, &data{dType: dType, sType: v, op: op})) + } + } else if !expandBoth { + lines = append(lines, expnLine(template, &data{dType: v, op: op})) + } else { + lines = append(lines, expnSwitch(op, v, false, template)) + } + } + + lines = append(lines, "}") + return strings.Join(lines, "\n") +} + +func expnSwitchYCbCr(op, dType, template string) string { + lines := []string{ + "switch src.SubsampleRatio {", + "default:", + expnLine(template, &data{dType: dType, sType: "image.Image", op: op}), + } + for _, sratio := range subsampleRatios { + lines = append(lines, + fmt.Sprintf("case image.YCbCrSubsampleRatio%s:", sratio), + expnLine(template, &data{dType: dType, sType: "*image.YCbCr", sratio: sratio, op: op}), + ) + } + lines = append(lines, "}") + return strings.Join(lines, "\n") +} + +func argf(args []string, s string) string { + if len(args) > 9 { + panic("too many args") + } + for i, a := range args { + old := fmt.Sprintf("$%d", i) + s = strings.Replace(s, old, a, -1) + } + return s +} + +func pixOffset(m, x, y, xstride, ystride string) string { + return fmt.Sprintf("(%s-%s.Rect.Min.Y)%s + (%s-%s.Rect.Min.X)%s", y, m, ystride, x, m, xstride) +} + +func cOffset(x, y, sratio string) string { + switch sratio { + case "444": + return fmt.Sprintf("( %s - src.Rect.Min.Y )*src.CStride + ( %s - src.Rect.Min.X )", y, x) + case "422": + return fmt.Sprintf("( %s - src.Rect.Min.Y )*src.CStride + ((%s)/2 - src.Rect.Min.X/2)", y, x) + case "420": + return fmt.Sprintf("((%s)/2 - src.Rect.Min.Y/2)*src.CStride + ((%s)/2 - src.Rect.Min.X/2)", y, x) + case "440": + return fmt.Sprintf("((%s)/2 - src.Rect.Min.Y/2)*src.CStride + ( %s - src.Rect.Min.X )", y, x) + } + return fmt.Sprintf("unsupported sratio %q", sratio) +} + +func ycbcrToRGB(lhs, tmp string) string { + s := ` + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
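[Editor's note: the constants in the template that follows are the standard JFIF YCbCr coefficients in 16.16 fixed point: 91881 ≈ 1.402·65536, 22554 ≈ 0.344136·65536, 46802 ≈ 0.714136·65536 and 116130 ≈ 1.772·65536; the ·0x10100 on Y widens an 8-bit sample toward the 16-bit range. A floating-point sketch of the same conversion, without the clamping:]

    // ycbcrToRGBFloat restates the inlined integer conversion with float
    // coefficients; results outside [0, 255] still need clamping.
    func ycbcrToRGBFloat(y, cb, cr uint8) (r, g, b float64) {
        yy, cb1, cr1 := float64(y), float64(cb)-128, float64(cr)-128
        r = yy + 1.402*cr1
        g = yy - 0.344136*cb1 - 0.714136*cr1
        b = yy + 1.772*cb1
        return r, g, b
    }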
+ $yy1 := int(src.Y[$i]) * 0x10100 + $cb1 := int(src.Cb[$j]) - 128 + $cr1 := int(src.Cr[$j]) - 128 + $r@ := ($yy1 + 91881*$cr1) >> 8 + $g@ := ($yy1 - 22554*$cb1 - 46802*$cr1) >> 8 + $b@ := ($yy1 + 116130*$cb1) >> 8 + if $r@ < 0 { + $r@ = 0 + } else if $r@ > 0xffff { + $r@ = 0xffff + } + if $g@ < 0 { + $g@ = 0 + } else if $g@ > 0xffff { + $g@ = 0xffff + } + if $b@ < 0 { + $b@ = 0 + } else if $b@ > 0xffff { + $b@ = 0xffff + } + ` + s = strings.Replace(s, "$", lhs, -1) + s = strings.Replace(s, "@", tmp, -1) + return s +} + +func split(s, sep string) (string, string) { + if i := strings.Index(s, sep); i >= 0 { + return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+len(sep):]) + } + return "", "" +} + +func splitEq(s string) (lhs, eqOp string) { + s = strings.TrimSpace(s) + if lhs, _ = split(s, ":="); lhs != "" { + return lhs, ":=" + } + if lhs, _ = split(s, "+="); lhs != "" { + return lhs, "+=" + } + return "", "" +} + +func splitArgs(s string) (args []string, extra string) { + s = strings.TrimSpace(s) + if s == "" || s[0] != '[' { + return nil, "" + } + s = s[1:] + + i := strings.IndexByte(s, ']') + if i < 0 { + return nil, "" + } + args, extra = strings.Split(s[:i], ","), s[i+1:] + for i := range args { + args[i] = strings.TrimSpace(args[i]) + } + return args, extra +} + +func relName(s string) string { + if i := strings.LastIndex(s, "."); i >= 0 { + return s[i+1:] + } + return s +} + +const ( + codeRoot = ` + func (z $receiver) Scale(dst Image, dr image.Rectangle, src image.Image, sr image.Rectangle, op Op, opts *Options) { + // Try to simplify a Scale to a Copy. + if dr.Size() == sr.Size() { + Copy(dst, dr.Min, src, sr, op, opts) + return + } + + var o Options + if opts != nil { + o = *opts + } + + // adr is the affected destination pixels. + adr := dst.Bounds().Intersect(dr) + adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) + if adr.Empty() || sr.Empty() { + return + } + // Make adr relative to dr.Min. + adr = adr.Sub(dr.Min) + if op == Over && o.SrcMask == nil && opaque(src) { + op = Src + } + + // sr is the source pixels. If it extends beyond the src bounds, + // we cannot use the type-specific fast paths, as they access + // the Pix fields directly without bounds checking. + // + // Similarly, the fast paths assume that the masks are nil. + if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) { + switch op { + case Over: + z.scale_Image_Image_Over(dst, dr, adr, src, sr, &o) + case Src: + z.scale_Image_Image_Src(dst, dr, adr, src, sr, &o) + } + } else if _, ok := src.(*image.Uniform); ok { + Draw(dst, dr, src, src.Bounds().Min, op) + } else { + $switch z.scale_$dTypeRN_$sTypeRN$sratio_$op(dst, dr, adr, src, sr, &o) + } + } + + func (z $receiver) Transform(dst Image, s2d f64.Aff3, src image.Image, sr image.Rectangle, op Op, opts *Options) { + // Try to simplify a Transform to a Copy. + if s2d[0] == 1 && s2d[1] == 0 && s2d[3] == 0 && s2d[4] == 1 { + dx := int(s2d[2]) + dy := int(s2d[5]) + if float64(dx) == s2d[2] && float64(dy) == s2d[5] { + Copy(dst, image.Point{X: sr.Min.X + dx, Y: sr.Min.X + dy}, src, sr, op, opts) + return + } + } + + var o Options + if opts != nil { + o = *opts + } + + dr := transformRect(&s2d, &sr) + // adr is the affected destination pixels. 
+ adr := dst.Bounds().Intersect(dr) + adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) + if adr.Empty() || sr.Empty() { + return + } + if op == Over && o.SrcMask == nil && opaque(src) { + op = Src + } + + d2s := invert(&s2d) + // bias is a translation of the mapping from dst coordinates to src + // coordinates such that the latter temporarily have non-negative X + // and Y coordinates. This allows us to write int(f) instead of + // int(math.Floor(f)), since "round to zero" and "round down" are + // equivalent when f >= 0, but the former is much cheaper. The X-- + // and Y-- are because the TransformLeaf methods have a "sx -= 0.5" + // adjustment. + bias := transformRect(&d2s, &adr).Min + bias.X-- + bias.Y-- + d2s[2] -= float64(bias.X) + d2s[5] -= float64(bias.Y) + // Make adr relative to dr.Min. + adr = adr.Sub(dr.Min) + // sr is the source pixels. If it extends beyond the src bounds, + // we cannot use the type-specific fast paths, as they access + // the Pix fields directly without bounds checking. + // + // Similarly, the fast paths assume that the masks are nil. + if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) { + switch op { + case Over: + z.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + case Src: + z.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } + } else if u, ok := src.(*image.Uniform); ok { + transform_Uniform(dst, dr, adr, &d2s, u, sr, bias, op) + } else { + $switch z.transform_$dTypeRN_$sTypeRN$sratio_$op(dst, dr, adr, &d2s, src, sr, bias, &o) + } + } + ` + + codeNNScaleLeaf = ` + func (nnInterpolator) scale_$dTypeRN_$sTypeRN$sratio_$op(dst $dType, dr, adr image.Rectangle, src $sType, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + $preOuter + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + $preInner + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { $tweakDx + sx := (2*uint64(dx) + 1) * sw / dw2 + p := $srcu[sr.Min.X + int(sx), sr.Min.Y + int(sy)] + $outputu[dr.Min.X + int(dx), dr.Min.Y + int(dy), p] + } + } + } + ` + + codeNNTransformLeaf = ` + func (nnInterpolator) transform_$dTypeRN_$sTypeRN$sratio_$op(dst $dType, dr, adr image.Rectangle, d2s *f64.Aff3, src $sType, sr image.Rectangle, bias image.Point, opts *Options) { + $preOuter + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y + int(dy)) + 0.5 + $preInner + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { $tweakDx + dxf := float64(dr.Min.X + int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf + d2s[1]*dyf + d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf + d2s[4]*dyf + d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + p := $srcu[sx0, sy0] + $outputu[dr.Min.X + int(dx), dr.Min.Y + int(dy), p] + } + } + } + ` + + codeABLScaleLeaf = ` + func (ablInterpolator) scale_$dTypeRN_$sTypeRN$sratio_$op(dst $dType, dr, adr image.Rectangle, src $sType, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw - 1, sh - 1 + $preOuter + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
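[Editor's note: the reason int32(sy) is safe here, and why Transform biases coordinates to be non-negative: Go's float-to-int conversion truncates toward zero, which only matches math.Floor for values >= 0. A runnable sketch:]

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        f := -0.5
        fmt.Println(int(f), int(math.Floor(f))) // 0 -1: truncation rounds up for negatives
        g := 1.5
        fmt.Println(int(g), int(math.Floor(g))) // 1 1: identical once the value is >= 0
    }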
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + $preInner + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { $tweakDx + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00 := $srcf[sr.Min.X + int(sx0), sr.Min.Y + int(sy0)] + s10 := $srcf[sr.Min.X + int(sx1), sr.Min.Y + int(sy0)] + $blend[xFrac1, s00, xFrac0, s10] + s01 := $srcf[sr.Min.X + int(sx0), sr.Min.Y + int(sy1)] + s11 := $srcf[sr.Min.X + int(sx1), sr.Min.Y + int(sy1)] + $blend[xFrac1, s01, xFrac0, s11] + $blend[yFrac1, s10, yFrac0, s11] + $convFtou[p, s11] + $outputu[dr.Min.X + int(dx), dr.Min.Y + int(dy), p] + } + } + } + ` + + codeABLTransformLeaf = ` + func (ablInterpolator) transform_$dTypeRN_$sTypeRN$sratio_$op(dst $dType, dr, adr image.Rectangle, d2s *f64.Aff3, src $sType, sr image.Rectangle, bias image.Point, opts *Options) { + $preOuter + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y + int(dy)) + 0.5 + $preInner + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { $tweakDx + dxf := float64(dr.Min.X + int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00 := $srcf[sx0, sy0] + s10 := $srcf[sx1, sy0] + $blend[xFrac1, s00, xFrac0, s10] + s01 := $srcf[sx0, sy1] + s11 := $srcf[sx1, sy1] + $blend[xFrac1, s01, xFrac0, s11] + $blend[yFrac1, s10, yFrac0, s11] + $convFtou[p, s11] + $outputu[dr.Min.X + int(dx), dr.Min.Y + int(dy), p] + } + } + } + ` + + codeKernelRoot = ` + func (z *kernelScaler) Scale(dst Image, dr image.Rectangle, src image.Image, sr image.Rectangle, op Op, opts *Options) { + if z.dw != int32(dr.Dx()) || z.dh != int32(dr.Dy()) || z.sw != int32(sr.Dx()) || z.sh != int32(sr.Dy()) { + z.kernel.Scale(dst, dr, src, sr, op, opts) + return + } + + var o Options + if opts != nil { + o = *opts + } + + // adr is the affected destination pixels. + adr := dst.Bounds().Intersect(dr) + adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) + if adr.Empty() || sr.Empty() { + return + } + // Make adr relative to dr.Min. + adr = adr.Sub(dr.Min) + if op == Over && o.SrcMask == nil && opaque(src) { + op = Src + } + + if _, ok := src.(*image.Uniform); ok && o.DstMask == nil && o.SrcMask == nil && sr.In(src.Bounds()) { + Draw(dst, dr, src, src.Bounds().Min, op) + return + } + + // Create a temporary buffer: + // scaleX distributes the source image's columns over the temporary image. + // scaleY distributes the temporary image's rows over the destination image. 
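[Editor's note: that two-pass structure is a separable resample. A kernel spanning kw×kh source pixels per destination pixel costs O(kw·kh) when applied directly, but O(kw + kh) when split into a horizontal pass into tmp and a vertical pass out of it. A minimal sketch of one such 1D pass; contrib and contribs stand in for the precomputed weight tables the real scaler builds, and the names are illustrative only.]

    type contrib struct {
        coord  int     // source sample index
        weight float64 // kernel weight at that sample
    }

    // pass1D resamples one row or column: each output sample is a
    // normalized weighted sum of a small run of input samples.
    func pass1D(dst, src []float64, contribs [][]contrib) {
        for i, cs := range contribs {
            var sum, total float64
            for _, c := range cs {
                sum += src[c.coord] * c.weight
                total += c.weight
            }
            dst[i] = sum / total
        }
    }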
+ var tmp [][4]float64 + if z.pool.New != nil { + tmpp := z.pool.Get().(*[][4]float64) + defer z.pool.Put(tmpp) + tmp = *tmpp + } else { + tmp = z.makeTmpBuf() + } + + // sr is the source pixels. If it extends beyond the src bounds, + // we cannot use the type-specific fast paths, as they access + // the Pix fields directly without bounds checking. + // + // Similarly, the fast paths assume that the masks are nil. + if o.SrcMask != nil || !sr.In(src.Bounds()) { + z.scaleX_Image(tmp, src, sr, &o) + } else { + $switchS z.scaleX_$sTypeRN$sratio(tmp, src, sr, &o) + } + + if o.DstMask != nil { + switch op { + case Over: + z.scaleY_Image_Over(dst, dr, adr, tmp, &o) + case Src: + z.scaleY_Image_Src(dst, dr, adr, tmp, &o) + } + } else { + $switchD z.scaleY_$dTypeRN_$op(dst, dr, adr, tmp, &o) + } + } + + func (q *Kernel) Transform(dst Image, s2d f64.Aff3, src image.Image, sr image.Rectangle, op Op, opts *Options) { + var o Options + if opts != nil { + o = *opts + } + + dr := transformRect(&s2d, &sr) + // adr is the affected destination pixels. + adr := dst.Bounds().Intersect(dr) + adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) + if adr.Empty() || sr.Empty() { + return + } + if op == Over && o.SrcMask == nil && opaque(src) { + op = Src + } + d2s := invert(&s2d) + // bias is a translation of the mapping from dst coordinates to src + // coordinates such that the latter temporarily have non-negative X + // and Y coordinates. This allows us to write int(f) instead of + // int(math.Floor(f)), since "round to zero" and "round down" are + // equivalent when f >= 0, but the former is much cheaper. The X-- + // and Y-- are because the TransformLeaf methods have a "sx -= 0.5" + // adjustment. + bias := transformRect(&d2s, &adr).Min + bias.X-- + bias.Y-- + d2s[2] -= float64(bias.X) + d2s[5] -= float64(bias.Y) + // Make adr relative to dr.Min. + adr = adr.Sub(dr.Min) + + if u, ok := src.(*image.Uniform); ok && o.DstMask != nil && o.SrcMask != nil && sr.In(src.Bounds()) { + transform_Uniform(dst, dr, adr, &d2s, u, sr, bias, op) + return + } + + xscale := abs(d2s[0]) + if s := abs(d2s[1]); xscale < s { + xscale = s + } + yscale := abs(d2s[3]) + if s := abs(d2s[4]); yscale < s { + yscale = s + } + + // sr is the source pixels. If it extends beyond the src bounds, + // we cannot use the type-specific fast paths, as they access + // the Pix fields directly without bounds checking. + // + // Similarly, the fast paths assume that the masks are nil. 
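[Editor's note: the bounds condition matters because the type-specific leaves index Pix directly, with no per-pixel bounds checks. A sketch of the offset they compute, cf. pixOffset in this generator; it is the same arithmetic as the standard (*image.RGBA).PixOffset.]

    import "image"

    // rgbaOffset is where sample (x, y) lives inside m.Pix; nothing here
    // verifies that (x, y) is inside m.Rect, hence the sr.In(src.Bounds())
    // guard before taking a fast path.
    func rgbaOffset(m *image.RGBA, x, y int) int {
        return (y-m.Rect.Min.Y)*m.Stride + (x-m.Rect.Min.X)*4
    }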
+ if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) { + switch op { + case Over: + q.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case Src: + q.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } + } else { + $switch q.transform_$dTypeRN_$sTypeRN$sratio_$op(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } + } + ` + + codeKernelScaleLeafX = ` + func (z *kernelScaler) scaleX_$sTypeRN$sratio(tmp [][4]float64, src $sType, sr image.Rectangle, opts *Options) { + t := 0 + $preKernelOuter + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr, pg, pb, pa float64 $tweakVarP + for _, c := range z.horizontal.contribs[s.i:s.j] { + p += $srcf[sr.Min.X + int(c.coord), sr.Min.Y + int(y)] * c.weight + } + $tweakPr + tmp[t] = [4]float64{ + pr * s.invTotalWeightFFFF, $tweakP + pg * s.invTotalWeightFFFF, $tweakP + pb * s.invTotalWeightFFFF, $tweakP + pa * s.invTotalWeightFFFF, $tweakP + } + t++ + } + } + } + ` + + codeKernelScaleLeafY = ` + func (z *kernelScaler) scaleY_$dTypeRN_$op(dst $dType, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { + $preOuter + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + $preKernelInner + for dy, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { $tweakDy + var pr, pg, pb, pa float64 + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + $clampToAlpha + $outputf[dr.Min.X + int(dx), dr.Min.Y + int(adr.Min.Y + dy), ftou, p, s.invTotalWeight] + $tweakD + } + } + } + ` + + codeKernelTransformLeaf = ` + func (q *Kernel) transform_$dTypeRN_$sTypeRN$sratio_$op(dst $dType, dr, adr image.Rectangle, d2s *f64.Aff3, src $sType, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. + xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1 + 2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1 + 2*int(math.Ceil(yHalfWidth))) + + $preOuter + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y + int(dy)) + 0.5 + $preInner + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { $tweakDx + dxf := float64(dr.Min.X + int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). 
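[Editor's note: two details of the weight computation that follows are worth spelling out. When minifying (scale > 1) the kernel support is widened so every covered source pixel is visited, and the kernel's argument is compressed to match; afterwards the weights are renormalized, since clipping at the image edge can leave them summing to less than one. A hypothetical helper restating the first rule:]

    // broaden widens the kernel window for minification. support is the
    // kernel's half-width, scale the source/destination size ratio.
    func broaden(support, scale float64) (halfWidth, argScale float64) {
        halfWidth, argScale = support, 1.0
        if scale > 1 {
            halfWidth *= scale   // visit every source pixel under the window
            argScale = 1 / scale // evaluate the kernel on a compressed axis
        }
        return halfWidth, argScale
    }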
+ sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx - ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky - iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 $tweakVarP + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky - iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx - ix] * yWeight; w != 0 { + p += $srcf[kx, ky] * w + } + } + } + } + $clampToAlpha + $outputf[dr.Min.X + int(dx), dr.Min.Y + int(dy), fffftou, p, 1] + } + } + } + ` +) diff --git a/_third_party/golang.org/x/image/draw/impl.go b/_third_party/golang.org/x/image/draw/impl.go new file mode 100644 index 0000000000..7b1cbb0bcd --- /dev/null +++ b/_third_party/golang.org/x/image/draw/impl.go @@ -0,0 +1,6668 @@ +// generated by "go run gen.go". DO NOT EDIT. + +package draw + +import ( + "image" + "image/color" + "math" + + "bosun.org/_third_party/golang.org/x/image/math/f64" +) + +func (z nnInterpolator) Scale(dst Image, dr image.Rectangle, src image.Image, sr image.Rectangle, op Op, opts *Options) { + // Try to simplify a Scale to a Copy. + if dr.Size() == sr.Size() { + Copy(dst, dr.Min, src, sr, op, opts) + return + } + + var o Options + if opts != nil { + o = *opts + } + + // adr is the affected destination pixels. + adr := dst.Bounds().Intersect(dr) + adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) + if adr.Empty() || sr.Empty() { + return + } + // Make adr relative to dr.Min. + adr = adr.Sub(dr.Min) + if op == Over && o.SrcMask == nil && opaque(src) { + op = Src + } + + // sr is the source pixels. If it extends beyond the src bounds, + // we cannot use the type-specific fast paths, as they access + // the Pix fields directly without bounds checking. + // + // Similarly, the fast paths assume that the masks are nil. 
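[Editor's note: every nearest-neighbor leaf dispatched below shares one mapping. Destination pixel dx samples the source pixel under its center, computed in pure integer arithmetic as (2*dx + 1) * sw / (2 * dw), which is (dx + 0.5) * sw / dw with the 0.5 cleared of floating point. A sketch:]

    // nnIndex maps a destination column to its nearest source column.
    // dw2 in the generated leaves is 2*dw, precomputed per call.
    func nnIndex(dx, dw, sw uint64) uint64 {
        return ((2*dx + 1) * sw) / (2 * dw)
    }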
+ if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) { + switch op { + case Over: + z.scale_Image_Image_Over(dst, dr, adr, src, sr, &o) + case Src: + z.scale_Image_Image_Src(dst, dr, adr, src, sr, &o) + } + } else if _, ok := src.(*image.Uniform); ok { + Draw(dst, dr, src, src.Bounds().Min, op) + } else { + switch op { + case Over: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.NRGBA: + z.scale_RGBA_NRGBA_Over(dst, dr, adr, src, sr, &o) + case *image.RGBA: + z.scale_RGBA_RGBA_Over(dst, dr, adr, src, sr, &o) + default: + z.scale_RGBA_Image_Over(dst, dr, adr, src, sr, &o) + } + default: + switch src := src.(type) { + default: + z.scale_Image_Image_Over(dst, dr, adr, src, sr, &o) + } + } + case Src: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.Gray: + z.scale_RGBA_Gray_Src(dst, dr, adr, src, sr, &o) + case *image.NRGBA: + z.scale_RGBA_NRGBA_Src(dst, dr, adr, src, sr, &o) + case *image.RGBA: + z.scale_RGBA_RGBA_Src(dst, dr, adr, src, sr, &o) + case *image.YCbCr: + switch src.SubsampleRatio { + default: + z.scale_RGBA_Image_Src(dst, dr, adr, src, sr, &o) + case image.YCbCrSubsampleRatio444: + z.scale_RGBA_YCbCr444_Src(dst, dr, adr, src, sr, &o) + case image.YCbCrSubsampleRatio422: + z.scale_RGBA_YCbCr422_Src(dst, dr, adr, src, sr, &o) + case image.YCbCrSubsampleRatio420: + z.scale_RGBA_YCbCr420_Src(dst, dr, adr, src, sr, &o) + case image.YCbCrSubsampleRatio440: + z.scale_RGBA_YCbCr440_Src(dst, dr, adr, src, sr, &o) + } + default: + z.scale_RGBA_Image_Src(dst, dr, adr, src, sr, &o) + } + default: + switch src := src.(type) { + default: + z.scale_Image_Image_Src(dst, dr, adr, src, sr, &o) + } + } + } + } +} + +func (z nnInterpolator) Transform(dst Image, s2d f64.Aff3, src image.Image, sr image.Rectangle, op Op, opts *Options) { + // Try to simplify a Transform to a Copy. + if s2d[0] == 1 && s2d[1] == 0 && s2d[3] == 0 && s2d[4] == 1 { + dx := int(s2d[2]) + dy := int(s2d[5]) + if float64(dx) == s2d[2] && float64(dy) == s2d[5] { + Copy(dst, image.Point{X: sr.Min.X + dx, Y: sr.Min.X + dy}, src, sr, op, opts) + return + } + } + + var o Options + if opts != nil { + o = *opts + } + + dr := transformRect(&s2d, &sr) + // adr is the affected destination pixels. + adr := dst.Bounds().Intersect(dr) + adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) + if adr.Empty() || sr.Empty() { + return + } + if op == Over && o.SrcMask == nil && opaque(src) { + op = Src + } + + d2s := invert(&s2d) + // bias is a translation of the mapping from dst coordinates to src + // coordinates such that the latter temporarily have non-negative X + // and Y coordinates. This allows us to write int(f) instead of + // int(math.Floor(f)), since "round to zero" and "round down" are + // equivalent when f >= 0, but the former is much cheaper. The X-- + // and Y-- are because the TransformLeaf methods have a "sx -= 0.5" + // adjustment. + bias := transformRect(&d2s, &adr).Min + bias.X-- + bias.Y-- + d2s[2] -= float64(bias.X) + d2s[5] -= float64(bias.Y) + // Make adr relative to dr.Min. + adr = adr.Sub(dr.Min) + // sr is the source pixels. If it extends beyond the src bounds, + // we cannot use the type-specific fast paths, as they access + // the Pix fields directly without bounds checking. + // + // Similarly, the fast paths assume that the masks are nil. 
+ if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) { + switch op { + case Over: + z.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + case Src: + z.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } + } else if u, ok := src.(*image.Uniform); ok { + transform_Uniform(dst, dr, adr, &d2s, u, sr, bias, op) + } else { + switch op { + case Over: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.NRGBA: + z.transform_RGBA_NRGBA_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + case *image.RGBA: + z.transform_RGBA_RGBA_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + default: + z.transform_RGBA_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + } + default: + switch src := src.(type) { + default: + z.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + } + } + case Src: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.Gray: + z.transform_RGBA_Gray_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case *image.NRGBA: + z.transform_RGBA_NRGBA_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case *image.RGBA: + z.transform_RGBA_RGBA_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case *image.YCbCr: + switch src.SubsampleRatio { + default: + z.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case image.YCbCrSubsampleRatio444: + z.transform_RGBA_YCbCr444_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case image.YCbCrSubsampleRatio422: + z.transform_RGBA_YCbCr422_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case image.YCbCrSubsampleRatio420: + z.transform_RGBA_YCbCr420_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case image.YCbCrSubsampleRatio440: + z.transform_RGBA_YCbCr440_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } + default: + z.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } + default: + switch src := src.(type) { + default: + z.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } + } + } + } +} + +func (nnInterpolator) scale_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.Gray, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.Stride + (sr.Min.X + int(sx) - src.Rect.Min.X) + pr := uint32(src.Pix[pi]) * 0x101 + out := uint8(pr >> 8) + dst.Pix[d+0] = out + dst.Pix[d+1] = out + dst.Pix[d+2] = out + dst.Pix[d+3] = 0xff + } + } +} + +func (nnInterpolator) scale_RGBA_NRGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, src *image.NRGBA, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx)-src.Rect.Min.X)*4 + pa := uint32(src.Pix[pi+3]) * 0x101 + pr := uint32(src.Pix[pi+0]) * pa / 0xff + pg := uint32(src.Pix[pi+1]) * 
pa / 0xff + pb := uint32(src.Pix[pi+2]) * pa / 0xff + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (nnInterpolator) scale_RGBA_NRGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.NRGBA, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx)-src.Rect.Min.X)*4 + pa := uint32(src.Pix[pi+3]) * 0x101 + pr := uint32(src.Pix[pi+0]) * pa / 0xff + pg := uint32(src.Pix[pi+1]) * pa / 0xff + pb := uint32(src.Pix[pi+2]) * pa / 0xff + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (nnInterpolator) scale_RGBA_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, src *image.RGBA, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx)-src.Rect.Min.X)*4 + pr := uint32(src.Pix[pi+0]) * 0x101 + pg := uint32(src.Pix[pi+1]) * 0x101 + pb := uint32(src.Pix[pi+2]) * 0x101 + pa := uint32(src.Pix[pi+3]) * 0x101 + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (nnInterpolator) scale_RGBA_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.RGBA, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx)-src.Rect.Min.X)*4 + pr := uint32(src.Pix[pi+0]) * 0x101 + pg := uint32(src.Pix[pi+1]) * 0x101 + pb := uint32(src.Pix[pi+2]) * 0x101 + pa := uint32(src.Pix[pi+3]) * 0x101 + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (nnInterpolator) scale_RGBA_YCbCr444_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw 
:= uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx) - src.Rect.Min.X) + pj := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.CStride + (sr.Min.X + int(sx) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10100 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pr := (pyy1 + 91881*pcr1) >> 8 + pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pb := (pyy1 + 116130*pcb1) >> 8 + if pr < 0 { + pr = 0 + } else if pr > 0xffff { + pr = 0xffff + } + if pg < 0 { + pg = 0 + } else if pg > 0xffff { + pg = 0xffff + } + if pb < 0 { + pb = 0 + } else if pb > 0xffff { + pb = 0xffff + } + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (nnInterpolator) scale_RGBA_YCbCr422_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx) - src.Rect.Min.X) + pj := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.CStride + ((sr.Min.X+int(sx))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10100 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pr := (pyy1 + 91881*pcr1) >> 8 + pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pb := (pyy1 + 116130*pcb1) >> 8 + if pr < 0 { + pr = 0 + } else if pr > 0xffff { + pr = 0xffff + } + if pg < 0 { + pg = 0 + } else if pg > 0xffff { + pg = 0xffff + } + if pb < 0 { + pb = 0 + } else if pb > 0xffff { + pb = 0xffff + } + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (nnInterpolator) scale_RGBA_YCbCr420_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx) - src.Rect.Min.X) + pj := ((sr.Min.Y+int(sy))/2-src.Rect.Min.Y/2)*src.CStride + ((sr.Min.X+int(sx))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
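[Editor's note: the four YCbCr leaves differ only in how a luma coordinate maps to its shared chroma sample, cf. cOffset in gen.go. A sketch for an image whose bounds start at the origin; the real offsets also subtract the halved Rect.Min.]

    // chromaIndex gives the Cb/Cr sample used for luma pixel (x, y).
    func chromaIndex(ratio string, x, y int) (cx, cy int) {
        switch ratio {
        case "422":
            return x / 2, y // shared across horizontal pairs
        case "420":
            return x / 2, y / 2 // shared across 2x2 blocks
        case "440":
            return x, y / 2 // shared across vertical pairs
        }
        return x, y // 444: one chroma sample per pixel
    }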
+			pyy1 := int(src.Y[pi]) * 0x10100
+			pcb1 := int(src.Cb[pj]) - 128
+			pcr1 := int(src.Cr[pj]) - 128
+			pr := (pyy1 + 91881*pcr1) >> 8
+			pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8
+			pb := (pyy1 + 116130*pcb1) >> 8
+			if pr < 0 {
+				pr = 0
+			} else if pr > 0xffff {
+				pr = 0xffff
+			}
+			if pg < 0 {
+				pg = 0
+			} else if pg > 0xffff {
+				pg = 0xffff
+			}
+			if pb < 0 {
+				pb = 0
+			} else if pb > 0xffff {
+				pb = 0xffff
+			}
+			dst.Pix[d+0] = uint8(pr >> 8)
+			dst.Pix[d+1] = uint8(pg >> 8)
+			dst.Pix[d+2] = uint8(pb >> 8)
+			dst.Pix[d+3] = 0xff
+		}
+	}
+}
+
+func (nnInterpolator) scale_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) {
+	dw2 := uint64(dr.Dx()) * 2
+	dh2 := uint64(dr.Dy()) * 2
+	sw := uint64(sr.Dx())
+	sh := uint64(sr.Dy())
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		sy := (2*uint64(dy) + 1) * sh / dh2
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			sx := (2*uint64(dx) + 1) * sw / dw2
+			pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx) - src.Rect.Min.X)
+			pj := ((sr.Min.Y+int(sy))/2-src.Rect.Min.Y/2)*src.CStride + (sr.Min.X + int(sx) - src.Rect.Min.X)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+			pyy1 := int(src.Y[pi]) * 0x10100
+			pcb1 := int(src.Cb[pj]) - 128
+			pcr1 := int(src.Cr[pj]) - 128
+			pr := (pyy1 + 91881*pcr1) >> 8
+			pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8
+			pb := (pyy1 + 116130*pcb1) >> 8
+			if pr < 0 {
+				pr = 0
+			} else if pr > 0xffff {
+				pr = 0xffff
+			}
+			if pg < 0 {
+				pg = 0
+			} else if pg > 0xffff {
+				pg = 0xffff
+			}
+			if pb < 0 {
+				pb = 0
+			} else if pb > 0xffff {
+				pb = 0xffff
+			}
+			dst.Pix[d+0] = uint8(pr >> 8)
+			dst.Pix[d+1] = uint8(pg >> 8)
+			dst.Pix[d+2] = uint8(pb >> 8)
+			dst.Pix[d+3] = 0xff
+		}
+	}
+}
+
+func (nnInterpolator) scale_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) {
+	dw2 := uint64(dr.Dx()) * 2
+	dh2 := uint64(dr.Dy()) * 2
+	sw := uint64(sr.Dx())
+	sh := uint64(sr.Dy())
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		sy := (2*uint64(dy) + 1) * sh / dh2
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			sx := (2*uint64(dx) + 1) * sw / dw2
+			pr, pg, pb, pa := src.At(sr.Min.X+int(sx), sr.Min.Y+int(sy)).RGBA()
+			pa1 := (0xffff - pa) * 0x101
+			dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8)
+			dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8)
+			dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8)
+			dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8)
+		}
+	}
+}
+
+func (nnInterpolator) scale_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) {
+	dw2 := uint64(dr.Dx()) * 2
+	dh2 := uint64(dr.Dy()) * 2
+	sw := uint64(sr.Dx())
+	sh := uint64(sr.Dy())
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		sy := (2*uint64(dy) + 1) * sh / dh2
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			sx := (2*uint64(dx) + 1) * sw / dw2
+			pr, pg, pb, pa := src.At(sr.Min.X+int(sx), sr.Min.Y+int(sy)).RGBA()
+			dst.Pix[d+0] = uint8(pr >> 8)
+			dst.Pix[d+1] = uint8(pg >> 8)
+			dst.Pix[d+2] = uint8(pb >> 8)
+			dst.Pix[d+3] = uint8(pa >> 8)
+		}
+	}
+}
+
+func (nnInterpolator) scale_Image_Image_Over(dst Image, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) {
+	dw2 := uint64(dr.Dx()) * 2
+	dh2 := uint64(dr.Dy()) * 2
+	sw := uint64(sr.Dx())
+	sh := uint64(sr.Dy())
+	srcMask, smp := opts.SrcMask, opts.SrcMaskP
+	dstMask, dmp := opts.DstMask, opts.DstMaskP
+	dstColorRGBA64 := &color.RGBA64{}
+	dstColor := color.Color(dstColorRGBA64)
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		sy := (2*uint64(dy) + 1) * sh / dh2
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ {
+			sx := (2*uint64(dx) + 1) * sw / dw2
+			pr, pg, pb, pa := src.At(sr.Min.X+int(sx), sr.Min.Y+int(sy)).RGBA()
+			if srcMask != nil {
+				_, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx), smp.Y+sr.Min.Y+int(sy)).RGBA()
+				pr = pr * ma / 0xffff
+				pg = pg * ma / 0xffff
+				pb = pb * ma / 0xffff
+				pa = pa * ma / 0xffff
+			}
+			qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA()
+			if dstMask != nil {
+				_, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA()
+				pr = pr * ma / 0xffff
+				pg = pg * ma / 0xffff
+				pb = pb * ma / 0xffff
+				pa = pa * ma / 0xffff
+			}
+			pa1 := 0xffff - pa
+			dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr)
+			dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg)
+			dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb)
+			dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa)
+			dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor)
+		}
+	}
+}
+
+func (nnInterpolator) scale_Image_Image_Src(dst Image, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) {
+	dw2 := uint64(dr.Dx()) * 2
+	dh2 := uint64(dr.Dy()) * 2
+	sw := uint64(sr.Dx())
+	sh := uint64(sr.Dy())
+	srcMask, smp := opts.SrcMask, opts.SrcMaskP
+	dstMask, dmp := opts.DstMask, opts.DstMaskP
+	dstColorRGBA64 := &color.RGBA64{}
+	dstColor := color.Color(dstColorRGBA64)
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		sy := (2*uint64(dy) + 1) * sh / dh2
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ {
+			sx := (2*uint64(dx) + 1) * sw / dw2
+			pr, pg, pb, pa := src.At(sr.Min.X+int(sx), sr.Min.Y+int(sy)).RGBA()
+			if srcMask != nil {
+				_, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx), smp.Y+sr.Min.Y+int(sy)).RGBA()
+				pr = pr * ma / 0xffff
+				pg = pg * ma / 0xffff
+				pb = pb * ma / 0xffff
+				pa = pa * ma / 0xffff
+			}
+			if dstMask != nil {
+				qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA()
+				_, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA()
+				pr = pr * ma / 0xffff
+				pg = pg * ma / 0xffff
+				pb = pb * ma / 0xffff
+				pa = pa * ma / 0xffff
+				pa1 := 0xffff - ma
+				dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr)
+				dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg)
+				dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb)
+				dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa)
+				dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor)
+			} else {
+				dstColorRGBA64.R = uint16(pr)
+				dstColorRGBA64.G = uint16(pg)
+				dstColorRGBA64.B = uint16(pb)
+				dstColorRGBA64.A = uint16(pa)
+				dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor)
+			}
+		}
+	}
+}
+
+func (nnInterpolator) transform_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.Gray, sr image.Rectangle, bias image.Point, opts *Options) {
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		dyf := float64(dr.Min.Y+int(dy)) + 0.5
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			dxf := float64(dr.Min.X+int(dx)) + 0.5
+			sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X
+			sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y
+			if !(image.Point{sx0, sy0}).In(sr) {
+				continue
+			}
+			pi := (sy0-src.Rect.Min.Y)*src.Stride + (sx0 - src.Rect.Min.X)
+			pr := uint32(src.Pix[pi]) * 0x101
+			out := uint8(pr >> 8)
+			dst.Pix[d+0] = out
+			dst.Pix[d+1] = out
+			dst.Pix[d+2] = out
+			dst.Pix[d+3] = 0xff
+		}
+	}
+}
+
+func (nnInterpolator) transform_RGBA_NRGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, opts *Options) {
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		dyf := float64(dr.Min.Y+int(dy)) + 0.5
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			dxf := float64(dr.Min.X+int(dx)) + 0.5
+			sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X
+			sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y
+			if !(image.Point{sx0, sy0}).In(sr) {
+				continue
+			}
+			pi := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4
+			pa := uint32(src.Pix[pi+3]) * 0x101
+			pr := uint32(src.Pix[pi+0]) * pa / 0xff
+			pg := uint32(src.Pix[pi+1]) * pa / 0xff
+			pb := uint32(src.Pix[pi+2]) * pa / 0xff
+			pa1 := (0xffff - pa) * 0x101
+			dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8)
+			dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8)
+			dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8)
+			dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8)
+		}
+	}
+}
+
+func (nnInterpolator) transform_RGBA_NRGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, opts *Options) {
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		dyf := float64(dr.Min.Y+int(dy)) + 0.5
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			dxf := float64(dr.Min.X+int(dx)) + 0.5
+			sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X
+			sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y
+			if !(image.Point{sx0, sy0}).In(sr) {
+				continue
+			}
+			pi := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4
+			pa := uint32(src.Pix[pi+3]) * 0x101
+			pr := uint32(src.Pix[pi+0]) * pa / 0xff
+			pg := uint32(src.Pix[pi+1]) * pa / 0xff
+			pb := uint32(src.Pix[pi+2]) * pa / 0xff
+			dst.Pix[d+0] = uint8(pr >> 8)
+			dst.Pix[d+1] = uint8(pg >> 8)
+			dst.Pix[d+2] = uint8(pb >> 8)
+			dst.Pix[d+3] = uint8(pa >> 8)
+		}
+	}
+}
+
+func (nnInterpolator) transform_RGBA_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, opts *Options) {
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		dyf := float64(dr.Min.Y+int(dy)) + 0.5
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			dxf := float64(dr.Min.X+int(dx)) + 0.5
+			sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X
+			sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y
+			if !(image.Point{sx0, sy0}).In(sr) {
+				continue
+			}
+			pi := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4
+			pr := uint32(src.Pix[pi+0]) * 0x101
+			pg := uint32(src.Pix[pi+1]) * 0x101
+			pb := uint32(src.Pix[pi+2]) * 0x101
+			pa := uint32(src.Pix[pi+3]) * 0x101
+			pa1 := (0xffff - pa) * 0x101
+			dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8)
+			dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8)
+			dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8)
+			dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8)
+		}
+	}
+}
+
+func (nnInterpolator) transform_RGBA_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, opts *Options) {
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		dyf := float64(dr.Min.Y+int(dy)) + 0.5
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			dxf := float64(dr.Min.X+int(dx)) + 0.5
+			sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X
+			sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y
+			if !(image.Point{sx0, sy0}).In(sr) {
+				continue
+			}
+			pi := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4
+			pr := uint32(src.Pix[pi+0]) * 0x101
+			pg := uint32(src.Pix[pi+1]) * 0x101
+			pb := uint32(src.Pix[pi+2]) * 0x101
+			pa := uint32(src.Pix[pi+3]) * 0x101
+			dst.Pix[d+0] = uint8(pr >> 8)
+			dst.Pix[d+1] = uint8(pg >> 8)
+			dst.Pix[d+2] = uint8(pb >> 8)
+			dst.Pix[d+3] = uint8(pa >> 8)
+		}
+	}
+}
+
+func (nnInterpolator) transform_RGBA_YCbCr444_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) {
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		dyf := float64(dr.Min.Y+int(dy)) + 0.5
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			dxf := float64(dr.Min.X+int(dx)) + 0.5
+			sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X
+			sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y
+			if !(image.Point{sx0, sy0}).In(sr) {
+				continue
+			}
+			pi := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X)
+			pj := (sy0-src.Rect.Min.Y)*src.CStride + (sx0 - src.Rect.Min.X)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+			pyy1 := int(src.Y[pi]) * 0x10100
+			pcb1 := int(src.Cb[pj]) - 128
+			pcr1 := int(src.Cr[pj]) - 128
+			pr := (pyy1 + 91881*pcr1) >> 8
+			pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8
+			pb := (pyy1 + 116130*pcb1) >> 8
+			if pr < 0 {
+				pr = 0
+			} else if pr > 0xffff {
+				pr = 0xffff
+			}
+			if pg < 0 {
+				pg = 0
+			} else if pg > 0xffff {
+				pg = 0xffff
+			}
+			if pb < 0 {
+				pb = 0
+			} else if pb > 0xffff {
+				pb = 0xffff
+			}
+			dst.Pix[d+0] = uint8(pr >> 8)
+			dst.Pix[d+1] = uint8(pg >> 8)
+			dst.Pix[d+2] = uint8(pb >> 8)
+			dst.Pix[d+3] = 0xff
+		}
+	}
+}
+
+func (nnInterpolator) transform_RGBA_YCbCr422_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) {
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		dyf := float64(dr.Min.Y+int(dy)) + 0.5
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			dxf := float64(dr.Min.X+int(dx)) + 0.5
+			sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X
+			sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y
+			if !(image.Point{sx0, sy0}).In(sr) {
+				continue
+			}
+			pi := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X)
+			pj := (sy0-src.Rect.Min.Y)*src.CStride + ((sx0)/2 - src.Rect.Min.X/2)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+			pyy1 := int(src.Y[pi]) * 0x10100
+			pcb1 := int(src.Cb[pj]) - 128
+			pcr1 := int(src.Cr[pj]) - 128
+			pr := (pyy1 + 91881*pcr1) >> 8
+			pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8
+			pb := (pyy1 + 116130*pcb1) >> 8
+			if pr < 0 {
+				pr = 0
+			} else if pr > 0xffff {
+				pr = 0xffff
+			}
+			if pg < 0 {
+				pg = 0
+			} else if pg > 0xffff {
+				pg = 0xffff
+			}
+			if pb < 0 {
+				pb = 0
+			} else if pb > 0xffff {
+				pb = 0xffff
+			}
+			dst.Pix[d+0] = uint8(pr >> 8)
+			dst.Pix[d+1] = uint8(pg >> 8)
+			dst.Pix[d+2] = uint8(pb >> 8)
+			dst.Pix[d+3] = 0xff
+		}
+	}
+}
+
+func (nnInterpolator) transform_RGBA_YCbCr420_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) {
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		dyf := float64(dr.Min.Y+int(dy)) + 0.5
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			dxf := float64(dr.Min.X+int(dx)) + 0.5
+			sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X
+			sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y
+			if !(image.Point{sx0, sy0}).In(sr) {
+				continue
+			}
+			pi := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X)
+			pj := ((sy0)/2-src.Rect.Min.Y/2)*src.CStride + ((sx0)/2 - src.Rect.Min.X/2)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+			pyy1 := int(src.Y[pi]) * 0x10100
+			pcb1 := int(src.Cb[pj]) - 128
+			pcr1 := int(src.Cr[pj]) - 128
+			pr := (pyy1 + 91881*pcr1) >> 8
+			pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8
+			pb := (pyy1 + 116130*pcb1) >> 8
+			if pr < 0 {
+				pr = 0
+			} else if pr > 0xffff {
+				pr = 0xffff
+			}
+			if pg < 0 {
+				pg = 0
+			} else if pg > 0xffff {
+				pg = 0xffff
+			}
+			if pb < 0 {
+				pb = 0
+			} else if pb > 0xffff {
+				pb = 0xffff
+			}
+			dst.Pix[d+0] = uint8(pr >> 8)
+			dst.Pix[d+1] = uint8(pg >> 8)
+			dst.Pix[d+2] = uint8(pb >> 8)
+			dst.Pix[d+3] = 0xff
+		}
+	}
+}
+
+func (nnInterpolator) transform_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) {
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		dyf := float64(dr.Min.Y+int(dy)) + 0.5
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			dxf := float64(dr.Min.X+int(dx)) + 0.5
+			sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X
+			sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y
+			if !(image.Point{sx0, sy0}).In(sr) {
+				continue
+			}
+			pi := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X)
+			pj := ((sy0)/2-src.Rect.Min.Y/2)*src.CStride + (sx0 - src.Rect.Min.X)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+			pyy1 := int(src.Y[pi]) * 0x10100
+			pcb1 := int(src.Cb[pj]) - 128
+			pcr1 := int(src.Cr[pj]) - 128
+			pr := (pyy1 + 91881*pcr1) >> 8
+			pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8
+			pb := (pyy1 + 116130*pcb1) >> 8
+			if pr < 0 {
+				pr = 0
+			} else if pr > 0xffff {
+				pr = 0xffff
+			}
+			if pg < 0 {
+				pg = 0
+			} else if pg > 0xffff {
+				pg = 0xffff
+			}
+			if pb < 0 {
+				pb = 0
+			} else if pb > 0xffff {
+				pb = 0xffff
+			}
+			dst.Pix[d+0] = uint8(pr >> 8)
+			dst.Pix[d+1] = uint8(pg >> 8)
+			dst.Pix[d+2] = uint8(pb >> 8)
+			dst.Pix[d+3] = 0xff
+		}
+	}
+}
+
+func (nnInterpolator) transform_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) {
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		dyf := float64(dr.Min.Y+int(dy)) + 0.5
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			dxf := float64(dr.Min.X+int(dx)) + 0.5
+			sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X
+			sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y
+			if !(image.Point{sx0, sy0}).In(sr) {
+				continue
+			}
+			pr, pg, pb, pa := src.At(sx0, sy0).RGBA()
+			pa1 := (0xffff - pa) * 0x101
+			dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8)
+			dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8)
+			dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8)
+			dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8)
+		}
+	}
+}
+
+func (nnInterpolator) transform_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) {
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		dyf := float64(dr.Min.Y+int(dy)) + 0.5
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			dxf := float64(dr.Min.X+int(dx)) + 0.5
+			sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X
+			sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y
+			if !(image.Point{sx0, sy0}).In(sr) {
+				continue
+			}
+			pr, pg, pb, pa := src.At(sx0, sy0).RGBA()
+			dst.Pix[d+0] = uint8(pr >> 8)
+			dst.Pix[d+1] = uint8(pg >> 8)
+			dst.Pix[d+2] = uint8(pb >> 8)
+			dst.Pix[d+3] = uint8(pa >> 8)
+		}
+	}
+}
+
+func (nnInterpolator) transform_Image_Image_Over(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) {
+	srcMask, smp := opts.SrcMask, opts.SrcMaskP
+	dstMask, dmp := opts.DstMask, opts.DstMaskP
+	dstColorRGBA64 := &color.RGBA64{}
+	dstColor := color.Color(dstColorRGBA64)
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		dyf := float64(dr.Min.Y+int(dy)) + 0.5
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ {
+			dxf := float64(dr.Min.X+int(dx)) + 0.5
+			sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X
+			sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y
+			if !(image.Point{sx0, sy0}).In(sr) {
+				continue
+			}
+			pr, pg, pb, pa := src.At(sx0, sy0).RGBA()
+			if srcMask != nil {
+				_, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy0).RGBA()
+				pr = pr * ma / 0xffff
+				pg = pg * ma / 0xffff
+				pb = pb * ma / 0xffff
+				pa = pa * ma / 0xffff
+			}
+			qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA()
+			if dstMask != nil {
+				_, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA()
+				pr = pr * ma / 0xffff
+				pg = pg * ma / 0xffff
+				pb = pb * ma / 0xffff
+				pa = pa * ma / 0xffff
+			}
+			pa1 := 0xffff - pa
+			dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr)
+			dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg)
+			dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb)
+			dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa)
+			dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor)
+		}
+	}
+}
+
+func (nnInterpolator) transform_Image_Image_Src(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) {
+	srcMask, smp := opts.SrcMask, opts.SrcMaskP
+	dstMask, dmp := opts.DstMask, opts.DstMaskP
+	dstColorRGBA64 := &color.RGBA64{}
+	dstColor := color.Color(dstColorRGBA64)
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		dyf := float64(dr.Min.Y+int(dy)) + 0.5
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ {
+			dxf := float64(dr.Min.X+int(dx)) + 0.5
+			sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X
+			sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y
+			if !(image.Point{sx0, sy0}).In(sr) {
+				continue
+			}
+			pr, pg, pb, pa := src.At(sx0, sy0).RGBA()
+			if srcMask != nil {
+				_, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy0).RGBA()
+				pr = pr * ma / 0xffff
+				pg = pg * ma / 0xffff
+				pb = pb * ma / 0xffff
+				pa = pa * ma / 0xffff
+			}
+			if dstMask != nil {
+				qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA()
+				_, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA()
+				pr = pr * ma / 0xffff
+				pg = pg * ma / 0xffff
+				pb = pb * ma / 0xffff
+				pa = pa * ma / 0xffff
+				pa1 := 0xffff - ma
+				dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr)
+				dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg)
+				dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb)
+				dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa)
+				dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor)
+			} else {
+				dstColorRGBA64.R = uint16(pr)
+				dstColorRGBA64.G = uint16(pg)
+				dstColorRGBA64.B = uint16(pb)
+				dstColorRGBA64.A = uint16(pa)
+				dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor)
+			}
+		}
+	}
+}
+
+func (z ablInterpolator) Scale(dst Image, dr image.Rectangle, src image.Image, sr image.Rectangle, op Op, opts *Options) {
+	// Try to simplify a Scale to a Copy.
+	if dr.Size() == sr.Size() {
+		Copy(dst, dr.Min, src, sr, op, opts)
+		return
+	}
+
+	var o Options
+	if opts != nil {
+		o = *opts
+	}
+
+	// adr is the affected destination pixels.
+	adr := dst.Bounds().Intersect(dr)
+	adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP)
+	if adr.Empty() || sr.Empty() {
+		return
+	}
+	// Make adr relative to dr.Min.
+	adr = adr.Sub(dr.Min)
+	if op == Over && o.SrcMask == nil && opaque(src) {
+		op = Src
+	}
+
+	// sr is the source pixels. If it extends beyond the src bounds,
+	// we cannot use the type-specific fast paths, as they access
+	// the Pix fields directly without bounds checking.
+	//
+	// Similarly, the fast paths assume that the masks are nil.
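+	// The switch below therefore falls back to the generic image.Image
+	// paths whenever either mask is set or sr strays outside src.Bounds();
+	// the concrete *image.RGBA fast paths are taken only otherwise.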
+	if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) {
+		switch op {
+		case Over:
+			z.scale_Image_Image_Over(dst, dr, adr, src, sr, &o)
+		case Src:
+			z.scale_Image_Image_Src(dst, dr, adr, src, sr, &o)
+		}
+	} else if _, ok := src.(*image.Uniform); ok {
+		Draw(dst, dr, src, src.Bounds().Min, op)
+	} else {
+		switch op {
+		case Over:
+			switch dst := dst.(type) {
+			case *image.RGBA:
+				switch src := src.(type) {
+				case *image.NRGBA:
+					z.scale_RGBA_NRGBA_Over(dst, dr, adr, src, sr, &o)
+				case *image.RGBA:
+					z.scale_RGBA_RGBA_Over(dst, dr, adr, src, sr, &o)
+				default:
+					z.scale_RGBA_Image_Over(dst, dr, adr, src, sr, &o)
+				}
+			default:
+				switch src := src.(type) {
+				default:
+					z.scale_Image_Image_Over(dst, dr, adr, src, sr, &o)
+				}
+			}
+		case Src:
+			switch dst := dst.(type) {
+			case *image.RGBA:
+				switch src := src.(type) {
+				case *image.Gray:
+					z.scale_RGBA_Gray_Src(dst, dr, adr, src, sr, &o)
+				case *image.NRGBA:
+					z.scale_RGBA_NRGBA_Src(dst, dr, adr, src, sr, &o)
+				case *image.RGBA:
+					z.scale_RGBA_RGBA_Src(dst, dr, adr, src, sr, &o)
+				case *image.YCbCr:
+					switch src.SubsampleRatio {
+					default:
+						z.scale_RGBA_Image_Src(dst, dr, adr, src, sr, &o)
+					case image.YCbCrSubsampleRatio444:
+						z.scale_RGBA_YCbCr444_Src(dst, dr, adr, src, sr, &o)
+					case image.YCbCrSubsampleRatio422:
+						z.scale_RGBA_YCbCr422_Src(dst, dr, adr, src, sr, &o)
+					case image.YCbCrSubsampleRatio420:
+						z.scale_RGBA_YCbCr420_Src(dst, dr, adr, src, sr, &o)
+					case image.YCbCrSubsampleRatio440:
+						z.scale_RGBA_YCbCr440_Src(dst, dr, adr, src, sr, &o)
+					}
+				default:
+					z.scale_RGBA_Image_Src(dst, dr, adr, src, sr, &o)
+				}
+			default:
+				switch src := src.(type) {
+				default:
+					z.scale_Image_Image_Src(dst, dr, adr, src, sr, &o)
+				}
+			}
+		}
+	}
+}
+
+func (z ablInterpolator) Transform(dst Image, s2d f64.Aff3, src image.Image, sr image.Rectangle, op Op, opts *Options) {
+	// Try to simplify a Transform to a Copy.
+	if s2d[0] == 1 && s2d[1] == 0 && s2d[3] == 0 && s2d[4] == 1 {
+		dx := int(s2d[2])
+		dy := int(s2d[5])
+		if float64(dx) == s2d[2] && float64(dy) == s2d[5] {
+			Copy(dst, image.Point{X: sr.Min.X + dx, Y: sr.Min.Y + dy}, src, sr, op, opts)
+			return
+		}
+	}
+
+	var o Options
+	if opts != nil {
+		o = *opts
+	}
+
+	dr := transformRect(&s2d, &sr)
+	// adr is the affected destination pixels.
+	adr := dst.Bounds().Intersect(dr)
+	adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP)
+	if adr.Empty() || sr.Empty() {
+		return
+	}
+	if op == Over && o.SrcMask == nil && opaque(src) {
+		op = Src
+	}
+
+	d2s := invert(&s2d)
+	// bias is a translation of the mapping from dst coordinates to src
+	// coordinates such that the latter temporarily have non-negative X
+	// and Y coordinates. This allows us to write int(f) instead of
+	// int(math.Floor(f)), since "round to zero" and "round down" are
+	// equivalent when f >= 0, but the former is much cheaper. The X--
+	// and Y-- are because the TransformLeaf methods have a "sx -= 0.5"
+	// adjustment.
+	bias := transformRect(&d2s, &adr).Min
+	bias.X--
+	bias.Y--
+	d2s[2] -= float64(bias.X)
+	d2s[5] -= float64(bias.Y)
+	// Make adr relative to dr.Min.
+	adr = adr.Sub(dr.Min)
+	// sr is the source pixels. If it extends beyond the src bounds,
+	// we cannot use the type-specific fast paths, as they access
+	// the Pix fields directly without bounds checking.
+	//
+	// Similarly, the fast paths assume that the masks are nil.
+	if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) {
+		switch op {
+		case Over:
+			z.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o)
+		case Src:
+			z.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o)
+		}
+	} else if u, ok := src.(*image.Uniform); ok {
+		transform_Uniform(dst, dr, adr, &d2s, u, sr, bias, op)
+	} else {
+		switch op {
+		case Over:
+			switch dst := dst.(type) {
+			case *image.RGBA:
+				switch src := src.(type) {
+				case *image.NRGBA:
+					z.transform_RGBA_NRGBA_Over(dst, dr, adr, &d2s, src, sr, bias, &o)
+				case *image.RGBA:
+					z.transform_RGBA_RGBA_Over(dst, dr, adr, &d2s, src, sr, bias, &o)
+				default:
+					z.transform_RGBA_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o)
+				}
+			default:
+				switch src := src.(type) {
+				default:
+					z.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o)
+				}
+			}
+		case Src:
+			switch dst := dst.(type) {
+			case *image.RGBA:
+				switch src := src.(type) {
+				case *image.Gray:
+					z.transform_RGBA_Gray_Src(dst, dr, adr, &d2s, src, sr, bias, &o)
+				case *image.NRGBA:
+					z.transform_RGBA_NRGBA_Src(dst, dr, adr, &d2s, src, sr, bias, &o)
+				case *image.RGBA:
+					z.transform_RGBA_RGBA_Src(dst, dr, adr, &d2s, src, sr, bias, &o)
+				case *image.YCbCr:
+					switch src.SubsampleRatio {
+					default:
+						z.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o)
+					case image.YCbCrSubsampleRatio444:
+						z.transform_RGBA_YCbCr444_Src(dst, dr, adr, &d2s, src, sr, bias, &o)
+					case image.YCbCrSubsampleRatio422:
+						z.transform_RGBA_YCbCr422_Src(dst, dr, adr, &d2s, src, sr, bias, &o)
+					case image.YCbCrSubsampleRatio420:
+						z.transform_RGBA_YCbCr420_Src(dst, dr, adr, &d2s, src, sr, bias, &o)
+					case image.YCbCrSubsampleRatio440:
+						z.transform_RGBA_YCbCr440_Src(dst, dr, adr, &d2s, src, sr, bias, &o)
+					}
+				default:
+					z.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o)
+				}
+			default:
+				switch src := src.(type) {
+				default:
+					z.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o)
+				}
+			}
+		}
+	}
+}
+
+func (ablInterpolator) scale_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.Gray, sr image.Rectangle, opts *Options) {
+	sw := int32(sr.Dx())
+	sh := int32(sr.Dy())
+	yscale := float64(sh) / float64(dr.Dy())
+	xscale := float64(sw) / float64(dr.Dx())
+	swMinus1, shMinus1 := sw-1, sh-1
+
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		sy := (float64(dy)+0.5)*yscale - 0.5
+		// If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if
+		// we say int32(sy) instead of int32(math.Floor(sy)). Similarly for
+		// sx, below.
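+		// sy0/sy1 are the two sample rows and yFrac0/yFrac1 their weights;
+		// together with sx0/sx1 and xFrac0/xFrac1 below, the four taps blend as
+		//	(1-xFrac0)*(1-yFrac0)*s00 + xFrac0*(1-yFrac0)*s10 +
+		//	(1-xFrac0)*yFrac0*s01 + xFrac0*yFrac0*s11
+		// which is standard bilinear interpolation.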
+		sy0 := int32(sy)
+		yFrac0 := sy - float64(sy0)
+		yFrac1 := 1 - yFrac0
+		sy1 := sy0 + 1
+		if sy < 0 {
+			sy0, sy1 = 0, 0
+			yFrac0, yFrac1 = 0, 1
+		} else if sy1 > shMinus1 {
+			sy0, sy1 = shMinus1, shMinus1
+			yFrac0, yFrac1 = 1, 0
+		}
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			sx := (float64(dx)+0.5)*xscale - 0.5
+			sx0 := int32(sx)
+			xFrac0 := sx - float64(sx0)
+			xFrac1 := 1 - xFrac0
+			sx1 := sx0 + 1
+			if sx < 0 {
+				sx0, sx1 = 0, 0
+				xFrac0, xFrac1 = 0, 1
+			} else if sx1 > swMinus1 {
+				sx0, sx1 = swMinus1, swMinus1
+				xFrac0, xFrac1 = 1, 0
+			}
+
+			s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X + int(sx0) - src.Rect.Min.X)
+			s00ru := uint32(src.Pix[s00i]) * 0x101
+			s00r := float64(s00ru)
+			s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X + int(sx1) - src.Rect.Min.X)
+			s10ru := uint32(src.Pix[s10i]) * 0x101
+			s10r := float64(s10ru)
+			s10r = xFrac1*s00r + xFrac0*s10r
+			s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X + int(sx0) - src.Rect.Min.X)
+			s01ru := uint32(src.Pix[s01i]) * 0x101
+			s01r := float64(s01ru)
+			s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X + int(sx1) - src.Rect.Min.X)
+			s11ru := uint32(src.Pix[s11i]) * 0x101
+			s11r := float64(s11ru)
+			s11r = xFrac1*s01r + xFrac0*s11r
+			s11r = yFrac1*s10r + yFrac0*s11r
+			pr := uint32(s11r)
+			out := uint8(pr >> 8)
+			dst.Pix[d+0] = out
+			dst.Pix[d+1] = out
+			dst.Pix[d+2] = out
+			dst.Pix[d+3] = 0xff
+		}
+	}
+}
+
+func (ablInterpolator) scale_RGBA_NRGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, src *image.NRGBA, sr image.Rectangle, opts *Options) {
+	sw := int32(sr.Dx())
+	sh := int32(sr.Dy())
+	yscale := float64(sh) / float64(dr.Dy())
+	xscale := float64(sw) / float64(dr.Dx())
+	swMinus1, shMinus1 := sw-1, sh-1
+
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		sy := (float64(dy)+0.5)*yscale - 0.5
+		// If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if
+		// we say int32(sy) instead of int32(math.Floor(sy)). Similarly for
+		// sx, below.
+		sy0 := int32(sy)
+		yFrac0 := sy - float64(sy0)
+		yFrac1 := 1 - yFrac0
+		sy1 := sy0 + 1
+		if sy < 0 {
+			sy0, sy1 = 0, 0
+			yFrac0, yFrac1 = 0, 1
+		} else if sy1 > shMinus1 {
+			sy0, sy1 = shMinus1, shMinus1
+			yFrac0, yFrac1 = 1, 0
+		}
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			sx := (float64(dx)+0.5)*xscale - 0.5
+			sx0 := int32(sx)
+			xFrac0 := sx - float64(sx0)
+			xFrac1 := 1 - xFrac0
+			sx1 := sx0 + 1
+			if sx < 0 {
+				sx0, sx1 = 0, 0
+				xFrac0, xFrac1 = 0, 1
+			} else if sx1 > swMinus1 {
+				sx0, sx1 = swMinus1, swMinus1
+				xFrac0, xFrac1 = 1, 0
+			}
+
+			s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4
+			s00au := uint32(src.Pix[s00i+3]) * 0x101
+			s00ru := uint32(src.Pix[s00i+0]) * s00au / 0xff
+			s00gu := uint32(src.Pix[s00i+1]) * s00au / 0xff
+			s00bu := uint32(src.Pix[s00i+2]) * s00au / 0xff
+			s00r := float64(s00ru)
+			s00g := float64(s00gu)
+			s00b := float64(s00bu)
+			s00a := float64(s00au)
+			s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4
+			s10au := uint32(src.Pix[s10i+3]) * 0x101
+			s10ru := uint32(src.Pix[s10i+0]) * s10au / 0xff
+			s10gu := uint32(src.Pix[s10i+1]) * s10au / 0xff
+			s10bu := uint32(src.Pix[s10i+2]) * s10au / 0xff
+			s10r := float64(s10ru)
+			s10g := float64(s10gu)
+			s10b := float64(s10bu)
+			s10a := float64(s10au)
+			s10r = xFrac1*s00r + xFrac0*s10r
+			s10g = xFrac1*s00g + xFrac0*s10g
+			s10b = xFrac1*s00b + xFrac0*s10b
+			s10a = xFrac1*s00a + xFrac0*s10a
+			s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4
+			s01au := uint32(src.Pix[s01i+3]) * 0x101
+			s01ru := uint32(src.Pix[s01i+0]) * s01au / 0xff
+			s01gu := uint32(src.Pix[s01i+1]) * s01au / 0xff
+			s01bu := uint32(src.Pix[s01i+2]) * s01au / 0xff
+			s01r := float64(s01ru)
+			s01g := float64(s01gu)
+			s01b := float64(s01bu)
+			s01a := float64(s01au)
+			s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4
+			s11au := uint32(src.Pix[s11i+3]) * 0x101
+			s11ru := uint32(src.Pix[s11i+0]) * s11au / 0xff
+			s11gu := uint32(src.Pix[s11i+1]) * s11au / 0xff
+			s11bu := uint32(src.Pix[s11i+2]) * s11au / 0xff
+			s11r := float64(s11ru)
+			s11g := float64(s11gu)
+			s11b := float64(s11bu)
+			s11a := float64(s11au)
+			s11r = xFrac1*s01r + xFrac0*s11r
+			s11g = xFrac1*s01g + xFrac0*s11g
+			s11b = xFrac1*s01b + xFrac0*s11b
+			s11a = xFrac1*s01a + xFrac0*s11a
+			s11r = yFrac1*s10r + yFrac0*s11r
+			s11g = yFrac1*s10g + yFrac0*s11g
+			s11b = yFrac1*s10b + yFrac0*s11b
+			s11a = yFrac1*s10a + yFrac0*s11a
+			pr := uint32(s11r)
+			pg := uint32(s11g)
+			pb := uint32(s11b)
+			pa := uint32(s11a)
+			pa1 := (0xffff - pa) * 0x101
+			dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8)
+			dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8)
+			dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8)
+			dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8)
+		}
+	}
+}
+
+func (ablInterpolator) scale_RGBA_NRGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.NRGBA, sr image.Rectangle, opts *Options) {
+	sw := int32(sr.Dx())
+	sh := int32(sr.Dy())
+	yscale := float64(sh) / float64(dr.Dy())
+	xscale := float64(sw) / float64(dr.Dx())
+	swMinus1, shMinus1 := sw-1, sh-1
+
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		sy := (float64(dy)+0.5)*yscale - 0.5
+		// If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if
+		// we say int32(sy) instead of int32(math.Floor(sy)). Similarly for
+		// sx, below.
+		sy0 := int32(sy)
+		yFrac0 := sy - float64(sy0)
+		yFrac1 := 1 - yFrac0
+		sy1 := sy0 + 1
+		if sy < 0 {
+			sy0, sy1 = 0, 0
+			yFrac0, yFrac1 = 0, 1
+		} else if sy1 > shMinus1 {
+			sy0, sy1 = shMinus1, shMinus1
+			yFrac0, yFrac1 = 1, 0
+		}
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			sx := (float64(dx)+0.5)*xscale - 0.5
+			sx0 := int32(sx)
+			xFrac0 := sx - float64(sx0)
+			xFrac1 := 1 - xFrac0
+			sx1 := sx0 + 1
+			if sx < 0 {
+				sx0, sx1 = 0, 0
+				xFrac0, xFrac1 = 0, 1
+			} else if sx1 > swMinus1 {
+				sx0, sx1 = swMinus1, swMinus1
+				xFrac0, xFrac1 = 1, 0
+			}
+
+			s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4
+			s00au := uint32(src.Pix[s00i+3]) * 0x101
+			s00ru := uint32(src.Pix[s00i+0]) * s00au / 0xff
+			s00gu := uint32(src.Pix[s00i+1]) * s00au / 0xff
+			s00bu := uint32(src.Pix[s00i+2]) * s00au / 0xff
+			s00r := float64(s00ru)
+			s00g := float64(s00gu)
+			s00b := float64(s00bu)
+			s00a := float64(s00au)
+			s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4
+			s10au := uint32(src.Pix[s10i+3]) * 0x101
+			s10ru := uint32(src.Pix[s10i+0]) * s10au / 0xff
+			s10gu := uint32(src.Pix[s10i+1]) * s10au / 0xff
+			s10bu := uint32(src.Pix[s10i+2]) * s10au / 0xff
+			s10r := float64(s10ru)
+			s10g := float64(s10gu)
+			s10b := float64(s10bu)
+			s10a := float64(s10au)
+			s10r = xFrac1*s00r + xFrac0*s10r
+			s10g = xFrac1*s00g + xFrac0*s10g
+			s10b = xFrac1*s00b + xFrac0*s10b
+			s10a = xFrac1*s00a + xFrac0*s10a
+			s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4
+			s01au := uint32(src.Pix[s01i+3]) * 0x101
+			s01ru := uint32(src.Pix[s01i+0]) * s01au / 0xff
+			s01gu := uint32(src.Pix[s01i+1]) * s01au / 0xff
+			s01bu := uint32(src.Pix[s01i+2]) * s01au / 0xff
+			s01r := float64(s01ru)
+			s01g := float64(s01gu)
+			s01b := float64(s01bu)
+			s01a := float64(s01au)
+			s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4
+			s11au := uint32(src.Pix[s11i+3]) * 0x101
+			s11ru := uint32(src.Pix[s11i+0]) * s11au / 0xff
+			s11gu := uint32(src.Pix[s11i+1]) * s11au / 0xff
+			s11bu := uint32(src.Pix[s11i+2]) * s11au / 0xff
+			s11r := float64(s11ru)
+			s11g := float64(s11gu)
+			s11b := float64(s11bu)
+			s11a := float64(s11au)
+			s11r = xFrac1*s01r + xFrac0*s11r
+			s11g = xFrac1*s01g + xFrac0*s11g
+			s11b = xFrac1*s01b + xFrac0*s11b
+			s11a = xFrac1*s01a + xFrac0*s11a
+			s11r = yFrac1*s10r + yFrac0*s11r
+			s11g = yFrac1*s10g + yFrac0*s11g
+			s11b = yFrac1*s10b + yFrac0*s11b
+			s11a = yFrac1*s10a + yFrac0*s11a
+			pr := uint32(s11r)
+			pg := uint32(s11g)
+			pb := uint32(s11b)
+			pa := uint32(s11a)
+			dst.Pix[d+0] = uint8(pr >> 8)
+			dst.Pix[d+1] = uint8(pg >> 8)
+			dst.Pix[d+2] = uint8(pb >> 8)
+			dst.Pix[d+3] = uint8(pa >> 8)
+		}
+	}
+}
+
+func (ablInterpolator) scale_RGBA_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, src *image.RGBA, sr image.Rectangle, opts *Options) {
+	sw := int32(sr.Dx())
+	sh := int32(sr.Dy())
+	yscale := float64(sh) / float64(dr.Dy())
+	xscale := float64(sw) / float64(dr.Dx())
+	swMinus1, shMinus1 := sw-1, sh-1
+
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		sy := (float64(dy)+0.5)*yscale - 0.5
+		// If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if
+		// we say int32(sy) instead of int32(math.Floor(sy)). Similarly for
+		// sx, below.
+		sy0 := int32(sy)
+		yFrac0 := sy - float64(sy0)
+		yFrac1 := 1 - yFrac0
+		sy1 := sy0 + 1
+		if sy < 0 {
+			sy0, sy1 = 0, 0
+			yFrac0, yFrac1 = 0, 1
+		} else if sy1 > shMinus1 {
+			sy0, sy1 = shMinus1, shMinus1
+			yFrac0, yFrac1 = 1, 0
+		}
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			sx := (float64(dx)+0.5)*xscale - 0.5
+			sx0 := int32(sx)
+			xFrac0 := sx - float64(sx0)
+			xFrac1 := 1 - xFrac0
+			sx1 := sx0 + 1
+			if sx < 0 {
+				sx0, sx1 = 0, 0
+				xFrac0, xFrac1 = 0, 1
+			} else if sx1 > swMinus1 {
+				sx0, sx1 = swMinus1, swMinus1
+				xFrac0, xFrac1 = 1, 0
+			}
+
+			s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4
+			s00ru := uint32(src.Pix[s00i+0]) * 0x101
+			s00gu := uint32(src.Pix[s00i+1]) * 0x101
+			s00bu := uint32(src.Pix[s00i+2]) * 0x101
+			s00au := uint32(src.Pix[s00i+3]) * 0x101
+			s00r := float64(s00ru)
+			s00g := float64(s00gu)
+			s00b := float64(s00bu)
+			s00a := float64(s00au)
+			s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4
+			s10ru := uint32(src.Pix[s10i+0]) * 0x101
+			s10gu := uint32(src.Pix[s10i+1]) * 0x101
+			s10bu := uint32(src.Pix[s10i+2]) * 0x101
+			s10au := uint32(src.Pix[s10i+3]) * 0x101
+			s10r := float64(s10ru)
+			s10g := float64(s10gu)
+			s10b := float64(s10bu)
+			s10a := float64(s10au)
+			s10r = xFrac1*s00r + xFrac0*s10r
+			s10g = xFrac1*s00g + xFrac0*s10g
+			s10b = xFrac1*s00b + xFrac0*s10b
+			s10a = xFrac1*s00a + xFrac0*s10a
+			s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4
+			s01ru := uint32(src.Pix[s01i+0]) * 0x101
+			s01gu := uint32(src.Pix[s01i+1]) * 0x101
+			s01bu := uint32(src.Pix[s01i+2]) * 0x101
+			s01au := uint32(src.Pix[s01i+3]) * 0x101
+			s01r := float64(s01ru)
+			s01g := float64(s01gu)
+			s01b := float64(s01bu)
+			s01a := float64(s01au)
+			s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4
+			s11ru := uint32(src.Pix[s11i+0]) * 0x101
+			s11gu := uint32(src.Pix[s11i+1]) * 0x101
+			s11bu := uint32(src.Pix[s11i+2]) * 0x101
+			s11au := uint32(src.Pix[s11i+3]) * 0x101
+			s11r := float64(s11ru)
+			s11g := float64(s11gu)
+			s11b := float64(s11bu)
+			s11a := float64(s11au)
+			s11r = xFrac1*s01r + xFrac0*s11r
+			s11g = xFrac1*s01g + xFrac0*s11g
+			s11b = xFrac1*s01b + xFrac0*s11b
+			s11a = xFrac1*s01a + xFrac0*s11a
+			s11r = yFrac1*s10r + yFrac0*s11r
+			s11g = yFrac1*s10g + yFrac0*s11g
+			s11b = yFrac1*s10b + yFrac0*s11b
+			s11a = yFrac1*s10a + yFrac0*s11a
+			pr := uint32(s11r)
+			pg := uint32(s11g)
+			pb := uint32(s11b)
+			pa := uint32(s11a)
+			pa1 := (0xffff - pa) * 0x101
+			dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8)
+			dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8)
+			dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8)
+			dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8)
+		}
+	}
+}
+
+func (ablInterpolator) scale_RGBA_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.RGBA, sr image.Rectangle, opts *Options) {
+	sw := int32(sr.Dx())
+	sh := int32(sr.Dy())
+	yscale := float64(sh) / float64(dr.Dy())
+	xscale := float64(sw) / float64(dr.Dx())
+	swMinus1, shMinus1 := sw-1, sh-1
+
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		sy := (float64(dy)+0.5)*yscale - 0.5
+		// If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if
+		// we say int32(sy) instead of int32(math.Floor(sy)). Similarly for
+		// sx, below.
+		sy0 := int32(sy)
+		yFrac0 := sy - float64(sy0)
+		yFrac1 := 1 - yFrac0
+		sy1 := sy0 + 1
+		if sy < 0 {
+			sy0, sy1 = 0, 0
+			yFrac0, yFrac1 = 0, 1
+		} else if sy1 > shMinus1 {
+			sy0, sy1 = shMinus1, shMinus1
+			yFrac0, yFrac1 = 1, 0
+		}
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			sx := (float64(dx)+0.5)*xscale - 0.5
+			sx0 := int32(sx)
+			xFrac0 := sx - float64(sx0)
+			xFrac1 := 1 - xFrac0
+			sx1 := sx0 + 1
+			if sx < 0 {
+				sx0, sx1 = 0, 0
+				xFrac0, xFrac1 = 0, 1
+			} else if sx1 > swMinus1 {
+				sx0, sx1 = swMinus1, swMinus1
+				xFrac0, xFrac1 = 1, 0
+			}
+
+			s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4
+			s00ru := uint32(src.Pix[s00i+0]) * 0x101
+			s00gu := uint32(src.Pix[s00i+1]) * 0x101
+			s00bu := uint32(src.Pix[s00i+2]) * 0x101
+			s00au := uint32(src.Pix[s00i+3]) * 0x101
+			s00r := float64(s00ru)
+			s00g := float64(s00gu)
+			s00b := float64(s00bu)
+			s00a := float64(s00au)
+			s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4
+			s10ru := uint32(src.Pix[s10i+0]) * 0x101
+			s10gu := uint32(src.Pix[s10i+1]) * 0x101
+			s10bu := uint32(src.Pix[s10i+2]) * 0x101
+			s10au := uint32(src.Pix[s10i+3]) * 0x101
+			s10r := float64(s10ru)
+			s10g := float64(s10gu)
+			s10b := float64(s10bu)
+			s10a := float64(s10au)
+			s10r = xFrac1*s00r + xFrac0*s10r
+			s10g = xFrac1*s00g + xFrac0*s10g
+			s10b = xFrac1*s00b + xFrac0*s10b
+			s10a = xFrac1*s00a + xFrac0*s10a
+			s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4
+			s01ru := uint32(src.Pix[s01i+0]) * 0x101
+			s01gu := uint32(src.Pix[s01i+1]) * 0x101
+			s01bu := uint32(src.Pix[s01i+2]) * 0x101
+			s01au := uint32(src.Pix[s01i+3]) * 0x101
+			s01r := float64(s01ru)
+			s01g := float64(s01gu)
+			s01b := float64(s01bu)
+			s01a := float64(s01au)
+			s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4
+			s11ru := uint32(src.Pix[s11i+0]) * 0x101
+			s11gu := uint32(src.Pix[s11i+1]) * 0x101
+			s11bu := uint32(src.Pix[s11i+2]) * 0x101
+			s11au := uint32(src.Pix[s11i+3]) * 0x101
+			s11r := float64(s11ru)
+			s11g := float64(s11gu)
+			s11b := float64(s11bu)
+			s11a := float64(s11au)
+			s11r = xFrac1*s01r + xFrac0*s11r
+			s11g = xFrac1*s01g + xFrac0*s11g
+			s11b = xFrac1*s01b + xFrac0*s11b
+			s11a = xFrac1*s01a + xFrac0*s11a
+			s11r = yFrac1*s10r + yFrac0*s11r
+			s11g = yFrac1*s10g + yFrac0*s11g
+			s11b = yFrac1*s10b + yFrac0*s11b
+			s11a = yFrac1*s10a + yFrac0*s11a
+			pr := uint32(s11r)
+			pg := uint32(s11g)
+			pb := uint32(s11b)
+			pa := uint32(s11a)
+			dst.Pix[d+0] = uint8(pr >> 8)
+			dst.Pix[d+1] = uint8(pg >> 8)
+			dst.Pix[d+2] = uint8(pb >> 8)
+			dst.Pix[d+3] = uint8(pa >> 8)
+		}
+	}
+}
+
+func (ablInterpolator) scale_RGBA_YCbCr444_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) {
+	sw := int32(sr.Dx())
+	sh := int32(sr.Dy())
+	yscale := float64(sh) / float64(dr.Dy())
+	xscale := float64(sw) / float64(dr.Dx())
+	swMinus1, shMinus1 := sw-1, sh-1
+
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		sy := (float64(dy)+0.5)*yscale - 0.5
+		// If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if
+		// we say int32(sy) instead of int32(math.Floor(sy)). Similarly for
+		// sx, below.
+		sy0 := int32(sy)
+		yFrac0 := sy - float64(sy0)
+		yFrac1 := 1 - yFrac0
+		sy1 := sy0 + 1
+		if sy < 0 {
+			sy0, sy1 = 0, 0
+			yFrac0, yFrac1 = 0, 1
+		} else if sy1 > shMinus1 {
+			sy0, sy1 = shMinus1, shMinus1
+			yFrac0, yFrac1 = 1, 0
+		}
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			sx := (float64(dx)+0.5)*xscale - 0.5
+			sx0 := int32(sx)
+			xFrac0 := sx - float64(sx0)
+			xFrac1 := 1 - xFrac0
+			sx1 := sx0 + 1
+			if sx < 0 {
+				sx0, sx1 = 0, 0
+				xFrac0, xFrac1 = 0, 1
+			} else if sx1 > swMinus1 {
+				sx0, sx1 = swMinus1, swMinus1
+				xFrac0, xFrac1 = 1, 0
+			}
+
+			s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X)
+			s00j := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.CStride + (sr.Min.X + int(sx0) - src.Rect.Min.X)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+			s00yy1 := int(src.Y[s00i]) * 0x10100
+			s00cb1 := int(src.Cb[s00j]) - 128
+			s00cr1 := int(src.Cr[s00j]) - 128
+			s00ru := (s00yy1 + 91881*s00cr1) >> 8
+			s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8
+			s00bu := (s00yy1 + 116130*s00cb1) >> 8
+			if s00ru < 0 {
+				s00ru = 0
+			} else if s00ru > 0xffff {
+				s00ru = 0xffff
+			}
+			if s00gu < 0 {
+				s00gu = 0
+			} else if s00gu > 0xffff {
+				s00gu = 0xffff
+			}
+			if s00bu < 0 {
+				s00bu = 0
+			} else if s00bu > 0xffff {
+				s00bu = 0xffff
+			}
+
+			s00r := float64(s00ru)
+			s00g := float64(s00gu)
+			s00b := float64(s00bu)
+			s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X)
+			s10j := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.CStride + (sr.Min.X + int(sx1) - src.Rect.Min.X)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+			s10yy1 := int(src.Y[s10i]) * 0x10100
+			s10cb1 := int(src.Cb[s10j]) - 128
+			s10cr1 := int(src.Cr[s10j]) - 128
+			s10ru := (s10yy1 + 91881*s10cr1) >> 8
+			s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8
+			s10bu := (s10yy1 + 116130*s10cb1) >> 8
+			if s10ru < 0 {
+				s10ru = 0
+			} else if s10ru > 0xffff {
+				s10ru = 0xffff
+			}
+			if s10gu < 0 {
+				s10gu = 0
+			} else if s10gu > 0xffff {
+				s10gu = 0xffff
+			}
+			if s10bu < 0 {
+				s10bu = 0
+			} else if s10bu > 0xffff {
+				s10bu = 0xffff
+			}
+
+			s10r := float64(s10ru)
+			s10g := float64(s10gu)
+			s10b := float64(s10bu)
+			s10r = xFrac1*s00r + xFrac0*s10r
+			s10g = xFrac1*s00g + xFrac0*s10g
+			s10b = xFrac1*s00b + xFrac0*s10b
+			s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X)
+			s01j := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.CStride + (sr.Min.X + int(sx0) - src.Rect.Min.X)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+			s01yy1 := int(src.Y[s01i]) * 0x10100
+			s01cb1 := int(src.Cb[s01j]) - 128
+			s01cr1 := int(src.Cr[s01j]) - 128
+			s01ru := (s01yy1 + 91881*s01cr1) >> 8
+			s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8
+			s01bu := (s01yy1 + 116130*s01cb1) >> 8
+			if s01ru < 0 {
+				s01ru = 0
+			} else if s01ru > 0xffff {
+				s01ru = 0xffff
+			}
+			if s01gu < 0 {
+				s01gu = 0
+			} else if s01gu > 0xffff {
+				s01gu = 0xffff
+			}
+			if s01bu < 0 {
+				s01bu = 0
+			} else if s01bu > 0xffff {
+				s01bu = 0xffff
+			}
+
+			s01r := float64(s01ru)
+			s01g := float64(s01gu)
+			s01b := float64(s01bu)
+			s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X)
+			s11j := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.CStride + (sr.Min.X + int(sx1) - src.Rect.Min.X)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+			s11yy1 := int(src.Y[s11i]) * 0x10100
+			s11cb1 := int(src.Cb[s11j]) - 128
+			s11cr1 := int(src.Cr[s11j]) - 128
+			s11ru := (s11yy1 + 91881*s11cr1) >> 8
+			s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8
+			s11bu := (s11yy1 + 116130*s11cb1) >> 8
+			if s11ru < 0 {
+				s11ru = 0
+			} else if s11ru > 0xffff {
+				s11ru = 0xffff
+			}
+			if s11gu < 0 {
+				s11gu = 0
+			} else if s11gu > 0xffff {
+				s11gu = 0xffff
+			}
+			if s11bu < 0 {
+				s11bu = 0
+			} else if s11bu > 0xffff {
+				s11bu = 0xffff
+			}
+
+			s11r := float64(s11ru)
+			s11g := float64(s11gu)
+			s11b := float64(s11bu)
+			s11r = xFrac1*s01r + xFrac0*s11r
+			s11g = xFrac1*s01g + xFrac0*s11g
+			s11b = xFrac1*s01b + xFrac0*s11b
+			s11r = yFrac1*s10r + yFrac0*s11r
+			s11g = yFrac1*s10g + yFrac0*s11g
+			s11b = yFrac1*s10b + yFrac0*s11b
+			pr := uint32(s11r)
+			pg := uint32(s11g)
+			pb := uint32(s11b)
+			dst.Pix[d+0] = uint8(pr >> 8)
+			dst.Pix[d+1] = uint8(pg >> 8)
+			dst.Pix[d+2] = uint8(pb >> 8)
+			dst.Pix[d+3] = 0xff
+		}
+	}
+}
+
+func (ablInterpolator) scale_RGBA_YCbCr422_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) {
+	sw := int32(sr.Dx())
+	sh := int32(sr.Dy())
+	yscale := float64(sh) / float64(dr.Dy())
+	xscale := float64(sw) / float64(dr.Dx())
+	swMinus1, shMinus1 := sw-1, sh-1
+
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		sy := (float64(dy)+0.5)*yscale - 0.5
+		// If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if
+		// we say int32(sy) instead of int32(math.Floor(sy)). Similarly for
+		// sx, below.
+		sy0 := int32(sy)
+		yFrac0 := sy - float64(sy0)
+		yFrac1 := 1 - yFrac0
+		sy1 := sy0 + 1
+		if sy < 0 {
+			sy0, sy1 = 0, 0
+			yFrac0, yFrac1 = 0, 1
+		} else if sy1 > shMinus1 {
+			sy0, sy1 = shMinus1, shMinus1
+			yFrac0, yFrac1 = 1, 0
+		}
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			sx := (float64(dx)+0.5)*xscale - 0.5
+			sx0 := int32(sx)
+			xFrac0 := sx - float64(sx0)
+			xFrac1 := 1 - xFrac0
+			sx1 := sx0 + 1
+			if sx < 0 {
+				sx0, sx1 = 0, 0
+				xFrac0, xFrac1 = 0, 1
+			} else if sx1 > swMinus1 {
+				sx0, sx1 = swMinus1, swMinus1
+				xFrac0, xFrac1 = 1, 0
+			}
+
+			s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X)
+			s00j := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.CStride + ((sr.Min.X+int(sx0))/2 - src.Rect.Min.X/2)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+			s00yy1 := int(src.Y[s00i]) * 0x10100
+			s00cb1 := int(src.Cb[s00j]) - 128
+			s00cr1 := int(src.Cr[s00j]) - 128
+			s00ru := (s00yy1 + 91881*s00cr1) >> 8
+			s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8
+			s00bu := (s00yy1 + 116130*s00cb1) >> 8
+			if s00ru < 0 {
+				s00ru = 0
+			} else if s00ru > 0xffff {
+				s00ru = 0xffff
+			}
+			if s00gu < 0 {
+				s00gu = 0
+			} else if s00gu > 0xffff {
+				s00gu = 0xffff
+			}
+			if s00bu < 0 {
+				s00bu = 0
+			} else if s00bu > 0xffff {
+				s00bu = 0xffff
+			}
+
+			s00r := float64(s00ru)
+			s00g := float64(s00gu)
+			s00b := float64(s00bu)
+			s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X)
+			s10j := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.CStride + ((sr.Min.X+int(sx1))/2 - src.Rect.Min.X/2)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+			s10yy1 := int(src.Y[s10i]) * 0x10100
+			s10cb1 := int(src.Cb[s10j]) - 128
+			s10cr1 := int(src.Cr[s10j]) - 128
+			s10ru := (s10yy1 + 91881*s10cr1) >> 8
+			s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8
+			s10bu := (s10yy1 + 116130*s10cb1) >> 8
+			if s10ru < 0 {
+				s10ru = 0
+			} else if s10ru > 0xffff {
+				s10ru = 0xffff
+			}
+			if s10gu < 0 {
+				s10gu = 0
+			} else if s10gu > 0xffff {
+				s10gu = 0xffff
+			}
+			if s10bu < 0 {
+				s10bu = 0
+			} else if s10bu > 0xffff {
+				s10bu = 0xffff
+			}
+
+			s10r := float64(s10ru)
+			s10g := float64(s10gu)
+			s10b := float64(s10bu)
+			s10r = xFrac1*s00r + xFrac0*s10r
+			s10g = xFrac1*s00g + xFrac0*s10g
+			s10b = xFrac1*s00b + xFrac0*s10b
+			s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X)
+			s01j := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.CStride + ((sr.Min.X+int(sx0))/2 - src.Rect.Min.X/2)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+			s01yy1 := int(src.Y[s01i]) * 0x10100
+			s01cb1 := int(src.Cb[s01j]) - 128
+			s01cr1 := int(src.Cr[s01j]) - 128
+			s01ru := (s01yy1 + 91881*s01cr1) >> 8
+			s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8
+			s01bu := (s01yy1 + 116130*s01cb1) >> 8
+			if s01ru < 0 {
+				s01ru = 0
+			} else if s01ru > 0xffff {
+				s01ru = 0xffff
+			}
+			if s01gu < 0 {
+				s01gu = 0
+			} else if s01gu > 0xffff {
+				s01gu = 0xffff
+			}
+			if s01bu < 0 {
+				s01bu = 0
+			} else if s01bu > 0xffff {
+				s01bu = 0xffff
+			}
+
+			s01r := float64(s01ru)
+			s01g := float64(s01gu)
+			s01b := float64(s01bu)
+			s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X)
+			s11j := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.CStride + ((sr.Min.X+int(sx1))/2 - src.Rect.Min.X/2)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+			s11yy1 := int(src.Y[s11i]) * 0x10100
+			s11cb1 := int(src.Cb[s11j]) - 128
+			s11cr1 := int(src.Cr[s11j]) - 128
+			s11ru := (s11yy1 + 91881*s11cr1) >> 8
+			s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8
+			s11bu := (s11yy1 + 116130*s11cb1) >> 8
+			if s11ru < 0 {
+				s11ru = 0
+			} else if s11ru > 0xffff {
+				s11ru = 0xffff
+			}
+			if s11gu < 0 {
+				s11gu = 0
+			} else if s11gu > 0xffff {
+				s11gu = 0xffff
+			}
+			if s11bu < 0 {
+				s11bu = 0
+			} else if s11bu > 0xffff {
+				s11bu = 0xffff
+			}
+
+			s11r := float64(s11ru)
+			s11g := float64(s11gu)
+			s11b := float64(s11bu)
+			s11r = xFrac1*s01r + xFrac0*s11r
+			s11g = xFrac1*s01g + xFrac0*s11g
+			s11b = xFrac1*s01b + xFrac0*s11b
+			s11r = yFrac1*s10r + yFrac0*s11r
+			s11g = yFrac1*s10g + yFrac0*s11g
+			s11b = yFrac1*s10b + yFrac0*s11b
+			pr := uint32(s11r)
+			pg := uint32(s11g)
+			pb := uint32(s11b)
+			dst.Pix[d+0] = uint8(pr >> 8)
+			dst.Pix[d+1] = uint8(pg >> 8)
+			dst.Pix[d+2] = uint8(pb >> 8)
+			dst.Pix[d+3] = 0xff
+		}
+	}
+}
+
+func (ablInterpolator) scale_RGBA_YCbCr420_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) {
+	sw := int32(sr.Dx())
+	sh := int32(sr.Dy())
+	yscale := float64(sh) / float64(dr.Dy())
+	xscale := float64(sw) / float64(dr.Dx())
+	swMinus1, shMinus1 := sw-1, sh-1
+
+	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
+		sy := (float64(dy)+0.5)*yscale - 0.5
+		// If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if
+		// we say int32(sy) instead of int32(math.Floor(sy)). Similarly for
+		// sx, below.
+		sy0 := int32(sy)
+		yFrac0 := sy - float64(sy0)
+		yFrac1 := 1 - yFrac0
+		sy1 := sy0 + 1
+		if sy < 0 {
+			sy0, sy1 = 0, 0
+			yFrac0, yFrac1 = 0, 1
+		} else if sy1 > shMinus1 {
+			sy0, sy1 = shMinus1, shMinus1
+			yFrac0, yFrac1 = 1, 0
+		}
+		d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4
+
+		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 {
+			sx := (float64(dx)+0.5)*xscale - 0.5
+			sx0 := int32(sx)
+			xFrac0 := sx - float64(sx0)
+			xFrac1 := 1 - xFrac0
+			sx1 := sx0 + 1
+			if sx < 0 {
+				sx0, sx1 = 0, 0
+				xFrac0, xFrac1 = 0, 1
+			} else if sx1 > swMinus1 {
+				sx0, sx1 = swMinus1, swMinus1
+				xFrac0, xFrac1 = 1, 0
+			}
+
+			s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X)
+			s00j := ((sr.Min.Y+int(sy0))/2-src.Rect.Min.Y/2)*src.CStride + ((sr.Min.X+int(sx0))/2 - src.Rect.Min.X/2)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+			s00yy1 := int(src.Y[s00i]) * 0x10100
+			s00cb1 := int(src.Cb[s00j]) - 128
+			s00cr1 := int(src.Cr[s00j]) - 128
+			s00ru := (s00yy1 + 91881*s00cr1) >> 8
+			s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8
+			s00bu := (s00yy1 + 116130*s00cb1) >> 8
+			if s00ru < 0 {
+				s00ru = 0
+			} else if s00ru > 0xffff {
+				s00ru = 0xffff
+			}
+			if s00gu < 0 {
+				s00gu = 0
+			} else if s00gu > 0xffff {
+				s00gu = 0xffff
+			}
+			if s00bu < 0 {
+				s00bu = 0
+			} else if s00bu > 0xffff {
+				s00bu = 0xffff
+			}
+
+			s00r := float64(s00ru)
+			s00g := float64(s00gu)
+			s00b := float64(s00bu)
+			s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X)
+			s10j := ((sr.Min.Y+int(sy0))/2-src.Rect.Min.Y/2)*src.CStride + ((sr.Min.X+int(sx1))/2 - src.Rect.Min.X/2)
+
+			// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+ s10yy1 := int(src.Y[s10i]) * 0x10100 + s10cb1 := int(src.Cb[s10j]) - 128 + s10cr1 := int(src.Cr[s10j]) - 128 + s10ru := (s10yy1 + 91881*s10cr1) >> 8 + s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8 + s10bu := (s10yy1 + 116130*s10cb1) >> 8 + if s10ru < 0 { + s10ru = 0 + } else if s10ru > 0xffff { + s10ru = 0xffff + } + if s10gu < 0 { + s10gu = 0 + } else if s10gu > 0xffff { + s10gu = 0xffff + } + if s10bu < 0 { + s10bu = 0 + } else if s10bu > 0xffff { + s10bu = 0xffff + } + + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + s01j := ((sr.Min.Y+int(sy1))/2-src.Rect.Min.Y/2)*src.CStride + ((sr.Min.X+int(sx0))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s01yy1 := int(src.Y[s01i]) * 0x10100 + s01cb1 := int(src.Cb[s01j]) - 128 + s01cr1 := int(src.Cr[s01j]) - 128 + s01ru := (s01yy1 + 91881*s01cr1) >> 8 + s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8 + s01bu := (s01yy1 + 116130*s01cb1) >> 8 + if s01ru < 0 { + s01ru = 0 + } else if s01ru > 0xffff { + s01ru = 0xffff + } + if s01gu < 0 { + s01gu = 0 + } else if s01gu > 0xffff { + s01gu = 0xffff + } + if s01bu < 0 { + s01bu = 0 + } else if s01bu > 0xffff { + s01bu = 0xffff + } + + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + s11j := ((sr.Min.Y+int(sy1))/2-src.Rect.Min.Y/2)*src.CStride + ((sr.Min.X+int(sx1))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s11yy1 := int(src.Y[s11i]) * 0x10100 + s11cb1 := int(src.Cb[s11j]) - 128 + s11cr1 := int(src.Cr[s11j]) - 128 + s11ru := (s11yy1 + 91881*s11cr1) >> 8 + s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8 + s11bu := (s11yy1 + 116130*s11cb1) >> 8 + if s11ru < 0 { + s11ru = 0 + } else if s11ru > 0xffff { + s11ru = 0xffff + } + if s11gu < 0 { + s11gu = 0 + } else if s11gu > 0xffff { + s11gu = 0xffff + } + if s11bu < 0 { + s11bu = 0 + } else if s11bu > 0xffff { + s11bu = 0xffff + } + + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) scale_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
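+ // Concretely: for sy in (-1, 0), int32(sy) truncates to 0 where a true
+ // floor would give -1, but the sy < 0 branch below forces sy0, sy1 = 0, 0
+ // and yFrac0, yFrac1 = 0, 1, so the cheaper truncation is harmless.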
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + s00j := ((sr.Min.Y+int(sy0))/2-src.Rect.Min.Y/2)*src.CStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s00yy1 := int(src.Y[s00i]) * 0x10100 + s00cb1 := int(src.Cb[s00j]) - 128 + s00cr1 := int(src.Cr[s00j]) - 128 + s00ru := (s00yy1 + 91881*s00cr1) >> 8 + s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8 + s00bu := (s00yy1 + 116130*s00cb1) >> 8 + if s00ru < 0 { + s00ru = 0 + } else if s00ru > 0xffff { + s00ru = 0xffff + } + if s00gu < 0 { + s00gu = 0 + } else if s00gu > 0xffff { + s00gu = 0xffff + } + if s00bu < 0 { + s00bu = 0 + } else if s00bu > 0xffff { + s00bu = 0xffff + } + + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + s10j := ((sr.Min.Y+int(sy0))/2-src.Rect.Min.Y/2)*src.CStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s10yy1 := int(src.Y[s10i]) * 0x10100 + s10cb1 := int(src.Cb[s10j]) - 128 + s10cr1 := int(src.Cr[s10j]) - 128 + s10ru := (s10yy1 + 91881*s10cr1) >> 8 + s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8 + s10bu := (s10yy1 + 116130*s10cb1) >> 8 + if s10ru < 0 { + s10ru = 0 + } else if s10ru > 0xffff { + s10ru = 0xffff + } + if s10gu < 0 { + s10gu = 0 + } else if s10gu > 0xffff { + s10gu = 0xffff + } + if s10bu < 0 { + s10bu = 0 + } else if s10bu > 0xffff { + s10bu = 0xffff + } + + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + s01j := ((sr.Min.Y+int(sy1))/2-src.Rect.Min.Y/2)*src.CStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
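+ // 4:4:0 subsamples chroma vertically only: the chroma row index is
+ // halved ((y/2 - Rect.Min.Y/2)*CStride) while the column index keeps
+ // full horizontal resolution, unlike the 4:2:2 and 4:2:0 paths.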
+ s01yy1 := int(src.Y[s01i]) * 0x10100 + s01cb1 := int(src.Cb[s01j]) - 128 + s01cr1 := int(src.Cr[s01j]) - 128 + s01ru := (s01yy1 + 91881*s01cr1) >> 8 + s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8 + s01bu := (s01yy1 + 116130*s01cb1) >> 8 + if s01ru < 0 { + s01ru = 0 + } else if s01ru > 0xffff { + s01ru = 0xffff + } + if s01gu < 0 { + s01gu = 0 + } else if s01gu > 0xffff { + s01gu = 0xffff + } + if s01bu < 0 { + s01bu = 0 + } else if s01bu > 0xffff { + s01bu = 0xffff + } + + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + s11j := ((sr.Min.Y+int(sy1))/2-src.Rect.Min.Y/2)*src.CStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s11yy1 := int(src.Y[s11i]) * 0x10100 + s11cb1 := int(src.Cb[s11j]) - 128 + s11cr1 := int(src.Cr[s11j]) - 128 + s11ru := (s11yy1 + 91881*s11cr1) >> 8 + s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8 + s11bu := (s11yy1 + 116130*s11cb1) >> 8 + if s11ru < 0 { + s11ru = 0 + } else if s11ru > 0xffff { + s11ru = 0xffff + } + if s11gu < 0 { + s11gu = 0 + } else if s11gu > 0xffff { + s11gu = 0xffff + } + if s11bu < 0 { + s11bu = 0 + } else if s11bu > 0xffff { + s11bu = 0xffff + } + + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) scale_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
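+ // This is the generic fallback: with src only known as an image.Image,
+ // each of the four samples goes through the At(...).RGBA() interface
+ // call instead of direct Pix indexing, which is the cost the
+ // concrete-typed fast paths above are generated to avoid.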
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)).RGBA() + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)).RGBA() + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)).RGBA() + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)).RGBA() + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (ablInterpolator) scale_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
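+ // The Src variant overwrites the destination outright; contrast the Over
+ // variant above, which blends against dst.Pix via pa1 = (0xffff-pa)*0x101,
+ // the Porter-Duff source-over coverage of the remaining alpha.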
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)).RGBA() + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)).RGBA() + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)).RGBA() + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)).RGBA() + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (ablInterpolator) scale_Image_Image_Over(dst Image, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
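+ // When opts carries a SrcMask, each of the four samples below is scaled
+ // by the mask's alpha (s*ma/0xffff) before interpolation; a DstMask
+ // likewise scales the interpolated result just before the final Over blend.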
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy0)).RGBA() + s00ru = s00ru * ma / 0xffff + s00gu = s00gu * ma / 0xffff + s00bu = s00bu * ma / 0xffff + s00au = s00au * ma / 0xffff + } + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy0)).RGBA() + s10ru = s10ru * ma / 0xffff + s10gu = s10gu * ma / 0xffff + s10bu = s10bu * ma / 0xffff + s10au = s10au * ma / 0xffff + } + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy1)).RGBA() + s01ru = s01ru * ma / 0xffff + s01gu = s01gu * ma / 0xffff + s01bu = s01bu * ma / 0xffff + s01au = s01au * ma / 0xffff + } + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy1)).RGBA() + s11ru = s11ru * ma / 0xffff + s11gu = s11gu * ma / 0xffff + s11bu = s11bu * ma / 0xffff + s11au = s11au * ma / 0xffff + } + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + } + pa1 := 0xffff - pa + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } +} + +func (ablInterpolator) scale_Image_Image_Src(dst Image, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / 
float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. + sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy0)).RGBA() + s00ru = s00ru * ma / 0xffff + s00gu = s00gu * ma / 0xffff + s00bu = s00bu * ma / 0xffff + s00au = s00au * ma / 0xffff + } + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy0)).RGBA() + s10ru = s10ru * ma / 0xffff + s10gu = s10gu * ma / 0xffff + s10bu = s10bu * ma / 0xffff + s10au = s10au * ma / 0xffff + } + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy1)).RGBA() + s01ru = s01ru * ma / 0xffff + s01gu = s01gu * ma / 0xffff + s01bu = s01bu * ma / 0xffff + s01au = s01au * ma / 0xffff + } + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy1)).RGBA() + s11ru = s11ru * ma / 0xffff + s11gu = s11gu * ma / 0xffff + s11bu = s11bu * ma / 0xffff + s11au = s11au * ma / 0xffff + } + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + if dstMask != nil { + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) 
+ dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } else { + dstColorRGBA64.R = uint16(pr) + dstColorRGBA64.G = uint16(pg) + dstColorRGBA64.B = uint16(pb) + dstColorRGBA64.A = uint16(pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } + } +} + +func (ablInterpolator) transform_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.Gray, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.Stride + (sx0 - src.Rect.Min.X) + s00ru := uint32(src.Pix[s00i]) * 0x101 + s00r := float64(s00ru) + s10i := (sy0-src.Rect.Min.Y)*src.Stride + (sx1 - src.Rect.Min.X) + s10ru := uint32(src.Pix[s10i]) * 0x101 + s10r := float64(s10ru) + s10r = xFrac1*s00r + xFrac0*s10r + s01i := (sy1-src.Rect.Min.Y)*src.Stride + (sx0 - src.Rect.Min.X) + s01ru := uint32(src.Pix[s01i]) * 0x101 + s01r := float64(s01ru) + s11i := (sy1-src.Rect.Min.Y)*src.Stride + (sx1 - src.Rect.Min.X) + s11ru := uint32(src.Pix[s11i]) * 0x101 + s11r := float64(s11ru) + s11r = xFrac1*s01r + xFrac0*s11r + s11r = yFrac1*s10r + yFrac0*s11r + pr := uint32(s11r) + out := uint8(pr >> 8) + dst.Pix[d+0] = out + dst.Pix[d+1] = out + dst.Pix[d+2] = out + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) transform_RGBA_NRGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } 
else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s00au := uint32(src.Pix[s00i+3]) * 0x101 + s00ru := uint32(src.Pix[s00i+0]) * s00au / 0xff + s00gu := uint32(src.Pix[s00i+1]) * s00au / 0xff + s00bu := uint32(src.Pix[s00i+2]) * s00au / 0xff + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10i := (sy0-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s10au := uint32(src.Pix[s10i+3]) * 0x101 + s10ru := uint32(src.Pix[s10i+0]) * s10au / 0xff + s10gu := uint32(src.Pix[s10i+1]) * s10au / 0xff + s10bu := uint32(src.Pix[s10i+2]) * s10au / 0xff + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01i := (sy1-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s01au := uint32(src.Pix[s01i+3]) * 0x101 + s01ru := uint32(src.Pix[s01i+0]) * s01au / 0xff + s01gu := uint32(src.Pix[s01i+1]) * s01au / 0xff + s01bu := uint32(src.Pix[s01i+2]) * s01au / 0xff + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11i := (sy1-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s11au := uint32(src.Pix[s11i+3]) * 0x101 + s11ru := uint32(src.Pix[s11i+0]) * s11au / 0xff + s11gu := uint32(src.Pix[s11i+1]) * s11au / 0xff + s11bu := uint32(src.Pix[s11i+2]) * s11au / 0xff + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (ablInterpolator) transform_RGBA_NRGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + 
yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s00au := uint32(src.Pix[s00i+3]) * 0x101 + s00ru := uint32(src.Pix[s00i+0]) * s00au / 0xff + s00gu := uint32(src.Pix[s00i+1]) * s00au / 0xff + s00bu := uint32(src.Pix[s00i+2]) * s00au / 0xff + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10i := (sy0-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s10au := uint32(src.Pix[s10i+3]) * 0x101 + s10ru := uint32(src.Pix[s10i+0]) * s10au / 0xff + s10gu := uint32(src.Pix[s10i+1]) * s10au / 0xff + s10bu := uint32(src.Pix[s10i+2]) * s10au / 0xff + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01i := (sy1-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s01au := uint32(src.Pix[s01i+3]) * 0x101 + s01ru := uint32(src.Pix[s01i+0]) * s01au / 0xff + s01gu := uint32(src.Pix[s01i+1]) * s01au / 0xff + s01bu := uint32(src.Pix[s01i+2]) * s01au / 0xff + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11i := (sy1-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s11au := uint32(src.Pix[s11i+3]) * 0x101 + s11ru := uint32(src.Pix[s11i+0]) * s11au / 0xff + s11gu := uint32(src.Pix[s11i+1]) * s11au / 0xff + s11bu := uint32(src.Pix[s11i+2]) * s11au / 0xff + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (ablInterpolator) transform_RGBA_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s00ru := uint32(src.Pix[s00i+0]) * 0x101 + s00gu := uint32(src.Pix[s00i+1]) * 0x101 + s00bu := uint32(src.Pix[s00i+2]) * 0x101 + s00au := 
uint32(src.Pix[s00i+3]) * 0x101 + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10i := (sy0-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s10ru := uint32(src.Pix[s10i+0]) * 0x101 + s10gu := uint32(src.Pix[s10i+1]) * 0x101 + s10bu := uint32(src.Pix[s10i+2]) * 0x101 + s10au := uint32(src.Pix[s10i+3]) * 0x101 + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01i := (sy1-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s01ru := uint32(src.Pix[s01i+0]) * 0x101 + s01gu := uint32(src.Pix[s01i+1]) * 0x101 + s01bu := uint32(src.Pix[s01i+2]) * 0x101 + s01au := uint32(src.Pix[s01i+3]) * 0x101 + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11i := (sy1-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s11ru := uint32(src.Pix[s11i+0]) * 0x101 + s11gu := uint32(src.Pix[s11i+1]) * 0x101 + s11bu := uint32(src.Pix[s11i+2]) * 0x101 + s11au := uint32(src.Pix[s11i+3]) * 0x101 + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (ablInterpolator) transform_RGBA_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s00ru := uint32(src.Pix[s00i+0]) * 0x101 + s00gu := uint32(src.Pix[s00i+1]) * 0x101 + s00bu := uint32(src.Pix[s00i+2]) * 0x101 + s00au := uint32(src.Pix[s00i+3]) * 0x101 + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10i := 
(sy0-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s10ru := uint32(src.Pix[s10i+0]) * 0x101 + s10gu := uint32(src.Pix[s10i+1]) * 0x101 + s10bu := uint32(src.Pix[s10i+2]) * 0x101 + s10au := uint32(src.Pix[s10i+3]) * 0x101 + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01i := (sy1-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s01ru := uint32(src.Pix[s01i+0]) * 0x101 + s01gu := uint32(src.Pix[s01i+1]) * 0x101 + s01bu := uint32(src.Pix[s01i+2]) * 0x101 + s01au := uint32(src.Pix[s01i+3]) * 0x101 + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11i := (sy1-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s11ru := uint32(src.Pix[s11i+0]) * 0x101 + s11gu := uint32(src.Pix[s11i+1]) * 0x101 + s11bu := uint32(src.Pix[s11i+2]) * 0x101 + s11au := uint32(src.Pix[s11i+3]) * 0x101 + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (ablInterpolator) transform_RGBA_YCbCr444_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s00j := (sy0-src.Rect.Min.Y)*src.CStride + (sx0 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
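+ // In the transform variants, d2s is the destination-to-source affine
+ // matrix: sx = d2s[0]*dxf + d2s[1]*dyf + d2s[2] (and likewise sy), with
+ // dxf, dyf taken at pixel centers (+0.5). Points that bias outside sr
+ // are skipped by the In(sr) test rather than clamped.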
+ s00yy1 := int(src.Y[s00i]) * 0x10100 + s00cb1 := int(src.Cb[s00j]) - 128 + s00cr1 := int(src.Cr[s00j]) - 128 + s00ru := (s00yy1 + 91881*s00cr1) >> 8 + s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8 + s00bu := (s00yy1 + 116130*s00cb1) >> 8 + if s00ru < 0 { + s00ru = 0 + } else if s00ru > 0xffff { + s00ru = 0xffff + } + if s00gu < 0 { + s00gu = 0 + } else if s00gu > 0xffff { + s00gu = 0xffff + } + if s00bu < 0 { + s00bu = 0 + } else if s00bu > 0xffff { + s00bu = 0xffff + } + + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s10i := (sy0-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s10j := (sy0-src.Rect.Min.Y)*src.CStride + (sx1 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s10yy1 := int(src.Y[s10i]) * 0x10100 + s10cb1 := int(src.Cb[s10j]) - 128 + s10cr1 := int(src.Cr[s10j]) - 128 + s10ru := (s10yy1 + 91881*s10cr1) >> 8 + s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8 + s10bu := (s10yy1 + 116130*s10cb1) >> 8 + if s10ru < 0 { + s10ru = 0 + } else if s10ru > 0xffff { + s10ru = 0xffff + } + if s10gu < 0 { + s10gu = 0 + } else if s10gu > 0xffff { + s10gu = 0xffff + } + if s10bu < 0 { + s10bu = 0 + } else if s10bu > 0xffff { + s10bu = 0xffff + } + + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s01i := (sy1-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s01j := (sy1-src.Rect.Min.Y)*src.CStride + (sx0 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s01yy1 := int(src.Y[s01i]) * 0x10100 + s01cb1 := int(src.Cb[s01j]) - 128 + s01cr1 := int(src.Cr[s01j]) - 128 + s01ru := (s01yy1 + 91881*s01cr1) >> 8 + s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8 + s01bu := (s01yy1 + 116130*s01cb1) >> 8 + if s01ru < 0 { + s01ru = 0 + } else if s01ru > 0xffff { + s01ru = 0xffff + } + if s01gu < 0 { + s01gu = 0 + } else if s01gu > 0xffff { + s01gu = 0xffff + } + if s01bu < 0 { + s01bu = 0 + } else if s01bu > 0xffff { + s01bu = 0xffff + } + + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s11i := (sy1-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s11j := (sy1-src.Rect.Min.Y)*src.CStride + (sx1 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
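+ // The interpolation itself is two horizontal lerps feeding one vertical
+ // lerp: s10 <- xFrac1*s00 + xFrac0*s10, s11 <- xFrac1*s01 + xFrac0*s11,
+ // then s11 <- yFrac1*s10 + yFrac0*s11, i.e. standard bilinear filtering.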
+ s11yy1 := int(src.Y[s11i]) * 0x10100 + s11cb1 := int(src.Cb[s11j]) - 128 + s11cr1 := int(src.Cr[s11j]) - 128 + s11ru := (s11yy1 + 91881*s11cr1) >> 8 + s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8 + s11bu := (s11yy1 + 116130*s11cb1) >> 8 + if s11ru < 0 { + s11ru = 0 + } else if s11ru > 0xffff { + s11ru = 0xffff + } + if s11gu < 0 { + s11gu = 0 + } else if s11gu > 0xffff { + s11gu = 0xffff + } + if s11bu < 0 { + s11bu = 0 + } else if s11bu > 0xffff { + s11bu = 0xffff + } + + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) transform_RGBA_YCbCr422_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s00j := (sy0-src.Rect.Min.Y)*src.CStride + ((sx0)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s00yy1 := int(src.Y[s00i]) * 0x10100 + s00cb1 := int(src.Cb[s00j]) - 128 + s00cr1 := int(src.Cr[s00j]) - 128 + s00ru := (s00yy1 + 91881*s00cr1) >> 8 + s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8 + s00bu := (s00yy1 + 116130*s00cb1) >> 8 + if s00ru < 0 { + s00ru = 0 + } else if s00ru > 0xffff { + s00ru = 0xffff + } + if s00gu < 0 { + s00gu = 0 + } else if s00gu > 0xffff { + s00gu = 0xffff + } + if s00bu < 0 { + s00bu = 0 + } else if s00bu > 0xffff { + s00bu = 0xffff + } + + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s10i := (sy0-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s10j := (sy0-src.Rect.Min.Y)*src.CStride + ((sx1)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
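+ // 4:2:2 halves chroma horizontally only: the chroma row matches the luma
+ // row, while the column index is x/2 - Rect.Min.X/2 (here sx0 and sx1
+ // already include the bias, so they are absolute source coordinates).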
+ s10yy1 := int(src.Y[s10i]) * 0x10100 + s10cb1 := int(src.Cb[s10j]) - 128 + s10cr1 := int(src.Cr[s10j]) - 128 + s10ru := (s10yy1 + 91881*s10cr1) >> 8 + s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8 + s10bu := (s10yy1 + 116130*s10cb1) >> 8 + if s10ru < 0 { + s10ru = 0 + } else if s10ru > 0xffff { + s10ru = 0xffff + } + if s10gu < 0 { + s10gu = 0 + } else if s10gu > 0xffff { + s10gu = 0xffff + } + if s10bu < 0 { + s10bu = 0 + } else if s10bu > 0xffff { + s10bu = 0xffff + } + + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s01i := (sy1-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s01j := (sy1-src.Rect.Min.Y)*src.CStride + ((sx0)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s01yy1 := int(src.Y[s01i]) * 0x10100 + s01cb1 := int(src.Cb[s01j]) - 128 + s01cr1 := int(src.Cr[s01j]) - 128 + s01ru := (s01yy1 + 91881*s01cr1) >> 8 + s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8 + s01bu := (s01yy1 + 116130*s01cb1) >> 8 + if s01ru < 0 { + s01ru = 0 + } else if s01ru > 0xffff { + s01ru = 0xffff + } + if s01gu < 0 { + s01gu = 0 + } else if s01gu > 0xffff { + s01gu = 0xffff + } + if s01bu < 0 { + s01bu = 0 + } else if s01bu > 0xffff { + s01bu = 0xffff + } + + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s11i := (sy1-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s11j := (sy1-src.Rect.Min.Y)*src.CStride + ((sx1)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s11yy1 := int(src.Y[s11i]) * 0x10100 + s11cb1 := int(src.Cb[s11j]) - 128 + s11cr1 := int(src.Cr[s11j]) - 128 + s11ru := (s11yy1 + 91881*s11cr1) >> 8 + s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8 + s11bu := (s11yy1 + 116130*s11cb1) >> 8 + if s11ru < 0 { + s11ru = 0 + } else if s11ru > 0xffff { + s11ru = 0xffff + } + if s11gu < 0 { + s11gu = 0 + } else if s11gu > 0xffff { + s11gu = 0xffff + } + if s11bu < 0 { + s11bu = 0 + } else if s11bu > 0xffff { + s11bu = 0xffff + } + + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) transform_RGBA_YCbCr420_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, 
xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s00j := ((sy0)/2-src.Rect.Min.Y/2)*src.CStride + ((sx0)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s00yy1 := int(src.Y[s00i]) * 0x10100 + s00cb1 := int(src.Cb[s00j]) - 128 + s00cr1 := int(src.Cr[s00j]) - 128 + s00ru := (s00yy1 + 91881*s00cr1) >> 8 + s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8 + s00bu := (s00yy1 + 116130*s00cb1) >> 8 + if s00ru < 0 { + s00ru = 0 + } else if s00ru > 0xffff { + s00ru = 0xffff + } + if s00gu < 0 { + s00gu = 0 + } else if s00gu > 0xffff { + s00gu = 0xffff + } + if s00bu < 0 { + s00bu = 0 + } else if s00bu > 0xffff { + s00bu = 0xffff + } + + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s10i := (sy0-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s10j := ((sy0)/2-src.Rect.Min.Y/2)*src.CStride + ((sx1)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s10yy1 := int(src.Y[s10i]) * 0x10100 + s10cb1 := int(src.Cb[s10j]) - 128 + s10cr1 := int(src.Cr[s10j]) - 128 + s10ru := (s10yy1 + 91881*s10cr1) >> 8 + s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8 + s10bu := (s10yy1 + 116130*s10cb1) >> 8 + if s10ru < 0 { + s10ru = 0 + } else if s10ru > 0xffff { + s10ru = 0xffff + } + if s10gu < 0 { + s10gu = 0 + } else if s10gu > 0xffff { + s10gu = 0xffff + } + if s10bu < 0 { + s10bu = 0 + } else if s10bu > 0xffff { + s10bu = 0xffff + } + + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s01i := (sy1-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s01j := ((sy1)/2-src.Rect.Min.Y/2)*src.CStride + ((sx0)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s01yy1 := int(src.Y[s01i]) * 0x10100 + s01cb1 := int(src.Cb[s01j]) - 128 + s01cr1 := int(src.Cr[s01j]) - 128 + s01ru := (s01yy1 + 91881*s01cr1) >> 8 + s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8 + s01bu := (s01yy1 + 116130*s01cb1) >> 8 + if s01ru < 0 { + s01ru = 0 + } else if s01ru > 0xffff { + s01ru = 0xffff + } + if s01gu < 0 { + s01gu = 0 + } else if s01gu > 0xffff { + s01gu = 0xffff + } + if s01bu < 0 { + s01bu = 0 + } else if s01bu > 0xffff { + s01bu = 0xffff + } + + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s11i := (sy1-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s11j := ((sy1)/2-src.Rect.Min.Y/2)*src.CStride + ((sx1)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
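+ // The per-channel clamps below pin each fixed-point result to
+ // [0, 0xffff]: out-of-gamut Cb/Cr combinations can push the sums past
+ // the 16-bit range in either direction, matching the clamping in the
+ // original YCbCr.RGBA method.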
+ s11yy1 := int(src.Y[s11i]) * 0x10100 + s11cb1 := int(src.Cb[s11j]) - 128 + s11cr1 := int(src.Cr[s11j]) - 128 + s11ru := (s11yy1 + 91881*s11cr1) >> 8 + s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8 + s11bu := (s11yy1 + 116130*s11cb1) >> 8 + if s11ru < 0 { + s11ru = 0 + } else if s11ru > 0xffff { + s11ru = 0xffff + } + if s11gu < 0 { + s11gu = 0 + } else if s11gu > 0xffff { + s11gu = 0xffff + } + if s11bu < 0 { + s11bu = 0 + } else if s11bu > 0xffff { + s11bu = 0xffff + } + + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) transform_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s00j := ((sy0)/2-src.Rect.Min.Y/2)*src.CStride + (sx0 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s00yy1 := int(src.Y[s00i]) * 0x10100 + s00cb1 := int(src.Cb[s00j]) - 128 + s00cr1 := int(src.Cr[s00j]) - 128 + s00ru := (s00yy1 + 91881*s00cr1) >> 8 + s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8 + s00bu := (s00yy1 + 116130*s00cb1) >> 8 + if s00ru < 0 { + s00ru = 0 + } else if s00ru > 0xffff { + s00ru = 0xffff + } + if s00gu < 0 { + s00gu = 0 + } else if s00gu > 0xffff { + s00gu = 0xffff + } + if s00bu < 0 { + s00bu = 0 + } else if s00bu > 0xffff { + s00bu = 0xffff + } + + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s10i := (sy0-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s10j := ((sy0)/2-src.Rect.Min.Y/2)*src.CStride + (sx1 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ s10yy1 := int(src.Y[s10i]) * 0x10100 + s10cb1 := int(src.Cb[s10j]) - 128 + s10cr1 := int(src.Cr[s10j]) - 128 + s10ru := (s10yy1 + 91881*s10cr1) >> 8 + s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8 + s10bu := (s10yy1 + 116130*s10cb1) >> 8 + if s10ru < 0 { + s10ru = 0 + } else if s10ru > 0xffff { + s10ru = 0xffff + } + if s10gu < 0 { + s10gu = 0 + } else if s10gu > 0xffff { + s10gu = 0xffff + } + if s10bu < 0 { + s10bu = 0 + } else if s10bu > 0xffff { + s10bu = 0xffff + } + + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s01i := (sy1-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s01j := ((sy1)/2-src.Rect.Min.Y/2)*src.CStride + (sx0 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s01yy1 := int(src.Y[s01i]) * 0x10100 + s01cb1 := int(src.Cb[s01j]) - 128 + s01cr1 := int(src.Cr[s01j]) - 128 + s01ru := (s01yy1 + 91881*s01cr1) >> 8 + s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8 + s01bu := (s01yy1 + 116130*s01cb1) >> 8 + if s01ru < 0 { + s01ru = 0 + } else if s01ru > 0xffff { + s01ru = 0xffff + } + if s01gu < 0 { + s01gu = 0 + } else if s01gu > 0xffff { + s01gu = 0xffff + } + if s01bu < 0 { + s01bu = 0 + } else if s01bu > 0xffff { + s01bu = 0xffff + } + + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s11i := (sy1-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s11j := ((sy1)/2-src.Rect.Min.Y/2)*src.CStride + (sx1 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s11yy1 := int(src.Y[s11i]) * 0x10100 + s11cb1 := int(src.Cb[s11j]) - 128 + s11cr1 := int(src.Cr[s11j]) - 128 + s11ru := (s11yy1 + 91881*s11cr1) >> 8 + s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8 + s11bu := (s11yy1 + 116130*s11cb1) >> 8 + if s11ru < 0 { + s11ru = 0 + } else if s11ru > 0xffff { + s11ru = 0xffff + } + if s11gu < 0 { + s11gu = 0 + } else if s11gu > 0xffff { + s11gu = 0xffff + } + if s11bu < 0 { + s11bu = 0 + } else if s11bu > 0xffff { + s11bu = 0xffff + } + + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) transform_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, 
xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sx0, sy0).RGBA() + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sx1, sy0).RGBA() + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sx0, sy1).RGBA() + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sx1, sy1).RGBA() + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (ablInterpolator) transform_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sx0, sy0).RGBA() + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sx1, sy0).RGBA() + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sx0, sy1).RGBA() + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := 
float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sx1, sy1).RGBA() + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (ablInterpolator) transform_Image_Image_Over(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sx0, sy0).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy0).RGBA() + s00ru = s00ru * ma / 0xffff + s00gu = s00gu * ma / 0xffff + s00bu = s00bu * ma / 0xffff + s00au = s00au * ma / 0xffff + } + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sx1, sy0).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx1, smp.Y+sy0).RGBA() + s10ru = s10ru * ma / 0xffff + s10gu = s10gu * ma / 0xffff + s10bu = s10bu * ma / 0xffff + s10au = s10au * ma / 0xffff + } + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sx0, sy1).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy1).RGBA() + s01ru = s01ru * ma / 0xffff + s01gu = s01gu * ma / 0xffff + s01bu = s01bu * ma / 0xffff + s01au = s01au * ma / 0xffff + } + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sx1, sy1).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx1, smp.Y+sy1).RGBA() + s11ru = s11ru * ma / 0xffff + s11gu = s11gu * ma / 0xffff + s11bu = s11bu * ma / 0xffff + s11au = s11au * ma / 0xffff + } + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := 
float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + } + pa1 := 0xffff - pa + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } +} + +func (ablInterpolator) transform_Image_Image_Src(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sx0, sy0).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy0).RGBA() + s00ru = s00ru * ma / 0xffff + s00gu = s00gu * ma / 0xffff + s00bu = s00bu * ma / 0xffff + s00au = s00au * ma / 0xffff + } + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sx1, sy0).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx1, smp.Y+sy0).RGBA() + s10ru = s10ru * ma / 0xffff + s10gu = s10gu * ma / 0xffff + s10bu = s10bu * ma / 0xffff + s10au = s10au * ma / 0xffff + } + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sx0, sy1).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy1).RGBA() + s01ru = s01ru * ma / 0xffff + s01gu = s01gu * ma / 0xffff + s01bu = s01bu * ma / 0xffff + s01au = s01au * ma / 0xffff + } + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sx1, sy1).RGBA() + if srcMask != nil { + _, _, _, ma := 
srcMask.At(smp.X+sx1, smp.Y+sy1).RGBA() + s11ru = s11ru * ma / 0xffff + s11gu = s11gu * ma / 0xffff + s11bu = s11bu * ma / 0xffff + s11au = s11au * ma / 0xffff + } + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + if dstMask != nil { + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } else { + dstColorRGBA64.R = uint16(pr) + dstColorRGBA64.G = uint16(pg) + dstColorRGBA64.B = uint16(pb) + dstColorRGBA64.A = uint16(pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } + } +} + +func (z *kernelScaler) Scale(dst Image, dr image.Rectangle, src image.Image, sr image.Rectangle, op Op, opts *Options) { + if z.dw != int32(dr.Dx()) || z.dh != int32(dr.Dy()) || z.sw != int32(sr.Dx()) || z.sh != int32(sr.Dy()) { + z.kernel.Scale(dst, dr, src, sr, op, opts) + return + } + + var o Options + if opts != nil { + o = *opts + } + + // adr is the affected destination pixels. + adr := dst.Bounds().Intersect(dr) + adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) + if adr.Empty() || sr.Empty() { + return + } + // Make adr relative to dr.Min. + adr = adr.Sub(dr.Min) + if op == Over && o.SrcMask == nil && opaque(src) { + op = Src + } + + if _, ok := src.(*image.Uniform); ok && o.DstMask == nil && o.SrcMask == nil && sr.In(src.Bounds()) { + Draw(dst, dr, src, src.Bounds().Min, op) + return + } + + // Create a temporary buffer: + // scaleX distributes the source image's columns over the temporary image. + // scaleY distributes the temporary image's rows over the destination image. + var tmp [][4]float64 + if z.pool.New != nil { + tmpp := z.pool.Get().(*[][4]float64) + defer z.pool.Put(tmpp) + tmp = *tmpp + } else { + tmp = z.makeTmpBuf() + } + + // sr is the source pixels. If it extends beyond the src bounds, + // we cannot use the type-specific fast paths, as they access + // the Pix fields directly without bounds checking. + // + // Similarly, the fast paths assume that the masks are nil. 
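Both scaler comments above state the same precondition; condensed into one predicate it would look like the sketch below (canFastPath is a hypothetical name, and Options is this package's options struct):

// The type-specific fast paths index src.Pix directly with no bounds checks
// and have no hook for per-pixel mask multiplication, so they are only safe
// when both masks are nil and every pixel of sr actually exists in src.
func canFastPath(src image.Image, sr image.Rectangle, o *Options) bool {
	return o.SrcMask == nil && o.DstMask == nil && sr.In(src.Bounds())
}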
+	if o.SrcMask != nil || !sr.In(src.Bounds()) {
+		z.scaleX_Image(tmp, src, sr, &o)
+	} else {
+		switch src := src.(type) {
+		case *image.Gray:
+			z.scaleX_Gray(tmp, src, sr, &o)
+		case *image.NRGBA:
+			z.scaleX_NRGBA(tmp, src, sr, &o)
+		case *image.RGBA:
+			z.scaleX_RGBA(tmp, src, sr, &o)
+		case *image.YCbCr:
+			switch src.SubsampleRatio {
+			default:
+				z.scaleX_Image(tmp, src, sr, &o)
+			case image.YCbCrSubsampleRatio444:
+				z.scaleX_YCbCr444(tmp, src, sr, &o)
+			case image.YCbCrSubsampleRatio422:
+				z.scaleX_YCbCr422(tmp, src, sr, &o)
+			case image.YCbCrSubsampleRatio420:
+				z.scaleX_YCbCr420(tmp, src, sr, &o)
+			case image.YCbCrSubsampleRatio440:
+				z.scaleX_YCbCr440(tmp, src, sr, &o)
+			}
+		default:
+			z.scaleX_Image(tmp, src, sr, &o)
+		}
+	}
+
+	if o.DstMask != nil {
+		switch op {
+		case Over:
+			z.scaleY_Image_Over(dst, dr, adr, tmp, &o)
+		case Src:
+			z.scaleY_Image_Src(dst, dr, adr, tmp, &o)
+		}
+	} else {
+		switch op {
+		case Over:
+			switch dst := dst.(type) {
+			case *image.RGBA:
+				z.scaleY_RGBA_Over(dst, dr, adr, tmp, &o)
+			default:
+				z.scaleY_Image_Over(dst, dr, adr, tmp, &o)
+			}
+		case Src:
+			switch dst := dst.(type) {
+			case *image.RGBA:
+				z.scaleY_RGBA_Src(dst, dr, adr, tmp, &o)
+			default:
+				z.scaleY_Image_Src(dst, dr, adr, tmp, &o)
+			}
+		}
+	}
+}
+
+func (q *Kernel) Transform(dst Image, s2d f64.Aff3, src image.Image, sr image.Rectangle, op Op, opts *Options) {
+	var o Options
+	if opts != nil {
+		o = *opts
+	}
+
+	dr := transformRect(&s2d, &sr)
+	// adr is the affected destination pixels.
+	adr := dst.Bounds().Intersect(dr)
+	adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP)
+	if adr.Empty() || sr.Empty() {
+		return
+	}
+	if op == Over && o.SrcMask == nil && opaque(src) {
+		op = Src
+	}
+	d2s := invert(&s2d)
+	// bias is a translation of the mapping from dst coordinates to src
+	// coordinates such that the latter temporarily have non-negative X
+	// and Y coordinates. This allows us to write int(f) instead of
+	// int(math.Floor(f)), since "round to zero" and "round down" are
+	// equivalent when f >= 0, but the former is much cheaper. The X--
+	// and Y-- are because the TransformLeaf methods have a "sx -= 0.5"
+	// adjustment.
+	bias := transformRect(&d2s, &adr).Min
+	bias.X--
+	bias.Y--
+	d2s[2] -= float64(bias.X)
+	d2s[5] -= float64(bias.Y)
+	// Make adr relative to dr.Min.
+	adr = adr.Sub(dr.Min)
+
+	if u, ok := src.(*image.Uniform); ok && o.DstMask == nil && o.SrcMask == nil && sr.In(src.Bounds()) {
+		transform_Uniform(dst, dr, adr, &d2s, u, sr, bias, op)
+		return
+	}
+
+	xscale := abs(d2s[0])
+	if s := abs(d2s[1]); xscale < s {
+		xscale = s
+	}
+	yscale := abs(d2s[3])
+	if s := abs(d2s[4]); yscale < s {
+		yscale = s
+	}
+
+	// sr is the source pixels. If it extends beyond the src bounds,
+	// we cannot use the type-specific fast paths, as they access
+	// the Pix fields directly without bounds checking.
+	//
+	// Similarly, the fast paths assume that the masks are nil.
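The bias comment reconstructed above deserves a concrete illustration: Go's int(f) truncates toward zero, which only agrees with math.Floor for non-negative f. A runnable demonstration (not patch code):

package main

import (
	"fmt"
	"math"
)

func main() {
	f, g := -0.25, 1.75
	fmt.Println(int(f), math.Floor(f)) // 0 -1: truncation and floor disagree below zero
	fmt.Println(int(g), math.Floor(g)) // 1 1: they agree once coordinates are non-negative
}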
+ if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) { + switch op { + case Over: + q.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case Src: + q.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } + } else { + switch op { + case Over: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.NRGBA: + q.transform_RGBA_NRGBA_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case *image.RGBA: + q.transform_RGBA_RGBA_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + default: + q.transform_RGBA_Image_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } + default: + switch src := src.(type) { + default: + q.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } + } + case Src: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.Gray: + q.transform_RGBA_Gray_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case *image.NRGBA: + q.transform_RGBA_NRGBA_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case *image.RGBA: + q.transform_RGBA_RGBA_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case *image.YCbCr: + switch src.SubsampleRatio { + default: + q.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case image.YCbCrSubsampleRatio444: + q.transform_RGBA_YCbCr444_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case image.YCbCrSubsampleRatio422: + q.transform_RGBA_YCbCr422_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case image.YCbCrSubsampleRatio420: + q.transform_RGBA_YCbCr420_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case image.YCbCrSubsampleRatio440: + q.transform_RGBA_YCbCr440_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } + default: + q.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } + default: + switch src := src.(type) { + default: + q.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } + } + } + } +} + +func (z *kernelScaler) scaleX_Gray(tmp [][4]float64, src *image.Gray, sr image.Rectangle, opts *Options) { + t := 0 + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pi := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.Stride + (sr.Min.X + int(c.coord) - src.Rect.Min.X) + pru := uint32(src.Pix[pi]) * 0x101 + pr += float64(pru) * c.weight + } + pr *= s.invTotalWeightFFFF + tmp[t] = [4]float64{ + pr, + pr, + pr, + 1, + } + t++ + } + } +} + +func (z *kernelScaler) scaleX_NRGBA(tmp [][4]float64, src *image.NRGBA, sr image.Rectangle, opts *Options) { + t := 0 + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr, pg, pb, pa float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pi := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(c.coord)-src.Rect.Min.X)*4 + pau := uint32(src.Pix[pi+3]) * 0x101 + pru := uint32(src.Pix[pi+0]) * pau / 0xff + pgu := uint32(src.Pix[pi+1]) * pau / 0xff + pbu := uint32(src.Pix[pi+2]) * pau / 0xff + pr += float64(pru) * c.weight + pg += float64(pgu) * c.weight + pb += float64(pbu) * c.weight + pa += float64(pau) * c.weight + } + tmp[t] = [4]float64{ + pr * s.invTotalWeightFFFF, + pg * s.invTotalWeightFFFF, + pb * s.invTotalWeightFFFF, + pa * s.invTotalWeightFFFF, + } + t++ + } + } +} + +func (z 
*kernelScaler) scaleX_RGBA(tmp [][4]float64, src *image.RGBA, sr image.Rectangle, opts *Options) { + t := 0 + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr, pg, pb, pa float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pi := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(c.coord)-src.Rect.Min.X)*4 + pru := uint32(src.Pix[pi+0]) * 0x101 + pgu := uint32(src.Pix[pi+1]) * 0x101 + pbu := uint32(src.Pix[pi+2]) * 0x101 + pau := uint32(src.Pix[pi+3]) * 0x101 + pr += float64(pru) * c.weight + pg += float64(pgu) * c.weight + pb += float64(pbu) * c.weight + pa += float64(pau) * c.weight + } + tmp[t] = [4]float64{ + pr * s.invTotalWeightFFFF, + pg * s.invTotalWeightFFFF, + pb * s.invTotalWeightFFFF, + pa * s.invTotalWeightFFFF, + } + t++ + } + } +} + +func (z *kernelScaler) scaleX_YCbCr444(tmp [][4]float64, src *image.YCbCr, sr image.Rectangle, opts *Options) { + t := 0 + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr, pg, pb float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pi := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(c.coord) - src.Rect.Min.X) + pj := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.CStride + (sr.Min.X + int(c.coord) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10100 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + + pr += float64(pru) * c.weight + pg += float64(pgu) * c.weight + pb += float64(pbu) * c.weight + } + tmp[t] = [4]float64{ + pr * s.invTotalWeightFFFF, + pg * s.invTotalWeightFFFF, + pb * s.invTotalWeightFFFF, + 1, + } + t++ + } + } +} + +func (z *kernelScaler) scaleX_YCbCr422(tmp [][4]float64, src *image.YCbCr, sr image.Rectangle, opts *Options) { + t := 0 + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr, pg, pb float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pi := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(c.coord) - src.Rect.Min.X) + pj := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.CStride + ((sr.Min.X+int(c.coord))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
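The pi/pj index pairs are where these scaleX variants differ: pi walks the full-resolution luma plane while pj is subsampled. Gathered into one illustrative helper (chromaOffset is a made-up name; the expressions mirror the generated ones, and the standard library's (*image.YCbCr).COffset serves the same purpose):

func chromaOffset(src *image.YCbCr, x, y int) int {
	switch src.SubsampleRatio {
	case image.YCbCrSubsampleRatio420: // chroma halved on both axes
		return (y/2-src.Rect.Min.Y/2)*src.CStride + (x/2 - src.Rect.Min.X/2)
	case image.YCbCrSubsampleRatio422: // chroma halved horizontally
		return (y-src.Rect.Min.Y)*src.CStride + (x/2 - src.Rect.Min.X/2)
	case image.YCbCrSubsampleRatio440: // chroma halved vertically
		return (y/2-src.Rect.Min.Y/2)*src.CStride + (x - src.Rect.Min.X)
	default: // 4:4:4: full resolution
		return (y-src.Rect.Min.Y)*src.CStride + (x - src.Rect.Min.X)
	}
}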
+				pyy1 := int(src.Y[pi]) * 0x10100
+				pcb1 := int(src.Cb[pj]) - 128
+				pcr1 := int(src.Cr[pj]) - 128
+				pru := (pyy1 + 91881*pcr1) >> 8
+				pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8
+				pbu := (pyy1 + 116130*pcb1) >> 8
+				if pru < 0 {
+					pru = 0
+				} else if pru > 0xffff {
+					pru = 0xffff
+				}
+				if pgu < 0 {
+					pgu = 0
+				} else if pgu > 0xffff {
+					pgu = 0xffff
+				}
+				if pbu < 0 {
+					pbu = 0
+				} else if pbu > 0xffff {
+					pbu = 0xffff
+				}
+
+				pr += float64(pru) * c.weight
+				pg += float64(pgu) * c.weight
+				pb += float64(pbu) * c.weight
+			}
+			tmp[t] = [4]float64{
+				pr * s.invTotalWeightFFFF,
+				pg * s.invTotalWeightFFFF,
+				pb * s.invTotalWeightFFFF,
+				1,
+			}
+			t++
+		}
+	}
+}
+
+func (z *kernelScaler) scaleX_YCbCr420(tmp [][4]float64, src *image.YCbCr, sr image.Rectangle, opts *Options) {
+	t := 0
+	for y := int32(0); y < z.sh; y++ {
+		for _, s := range z.horizontal.sources {
+			var pr, pg, pb float64
+			for _, c := range z.horizontal.contribs[s.i:s.j] {
+				pi := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(c.coord) - src.Rect.Min.X)
+				pj := ((sr.Min.Y+int(y))/2-src.Rect.Min.Y/2)*src.CStride + ((sr.Min.X+int(c.coord))/2 - src.Rect.Min.X/2)
+
+				// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+				pyy1 := int(src.Y[pi]) * 0x10100
+				pcb1 := int(src.Cb[pj]) - 128
+				pcr1 := int(src.Cr[pj]) - 128
+				pru := (pyy1 + 91881*pcr1) >> 8
+				pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8
+				pbu := (pyy1 + 116130*pcb1) >> 8
+				if pru < 0 {
+					pru = 0
+				} else if pru > 0xffff {
+					pru = 0xffff
+				}
+				if pgu < 0 {
+					pgu = 0
+				} else if pgu > 0xffff {
+					pgu = 0xffff
+				}
+				if pbu < 0 {
+					pbu = 0
+				} else if pbu > 0xffff {
+					pbu = 0xffff
+				}
+
+				pr += float64(pru) * c.weight
+				pg += float64(pgu) * c.weight
+				pb += float64(pbu) * c.weight
+			}
+			tmp[t] = [4]float64{
+				pr * s.invTotalWeightFFFF,
+				pg * s.invTotalWeightFFFF,
+				pb * s.invTotalWeightFFFF,
+				1,
+			}
+			t++
+		}
+	}
+}
+
+func (z *kernelScaler) scaleX_YCbCr440(tmp [][4]float64, src *image.YCbCr, sr image.Rectangle, opts *Options) {
+	t := 0
+	for y := int32(0); y < z.sh; y++ {
+		for _, s := range z.horizontal.sources {
+			var pr, pg, pb float64
+			for _, c := range z.horizontal.contribs[s.i:s.j] {
+				pi := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(c.coord) - src.Rect.Min.X)
+				pj := ((sr.Min.Y+int(y))/2-src.Rect.Min.Y/2)*src.CStride + (sr.Min.X + int(c.coord) - src.Rect.Min.X)
+
+				// This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method.
+ pyy1 := int(src.Y[pi]) * 0x10100 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + + pr += float64(pru) * c.weight + pg += float64(pgu) * c.weight + pb += float64(pbu) * c.weight + } + tmp[t] = [4]float64{ + pr * s.invTotalWeightFFFF, + pg * s.invTotalWeightFFFF, + pb * s.invTotalWeightFFFF, + 1, + } + t++ + } + } +} + +func (z *kernelScaler) scaleX_Image(tmp [][4]float64, src image.Image, sr image.Rectangle, opts *Options) { + t := 0 + srcMask, smp := opts.SrcMask, opts.SrcMaskP + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr, pg, pb, pa float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pru, pgu, pbu, pau := src.At(sr.Min.X+int(c.coord), sr.Min.Y+int(y)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(c.coord), smp.Y+sr.Min.Y+int(y)).RGBA() + pru = pru * ma / 0xffff + pgu = pgu * ma / 0xffff + pbu = pbu * ma / 0xffff + pau = pau * ma / 0xffff + } + pr += float64(pru) * c.weight + pg += float64(pgu) * c.weight + pb += float64(pbu) * c.weight + pa += float64(pau) * c.weight + } + tmp[t] = [4]float64{ + pr * s.invTotalWeightFFFF, + pg * s.invTotalWeightFFFF, + pb * s.invTotalWeightFFFF, + pa * s.invTotalWeightFFFF, + } + t++ + } + } +} + +func (z *kernelScaler) scaleY_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + d := (dr.Min.Y+adr.Min.Y-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+int(dx)-dst.Rect.Min.X)*4 + for _, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { + var pr, pg, pb, pa float64 + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + pr0 := uint32(ftou(pr * s.invTotalWeight)) + pg0 := uint32(ftou(pg * s.invTotalWeight)) + pb0 := uint32(ftou(pb * s.invTotalWeight)) + pa0 := uint32(ftou(pa * s.invTotalWeight)) + pa1 := (0xffff - uint32(pa0)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) + d += dst.Stride + } + } +} + +func (z *kernelScaler) scaleY_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + d := (dr.Min.Y+adr.Min.Y-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+int(dx)-dst.Rect.Min.X)*4 + for _, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { + var pr, pg, pb, pa float64 + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + dst.Pix[d+0] = uint8(ftou(pr*s.invTotalWeight) >> 8) + dst.Pix[d+1] = uint8(ftou(pg*s.invTotalWeight) >> 8) + dst.Pix[d+2] = uint8(ftou(pb*s.invTotalWeight) >> 8) + dst.Pix[d+3] = 
uint8(ftou(pa*s.invTotalWeight) >> 8) + d += dst.Stride + } + } +} + +func (z *kernelScaler) scaleY_Image_Over(dst Image, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + for dy, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { + var pr, pg, pb, pa float64 + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy)).RGBA() + pr0 := uint32(ftou(pr * s.invTotalWeight)) + pg0 := uint32(ftou(pg * s.invTotalWeight)) + pb0 := uint32(ftou(pb * s.invTotalWeight)) + pa0 := uint32(ftou(pa * s.invTotalWeight)) + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(adr.Min.Y+dy)).RGBA() + pr0 = pr0 * ma / 0xffff + pg0 = pg0 * ma / 0xffff + pb0 = pb0 * ma / 0xffff + pa0 = pa0 * ma / 0xffff + } + pa1 := 0xffff - pa0 + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr0) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg0) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb0) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa0) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColor) + } + } +} + +func (z *kernelScaler) scaleY_Image_Src(dst Image, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + for dy, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { + var pr, pg, pb, pa float64 + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + if dstMask != nil { + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy)).RGBA() + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(adr.Min.Y+dy)).RGBA() + pr := uint32(ftou(pr*s.invTotalWeight)) * ma / 0xffff + pg := uint32(ftou(pg*s.invTotalWeight)) * ma / 0xffff + pb := uint32(ftou(pb*s.invTotalWeight)) * ma / 0xffff + pa := uint32(ftou(pa*s.invTotalWeight)) * ma / 0xffff + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColor) + } else { + dstColorRGBA64.R = ftou(pr * s.invTotalWeight) + dstColorRGBA64.G = ftou(pg * s.invTotalWeight) + dstColorRGBA64.B = ftou(pb * s.invTotalWeight) + dstColorRGBA64.A = ftou(pa * s.invTotalWeight) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColor) + } + } + } +} + +func (q *Kernel) transform_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.Gray, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
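The comment that opens each of the kernel transform functions below describes the same shrink-time adjustment; isolated as a sketch (broaden is an illustrative name):

// When scale > 1, one destination pixel maps onto several source pixels, so
// the sampling half-width is widened by that factor and the kernel argument
// is compressed to match: the kernel keeps its shape, but its window covers
// every source pixel at least once.
func broaden(support, scale float64) (halfWidth, kernelArgScale float64) {
	halfWidth, kernelArgScale = support, 1
	if scale > 1 {
		halfWidth *= scale
		kernelArgScale = 1 / scale
	}
	return halfWidth, kernelArgScale
}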
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.Stride + (kx - src.Rect.Min.X) + pru := uint32(src.Pix[pi]) * 0x101 + pr += float64(pru) * w + } + } + } + } + out := uint8(fffftou(pr) >> 8) + dst.Pix[d+0] = out + dst.Pix[d+1] = out + dst.Pix[d+2] = out + dst.Pix[d+3] = 0xff + } + } +} + +func (q *Kernel) transform_RGBA_NRGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
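Each transform variant rebuilds the same normalized weight vector per output pixel. As a standalone sketch (assuming this package's Kernel type, with math.Abs standing in for the package-private abs):

func normalizedWeights(q *Kernel, center, argScale float64, lo, hi int) []float64 {
	ws := make([]float64, hi-lo)
	total := 0.0
	for k := lo; k < hi; k++ {
		// Taps outside the (possibly widened) support contribute zero.
		if t := math.Abs((center - float64(k)) * argScale); t < q.Support {
			ws[k-lo] = q.At(t)
		}
		total += ws[k-lo]
	}
	// Renormalize so that windows clipped at the source edges neither darken
	// nor brighten the output.
	for i := range ws {
		ws[i] /= total
	}
	return ws
}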
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 + pau := uint32(src.Pix[pi+3]) * 0x101 + pru := uint32(src.Pix[pi+0]) * pau / 0xff + pgu := uint32(src.Pix[pi+1]) * pau / 0xff + pbu := uint32(src.Pix[pi+2]) * pau / 0xff + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + pr0 := uint32(fffftou(pr)) + pg0 := uint32(fffftou(pg)) + pb0 := uint32(fffftou(pb)) + pa0 := uint32(fffftou(pa)) + pa1 := (0xffff - uint32(pa0)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) + } + } +} + +func (q *Kernel) transform_RGBA_NRGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
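The store sequence that closes the *_Over variants is Porter-Duff "source over destination" in fixed point; one 8-bit channel of it, written out as a sketch (overChannel is an illustrative name; src and sa are 16-bit premultiplied values):

func overChannel(dstByte uint8, src, sa uint32) uint8 {
	// (0xffff - sa) is the coverage left for dst; the *0x101 simultaneously
	// promotes the 8-bit dst byte to 16 bits when the product is taken below.
	sa1 := (0xffff - sa) * 0x101
	return uint8((uint32(dstByte)*sa1/0xffff + src) >> 8)
}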
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 + pau := uint32(src.Pix[pi+3]) * 0x101 + pru := uint32(src.Pix[pi+0]) * pau / 0xff + pgu := uint32(src.Pix[pi+1]) * pau / 0xff + pbu := uint32(src.Pix[pi+2]) * pau / 0xff + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = uint8(fffftou(pa) >> 8) + } + } +} + +func (q *Kernel) transform_RGBA_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
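The NRGBA loads in the two functions above convert non-premultiplied bytes directly into 16-bit premultiplied channels. Isolated (nrgbaToPremul16 is a made-up name):

// Promote alpha to 16 bits, then scale each 8-bit channel by it; dividing by
// 0xff makes a fully opaque 0xab come out as 0xabab, matching the *0x101 used
// on already-premultiplied RGBA pixels.
func nrgbaToPremul16(r8, g8, b8, a8 uint8) (r, g, b, a uint32) {
	a = uint32(a8) * 0x101
	r = uint32(r8) * a / 0xff
	g = uint32(g8) * a / 0xff
	b = uint32(b8) * a / 0xff
	return r, g, b, a
}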
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 + pru := uint32(src.Pix[pi+0]) * 0x101 + pgu := uint32(src.Pix[pi+1]) * 0x101 + pbu := uint32(src.Pix[pi+2]) * 0x101 + pau := uint32(src.Pix[pi+3]) * 0x101 + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + pr0 := uint32(fffftou(pr)) + pg0 := uint32(fffftou(pg)) + pb0 := uint32(fffftou(pb)) + pa0 := uint32(fffftou(pa)) + pa1 := (0xffff - uint32(pa0)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) + } + } +} + +func (q *Kernel) transform_RGBA_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
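The recurring "if pr > pa" clamps guard against kernels with negative lobes (such as Catmull-Rom), whose weighted sums can overshoot; negative undershoot is clamped at the unsigned conversion step. A sketch of the invariant being enforced:

// In premultiplied form no color channel may exceed alpha, so each channel
// is pulled back to the valid range before quantizing to bytes.
func clampPremul(r, g, b, a float64) (float64, float64, float64) {
	if r > a {
		r = a
	}
	if g > a {
		g = a
	}
	if b > a {
		b = a
	}
	return r, g, b
}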
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 + pru := uint32(src.Pix[pi+0]) * 0x101 + pgu := uint32(src.Pix[pi+1]) * 0x101 + pbu := uint32(src.Pix[pi+2]) * 0x101 + pau := uint32(src.Pix[pi+3]) * 0x101 + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = uint8(fffftou(pa) >> 8) + } + } +} + +func (q *Kernel) transform_RGBA_YCbCr444_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
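A brief note on the * 0x101 and >> 8 pairing that appears in every fast path: multiplying by 0x101 replicates a byte into both halves of a 16-bit value, and shifting right by 8 recovers it. A runnable two-liner:

package main

import "fmt"

func main() {
	fmt.Printf("%#x\n", 0xab*0x101)             // 0xabab: byte replicated into 16 bits
	fmt.Println(uint8((0xab*0x101)>>8) == 0xab) // true: >> 8 is the exact inverse
}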
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) + pj := (ky-src.Rect.Min.Y)*src.CStride + (kx - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10100 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + } + } + } + } + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (q *Kernel) transform_RGBA_YCbCr422_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) + pj := (ky-src.Rect.Min.Y)*src.CStride + ((kx)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10100 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + } + } + } + } + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (q *Kernel) transform_RGBA_YCbCr420_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) + pj := ((ky)/2-src.Rect.Min.Y/2)*src.CStride + ((kx)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10100 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + } + } + } + } + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (q *Kernel) transform_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) + pj := ((ky)/2-src.Rect.Min.Y/2)*src.CStride + (kx - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10100 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + } + } + } + } + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (q *Kernel) transform_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
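+ // This path reads source pixels through the image.Image interface
+ // rather than via direct Pix access, so it works for any src type at
+ // the cost of per-sample interface calls.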
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pru, pgu, pbu, pau := src.At(kx, ky).RGBA() + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + pr0 := uint32(fffftou(pr)) + pg0 := uint32(fffftou(pg)) + pb0 := uint32(fffftou(pb)) + pa0 := uint32(fffftou(pa)) + pa1 := (0xffff - uint32(pa0)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) + } + } +} + +func (q *Kernel) transform_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
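+ // Unlike the Over variant, the accumulated color replaces the
+ // destination pixel outright, so dst.Pix is written without being read.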
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pru, pgu, pbu, pau := src.At(kx, ky).RGBA() + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = uint8(fffftou(pa) >> 8) + } + } +} + +func (q *Kernel) transform_Image_Image_Over(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
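+ // This is the fully generic fallback: both dst and src go through
+ // interfaces, and Options.SrcMask and Options.DstMask, when non-nil,
+ // are consulted per pixel.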
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pru, pgu, pbu, pau := src.At(kx, ky).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+kx, smp.Y+ky).RGBA() + pru = pru * ma / 0xffff + pgu = pgu * ma / 0xffff + pbu = pbu * ma / 0xffff + pau = pau * ma / 0xffff + } + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + pr0 := uint32(fffftou(pr)) + pg0 := uint32(fffftou(pg)) + pb0 := uint32(fffftou(pb)) + pa0 := uint32(fffftou(pa)) + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr0 = pr0 * ma / 0xffff + pg0 = pg0 * ma / 0xffff + pb0 = pb0 * ma / 0xffff + pa0 = pa0 * ma / 0xffff + } + pa1 := 0xffff - pa0 + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr0) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg0) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb0) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa0) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } +} + +func (q *Kernel) transform_Image_Image_Src(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel 
support so that we still + // visit every source pixel. + xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pru, pgu, pbu, pau := src.At(kx, ky).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+kx, smp.Y+ky).RGBA() + pru = pru * ma / 0xffff + pgu = pgu * ma / 0xffff + pbu = pbu * ma / 0xffff + pau = pau * ma / 0xffff + } + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + if dstMask != nil { + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr := uint32(fffftou(pr)) * ma / 0xffff + pg := uint32(fffftou(pg)) * ma / 0xffff + pb := uint32(fffftou(pb)) * ma / 0xffff + pa := uint32(fffftou(pa)) * ma / 0xffff + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } else { + dstColorRGBA64.R = fffftou(pr) + dstColorRGBA64.G = fffftou(pg) + dstColorRGBA64.B = fffftou(pb) + dstColorRGBA64.A = fffftou(pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } + } +} diff --git 
a/_third_party/golang.org/x/image/draw/scale.go b/_third_party/golang.org/x/image/draw/scale.go new file mode 100644 index 0000000000..1b43d57831 --- /dev/null +++ b/_third_party/golang.org/x/image/draw/scale.go @@ -0,0 +1,527 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +package draw + +import ( + "image" + "image/color" + "math" + "sync" + + "bosun.org/_third_party/golang.org/x/image/math/f64" +) + +// Copy copies the part of the source image defined by src and sr and writes +// the result of a Porter-Duff composition to the part of the destination image +// defined by dst and the translation of sr so that sr.Min translates to dp. +func Copy(dst Image, dp image.Point, src image.Image, sr image.Rectangle, op Op, opts *Options) { + var o Options + if opts != nil { + o = *opts + } + dr := sr.Add(dp.Sub(sr.Min)) + if o.DstMask == nil { + DrawMask(dst, dr, src, sr.Min, o.SrcMask, o.SrcMaskP.Add(sr.Min), op) + } else { + NearestNeighbor.Scale(dst, dr, src, sr, op, opts) + } +} + +// Scaler scales the part of the source image defined by src and sr and writes +// the result of a Porter-Duff composition to the part of the destination image +// defined by dst and dr. +// +// A Scaler is safe to use concurrently. +type Scaler interface { + Scale(dst Image, dr image.Rectangle, src image.Image, sr image.Rectangle, op Op, opts *Options) +} + +// Transformer transforms the part of the source image defined by src and sr +// and writes the result of a Porter-Duff composition to the part of the +// destination image defined by dst and the affine transform m applied to sr. +// +// For example, if m is the matrix +// +// m00 m01 m02 +// m10 m11 m12 +// +// then the src-space point (sx, sy) maps to the dst-space point +// (m00*sx + m01*sy + m02, m10*sx + m11*sy + m12). +// +// A Transformer is safe to use concurrently. +type Transformer interface { + Transform(dst Image, m f64.Aff3, src image.Image, sr image.Rectangle, op Op, opts *Options) +} + +// Options are optional parameters to Copy, Scale and Transform. +// +// A nil *Options means to use the default (zero) values of each field. +type Options struct { + // Masks limit what parts of the dst image are drawn to and what parts of + // the src image are drawn from. + // + // A dst or src mask image having a zero alpha (transparent) pixel value in + // the respective coordinate space means that that dst pixel is entirely + // unaffected or that src pixel is considered transparent black. A full + // alpha (opaque) value means that the dst pixel is maximally affected or + // the src pixel contributes maximally. The default values, nil, are + // equivalent to fully opaque, infinitely large mask images. + // + // The DstMask is otherwise known as a clip mask, and its pixels map 1:1 to + // the dst image's pixels. DstMaskP in DstMask space corresponds to + // image.Point{X:0, Y:0} in dst space. For example, when limiting + // repainting to a 'dirty rectangle', use that image.Rectangle and a zero + // image.Point as the DstMask and DstMaskP. + // + // The SrcMask's pixels map 1:1 to the src image's pixels. SrcMaskP in + // SrcMask space corresponds to image.Point{X:0, Y:0} in src space. 
For + // example, when drawing font glyphs in a uniform color, use an + // *image.Uniform as the src, and use the glyph atlas image and the + // per-glyph offset as SrcMask and SrcMaskP: + // Copy(dst, dp, image.NewUniform(color), image.Rect(0, 0, glyphWidth, glyphHeight), &Options{ + // SrcMask: glyphAtlas, + // SrcMaskP: glyphOffset, + // }) + DstMask image.Image + DstMaskP image.Point + SrcMask image.Image + SrcMaskP image.Point + + // TODO: a smooth vs sharp edges option, for arbitrary rotations? +} + +// Interpolator is an interpolation algorithm for when dst and src pixels don't +// have a 1:1 correspondence. +// +// Of the interpolators provided by this package: +// - NearestNeighbor is fast but usually looks worst. +// - CatmullRom is slow but usually looks best. +// - ApproxBiLinear has reasonable speed and quality. +// +// The time taken depends on the size of dr. For kernel interpolators, the +// speed also depends on the size of sr, and so they are often slower than +// non-kernel interpolators, especially when scaling down. +type Interpolator interface { + Scaler + Transformer +} + +// Kernel is an interpolator that blends source pixels weighted by a symmetric +// kernel function. +type Kernel struct { + // Support is the kernel support and must be >= 0. At(t) is assumed to be + // zero when t >= Support. + Support float64 + // At is the kernel function. It will only be called with t in the + // range [0, Support). + At func(t float64) float64 +} + +// Scale implements the Scaler interface. +func (q *Kernel) Scale(dst Image, dr image.Rectangle, src image.Image, sr image.Rectangle, op Op, opts *Options) { + q.newScaler(dr.Dx(), dr.Dy(), sr.Dx(), sr.Dy(), false).Scale(dst, dr, src, sr, op, opts) +} + +// NewScaler returns a Scaler that is optimized for scaling multiple times with +// the same fixed destination and source width and height. +func (q *Kernel) NewScaler(dw, dh, sw, sh int) Scaler { + return q.newScaler(dw, dh, sw, sh, true) +} + +func (q *Kernel) newScaler(dw, dh, sw, sh int, usePool bool) Scaler { + z := &kernelScaler{ + kernel: q, + dw: int32(dw), + dh: int32(dh), + sw: int32(sw), + sh: int32(sh), + horizontal: newDistrib(q, int32(dw), int32(sw)), + vertical: newDistrib(q, int32(dh), int32(sh)), + } + if usePool { + z.pool.New = func() interface{} { + tmp := z.makeTmpBuf() + return &tmp + } + } + return z +} + +var ( + // NearestNeighbor is the nearest neighbor interpolator. It is very fast, + // but usually gives very low quality results. When scaling up, the result + // will look 'blocky'. + NearestNeighbor = Interpolator(nnInterpolator{}) + + // ApproxBiLinear is a mixture of the nearest neighbor and bi-linear + // interpolators. It is fast, but usually gives medium quality results. + // + // It implements bi-linear interpolation when upscaling and a bi-linear + // blend of the 4 nearest neighbor pixels when downscaling. This yields + // nicer quality than nearest neighbor interpolation when upscaling, but + // the time taken is independent of the number of source pixels, unlike the + // bi-linear interpolator. When downscaling a large image, the performance + // difference can be significant. + ApproxBiLinear = Interpolator(ablInterpolator{}) + + // BiLinear is the tent kernel. It is slow, but usually gives high quality + // results. + BiLinear = &Kernel{1, func(t float64) float64 { + return 1 - t + }} + + // CatmullRom is the Catmull-Rom kernel. It is very slow, but usually gives + // very high quality results.
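+ //
+ // A minimal use, as a sketch (src may be any image.Image; dst must
+ // satisfy this package's Image interface, as *image.RGBA does):
+ //	dst := image.NewRGBA(image.Rect(0, 0, 200, 150))
+ //	CatmullRom.Scale(dst, dst.Bounds(), src, src.Bounds(), Src, nil)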
+ // + // It is an instance of the more general cubic BC-spline kernel with parameters + // B=0 and C=0.5. See Mitchell and Netravali, "Reconstruction Filters in + // Computer Graphics", Computer Graphics, Vol. 22, No. 4, pp. 221-228. + CatmullRom = &Kernel{2, func(t float64) float64 { + if t < 1 { + return (1.5*t-2.5)*t*t + 1 + } + return ((-0.5*t+2.5)*t-4)*t + 2 + }} + + // TODO: a Kaiser-Bessel kernel? +) + +type nnInterpolator struct{} + +type ablInterpolator struct{} + +type kernelScaler struct { + kernel *Kernel + dw, dh, sw, sh int32 + horizontal, vertical distrib + pool sync.Pool +} + +func (z *kernelScaler) makeTmpBuf() [][4]float64 { + return make([][4]float64, z.dw*z.sh) +} + +// source is a range of contribs, their inverse total weight, and that ITW +// divided by 0xffff. +type source struct { + i, j int32 + invTotalWeight float64 + invTotalWeightFFFF float64 +} + +// contrib is the weight of a column or row. +type contrib struct { + coord int32 + weight float64 +} + +// distrib measures how source pixels are distributed over destination pixels. +type distrib struct { + // sources are what contribs each column or row in the source image owns, + // and the total weight of those contribs. + sources []source + // contribs are the contributions indexed by sources[s].i and sources[s].j. + contribs []contrib +} + +// newDistrib returns a distrib that distributes sw source columns (or rows) +// over dw destination columns (or rows). +func newDistrib(q *Kernel, dw, sw int32) distrib { + scale := float64(sw) / float64(dw) + halfWidth, kernelArgScale := q.Support, 1.0 + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. + if scale > 1 { + halfWidth *= scale + kernelArgScale = 1 / scale + } + + // Make the sources slice, one source for each column or row, and temporarily + // appropriate its elements' fields so that invTotalWeight is the scaled + // coordinate of the source column or row, and i and j are the lower and + // upper bounds of the range of destination columns or rows affected by the + // source column or row. + n, sources := int32(0), make([]source, dw) + for x := range sources { + center := (float64(x)+0.5)*scale - 0.5 + i := int32(math.Floor(center - halfWidth)) + if i < 0 { + i = 0 + } + j := int32(math.Ceil(center + halfWidth)) + if j > sw { + j = sw + if j < i { + j = i + } + } + sources[x] = source{i: i, j: j, invTotalWeight: center} + n += j - i + } + + contribs := make([]contrib, 0, n) + for k, b := range sources { + totalWeight := 0.0 + l := int32(len(contribs)) + for coord := b.i; coord < b.j; coord++ { + t := abs((b.invTotalWeight - float64(coord)) * kernelArgScale) + if t >= q.Support { + continue + } + weight := q.At(t) + if weight == 0 { + continue + } + totalWeight += weight + contribs = append(contribs, contrib{coord, weight}) + } + totalWeight = 1 / totalWeight + sources[k] = source{ + i: l, + j: int32(len(contribs)), + invTotalWeight: totalWeight, + invTotalWeightFFFF: totalWeight / 0xffff, + } + } + + return distrib{sources, contribs} +} + +// abs is like math.Abs, but it doesn't care about negative zero, infinities or +// NaNs. +func abs(f float64) float64 { + if f < 0 { + f = -f + } + return f +} + +// ftou converts the range [0.0, 1.0] to [0, 0xffff]. +func ftou(f float64) uint16 { + i := int32(0xffff*f + 0.5) + if i > 0xffff { + return 0xffff + } + if i > 0 { + return uint16(i) + } + return 0 +} + +// fffftou converts the range [0.0, 65535.0] to [0, 0xffff]. 
+func fffftou(f float64) uint16 { + i := int32(f + 0.5) + if i > 0xffff { + return 0xffff + } + if i > 0 { + return uint16(i) + } + return 0 +} + +// invert returns the inverse of m. +// +// TODO: move this into the f64 package, once we work out the convention for +// matrix methods in that package: do they modify the receiver, take a dst +// pointer argument, or return a new value? +func invert(m *f64.Aff3) f64.Aff3 { + m00 := +m[3*1+1] + m01 := -m[3*0+1] + m02 := +m[3*1+2]*m[3*0+1] - m[3*1+1]*m[3*0+2] + m10 := -m[3*1+0] + m11 := +m[3*0+0] + m12 := +m[3*1+0]*m[3*0+2] - m[3*1+2]*m[3*0+0] + + det := m00*m11 - m10*m01 + + return f64.Aff3{ + m00 / det, + m01 / det, + m02 / det, + m10 / det, + m11 / det, + m12 / det, + } +} + +func matMul(p, q *f64.Aff3) f64.Aff3 { + return f64.Aff3{ + p[3*0+0]*q[3*0+0] + p[3*0+1]*q[3*1+0], + p[3*0+0]*q[3*0+1] + p[3*0+1]*q[3*1+1], + p[3*0+0]*q[3*0+2] + p[3*0+1]*q[3*1+2] + p[3*0+2], + p[3*1+0]*q[3*0+0] + p[3*1+1]*q[3*1+0], + p[3*1+0]*q[3*0+1] + p[3*1+1]*q[3*1+1], + p[3*1+0]*q[3*0+2] + p[3*1+1]*q[3*1+2] + p[3*1+2], + } +} + +// transformRect returns a rectangle dr that contains sr transformed by s2d. +func transformRect(s2d *f64.Aff3, sr *image.Rectangle) (dr image.Rectangle) { + ps := [...]image.Point{ + {sr.Min.X, sr.Min.Y}, + {sr.Max.X, sr.Min.Y}, + {sr.Min.X, sr.Max.Y}, + {sr.Max.X, sr.Max.Y}, + } + for i, p := range ps { + sxf := float64(p.X) + syf := float64(p.Y) + dx := int(math.Floor(s2d[0]*sxf + s2d[1]*syf + s2d[2])) + dy := int(math.Floor(s2d[3]*sxf + s2d[4]*syf + s2d[5])) + + // The +1 adjustments below are because an image.Rectangle is inclusive + // on the low end but exclusive on the high end. + + if i == 0 { + dr = image.Rectangle{ + Min: image.Point{dx + 0, dy + 0}, + Max: image.Point{dx + 1, dy + 1}, + } + continue + } + + if dr.Min.X > dx { + dr.Min.X = dx + } + dx++ + if dr.Max.X < dx { + dr.Max.X = dx + } + + if dr.Min.Y > dy { + dr.Min.Y = dy + } + dy++ + if dr.Max.Y < dy { + dr.Max.Y = dy + } + } + return dr +} + +func clipAffectedDestRect(adr image.Rectangle, dstMask image.Image, dstMaskP image.Point) (image.Rectangle, image.Image) { + if dstMask == nil { + return adr, nil + } + // TODO: enable this fast path once Go 1.5 is released, where an + // image.Rectangle implements image.Image. + // if r, ok := dstMask.(image.Rectangle); ok { + // return adr.Intersect(r.Sub(dstMaskP)), nil + // } + // TODO: clip to dstMask.Bounds() if the color model implies that out-of-bounds means 0 alpha? 
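+ // Until then, keep the mask and let per-pixel compositing consult it.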
+ return adr, dstMask +} + +func transform_Uniform(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.Uniform, sr image.Rectangle, bias image.Point, op Op) { + switch op { + case Over: + switch dst := dst.(type) { + case *image.RGBA: + pr, pg, pb, pa := src.C.RGBA() + pa1 := (0xffff - pa) * 0x101 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := dst.PixOffset(dr.Min.X+adr.Min.X, dr.Min.Y+int(dy)) + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } + + default: + pr, pg, pb, pa := src.C.RGBA() + pa1 := 0xffff - pa + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } + } + + case Src: + switch dst := dst.(type) { + case *image.RGBA: + pr, pg, pb, pa := src.C.RGBA() + pr8 := uint8(pr >> 8) + pg8 := uint8(pg >> 8) + pb8 := uint8(pb >> 8) + pa8 := uint8(pa >> 8) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := dst.PixOffset(dr.Min.X+adr.Min.X, dr.Min.Y+int(dy)) + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + dst.Pix[d+0] = pr8 + dst.Pix[d+1] = pg8 + dst.Pix[d+2] = pb8 + dst.Pix[d+3] = pa8 + } + } + + default: + pr, pg, pb, pa := src.C.RGBA() + dstColorRGBA64 := &color.RGBA64{ + uint16(pr), + uint16(pg), + uint16(pb), + uint16(pa), + } + dstColor := color.Color(dstColorRGBA64) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } + } + } +} + +func opaque(m image.Image) bool { + o, ok := m.(interface { + Opaque() bool + }) + return ok && o.Opaque() +} diff --git a/_third_party/golang.org/x/image/draw/scale_test.go b/_third_party/golang.org/x/image/draw/scale_test.go new file mode 100644 index 0000000000..a477d87d0d --- /dev/null +++ b/_third_party/golang.org/x/image/draw/scale_test.go @@ 
-0,0 +1,731 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package draw + +import ( + "bytes" + "flag" + "fmt" + "image" + "image/color" + "image/png" + "math/rand" + "os" + "reflect" + "testing" + + "bosun.org/_third_party/golang.org/x/image/math/f64" + + _ "image/jpeg" +) + +var genGoldenFiles = flag.Bool("gen_golden_files", false, "whether to generate the TestXxx golden files.") + +var transformMatrix = func(scale, tx, ty float64) f64.Aff3 { + const cos30, sin30 = 0.866025404, 0.5 + return f64.Aff3{ + +scale * cos30, -scale * sin30, tx, + +scale * sin30, +scale * cos30, ty, + } +} + +func encode(filename string, m image.Image) error { + f, err := os.Create(filename) + if err != nil { + return fmt.Errorf("Create: %v", err) + } + defer f.Close() + if err := png.Encode(f, m); err != nil { + return fmt.Errorf("Encode: %v", err) + } + return nil +} + +// testInterp tests that interpolating the source image gives the exact +// destination image. This is to ensure that any refactoring or optimization of +// the interpolation code doesn't change the behavior. Changing the actual +// algorithm or kernel used by any particular quality setting will obviously +// change the resultant pixels. In such a case, use the gen_golden_files flag +// to regenerate the golden files. +func testInterp(t *testing.T, w int, h int, direction, prefix, suffix string) { + f, err := os.Open("../testdata/" + prefix + suffix) + if err != nil { + t.Fatalf("Open: %v", err) + } + defer f.Close() + src, _, err := image.Decode(f) + if err != nil { + t.Fatalf("Decode: %v", err) + } + + op, scale := Src, 3.75 + if prefix == "tux" { + op, scale = Over, 0.125 + } + green := image.NewUniform(color.RGBA{0x00, 0x22, 0x11, 0xff}) + + testCases := map[string]Interpolator{ + "nn": NearestNeighbor, + "ab": ApproxBiLinear, + "bl": BiLinear, + "cr": CatmullRom, + } + for name, q := range testCases { + goldenFilename := fmt.Sprintf("../testdata/%s-%s-%s.png", prefix, direction, name) + + got := image.NewRGBA(image.Rect(0, 0, w, h)) + Copy(got, image.Point{}, green, got.Bounds(), Src, nil) + if direction == "rotate" { + q.Transform(got, transformMatrix(scale, 40, 10), src, src.Bounds(), op, nil) + } else { + q.Scale(got, got.Bounds(), src, src.Bounds(), op, nil) + } + + if *genGoldenFiles { + if err := encode(goldenFilename, got); err != nil { + t.Error(err) + } + continue + } + + g, err := os.Open(goldenFilename) + if err != nil { + t.Errorf("Open: %v", err) + continue + } + defer g.Close() + wantRaw, err := png.Decode(g) + if err != nil { + t.Errorf("Decode: %v", err) + continue + } + // convert wantRaw to RGBA. + want, ok := wantRaw.(*image.RGBA) + if !ok { + b := wantRaw.Bounds() + want = image.NewRGBA(b) + Draw(want, b, wantRaw, b.Min, Src) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("%s: actual image differs from golden image", goldenFilename) + continue + } + } +} + +func TestScaleDown(t *testing.T) { testInterp(t, 100, 100, "down", "go-turns-two", "-280x360.jpeg") } +func TestScaleUp(t *testing.T) { testInterp(t, 75, 100, "up", "go-turns-two", "-14x18.png") } +func TestTformSrc(t *testing.T) { testInterp(t, 100, 100, "rotate", "go-turns-two", "-14x18.png") } +func TestTformOver(t *testing.T) { testInterp(t, 100, 100, "rotate", "tux", ".png") } + +// TestSimpleTransforms tests Scale and Transform calls that simplify to Copy +// or Scale calls. 
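+// For instance, a Transform whose matrix is a pure integer translation
+// should yield pixels identical to a Copy with that offset.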
+func TestSimpleTransforms(t *testing.T) { + f, err := os.Open("../testdata/testpattern.png") // A 100x100 image. + if err != nil { + t.Fatalf("Open: %v", err) + } + defer f.Close() + src, _, err := image.Decode(f) + if err != nil { + t.Fatalf("Decode: %v", err) + } + + dst0 := image.NewRGBA(image.Rect(0, 0, 120, 150)) + dst1 := image.NewRGBA(image.Rect(0, 0, 120, 150)) + for _, op := range []string{"scale/copy", "tform/copy", "tform/scale"} { + for _, epsilon := range []float64{0, 1e-50, 1e-1} { + Copy(dst0, image.Point{}, image.Transparent, dst0.Bounds(), Src, nil) + Copy(dst1, image.Point{}, image.Transparent, dst1.Bounds(), Src, nil) + + switch op { + case "scale/copy": + dr := image.Rect(10, 30, 10+100, 30+100) + if epsilon > 1e-10 { + dr.Max.X++ + } + Copy(dst0, image.Point{10, 30}, src, src.Bounds(), Src, nil) + ApproxBiLinear.Scale(dst1, dr, src, src.Bounds(), Src, nil) + case "tform/copy": + Copy(dst0, image.Point{10, 30}, src, src.Bounds(), Src, nil) + ApproxBiLinear.Transform(dst1, f64.Aff3{ + 1, 0 + epsilon, 10, + 0, 1, 30, + }, src, src.Bounds(), Src, nil) + case "tform/scale": + ApproxBiLinear.Scale(dst0, image.Rect(10, 50, 10+50, 50+50), src, src.Bounds(), Src, nil) + ApproxBiLinear.Transform(dst1, f64.Aff3{ + 0.5, 0.0 + epsilon, 10, + 0.0, 0.5, 50, + }, src, src.Bounds(), Src, nil) + } + + differ := !bytes.Equal(dst0.Pix, dst1.Pix) + if epsilon > 1e-10 { + if !differ { + t.Errorf("%s yielded same pixels, want different pixels: epsilon=%v", op, epsilon) + } + } else { + if differ { + t.Errorf("%s yielded different pixels, want same pixels: epsilon=%v", op, epsilon) + } + } + } + } +} + +func BenchmarkSimpleScaleCopy(b *testing.B) { + dst := image.NewRGBA(image.Rect(0, 0, 640, 480)) + src := image.NewRGBA(image.Rect(0, 0, 400, 300)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ApproxBiLinear.Scale(dst, image.Rect(10, 20, 10+400, 20+300), src, src.Bounds(), Src, nil) + } +} + +func BenchmarkSimpleTransformCopy(b *testing.B) { + dst := image.NewRGBA(image.Rect(0, 0, 640, 480)) + src := image.NewRGBA(image.Rect(0, 0, 400, 300)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ApproxBiLinear.Transform(dst, f64.Aff3{ + 1, 0, 10, + 0, 1, 20, + }, src, src.Bounds(), Src, nil) + } +} + +func BenchmarkSimpleTransformScale(b *testing.B) { + dst := image.NewRGBA(image.Rect(0, 0, 640, 480)) + src := image.NewRGBA(image.Rect(0, 0, 400, 300)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ApproxBiLinear.Transform(dst, f64.Aff3{ + 0.5, 0.0, 10, + 0.0, 0.5, 20, + }, src, src.Bounds(), Src, nil) + } +} + +func TestOps(t *testing.T) { + blue := image.NewUniform(color.RGBA{0x00, 0x00, 0xff, 0xff}) + testCases := map[Op]color.RGBA{ + Over: color.RGBA{0x7f, 0x00, 0x80, 0xff}, + Src: color.RGBA{0x7f, 0x00, 0x00, 0x7f}, + } + for op, want := range testCases { + dst := image.NewRGBA(image.Rect(0, 0, 2, 2)) + Copy(dst, image.Point{}, blue, dst.Bounds(), Src, nil) + + src := image.NewRGBA(image.Rect(0, 0, 1, 1)) + src.SetRGBA(0, 0, color.RGBA{0x7f, 0x00, 0x00, 0x7f}) + + NearestNeighbor.Scale(dst, dst.Bounds(), src, src.Bounds(), op, nil) + + if got := dst.RGBAAt(0, 0); got != want { + t.Errorf("op=%v: got %v, want %v", op, got, want) + } + } +} + +// TestNegativeWeights tests that scaling by a kernel that produces negative +// weights, such as the Catmull-Rom kernel, doesn't produce an invalid color +// according to Go's alpha-premultiplied model. 
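+// The Catmull-Rom kernel is negative for t between 1 and 2, so an unclamped
+// weighted sum could push a color channel above the alpha channel.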
+func TestNegativeWeights(t *testing.T) { + check := func(m *image.RGBA) error { + b := m.Bounds() + for y := b.Min.Y; y < b.Max.Y; y++ { + for x := b.Min.X; x < b.Max.X; x++ { + if c := m.RGBAAt(x, y); c.R > c.A || c.G > c.A || c.B > c.A { + return fmt.Errorf("invalid color.RGBA at (%d, %d): %v", x, y, c) + } + } + } + return nil + } + + src := image.NewRGBA(image.Rect(0, 0, 16, 16)) + for y := 0; y < 16; y++ { + for x := 0; x < 16; x++ { + a := y * 0x11 + src.Set(x, y, color.RGBA{ + R: uint8(x * 0x11 * a / 0xff), + A: uint8(a), + }) + } + } + if err := check(src); err != nil { + t.Fatalf("src image: %v", err) + } + + dst := image.NewRGBA(image.Rect(0, 0, 32, 32)) + CatmullRom.Scale(dst, dst.Bounds(), src, src.Bounds(), Over, nil) + if err := check(dst); err != nil { + t.Fatalf("dst image: %v", err) + } +} + +func fillPix(r *rand.Rand, pixs ...[]byte) { + for _, pix := range pixs { + for i := range pix { + pix[i] = uint8(r.Intn(256)) + } + } +} + +func TestInterpClipCommute(t *testing.T) { + src := image.NewNRGBA(image.Rect(0, 0, 20, 20)) + fillPix(rand.New(rand.NewSource(0)), src.Pix) + + outer := image.Rect(1, 1, 8, 5) + inner := image.Rect(2, 3, 6, 5) + qs := []Interpolator{ + NearestNeighbor, + ApproxBiLinear, + CatmullRom, + } + for _, transform := range []bool{false, true} { + for _, q := range qs { + dst0 := image.NewRGBA(image.Rect(1, 1, 10, 10)) + dst1 := image.NewRGBA(image.Rect(1, 1, 10, 10)) + for i := range dst0.Pix { + dst0.Pix[i] = uint8(i / 4) + dst1.Pix[i] = uint8(i / 4) + } + + var interp func(dst *image.RGBA) + if transform { + interp = func(dst *image.RGBA) { + q.Transform(dst, transformMatrix(3.75, 2, 1), src, src.Bounds(), Over, nil) + } + } else { + interp = func(dst *image.RGBA) { + q.Scale(dst, outer, src, src.Bounds(), Over, nil) + } + } + + // Interpolate then clip. + interp(dst0) + dst0 = dst0.SubImage(inner).(*image.RGBA) + + // Clip then interpolate. + dst1 = dst1.SubImage(inner).(*image.RGBA) + interp(dst1) + + loop: + for y := inner.Min.Y; y < inner.Max.Y; y++ { + for x := inner.Min.X; x < inner.Max.X; x++ { + if c0, c1 := dst0.RGBAAt(x, y), dst1.RGBAAt(x, y); c0 != c1 { + t.Errorf("q=%T: at (%d, %d): c0=%v, c1=%v", q, x, y, c0, c1) + break loop + } + } + } + } + } +} + +// translatedImage is an image m translated by t. +type translatedImage struct { + m image.Image + t image.Point +} + +func (t *translatedImage) At(x, y int) color.Color { return t.m.At(x-t.t.X, y-t.t.Y) } +func (t *translatedImage) Bounds() image.Rectangle { return t.m.Bounds().Add(t.t) } +func (t *translatedImage) ColorModel() color.Model { return t.m.ColorModel() } + +// TestSrcTranslationInvariance tests that Scale and Transform are invariant +// under src translations. Specifically, when some source pixels are not in the +// bottom-right quadrant of src coordinate space, we consistently round down, +// not round towards zero. 
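+// In Go, int(f) truncates towards zero while math.Floor rounds down, and the
+// two differ for negative, non-integral coordinates: int(-0.5) is 0 but
+// math.Floor(-0.5) is -1.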
+func TestSrcTranslationInvariance(t *testing.T) { + f, err := os.Open("../testdata/testpattern.png") + if err != nil { + t.Fatalf("Open: %v", err) + } + defer f.Close() + src, _, err := image.Decode(f) + if err != nil { + t.Fatalf("Decode: %v", err) + } + sr := image.Rect(2, 3, 16, 12) + if !sr.In(src.Bounds()) { + t.Fatalf("src bounds too small: got %v", src.Bounds()) + } + qs := []Interpolator{ + NearestNeighbor, + ApproxBiLinear, + CatmullRom, + } + deltas := []image.Point{ + {+0, +0}, + {+0, +5}, + {+0, -5}, + {+5, +0}, + {-5, +0}, + {+8, +8}, + {+8, -8}, + {-8, +8}, + {-8, -8}, + } + m00 := transformMatrix(3.75, 0, 0) + + for _, transform := range []bool{false, true} { + for _, q := range qs { + want := image.NewRGBA(image.Rect(0, 0, 20, 20)) + if transform { + q.Transform(want, m00, src, sr, Over, nil) + } else { + q.Scale(want, want.Bounds(), src, sr, Over, nil) + } + for _, delta := range deltas { + tsrc := &translatedImage{src, delta} + got := image.NewRGBA(image.Rect(0, 0, 20, 20)) + if transform { + m := matMul(&m00, &f64.Aff3{ + 1, 0, -float64(delta.X), + 0, 1, -float64(delta.Y), + }) + q.Transform(got, m, tsrc, sr.Add(delta), Over, nil) + } else { + q.Scale(got, got.Bounds(), tsrc, sr.Add(delta), Over, nil) + } + if !bytes.Equal(got.Pix, want.Pix) { + t.Errorf("pix differ for delta=%v, transform=%t, q=%T", delta, transform, q) + } + } + } + } +} + +func TestSrcMask(t *testing.T) { + srcMask := image.NewRGBA(image.Rect(0, 0, 23, 1)) + srcMask.SetRGBA(19, 0, color.RGBA{0x00, 0x00, 0x00, 0x7f}) + srcMask.SetRGBA(20, 0, color.RGBA{0x00, 0x00, 0x00, 0xff}) + srcMask.SetRGBA(21, 0, color.RGBA{0x00, 0x00, 0x00, 0x3f}) + srcMask.SetRGBA(22, 0, color.RGBA{0x00, 0x00, 0x00, 0x00}) + red := image.NewUniform(color.RGBA{0xff, 0x00, 0x00, 0xff}) + blue := image.NewUniform(color.RGBA{0x00, 0x00, 0xff, 0xff}) + dst := image.NewRGBA(image.Rect(0, 0, 6, 1)) + Copy(dst, image.Point{}, blue, dst.Bounds(), Src, nil) + NearestNeighbor.Scale(dst, dst.Bounds(), red, image.Rect(0, 0, 3, 1), Over, &Options{ + SrcMask: srcMask, + SrcMaskP: image.Point{20, 0}, + }) + got := [6]color.RGBA{ + dst.RGBAAt(0, 0), + dst.RGBAAt(1, 0), + dst.RGBAAt(2, 0), + dst.RGBAAt(3, 0), + dst.RGBAAt(4, 0), + dst.RGBAAt(5, 0), + } + want := [6]color.RGBA{ + {0xff, 0x00, 0x00, 0xff}, + {0xff, 0x00, 0x00, 0xff}, + {0x3f, 0x00, 0xc0, 0xff}, + {0x3f, 0x00, 0xc0, 0xff}, + {0x00, 0x00, 0xff, 0xff}, + {0x00, 0x00, 0xff, 0xff}, + } + if got != want { + t.Errorf("\ngot %v\nwant %v", got, want) + } +} + +func TestDstMask(t *testing.T) { + dstMask := image.NewRGBA(image.Rect(0, 0, 23, 1)) + dstMask.SetRGBA(19, 0, color.RGBA{0x00, 0x00, 0x00, 0x7f}) + dstMask.SetRGBA(20, 0, color.RGBA{0x00, 0x00, 0x00, 0xff}) + dstMask.SetRGBA(21, 0, color.RGBA{0x00, 0x00, 0x00, 0x3f}) + dstMask.SetRGBA(22, 0, color.RGBA{0x00, 0x00, 0x00, 0x00}) + red := image.NewRGBA(image.Rect(0, 0, 1, 1)) + red.SetRGBA(0, 0, color.RGBA{0xff, 0x00, 0x00, 0xff}) + blue := image.NewUniform(color.RGBA{0x00, 0x00, 0xff, 0xff}) + qs := []Interpolator{ + NearestNeighbor, + ApproxBiLinear, + CatmullRom, + } + for _, q := range qs { + dst := image.NewRGBA(image.Rect(0, 0, 3, 1)) + Copy(dst, image.Point{}, blue, dst.Bounds(), Src, nil) + q.Scale(dst, dst.Bounds(), red, red.Bounds(), Over, &Options{ + DstMask: dstMask, + DstMaskP: image.Point{20, 0}, + }) + got := [3]color.RGBA{ + dst.RGBAAt(0, 0), + dst.RGBAAt(1, 0), + dst.RGBAAt(2, 0), + } + want := [3]color.RGBA{ + {0xff, 0x00, 0x00, 0xff}, + {0x3f, 0x00, 0xc0, 0xff}, + {0x00, 0x00, 0xff, 0xff}, + } + if got != want { + 
t.Errorf("q=%T:\ngot %v\nwant %v", q, got, want) + } + } +} + +func TestRectDstMask(t *testing.T) { + f, err := os.Open("../testdata/testpattern.png") + if err != nil { + t.Fatalf("Open: %v", err) + } + defer f.Close() + src, _, err := image.Decode(f) + if err != nil { + t.Fatalf("Decode: %v", err) + } + m00 := transformMatrix(1, 0, 0) + + bounds := image.Rect(0, 0, 50, 50) + dstOutside := image.NewRGBA(bounds) + for y := bounds.Min.Y; y < bounds.Max.Y; y++ { + for x := bounds.Min.X; x < bounds.Max.X; x++ { + dstOutside.SetRGBA(x, y, color.RGBA{uint8(5 * x), uint8(5 * y), 0x00, 0xff}) + } + } + + mk := func(q Transformer, dstMask image.Image, dstMaskP image.Point) *image.RGBA { + m := image.NewRGBA(bounds) + Copy(m, bounds.Min, dstOutside, bounds, Src, nil) + q.Transform(m, m00, src, src.Bounds(), Over, &Options{ + DstMask: dstMask, + DstMaskP: dstMaskP, + }) + return m + } + + qs := []Interpolator{ + NearestNeighbor, + ApproxBiLinear, + CatmullRom, + } + dstMaskPs := []image.Point{ + {0, 0}, + {5, 7}, + {-3, 0}, + } + rect := image.Rect(10, 10, 30, 40) + for _, q := range qs { + for _, dstMaskP := range dstMaskPs { + dstInside := mk(q, nil, image.Point{}) + for _, wrap := range []bool{false, true} { + // TODO: replace "rectImage(rect)" with "rect" once Go 1.5 is + // released, where an image.Rectangle implements image.Image. + dstMask := image.Image(rectImage(rect)) + if wrap { + dstMask = srcWrapper{dstMask} + } + dst := mk(q, dstMask, dstMaskP) + + nError := 0 + loop: + for y := bounds.Min.Y; y < bounds.Max.Y; y++ { + for x := bounds.Min.X; x < bounds.Max.X; x++ { + which := dstOutside + if (image.Point{x, y}).Add(dstMaskP).In(rect) { + which = dstInside + } + if got, want := dst.RGBAAt(x, y), which.RGBAAt(x, y); got != want { + if nError == 10 { + t.Errorf("q=%T dmp=%v wrap=%v: ...and more errors", q, dstMaskP, wrap) + break loop + } + nError++ + t.Errorf("q=%T dmp=%v wrap=%v: x=%3d y=%3d: got %v, want %v", + q, dstMaskP, wrap, x, y, got, want) + } + } + } + } + } + } +} + +// TODO: delete this wrapper type once Go 1.5 is released, where an +// image.Rectangle implements image.Image. +type rectImage image.Rectangle + +func (r rectImage) ColorModel() color.Model { return color.Alpha16Model } +func (r rectImage) Bounds() image.Rectangle { return image.Rectangle(r) } +func (r rectImage) At(x, y int) color.Color { + if (image.Point{x, y}).In(image.Rectangle(r)) { + return color.Opaque + } + return color.Transparent +} + +// The fooWrapper types wrap the dst or src image to avoid triggering the +// type-specific fast path implementations. +type ( + dstWrapper struct{ Image } + srcWrapper struct{ image.Image } +) + +func srcGray(boundsHint image.Rectangle) (image.Image, error) { + m := image.NewGray(boundsHint) + fillPix(rand.New(rand.NewSource(0)), m.Pix) + return m, nil +} + +func srcNRGBA(boundsHint image.Rectangle) (image.Image, error) { + m := image.NewNRGBA(boundsHint) + fillPix(rand.New(rand.NewSource(1)), m.Pix) + return m, nil +} + +func srcRGBA(boundsHint image.Rectangle) (image.Image, error) { + m := image.NewRGBA(boundsHint) + fillPix(rand.New(rand.NewSource(2)), m.Pix) + // RGBA is alpha-premultiplied, so the R, G and B values should + // be <= the A values. 
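+ // For example, a pixel with A == 0x80 has its R, G and B values scaled
+ // by 0x80/0xff, i.e. roughly halved.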
+ for i := 0; i < len(m.Pix); i += 4 { + m.Pix[i+0] = uint8(uint32(m.Pix[i+0]) * uint32(m.Pix[i+3]) / 0xff) + m.Pix[i+1] = uint8(uint32(m.Pix[i+1]) * uint32(m.Pix[i+3]) / 0xff) + m.Pix[i+2] = uint8(uint32(m.Pix[i+2]) * uint32(m.Pix[i+3]) / 0xff) + } + return m, nil +} + +func srcUnif(boundsHint image.Rectangle) (image.Image, error) { + return image.NewUniform(color.RGBA64{0x1234, 0x5555, 0x9181, 0xbeef}), nil +} + +func srcYCbCr(boundsHint image.Rectangle) (image.Image, error) { + m := image.NewYCbCr(boundsHint, image.YCbCrSubsampleRatio420) + fillPix(rand.New(rand.NewSource(3)), m.Y, m.Cb, m.Cr) + return m, nil +} + +func srcLarge(boundsHint image.Rectangle) (image.Image, error) { + // 3072 x 2304 is over 7 million pixels at 4:3, comparable to a + // 2015 smart-phone camera's output. + return srcYCbCr(image.Rect(0, 0, 3072, 2304)) +} + +func srcTux(boundsHint image.Rectangle) (image.Image, error) { + // tux.png is a 386 x 395 image. + f, err := os.Open("../testdata/tux.png") + if err != nil { + return nil, fmt.Errorf("Open: %v", err) + } + defer f.Close() + src, err := png.Decode(f) + if err != nil { + return nil, fmt.Errorf("Decode: %v", err) + } + return src, nil +} + +func benchScale(b *testing.B, w int, h int, op Op, srcf func(image.Rectangle) (image.Image, error), q Interpolator) { + dst := image.NewRGBA(image.Rect(0, 0, w, h)) + src, err := srcf(image.Rect(0, 0, 1024, 768)) + if err != nil { + b.Fatal(err) + } + dr, sr := dst.Bounds(), src.Bounds() + scaler := Scaler(q) + if n, ok := q.(interface { + NewScaler(int, int, int, int) Scaler + }); ok { + scaler = n.NewScaler(dr.Dx(), dr.Dy(), sr.Dx(), sr.Dy()) + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + scaler.Scale(dst, dr, src, sr, op, nil) + } +} + +func benchTform(b *testing.B, w int, h int, op Op, srcf func(image.Rectangle) (image.Image, error), q Interpolator) { + dst := image.NewRGBA(image.Rect(0, 0, w, h)) + src, err := srcf(image.Rect(0, 0, 1024, 768)) + if err != nil { + b.Fatal(err) + } + sr := src.Bounds() + m := transformMatrix(3.75, 40, 10) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + q.Transform(dst, m, src, sr, op, nil) + } +} + +func BenchmarkScaleNNLargeDown(b *testing.B) { benchScale(b, 200, 150, Src, srcLarge, NearestNeighbor) } +func BenchmarkScaleABLargeDown(b *testing.B) { benchScale(b, 200, 150, Src, srcLarge, ApproxBiLinear) } +func BenchmarkScaleBLLargeDown(b *testing.B) { benchScale(b, 200, 150, Src, srcLarge, BiLinear) } +func BenchmarkScaleCRLargeDown(b *testing.B) { benchScale(b, 200, 150, Src, srcLarge, CatmullRom) } + +func BenchmarkScaleNNDown(b *testing.B) { benchScale(b, 120, 80, Src, srcTux, NearestNeighbor) } +func BenchmarkScaleABDown(b *testing.B) { benchScale(b, 120, 80, Src, srcTux, ApproxBiLinear) } +func BenchmarkScaleBLDown(b *testing.B) { benchScale(b, 120, 80, Src, srcTux, BiLinear) } +func BenchmarkScaleCRDown(b *testing.B) { benchScale(b, 120, 80, Src, srcTux, CatmullRom) } + +func BenchmarkScaleNNUp(b *testing.B) { benchScale(b, 800, 600, Src, srcTux, NearestNeighbor) } +func BenchmarkScaleABUp(b *testing.B) { benchScale(b, 800, 600, Src, srcTux, ApproxBiLinear) } +func BenchmarkScaleBLUp(b *testing.B) { benchScale(b, 800, 600, Src, srcTux, BiLinear) } +func BenchmarkScaleCRUp(b *testing.B) { benchScale(b, 800, 600, Src, srcTux, CatmullRom) } + +func BenchmarkScaleNNSrcRGBA(b *testing.B) { benchScale(b, 200, 150, Src, srcRGBA, NearestNeighbor) } +func BenchmarkScaleNNSrcUnif(b *testing.B) { benchScale(b, 200, 150, Src, srcUnif, 
NearestNeighbor) } + +func BenchmarkScaleNNOverRGBA(b *testing.B) { benchScale(b, 200, 150, Over, srcRGBA, NearestNeighbor) } +func BenchmarkScaleNNOverUnif(b *testing.B) { benchScale(b, 200, 150, Over, srcUnif, NearestNeighbor) } + +func BenchmarkTformNNSrcRGBA(b *testing.B) { benchTform(b, 200, 150, Src, srcRGBA, NearestNeighbor) } +func BenchmarkTformNNSrcUnif(b *testing.B) { benchTform(b, 200, 150, Src, srcUnif, NearestNeighbor) } + +func BenchmarkTformNNOverRGBA(b *testing.B) { benchTform(b, 200, 150, Over, srcRGBA, NearestNeighbor) } +func BenchmarkTformNNOverUnif(b *testing.B) { benchTform(b, 200, 150, Over, srcUnif, NearestNeighbor) } + +func BenchmarkScaleABSrcGray(b *testing.B) { benchScale(b, 200, 150, Src, srcGray, ApproxBiLinear) } +func BenchmarkScaleABSrcNRGBA(b *testing.B) { benchScale(b, 200, 150, Src, srcNRGBA, ApproxBiLinear) } +func BenchmarkScaleABSrcRGBA(b *testing.B) { benchScale(b, 200, 150, Src, srcRGBA, ApproxBiLinear) } +func BenchmarkScaleABSrcYCbCr(b *testing.B) { benchScale(b, 200, 150, Src, srcYCbCr, ApproxBiLinear) } + +func BenchmarkScaleABOverGray(b *testing.B) { benchScale(b, 200, 150, Over, srcGray, ApproxBiLinear) } +func BenchmarkScaleABOverNRGBA(b *testing.B) { benchScale(b, 200, 150, Over, srcNRGBA, ApproxBiLinear) } +func BenchmarkScaleABOverRGBA(b *testing.B) { benchScale(b, 200, 150, Over, srcRGBA, ApproxBiLinear) } +func BenchmarkScaleABOverYCbCr(b *testing.B) { benchScale(b, 200, 150, Over, srcYCbCr, ApproxBiLinear) } + +func BenchmarkTformABSrcGray(b *testing.B) { benchTform(b, 200, 150, Src, srcGray, ApproxBiLinear) } +func BenchmarkTformABSrcNRGBA(b *testing.B) { benchTform(b, 200, 150, Src, srcNRGBA, ApproxBiLinear) } +func BenchmarkTformABSrcRGBA(b *testing.B) { benchTform(b, 200, 150, Src, srcRGBA, ApproxBiLinear) } +func BenchmarkTformABSrcYCbCr(b *testing.B) { benchTform(b, 200, 150, Src, srcYCbCr, ApproxBiLinear) } + +func BenchmarkTformABOverGray(b *testing.B) { benchTform(b, 200, 150, Over, srcGray, ApproxBiLinear) } +func BenchmarkTformABOverNRGBA(b *testing.B) { benchTform(b, 200, 150, Over, srcNRGBA, ApproxBiLinear) } +func BenchmarkTformABOverRGBA(b *testing.B) { benchTform(b, 200, 150, Over, srcRGBA, ApproxBiLinear) } +func BenchmarkTformABOverYCbCr(b *testing.B) { benchTform(b, 200, 150, Over, srcYCbCr, ApproxBiLinear) } + +func BenchmarkScaleCRSrcGray(b *testing.B) { benchScale(b, 200, 150, Src, srcGray, CatmullRom) } +func BenchmarkScaleCRSrcNRGBA(b *testing.B) { benchScale(b, 200, 150, Src, srcNRGBA, CatmullRom) } +func BenchmarkScaleCRSrcRGBA(b *testing.B) { benchScale(b, 200, 150, Src, srcRGBA, CatmullRom) } +func BenchmarkScaleCRSrcYCbCr(b *testing.B) { benchScale(b, 200, 150, Src, srcYCbCr, CatmullRom) } + +func BenchmarkScaleCROverGray(b *testing.B) { benchScale(b, 200, 150, Over, srcGray, CatmullRom) } +func BenchmarkScaleCROverNRGBA(b *testing.B) { benchScale(b, 200, 150, Over, srcNRGBA, CatmullRom) } +func BenchmarkScaleCROverRGBA(b *testing.B) { benchScale(b, 200, 150, Over, srcRGBA, CatmullRom) } +func BenchmarkScaleCROverYCbCr(b *testing.B) { benchScale(b, 200, 150, Over, srcYCbCr, CatmullRom) } + +func BenchmarkTformCRSrcGray(b *testing.B) { benchTform(b, 200, 150, Src, srcGray, CatmullRom) } +func BenchmarkTformCRSrcNRGBA(b *testing.B) { benchTform(b, 200, 150, Src, srcNRGBA, CatmullRom) } +func BenchmarkTformCRSrcRGBA(b *testing.B) { benchTform(b, 200, 150, Src, srcRGBA, CatmullRom) } +func BenchmarkTformCRSrcYCbCr(b *testing.B) { benchTform(b, 200, 150, Src, srcYCbCr, CatmullRom) } + +func 
BenchmarkTformCROverGray(b *testing.B) { benchTform(b, 200, 150, Over, srcGray, CatmullRom) } +func BenchmarkTformCROverNRGBA(b *testing.B) { benchTform(b, 200, 150, Over, srcNRGBA, CatmullRom) } +func BenchmarkTformCROverRGBA(b *testing.B) { benchTform(b, 200, 150, Over, srcRGBA, CatmullRom) } +func BenchmarkTformCROverYCbCr(b *testing.B) { benchTform(b, 200, 150, Over, srcYCbCr, CatmullRom) } diff --git a/_third_party/golang.org/x/image/draw/stdlib_test.go b/_third_party/golang.org/x/image/draw/stdlib_test.go new file mode 100644 index 0000000000..c45f78c2e4 --- /dev/null +++ b/_third_party/golang.org/x/image/draw/stdlib_test.go @@ -0,0 +1,96 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package draw + +// This file contains tests that depend on the exact behavior of the +// image/color package in the standard library. The color conversion formula +// from YCbCr to RGBA changed between Go 1.4 and Go 1.5, so this file's tests +// are only enabled for Go 1.5 and above. + +import ( + "bytes" + "image" + "image/color" + "testing" +) + +// TestFastPaths tests that the fast path implementations produce identical +// results to the generic implementation. +func TestFastPaths(t *testing.T) { + drs := []image.Rectangle{ + image.Rect(0, 0, 10, 10), // The dst bounds. + image.Rect(3, 4, 8, 6), // A strict subset of the dst bounds. + image.Rect(-3, -5, 2, 4), // Partial out-of-bounds #0. + image.Rect(4, -2, 6, 12), // Partial out-of-bounds #1. + image.Rect(12, 14, 23, 45), // Complete out-of-bounds. + image.Rect(5, 5, 5, 5), // Empty. + } + srs := []image.Rectangle{ + image.Rect(0, 0, 12, 9), // The src bounds. + image.Rect(2, 2, 10, 8), // A strict subset of the src bounds. + image.Rect(10, 5, 20, 20), // Partial out-of-bounds #0. + image.Rect(-40, 0, 40, 8), // Partial out-of-bounds #1. + image.Rect(-8, -8, -4, -4), // Complete out-of-bounds. + image.Rect(5, 5, 5, 5), // Empty. 
+ } + srcfs := []func(image.Rectangle) (image.Image, error){ + srcGray, + srcNRGBA, + srcRGBA, + srcUnif, + srcYCbCr, + } + var srcs []image.Image + for _, srcf := range srcfs { + src, err := srcf(srs[0]) + if err != nil { + t.Fatal(err) + } + srcs = append(srcs, src) + } + qs := []Interpolator{ + NearestNeighbor, + ApproxBiLinear, + CatmullRom, + } + ops := []Op{ + Over, + Src, + } + blue := image.NewUniform(color.RGBA{0x11, 0x22, 0x44, 0x7f}) + + for _, dr := range drs { + for _, src := range srcs { + for _, sr := range srs { + for _, transform := range []bool{false, true} { + for _, q := range qs { + for _, op := range ops { + dst0 := image.NewRGBA(drs[0]) + dst1 := image.NewRGBA(drs[0]) + Draw(dst0, dst0.Bounds(), blue, image.Point{}, Src) + Draw(dstWrapper{dst1}, dst1.Bounds(), srcWrapper{blue}, image.Point{}, Src) + + if transform { + m := transformMatrix(3.75, 2, 1) + q.Transform(dst0, m, src, sr, op, nil) + q.Transform(dstWrapper{dst1}, m, srcWrapper{src}, sr, op, nil) + } else { + q.Scale(dst0, dr, src, sr, op, nil) + q.Scale(dstWrapper{dst1}, dr, srcWrapper{src}, sr, op, nil) + } + + if !bytes.Equal(dst0.Pix, dst1.Pix) { + t.Errorf("pix differ for dr=%v, src=%T, sr=%v, transform=%t, q=%T", + dr, src, sr, transform, q) + } + } + } + } + } + } + } +} diff --git a/_third_party/golang.org/x/image/font/font.go b/_third_party/golang.org/x/image/font/font.go new file mode 100644 index 0000000000..9db880452f --- /dev/null +++ b/_third_party/golang.org/x/image/font/font.go @@ -0,0 +1,202 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package font defines an interface for font faces, for drawing text on an +// image. +// +// Other packages provide font face implementations. For example, a truetype +// package would provide one based on .ttf font files. +package font // import "bosun.org/_third_party/golang.org/x/image/font" + +import ( + "image" + "image/draw" + "io" + + "bosun.org/_third_party/golang.org/x/image/math/fixed" +) + +// TODO: who is responsible for caches (glyph images, glyph indices, kerns)? +// The Drawer or the Face? + +// Face is a font face. Its glyphs are often derived from a font file, such as +// "Comic_Sans_MS.ttf", but a face has a specific size, style, weight and +// hinting. For example, the 12pt and 18pt versions of Comic Sans are two +// different faces, even if derived from the same font file. +// +// A Face is not safe for concurrent use by multiple goroutines, as its methods +// may re-use implementation-specific caches and mask image buffers. +// +// To create a Face, look to other packages that implement specific font file +// formats. +type Face interface { + io.Closer + + // Glyph returns the draw.DrawMask parameters (dr, mask, maskp) to draw r's + // glyph at the sub-pixel destination location dot, and that glyph's + // advance width. + // + // It returns !ok if the face does not contain a glyph for r. + // + // The contents of the mask image returned by one Glyph call may change + // after the next Glyph call. Callers that want to cache the mask must make + // a copy. + Glyph(dot fixed.Point26_6, r rune) ( + dr image.Rectangle, mask image.Image, maskp image.Point, advance fixed.Int26_6, ok bool) + + // GlyphBounds returns the bounding box of r's glyph, drawn at a dot equal + // to the origin, and that glyph's advance width. + // + // It returns !ok if the face does not contain a glyph for r. 
+ // + // The glyph's ascent and descent equal -bounds.Min.Y and +bounds.Max.Y. A + // visual depiction of what these metrics are is at + // https://developer.apple.com/library/mac/documentation/TextFonts/Conceptual/CocoaTextArchitecture/Art/glyph_metrics_2x.png + GlyphBounds(r rune) (bounds fixed.Rectangle26_6, advance fixed.Int26_6, ok bool) + + // GlyphAdvance returns the advance width of r's glyph. + // + // It returns !ok if the face does not contain a glyph for r. + GlyphAdvance(r rune) (advance fixed.Int26_6, ok bool) + + // Kern returns the horizontal adjustment for the kerning pair (r0, r1). A + // positive kern means to move the glyphs further apart. + Kern(r0, r1 rune) fixed.Int26_6 + + // TODO: per-font Metrics. + // TODO: ColoredGlyph for various emoji? + // TODO: Ligatures? Shaping? +} + +// TODO: Drawer.Layout or Drawer.Measure methods to measure text without +// drawing? + +// Drawer draws text on a destination image. +// +// A Drawer is not safe for concurrent use by multiple goroutines, since its +// Face is not. +type Drawer struct { + // Dst is the destination image. + Dst draw.Image + // Src is the source image. + Src image.Image + // Face provides the glyph mask images. + Face Face + // Dot is the baseline location to draw the next glyph. The majority of the + // affected pixels will be above and to the right of the dot, but some may + // be below or to the left. For example, drawing a 'j' in an italic face + // may affect pixels below and to the left of the dot. + Dot fixed.Point26_6 + + // TODO: Clip image.Image? + // TODO: SrcP image.Point for Src images other than *image.Uniform? How + // does it get updated during DrawString? +} + +// TODO: should DrawString return the last rune drawn, so the next DrawString +// call can kern beforehand? Or should that be the responsibility of the caller +// if they really want to do that, since they have to explicitly shift d.Dot +// anyway? +// +// In general, we'd have a DrawBytes([]byte) and DrawRuneReader(io.RuneReader) +// and the last case can't assume that you can rewind the stream. +// +// TODO: how does this work with line breaking: drawing text up until a +// vertical line? Should DrawString return the number of runes drawn? + +// DrawString draws s at the dot and advances the dot's location. +func (d *Drawer) DrawString(s string) { + var prevC rune + for i, c := range s { + if i != 0 { + d.Dot.X += d.Face.Kern(prevC, c) + } + dr, mask, maskp, advance, ok := d.Face.Glyph(d.Dot, c) + if !ok { + // TODO: is falling back on the U+FFFD glyph the responsibility of + // the Drawer or the Face? + // TODO: set prevC = '\ufffd'? + continue + } + draw.DrawMask(d.Dst, dr, d.Src, image.Point{}, mask, maskp, draw.Over) + d.Dot.X += advance + prevC = c + } +} + +// MeasureString returns how far dot would advance by drawing s. +func (d *Drawer) MeasureString(s string) (advance fixed.Int26_6) { + var prevC rune + for i, c := range s { + if i != 0 { + advance += d.Face.Kern(prevC, c) + } + a, ok := d.Face.GlyphAdvance(c) + if !ok { + // TODO: is falling back on the U+FFFD glyph the responsibility of + // the Drawer or the Face? + // TODO: set prevC = '\ufffd'? + continue + } + advance += a + prevC = c + } + return advance +} + +// Hinting selects how to quantize a vector font's glyph nodes. +// +// Not all fonts support hinting. +type Hinting int + +const ( + HintingNone Hinting = iota + HintingVertical + HintingFull +) + +// Stretch selects a normal, condensed, or expanded face. +// +// Not all fonts support stretches. 
+type Stretch int + +const ( + StretchUltraCondensed Stretch = -4 + StretchExtraCondensed Stretch = -3 + StretchCondensed Stretch = -2 + StretchSemiCondensed Stretch = -1 + StretchNormal Stretch = +0 + StretchSemiExpanded Stretch = +1 + StretchExpanded Stretch = +2 + StretchExtraExpanded Stretch = +3 + StretchUltraExpanded Stretch = +4 +) + +// Style selects a normal, italic, or oblique face. +// +// Not all fonts support styles. +type Style int + +const ( + StyleNormal Style = iota + StyleItalic + StyleOblique +) + +// Weight selects a normal, light or bold face. +// +// Not all fonts support weights. +type Weight int + +const ( + WeightThin Weight = 100 + WeightExtraLight Weight = 200 + WeightLight Weight = 300 + WeightNormal Weight = 400 + WeightMedium Weight = 500 + WeightSemiBold Weight = 600 + WeightBold Weight = 700 + WeightExtraBold Weight = 800 + WeightBlack Weight = 900 +) diff --git a/_third_party/golang.org/x/image/math/f64/f64.go b/_third_party/golang.org/x/image/math/f64/f64.go new file mode 100644 index 0000000000..a97da151eb --- /dev/null +++ b/_third_party/golang.org/x/image/math/f64/f64.go @@ -0,0 +1,37 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package f64 implements float64 vector and matrix types. +package f64 // import "bosun.org/_third_party/golang.org/x/image/math/f64" + +// Vec2 is a 2-element vector. +type Vec2 [2]float64 + +// Vec3 is a 3-element vector. +type Vec3 [3]float64 + +// Vec4 is a 4-element vector. +type Vec4 [4]float64 + +// Mat3 is a 3x3 matrix in row major order. +// +// m[3*r + c] is the element in the r'th row and c'th column. +type Mat3 [9]float64 + +// Mat4 is a 4x4 matrix in row major order. +// +// m[4*r + c] is the element in the r'th row and c'th column. +type Mat4 [16]float64 + +// Aff3 is a 3x3 affine transformation matrix in row major order, where the +// bottom row is implicitly [0 0 1]. +// +// m[3*r + c] is the element in the r'th row and c'th column. +type Aff3 [6]float64 + +// Aff4 is a 4x4 affine transformation matrix in row major order, where the +// bottom row is implicitly [0 0 0 1]. +// +// m[4*r + c] is the element in the r'th row and c'th column. +type Aff4 [12]float64 diff --git a/_third_party/golang.org/x/image/math/fixed/fixed.go b/_third_party/golang.org/x/image/math/fixed/fixed.go new file mode 100644 index 0000000000..f92d553d6f --- /dev/null +++ b/_third_party/golang.org/x/image/math/fixed/fixed.go @@ -0,0 +1,172 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fixed implements fixed-point integer types. +package fixed // import "bosun.org/_third_party/golang.org/x/image/math/fixed" + +import ( + "fmt" +) + +// TODO: implement fmt.Formatter for %f and %g. + +// I returns the integer value i as an Int26_6. +// +// For example, passing the integer value 2 yields Int26_6(128). +func I(i int) Int26_6 { + return Int26_6(i << 6) +} + +// Int26_6 is a signed 26.6 fixed-point number. +// +// The integer part ranges from -33554432 to 33554431, inclusive. The +// fractional part has 6 bits of precision. +// +// For example, the number one-and-a-quarter is Int26_6(1<<6 + 1<<4). +type Int26_6 int32 + +// String returns a human-readable representation of a 26.6 fixed-point number. +// +// For example, the number one-and-a-quarter becomes "1:16". 
+func (x Int26_6) String() string { + const shift, mask = 6, 1<<6 - 1 + if x >= 0 { + return fmt.Sprintf("%d:%02d", int32(x>>shift), int32(x&mask)) + } + x = -x + if x >= 0 { + return fmt.Sprintf("-%d:%02d", int32(x>>shift), int32(x&mask)) + } + return "-33554432:00" // The minimum value is -(1<<25). +} + +// Int52_12 is a signed 52.12 fixed-point number. +// +// The integer part ranges from -2251799813685248 to 2251799813685247, +// inclusive. The fractional part has 12 bits of precision. +// +// For example, the number one-and-a-quarter is Int52_12(1<<12 + 1<<10). +type Int52_12 int64 + +// String returns a human-readable representation of a 52.12 fixed-point +// number. +// +// For example, the number one-and-a-quarter becomes "1:1024". +func (x Int52_12) String() string { + const shift, mask = 12, 1<<12 - 1 + if x >= 0 { + return fmt.Sprintf("%d:%04d", int64(x>>shift), int64(x&mask)) + } + x = -x + if x >= 0 { + return fmt.Sprintf("-%d:%04d", int64(x>>shift), int64(x&mask)) + } + return "-2251799813685248:0000" // The minimum value is -(1<<51). +} + +// P returns the integer values x and y as a Point26_6. +// +// For example, passing the integer values (2, -3) yields Point26_6{128, -192}. +func P(x, y int) Point26_6 { + return Point26_6{Int26_6(x << 6), Int26_6(y << 6)} +} + +// Point26_6 is a 26.6 fixed-point coordinate pair. +// +// It is analogous to the image.Point type in the standard library. +type Point26_6 struct { + X, Y Int26_6 +} + +// Add returns the vector p+q. +func (p Point26_6) Add(q Point26_6) Point26_6 { + return Point26_6{p.X + q.X, p.Y + q.Y} +} + +// Sub returns the vector p-q. +func (p Point26_6) Sub(q Point26_6) Point26_6 { + return Point26_6{p.X - q.X, p.Y - q.Y} +} + +// Mul returns the vector p*k. +func (p Point26_6) Mul(k Int26_6) Point26_6 { + return Point26_6{p.X * k / 64, p.Y * k / 64} +} + +// Div returns the vector p/k. +func (p Point26_6) Div(k Int26_6) Point26_6 { + return Point26_6{p.X * 64 / k, p.Y * 64 / k} +} + +// Point52_12 is a 52.12 fixed-point coordinate pair. +// +// It is analogous to the image.Point type in the standard library. +type Point52_12 struct { + X, Y Int52_12 +} + +// Add returns the vector p+q. +func (p Point52_12) Add(q Point52_12) Point52_12 { + return Point52_12{p.X + q.X, p.Y + q.Y} +} + +// Sub returns the vector p-q. +func (p Point52_12) Sub(q Point52_12) Point52_12 { + return Point52_12{p.X - q.X, p.Y - q.Y} +} + +// Mul returns the vector p*k. +func (p Point52_12) Mul(k Int52_12) Point52_12 { + return Point52_12{p.X * k / 4096, p.Y * k / 4096} +} + +// Div returns the vector p/k. +func (p Point52_12) Div(k Int52_12) Point52_12 { + return Point52_12{p.X * 4096 / k, p.Y * 4096 / k} +} + +// R returns the integer values minX, minY, maxX, maxY as a Rectangle26_6. +// +// For example, passing the integer values (0, 1, 2, 3) yields +// Rectangle26_6{Point26_6{0, 64}, Point26_6{128, 192}}. +// +// Like the image.Rect function in the standard library, the returned rectangle +// has minimum and maximum coordinates swapped if necessary so that it is +// well-formed. +func R(minX, minY, maxX, maxY int) Rectangle26_6 { + if minX > maxX { + minX, maxX = maxX, minX + } + if minY > maxY { + minY, maxY = maxY, minY + } + return Rectangle26_6{ + Point26_6{ + Int26_6(minX << 6), + Int26_6(minY << 6), + }, + Point26_6{ + Int26_6(maxX << 6), + Int26_6(maxY << 6), + }, + } +} + +// Rectangle26_6 is a 26.6 fixed-point coordinate rectangle. The Min bound is +// inclusive and the Max bound is exclusive. 
It is well-formed if Min.X <= +// Max.X and likewise for Y. +// +// It is analogous to the image.Rectangle type in the standard library. +type Rectangle26_6 struct { + Min, Max Point26_6 +} + +// Rectangle52_12 is a 52.12 fixed-point coordinate rectangle. The Min bound is +// inclusive and the Max bound is exclusive. It is well-formed if Min.X <= +// Max.X and likewise for Y. +// +// It is analogous to the image.Rectangle type in the standard library. +type Rectangle52_12 struct { + Min, Max Point52_12 +} diff --git a/_third_party/golang.org/x/image/math/fixed/fixed_test.go b/_third_party/golang.org/x/image/math/fixed/fixed_test.go new file mode 100644 index 0000000000..e252de7cb8 --- /dev/null +++ b/_third_party/golang.org/x/image/math/fixed/fixed_test.go @@ -0,0 +1,25 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fixed + +import ( + "testing" +) + +func TestInt26_6(t *testing.T) { + got := Int26_6(1<<6 + 1<<4).String() + want := "1:16" + if got != want { + t.Fatalf("got %q, want %q", got, want) + } +} + +func TestInt52_12(t *testing.T) { + got := Int52_12(1<<12 + 1<<10).String() + want := "1:1024" + if got != want { + t.Fatalf("got %q, want %q", got, want) + } +} diff --git a/_third_party/golang.org/x/net/html/example_test.go b/_third_party/golang.org/x/net/html/example_test.go index 0b06ed7730..e1a87d4dcf 100644 --- a/_third_party/golang.org/x/net/html/example_test.go +++ b/_third_party/golang.org/x/net/html/example_test.go @@ -10,7 +10,7 @@ import ( "log" "strings" - "golang.org/x/net/html" + "bosun.org/_third_party/golang.org/x/net/html" ) func ExampleParse() { diff --git a/_third_party/golang.org/x/net/html/node.go b/_third_party/golang.org/x/net/html/node.go index 26b657aec8..e6d2de2a98 100644 --- a/_third_party/golang.org/x/net/html/node.go +++ b/_third_party/golang.org/x/net/html/node.go @@ -5,7 +5,7 @@ package html import ( - "golang.org/x/net/html/atom" + "bosun.org/_third_party/golang.org/x/net/html/atom" ) // A NodeType is the type of a Node. diff --git a/_third_party/golang.org/x/net/html/parse.go b/_third_party/golang.org/x/net/html/parse.go index be4b2bf5aa..8ff072b3d7 100644 --- a/_third_party/golang.org/x/net/html/parse.go +++ b/_third_party/golang.org/x/net/html/parse.go @@ -10,7 +10,7 @@ import ( "io" "strings" - a "golang.org/x/net/html/atom" + a "bosun.org/_third_party/golang.org/x/net/html/atom" ) // A parser implements the HTML5 parsing algorithm: diff --git a/_third_party/golang.org/x/net/html/parse_test.go b/_third_party/golang.org/x/net/html/parse_test.go index 7e47d11be8..7626583c55 100644 --- a/_third_party/golang.org/x/net/html/parse_test.go +++ b/_third_party/golang.org/x/net/html/parse_test.go @@ -18,7 +18,7 @@ import ( "strings" "testing" - "golang.org/x/net/html/atom" + "bosun.org/_third_party/golang.org/x/net/html/atom" ) // readParseTest reads a single test case from r. diff --git a/_third_party/golang.org/x/net/html/token.go b/_third_party/golang.org/x/net/html/token.go index 893e272a9e..b0fce5f69c 100644 --- a/_third_party/golang.org/x/net/html/token.go +++ b/_third_party/golang.org/x/net/html/token.go @@ -11,7 +11,7 @@ import ( "strconv" "strings" - "golang.org/x/net/html/atom" + "bosun.org/_third_party/golang.org/x/net/html/atom" ) // A TokenType is the type of a Token. 
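The fixed package added above is small enough to exercise end to end. Below is a minimal, illustrative sketch (not part of the patch) of the 26.6 helpers I, P, and R and the String formatting defined in fixed.go:

package main

import (
	"fmt"

	"bosun.org/_third_party/golang.org/x/image/math/fixed"
)

func main() {
	// I(2) is the integer 2 in 26.6 form: 2<<6 == Int26_6(128).
	two := fixed.I(2)

	// One-and-a-quarter: integer part 1 shifted left 6 bits, plus a
	// fractional part of 16/64.
	oneQ := fixed.Int26_6(1<<6 + 1<<4)
	fmt.Println(two, oneQ) // prints "2:00 1:16" per Int26_6.String

	// P and R mirror image.Pt and image.Rect, scaled by 1<<6.
	p := fixed.P(2, -3)      // Point26_6{128, -192}
	r := fixed.R(0, 1, 2, 3) // Min is (0, 64), Max is (128, 192)
	fmt.Println(p.Add(p), r.Min.X, r.Max.Y)
}

Note the design choice visible in the vendored source: Point26_6.Mul is p.X*k/64 rather than a plain product, because multiplying two 26.6 values yields a 52.12 value, and dividing by 64 restores the 6-bit fractional scale.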
diff --git a/_third_party/golang.org/x/net/icmp/endpoint.go b/_third_party/golang.org/x/net/icmp/endpoint.go index 0213d1a134..5d48fc4138 100644 --- a/_third_party/golang.org/x/net/icmp/endpoint.go +++ b/_third_party/golang.org/x/net/icmp/endpoint.go @@ -10,8 +10,8 @@ import ( "syscall" "time" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) var _ net.PacketConn = &PacketConn{} diff --git a/_third_party/golang.org/x/net/icmp/example_test.go b/_third_party/golang.org/x/net/icmp/example_test.go index 1df4ceccdd..2baf04bbf8 100644 --- a/_third_party/golang.org/x/net/icmp/example_test.go +++ b/_third_party/golang.org/x/net/icmp/example_test.go @@ -10,8 +10,8 @@ import ( "os" "runtime" - "golang.org/x/net/icmp" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/icmp" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) func ExamplePacketConn_nonPrivilegedPing() { diff --git a/_third_party/golang.org/x/net/icmp/extension_test.go b/_third_party/golang.org/x/net/icmp/extension_test.go index 0b3f7b9e15..f8546fb893 100644 --- a/_third_party/golang.org/x/net/icmp/extension_test.go +++ b/_third_party/golang.org/x/net/icmp/extension_test.go @@ -9,7 +9,7 @@ import ( "reflect" "testing" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) var marshalAndParseExtensionTests = []struct { diff --git a/_third_party/golang.org/x/net/icmp/interface.go b/_third_party/golang.org/x/net/icmp/interface.go index c7bf8dd1a6..c691a3f147 100644 --- a/_third_party/golang.org/x/net/icmp/interface.go +++ b/_third_party/golang.org/x/net/icmp/interface.go @@ -8,7 +8,7 @@ import ( "net" "strings" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) const ( diff --git a/_third_party/golang.org/x/net/icmp/ipv4.go b/_third_party/golang.org/x/net/icmp/ipv4.go index a252d730ed..72309ca41b 100644 --- a/_third_party/golang.org/x/net/icmp/ipv4.go +++ b/_third_party/golang.org/x/net/icmp/ipv4.go @@ -9,7 +9,7 @@ import ( "runtime" "unsafe" - "golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/ipv4" ) // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. 
diff --git a/_third_party/golang.org/x/net/icmp/ipv4_test.go b/_third_party/golang.org/x/net/icmp/ipv4_test.go index b05c697394..e0271a7ae7 100644 --- a/_third_party/golang.org/x/net/icmp/ipv4_test.go +++ b/_third_party/golang.org/x/net/icmp/ipv4_test.go @@ -10,7 +10,7 @@ import ( "runtime" "testing" - "golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/ipv4" ) var ( diff --git a/_third_party/golang.org/x/net/icmp/ipv6.go b/_third_party/golang.org/x/net/icmp/ipv6.go index fe4031a2f2..839f39a419 100644 --- a/_third_party/golang.org/x/net/icmp/ipv6.go +++ b/_third_party/golang.org/x/net/icmp/ipv6.go @@ -7,7 +7,7 @@ package icmp import ( "net" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) const ipv6PseudoHeaderLen = 2*net.IPv6len + 8 diff --git a/_third_party/golang.org/x/net/icmp/listen_posix.go b/_third_party/golang.org/x/net/icmp/listen_posix.go index b9f260796e..78950e88ef 100644 --- a/_third_party/golang.org/x/net/icmp/listen_posix.go +++ b/_third_party/golang.org/x/net/icmp/listen_posix.go @@ -12,9 +12,9 @@ import ( "runtime" "syscall" - "golang.org/x/net/internal/iana" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) const sysIP_STRIPHDR = 0x17 // for now only darwin supports this option diff --git a/_third_party/golang.org/x/net/icmp/message.go b/_third_party/golang.org/x/net/icmp/message.go index 6fd68ab070..b6d019aa73 100644 --- a/_third_party/golang.org/x/net/icmp/message.go +++ b/_third_party/golang.org/x/net/icmp/message.go @@ -18,9 +18,9 @@ import ( "net" "syscall" - "golang.org/x/net/internal/iana" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) var ( diff --git a/_third_party/golang.org/x/net/icmp/message_test.go b/_third_party/golang.org/x/net/icmp/message_test.go index 5d2605f8d1..71a6efad60 100644 --- a/_third_party/golang.org/x/net/icmp/message_test.go +++ b/_third_party/golang.org/x/net/icmp/message_test.go @@ -9,10 +9,10 @@ import ( "reflect" "testing" - "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/icmp" + "bosun.org/_third_party/golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) var marshalAndParseMessageForIPv4Tests = []icmp.Message{ diff --git a/_third_party/golang.org/x/net/icmp/multipart.go b/_third_party/golang.org/x/net/icmp/multipart.go index 54ac8bc12a..eb1eb8cdeb 100644 --- a/_third_party/golang.org/x/net/icmp/multipart.go +++ b/_third_party/golang.org/x/net/icmp/multipart.go @@ -4,7 +4,7 @@ package icmp -import "golang.org/x/net/internal/iana" +import "bosun.org/_third_party/golang.org/x/net/internal/iana" // multipartMessageBodyDataLen takes b as an original datagram and // exts as extensions, and returns a required length for message body diff --git a/_third_party/golang.org/x/net/icmp/multipart_test.go b/_third_party/golang.org/x/net/icmp/multipart_test.go index 9248e475fa..597a676942 100644 --- a/_third_party/golang.org/x/net/icmp/multipart_test.go +++ b/_third_party/golang.org/x/net/icmp/multipart_test.go @@ -10,10 +10,10 @@ import ( "reflect" "testing" - "golang.org/x/net/icmp" - 
"golang.org/x/net/internal/iana" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/icmp" + "bosun.org/_third_party/golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) var marshalAndParseMultipartMessageForIPv4Tests = []icmp.Message{ diff --git a/_third_party/golang.org/x/net/icmp/paramprob.go b/_third_party/golang.org/x/net/icmp/paramprob.go index f200a7c29c..bac8c0ce5e 100644 --- a/_third_party/golang.org/x/net/icmp/paramprob.go +++ b/_third_party/golang.org/x/net/icmp/paramprob.go @@ -4,7 +4,7 @@ package icmp -import "golang.org/x/net/internal/iana" +import "bosun.org/_third_party/golang.org/x/net/internal/iana" // A ParamProb represents an ICMP parameter problem message body. type ParamProb struct { diff --git a/_third_party/golang.org/x/net/icmp/ping_test.go b/_third_party/golang.org/x/net/icmp/ping_test.go index 4ec269284f..9e8c487ffd 100644 --- a/_third_party/golang.org/x/net/icmp/ping_test.go +++ b/_third_party/golang.org/x/net/icmp/ping_test.go @@ -13,11 +13,11 @@ import ( "testing" "time" - "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/icmp" + "bosun.org/_third_party/golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) func googleAddr(c *icmp.PacketConn, protocol int) (net.Addr, error) { diff --git a/_third_party/golang.org/x/net/ipv4/control_bsd.go b/_third_party/golang.org/x/net/ipv4/control_bsd.go index 33d8bc8b38..a4340647c1 100644 --- a/_third_party/golang.org/x/net/ipv4/control_bsd.go +++ b/_third_party/golang.org/x/net/ipv4/control_bsd.go @@ -11,7 +11,7 @@ import ( "syscall" "unsafe" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) func marshalDst(b []byte, cm *ControlMessage) []byte { diff --git a/_third_party/golang.org/x/net/ipv4/control_pktinfo.go b/_third_party/golang.org/x/net/ipv4/control_pktinfo.go index 444782f397..525f14210e 100644 --- a/_third_party/golang.org/x/net/ipv4/control_pktinfo.go +++ b/_third_party/golang.org/x/net/ipv4/control_pktinfo.go @@ -10,7 +10,7 @@ import ( "syscall" "unsafe" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { diff --git a/_third_party/golang.org/x/net/ipv4/control_unix.go b/_third_party/golang.org/x/net/ipv4/control_unix.go index 3000c52e40..bbd7a1d4be 100644 --- a/_third_party/golang.org/x/net/ipv4/control_unix.go +++ b/_third_party/golang.org/x/net/ipv4/control_unix.go @@ -11,7 +11,7 @@ import ( "syscall" "unsafe" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { diff --git a/_third_party/golang.org/x/net/ipv4/example_test.go b/_third_party/golang.org/x/net/ipv4/example_test.go index 2fdc6c6042..13095a63ef 100644 --- a/_third_party/golang.org/x/net/ipv4/example_test.go +++ b/_third_party/golang.org/x/net/ipv4/example_test.go @@ -12,8 +12,8 @@ import ( "runtime" "time" - "golang.org/x/net/icmp" - "golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/icmp" + "bosun.org/_third_party/golang.org/x/net/ipv4" ) func ExampleConn_markingTCP() { diff 
--git a/_third_party/golang.org/x/net/ipv4/icmp.go b/_third_party/golang.org/x/net/ipv4/icmp.go index dbd05cff2c..919fa9b9b9 100644 --- a/_third_party/golang.org/x/net/ipv4/icmp.go +++ b/_third_party/golang.org/x/net/ipv4/icmp.go @@ -4,7 +4,7 @@ package ipv4 -import "golang.org/x/net/internal/iana" +import "bosun.org/_third_party/golang.org/x/net/internal/iana" // An ICMPType represents a type of ICMP message. type ICMPType int diff --git a/_third_party/golang.org/x/net/ipv4/icmp_test.go b/_third_party/golang.org/x/net/ipv4/icmp_test.go index 3324b54df6..b917572add 100644 --- a/_third_party/golang.org/x/net/ipv4/icmp_test.go +++ b/_third_party/golang.org/x/net/ipv4/icmp_test.go @@ -10,8 +10,8 @@ import ( "runtime" "testing" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv4" ) var icmpStringTests = []struct { diff --git a/_third_party/golang.org/x/net/ipv4/multicast_test.go b/_third_party/golang.org/x/net/ipv4/multicast_test.go index 3f0304833d..8b89a41860 100644 --- a/_third_party/golang.org/x/net/ipv4/multicast_test.go +++ b/_third_party/golang.org/x/net/ipv4/multicast_test.go @@ -12,10 +12,10 @@ import ( "testing" "time" - "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/icmp" + "bosun.org/_third_party/golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv4" ) var packetConnReadWriteMulticastUDPTests = []struct { diff --git a/_third_party/golang.org/x/net/ipv4/multicastlistener_test.go b/_third_party/golang.org/x/net/ipv4/multicastlistener_test.go index e342bf1d90..83e2e9d877 100644 --- a/_third_party/golang.org/x/net/ipv4/multicastlistener_test.go +++ b/_third_party/golang.org/x/net/ipv4/multicastlistener_test.go @@ -9,8 +9,8 @@ import ( "runtime" "testing" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv4" ) var udpMultipleGroupListenerTests = []net.Addr{ diff --git a/_third_party/golang.org/x/net/ipv4/multicastsockopt_test.go b/_third_party/golang.org/x/net/ipv4/multicastsockopt_test.go index c76dbe4def..89fe46edee 100644 --- a/_third_party/golang.org/x/net/ipv4/multicastsockopt_test.go +++ b/_third_party/golang.org/x/net/ipv4/multicastsockopt_test.go @@ -9,8 +9,8 @@ import ( "runtime" "testing" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv4" ) var packetConnMulticastSocketOptionTests = []struct { diff --git a/_third_party/golang.org/x/net/ipv4/readwrite_test.go b/_third_party/golang.org/x/net/ipv4/readwrite_test.go index 5e6533ef8b..80cb700fbf 100644 --- a/_third_party/golang.org/x/net/ipv4/readwrite_test.go +++ b/_third_party/golang.org/x/net/ipv4/readwrite_test.go @@ -11,8 +11,8 @@ import ( "sync" "testing" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv4" ) func benchmarkUDPListener() (net.PacketConn, net.Addr, error) { diff --git a/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_unix.go b/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_unix.go index fefa901e6d..f3a790264d 100644 --- 
a/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_unix.go +++ b/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_unix.go @@ -11,7 +11,7 @@ import ( "os" "unsafe" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) func setsockoptIPMreq(fd, name int, ifi *net.Interface, grp net.IP) error { diff --git a/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_windows.go b/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_windows.go index 431930df75..16c3ea2e7e 100644 --- a/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_windows.go +++ b/_third_party/golang.org/x/net/ipv4/sockopt_asmreq_windows.go @@ -10,7 +10,7 @@ import ( "syscall" "unsafe" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) func setsockoptIPMreq(fd syscall.Handle, name int, ifi *net.Interface, grp net.IP) error { diff --git a/_third_party/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go b/_third_party/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go index 92c8e34cfa..6005522d2a 100644 --- a/_third_party/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go +++ b/_third_party/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go @@ -11,7 +11,7 @@ import ( "os" "unsafe" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) func getsockoptIPMreqn(fd, name int) (*net.Interface, error) { diff --git a/_third_party/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go b/_third_party/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go index 6f647bc58a..16127d079f 100644 --- a/_third_party/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go +++ b/_third_party/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go @@ -11,7 +11,7 @@ import ( "os" "unsafe" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) var freebsd32o64 bool diff --git a/_third_party/golang.org/x/net/ipv4/sockopt_unix.go b/_third_party/golang.org/x/net/ipv4/sockopt_unix.go index 50cdbd81e2..ca838b0d50 100644 --- a/_third_party/golang.org/x/net/ipv4/sockopt_unix.go +++ b/_third_party/golang.org/x/net/ipv4/sockopt_unix.go @@ -11,7 +11,7 @@ import ( "os" "unsafe" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) func getInt(fd int, opt *sockOpt) (int, error) { diff --git a/_third_party/golang.org/x/net/ipv4/sockopt_windows.go b/_third_party/golang.org/x/net/ipv4/sockopt_windows.go index c4c2441ec5..3175bfa575 100644 --- a/_third_party/golang.org/x/net/ipv4/sockopt_windows.go +++ b/_third_party/golang.org/x/net/ipv4/sockopt_windows.go @@ -10,7 +10,7 @@ import ( "syscall" "unsafe" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) func getInt(fd syscall.Handle, opt *sockOpt) (int, error) { diff --git a/_third_party/golang.org/x/net/ipv4/unicast_test.go b/_third_party/golang.org/x/net/ipv4/unicast_test.go index 255096a8c3..ee14fb52ad 100644 --- a/_third_party/golang.org/x/net/ipv4/unicast_test.go +++ b/_third_party/golang.org/x/net/ipv4/unicast_test.go @@ -12,10 +12,10 @@ import ( "testing" "time" - "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/icmp" + "bosun.org/_third_party/golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv4" ) func TestPacketConnReadWriteUnicastUDP(t *testing.T) { diff --git a/_third_party/golang.org/x/net/ipv4/unicastsockopt_test.go 
b/_third_party/golang.org/x/net/ipv4/unicastsockopt_test.go index 25606f21da..428138aded 100644 --- a/_third_party/golang.org/x/net/ipv4/unicastsockopt_test.go +++ b/_third_party/golang.org/x/net/ipv4/unicastsockopt_test.go @@ -9,9 +9,9 @@ import ( "runtime" "testing" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" + "bosun.org/_third_party/golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv4" ) func TestConnUnicastSocketOptions(t *testing.T) { diff --git a/_third_party/golang.org/x/net/ipv6/control_rfc2292_unix.go b/_third_party/golang.org/x/net/ipv6/control_rfc2292_unix.go index ce201ce363..cc5649a160 100644 --- a/_third_party/golang.org/x/net/ipv6/control_rfc2292_unix.go +++ b/_third_party/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -10,7 +10,7 @@ import ( "syscall" "unsafe" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte { diff --git a/_third_party/golang.org/x/net/ipv6/control_rfc3542_unix.go b/_third_party/golang.org/x/net/ipv6/control_rfc3542_unix.go index e55c4aa973..f71bb81aa3 100644 --- a/_third_party/golang.org/x/net/ipv6/control_rfc3542_unix.go +++ b/_third_party/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -10,7 +10,7 @@ import ( "syscall" "unsafe" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) func marshalTrafficClass(b []byte, cm *ControlMessage) []byte { diff --git a/_third_party/golang.org/x/net/ipv6/control_unix.go b/_third_party/golang.org/x/net/ipv6/control_unix.go index 2af5beb43e..a562426587 100644 --- a/_third_party/golang.org/x/net/ipv6/control_unix.go +++ b/_third_party/golang.org/x/net/ipv6/control_unix.go @@ -10,7 +10,7 @@ import ( "os" "syscall" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { diff --git a/_third_party/golang.org/x/net/ipv6/example_test.go b/_third_party/golang.org/x/net/ipv6/example_test.go index a2a3030c1f..de4cfecad2 100644 --- a/_third_party/golang.org/x/net/ipv6/example_test.go +++ b/_third_party/golang.org/x/net/ipv6/example_test.go @@ -11,8 +11,8 @@ import ( "os" "time" - "golang.org/x/net/icmp" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/icmp" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) func ExampleConn_markingTCP() { diff --git a/_third_party/golang.org/x/net/ipv6/header_test.go b/_third_party/golang.org/x/net/ipv6/header_test.go index 18e0023ef2..2cc0636b53 100644 --- a/_third_party/golang.org/x/net/ipv6/header_test.go +++ b/_third_party/golang.org/x/net/ipv6/header_test.go @@ -9,8 +9,8 @@ import ( "reflect" "testing" - "golang.org/x/net/internal/iana" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) var ( diff --git a/_third_party/golang.org/x/net/ipv6/icmp.go b/_third_party/golang.org/x/net/ipv6/icmp.go index a2de65a08c..0845d4de71 100644 --- a/_third_party/golang.org/x/net/ipv6/icmp.go +++ b/_third_party/golang.org/x/net/ipv6/icmp.go @@ -4,7 +4,7 @@ package ipv6 -import "golang.org/x/net/internal/iana" +import "bosun.org/_third_party/golang.org/x/net/internal/iana" // An ICMPType represents a type of ICMP message. 
type ICMPType int diff --git a/_third_party/golang.org/x/net/ipv6/icmp_test.go b/_third_party/golang.org/x/net/ipv6/icmp_test.go index e192d6d8c2..f40c2adf69 100644 --- a/_third_party/golang.org/x/net/ipv6/icmp_test.go +++ b/_third_party/golang.org/x/net/ipv6/icmp_test.go @@ -10,8 +10,8 @@ import ( "runtime" "testing" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) var icmpStringTests = []struct { diff --git a/_third_party/golang.org/x/net/ipv6/multicast_test.go b/_third_party/golang.org/x/net/ipv6/multicast_test.go index fc10ce109f..ea7d563106 100644 --- a/_third_party/golang.org/x/net/ipv6/multicast_test.go +++ b/_third_party/golang.org/x/net/ipv6/multicast_test.go @@ -12,10 +12,10 @@ import ( "testing" "time" - "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/icmp" + "bosun.org/_third_party/golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) var packetConnReadWriteMulticastUDPTests = []struct { diff --git a/_third_party/golang.org/x/net/ipv6/multicastlistener_test.go b/_third_party/golang.org/x/net/ipv6/multicastlistener_test.go index 9711f7513f..306e9a9cde 100644 --- a/_third_party/golang.org/x/net/ipv6/multicastlistener_test.go +++ b/_third_party/golang.org/x/net/ipv6/multicastlistener_test.go @@ -10,8 +10,8 @@ import ( "runtime" "testing" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) var udpMultipleGroupListenerTests = []net.Addr{ diff --git a/_third_party/golang.org/x/net/ipv6/multicastsockopt_test.go b/_third_party/golang.org/x/net/ipv6/multicastsockopt_test.go index fe0e6e1b14..b797aea588 100644 --- a/_third_party/golang.org/x/net/ipv6/multicastsockopt_test.go +++ b/_third_party/golang.org/x/net/ipv6/multicastsockopt_test.go @@ -9,8 +9,8 @@ import ( "runtime" "testing" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) var packetConnMulticastSocketOptionTests = []struct { diff --git a/_third_party/golang.org/x/net/ipv6/readwrite_test.go b/_third_party/golang.org/x/net/ipv6/readwrite_test.go index ff4ea2b590..57454efeea 100644 --- a/_third_party/golang.org/x/net/ipv6/readwrite_test.go +++ b/_third_party/golang.org/x/net/ipv6/readwrite_test.go @@ -11,9 +11,9 @@ import ( "sync" "testing" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) func benchmarkUDPListener() (net.PacketConn, net.Addr, error) { diff --git a/_third_party/golang.org/x/net/ipv6/sockopt_test.go b/_third_party/golang.org/x/net/ipv6/sockopt_test.go index 9c21903160..edc75e511a 100644 --- a/_third_party/golang.org/x/net/ipv6/sockopt_test.go +++ b/_third_party/golang.org/x/net/ipv6/sockopt_test.go @@ -10,9 +10,9 @@ import ( "runtime" "testing" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/internal/iana" + 
"bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) var supportsIPv6 bool = nettest.SupportsIPv6() diff --git a/_third_party/golang.org/x/net/ipv6/sys_bsd.go b/_third_party/golang.org/x/net/ipv6/sys_bsd.go index 75a8863b3e..83a3e3d8d7 100644 --- a/_third_party/golang.org/x/net/ipv6/sys_bsd.go +++ b/_third_party/golang.org/x/net/ipv6/sys_bsd.go @@ -10,7 +10,7 @@ import ( "net" "syscall" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) type sysSockoptLen int32 diff --git a/_third_party/golang.org/x/net/ipv6/sys_darwin.go b/_third_party/golang.org/x/net/ipv6/sys_darwin.go index 411fb498c8..531486adeb 100644 --- a/_third_party/golang.org/x/net/ipv6/sys_darwin.go +++ b/_third_party/golang.org/x/net/ipv6/sys_darwin.go @@ -9,7 +9,7 @@ import ( "syscall" "unsafe" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) type sysSockoptLen int32 diff --git a/_third_party/golang.org/x/net/ipv6/sys_freebsd.go b/_third_party/golang.org/x/net/ipv6/sys_freebsd.go index b68725cba6..3acbc592cc 100644 --- a/_third_party/golang.org/x/net/ipv6/sys_freebsd.go +++ b/_third_party/golang.org/x/net/ipv6/sys_freebsd.go @@ -11,7 +11,7 @@ import ( "syscall" "unsafe" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) type sysSockoptLen int32 diff --git a/_third_party/golang.org/x/net/ipv6/sys_linux.go b/_third_party/golang.org/x/net/ipv6/sys_linux.go index 2fa6088d0f..fcd1f8e3ce 100644 --- a/_third_party/golang.org/x/net/ipv6/sys_linux.go +++ b/_third_party/golang.org/x/net/ipv6/sys_linux.go @@ -9,7 +9,7 @@ import ( "syscall" "unsafe" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) type sysSockoptLen int32 diff --git a/_third_party/golang.org/x/net/ipv6/sys_windows.go b/_third_party/golang.org/x/net/ipv6/sys_windows.go index fda875736f..0bc0150181 100644 --- a/_third_party/golang.org/x/net/ipv6/sys_windows.go +++ b/_third_party/golang.org/x/net/ipv6/sys_windows.go @@ -8,7 +8,7 @@ import ( "net" "syscall" - "golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/iana" ) const ( diff --git a/_third_party/golang.org/x/net/ipv6/unicast_test.go b/_third_party/golang.org/x/net/ipv6/unicast_test.go index 61656983b4..97f60e2325 100644 --- a/_third_party/golang.org/x/net/ipv6/unicast_test.go +++ b/_third_party/golang.org/x/net/ipv6/unicast_test.go @@ -12,10 +12,10 @@ import ( "testing" "time" - "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/icmp" + "bosun.org/_third_party/golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + "bosun.org/_third_party/golang.org/x/net/ipv6" ) func TestPacketConnReadWriteUnicastUDP(t *testing.T) { diff --git a/_third_party/golang.org/x/net/ipv6/unicastsockopt_test.go b/_third_party/golang.org/x/net/ipv6/unicastsockopt_test.go index 7bb2e440ac..72ce6b270a 100644 --- a/_third_party/golang.org/x/net/ipv6/unicastsockopt_test.go +++ b/_third_party/golang.org/x/net/ipv6/unicastsockopt_test.go @@ -9,9 +9,9 @@ import ( "runtime" "testing" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" + "bosun.org/_third_party/golang.org/x/net/internal/iana" + "bosun.org/_third_party/golang.org/x/net/internal/nettest" + 
"bosun.org/_third_party/golang.org/x/net/ipv6" ) func TestConnUnicastSocketOptions(t *testing.T) { diff --git a/_third_party/golang.org/x/sys/unix/creds_test.go b/_third_party/golang.org/x/sys/unix/creds_test.go index eaae7c367f..4c55d7d3fd 100644 --- a/_third_party/golang.org/x/sys/unix/creds_test.go +++ b/_third_party/golang.org/x/sys/unix/creds_test.go @@ -13,7 +13,7 @@ import ( "syscall" "testing" - "golang.org/x/sys/unix" + "bosun.org/_third_party/golang.org/x/sys/unix" ) // TestSCMCredentials tests the sending and receiving of credentials diff --git a/_third_party/golang.org/x/sys/unix/mmap_unix_test.go b/_third_party/golang.org/x/sys/unix/mmap_unix_test.go index 18ccec05f1..79e1733c10 100644 --- a/_third_party/golang.org/x/sys/unix/mmap_unix_test.go +++ b/_third_party/golang.org/x/sys/unix/mmap_unix_test.go @@ -9,7 +9,7 @@ package unix_test import ( "testing" - "golang.org/x/sys/unix" + "bosun.org/_third_party/golang.org/x/sys/unix" ) func TestMmap(t *testing.T) { diff --git a/_third_party/golang.org/x/sys/unix/syscall_bsd_test.go b/_third_party/golang.org/x/sys/unix/syscall_bsd_test.go index 55d8843094..e7ba5abeea 100644 --- a/_third_party/golang.org/x/sys/unix/syscall_bsd_test.go +++ b/_third_party/golang.org/x/sys/unix/syscall_bsd_test.go @@ -9,7 +9,7 @@ package unix_test import ( "testing" - "golang.org/x/sys/unix" + "bosun.org/_third_party/golang.org/x/sys/unix" ) const MNT_WAIT = 1 diff --git a/_third_party/golang.org/x/sys/unix/syscall_test.go b/_third_party/golang.org/x/sys/unix/syscall_test.go index 95eac92aca..75f200af2a 100644 --- a/_third_party/golang.org/x/sys/unix/syscall_test.go +++ b/_third_party/golang.org/x/sys/unix/syscall_test.go @@ -10,7 +10,7 @@ import ( "fmt" "testing" - "golang.org/x/sys/unix" + "bosun.org/_third_party/golang.org/x/sys/unix" ) func testSetGetenv(t *testing.T, key, value string) { diff --git a/_third_party/golang.org/x/sys/unix/syscall_unix_test.go b/_third_party/golang.org/x/sys/unix/syscall_unix_test.go index bcc79d19ca..8dec398eb0 100644 --- a/_third_party/golang.org/x/sys/unix/syscall_unix_test.go +++ b/_third_party/golang.org/x/sys/unix/syscall_unix_test.go @@ -18,7 +18,7 @@ import ( "testing" "time" - "golang.org/x/sys/unix" + "bosun.org/_third_party/golang.org/x/sys/unix" ) // Tests that below functions, structures and constants are consistent diff --git a/_third_party/golang.org/x/sys/windows/registry/registry_test.go b/_third_party/golang.org/x/sys/windows/registry/registry_test.go index 6547a45b28..4e21b7c031 100644 --- a/_third_party/golang.org/x/sys/windows/registry/registry_test.go +++ b/_third_party/golang.org/x/sys/windows/registry/registry_test.go @@ -15,7 +15,7 @@ import ( "time" "unsafe" - "golang.org/x/sys/windows/registry" + "bosun.org/_third_party/golang.org/x/sys/windows/registry" ) func randKeyName(prefix string) string { diff --git a/_third_party/golang.org/x/sys/windows/svc/debug/service.go b/_third_party/golang.org/x/sys/windows/svc/debug/service.go index d5ab94b2c7..c6e2bd3db1 100644 --- a/_third_party/golang.org/x/sys/windows/svc/debug/service.go +++ b/_third_party/golang.org/x/sys/windows/svc/debug/service.go @@ -13,7 +13,7 @@ import ( "os/signal" "syscall" - "golang.org/x/sys/windows/svc" + "bosun.org/_third_party/golang.org/x/sys/windows/svc" ) // Run executes service name by calling appropriate handler function. 
diff --git a/_third_party/golang.org/x/sys/windows/svc/event.go b/_third_party/golang.org/x/sys/windows/svc/event.go index 0508e22881..74571ebdb8 100644 --- a/_third_party/golang.org/x/sys/windows/svc/event.go +++ b/_third_party/golang.org/x/sys/windows/svc/event.go @@ -9,7 +9,7 @@ package svc import ( "errors" - "golang.org/x/sys/windows" + "bosun.org/_third_party/golang.org/x/sys/windows" ) // event represents auto-reset, initially non-signaled Windows event. diff --git a/_third_party/golang.org/x/sys/windows/svc/eventlog/install.go b/_third_party/golang.org/x/sys/windows/svc/eventlog/install.go index c76a3760a4..0c6f724481 100644 --- a/_third_party/golang.org/x/sys/windows/svc/eventlog/install.go +++ b/_third_party/golang.org/x/sys/windows/svc/eventlog/install.go @@ -9,8 +9,8 @@ package eventlog import ( "errors" - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/registry" + "bosun.org/_third_party/golang.org/x/sys/windows" + "bosun.org/_third_party/golang.org/x/sys/windows/registry" ) const ( diff --git a/_third_party/golang.org/x/sys/windows/svc/eventlog/log.go b/_third_party/golang.org/x/sys/windows/svc/eventlog/log.go index 46e5153d02..88e3232cb0 100644 --- a/_third_party/golang.org/x/sys/windows/svc/eventlog/log.go +++ b/_third_party/golang.org/x/sys/windows/svc/eventlog/log.go @@ -12,7 +12,7 @@ import ( "errors" "syscall" - "golang.org/x/sys/windows" + "bosun.org/_third_party/golang.org/x/sys/windows" ) // Log provides access to the system log. diff --git a/_third_party/golang.org/x/sys/windows/svc/eventlog/log_test.go b/_third_party/golang.org/x/sys/windows/svc/eventlog/log_test.go index 4dd8ad9e74..25e42716fc 100644 --- a/_third_party/golang.org/x/sys/windows/svc/eventlog/log_test.go +++ b/_third_party/golang.org/x/sys/windows/svc/eventlog/log_test.go @@ -9,7 +9,7 @@ package eventlog_test import ( "testing" - "golang.org/x/sys/windows/svc/eventlog" + "bosun.org/_third_party/golang.org/x/sys/windows/svc/eventlog" ) func TestLog(t *testing.T) { diff --git a/_third_party/golang.org/x/sys/windows/svc/mgr/config.go b/_third_party/golang.org/x/sys/windows/svc/mgr/config.go index 0a6edba4f5..50ebafc05c 100644 --- a/_third_party/golang.org/x/sys/windows/svc/mgr/config.go +++ b/_third_party/golang.org/x/sys/windows/svc/mgr/config.go @@ -11,7 +11,7 @@ import ( "unicode/utf16" "unsafe" - "golang.org/x/sys/windows" + "bosun.org/_third_party/golang.org/x/sys/windows" ) const ( diff --git a/_third_party/golang.org/x/sys/windows/svc/mgr/mgr.go b/_third_party/golang.org/x/sys/windows/svc/mgr/mgr.go index 4d7e72ec46..d7749269b4 100644 --- a/_third_party/golang.org/x/sys/windows/svc/mgr/mgr.go +++ b/_third_party/golang.org/x/sys/windows/svc/mgr/mgr.go @@ -15,7 +15,7 @@ import ( "syscall" "unicode/utf16" - "golang.org/x/sys/windows" + "bosun.org/_third_party/golang.org/x/sys/windows" ) // Mgr is used to manage Windows service. 
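The mgr hunk above likewise only redirects imports, but since its closing context line is the Mgr doc comment, a small sketch of the connect/open/query flow may help; "Spooler" is just an example of a commonly installed Windows service, not anything this patch depends on:

// +build windows

package main

import (
	"fmt"

	"bosun.org/_third_party/golang.org/x/sys/windows/svc/mgr"
)

func main() {
	// Connect opens a handle to the local service control manager.
	m, err := mgr.Connect()
	if err != nil {
		fmt.Println("connect:", err)
		return
	}
	defer m.Disconnect()

	// Any installed service name works here.
	s, err := m.OpenService("Spooler")
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer s.Close()

	c, err := s.Config()
	if err != nil {
		fmt.Println("config:", err)
		return
	}
	fmt.Println(c.DisplayName)
}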
diff --git a/_third_party/golang.org/x/sys/windows/svc/mgr/mgr_test.go b/_third_party/golang.org/x/sys/windows/svc/mgr/mgr_test.go index 78be970c05..4876e4548b 100644 --- a/_third_party/golang.org/x/sys/windows/svc/mgr/mgr_test.go +++ b/_third_party/golang.org/x/sys/windows/svc/mgr/mgr_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - "golang.org/x/sys/windows/svc/mgr" + "bosun.org/_third_party/golang.org/x/sys/windows/svc/mgr" ) func TestOpenLanManServer(t *testing.T) { diff --git a/_third_party/golang.org/x/sys/windows/svc/mgr/service.go b/_third_party/golang.org/x/sys/windows/svc/mgr/service.go index 465f3c3d23..c8994e1bee 100644 --- a/_third_party/golang.org/x/sys/windows/svc/mgr/service.go +++ b/_third_party/golang.org/x/sys/windows/svc/mgr/service.go @@ -9,8 +9,8 @@ package mgr import ( "syscall" - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/svc" + "bosun.org/_third_party/golang.org/x/sys/windows" + "bosun.org/_third_party/golang.org/x/sys/windows/svc" ) // TODO(brainman): Use EnumDependentServices to enumerate dependent services. diff --git a/_third_party/golang.org/x/sys/windows/svc/security.go b/_third_party/golang.org/x/sys/windows/svc/security.go index 6fbc9236ed..08cb78e0e0 100644 --- a/_third_party/golang.org/x/sys/windows/svc/security.go +++ b/_third_party/golang.org/x/sys/windows/svc/security.go @@ -9,7 +9,7 @@ package svc import ( "unsafe" - "golang.org/x/sys/windows" + "bosun.org/_third_party/golang.org/x/sys/windows" ) func allocSid(subAuth0 uint32) (*windows.SID, error) { diff --git a/_third_party/golang.org/x/sys/windows/svc/service.go b/_third_party/golang.org/x/sys/windows/svc/service.go index 9864f7a72f..6699fe82f7 100644 --- a/_third_party/golang.org/x/sys/windows/svc/service.go +++ b/_third_party/golang.org/x/sys/windows/svc/service.go @@ -14,7 +14,7 @@ import ( "syscall" "unsafe" - "golang.org/x/sys/windows" + "bosun.org/_third_party/golang.org/x/sys/windows" ) // State describes service execution state (Stopped, Running and so on). 
diff --git a/_third_party/golang.org/x/sys/windows/svc/svc_test.go b/_third_party/golang.org/x/sys/windows/svc/svc_test.go index 764da54a54..070cd9f9a3 100644 --- a/_third_party/golang.org/x/sys/windows/svc/svc_test.go +++ b/_third_party/golang.org/x/sys/windows/svc/svc_test.go @@ -14,8 +14,8 @@ import ( "testing" "time" - "golang.org/x/sys/windows/svc" - "golang.org/x/sys/windows/svc/mgr" + "bosun.org/_third_party/golang.org/x/sys/windows/svc" + "bosun.org/_third_party/golang.org/x/sys/windows/svc/mgr" ) func getState(t *testing.T, s *mgr.Service) svc.State { diff --git a/_third_party/golang.org/x/sys/windows/syscall_test.go b/_third_party/golang.org/x/sys/windows/syscall_test.go index 62588b91bb..86d2e4823a 100644 --- a/_third_party/golang.org/x/sys/windows/syscall_test.go +++ b/_third_party/golang.org/x/sys/windows/syscall_test.go @@ -9,7 +9,7 @@ package windows_test import ( "testing" - "golang.org/x/sys/windows" + "bosun.org/_third_party/golang.org/x/sys/windows" ) func testSetGetenv(t *testing.T, key, value string) { diff --git a/_third_party/golang.org/x/sys/windows/syscall_windows_test.go b/_third_party/golang.org/x/sys/windows/syscall_windows_test.go index 0f73c11ba4..2adaa86efb 100644 --- a/_third_party/golang.org/x/sys/windows/syscall_windows_test.go +++ b/_third_party/golang.org/x/sys/windows/syscall_windows_test.go @@ -12,7 +12,7 @@ import ( "testing" "unsafe" - "golang.org/x/sys/windows" + "bosun.org/_third_party/golang.org/x/sys/windows" ) func TestWin32finddata(t *testing.T) { diff --git a/_third_party/gopkg.in/yaml.v1/decode_test.go b/_third_party/gopkg.in/yaml.v1/decode_test.go index ef3d37fb30..e649dd84d6 100644 --- a/_third_party/gopkg.in/yaml.v1/decode_test.go +++ b/_third_party/gopkg.in/yaml.v1/decode_test.go @@ -1,8 +1,8 @@ package yaml_test import ( - . "gopkg.in/check.v1" - "gopkg.in/yaml.v1" + . "bosun.org/_third_party/gopkg.in/check.v1" + "bosun.org/_third_party/gopkg.in/yaml.v1" "math" "reflect" "strings" diff --git a/_third_party/gopkg.in/yaml.v1/encode_test.go b/_third_party/gopkg.in/yaml.v1/encode_test.go index c9febc22a4..4d25f48ee5 100644 --- a/_third_party/gopkg.in/yaml.v1/encode_test.go +++ b/_third_party/gopkg.in/yaml.v1/encode_test.go @@ -7,8 +7,8 @@ import ( "strings" "time" - . "gopkg.in/check.v1" - "gopkg.in/yaml.v1" + . "bosun.org/_third_party/gopkg.in/check.v1" + "bosun.org/_third_party/gopkg.in/yaml.v1" ) var marshalIntTest = 123 diff --git a/_third_party/gopkg.in/yaml.v1/suite_test.go b/_third_party/gopkg.in/yaml.v1/suite_test.go index c5cf1ed4f6..3be8e1fe86 100644 --- a/_third_party/gopkg.in/yaml.v1/suite_test.go +++ b/_third_party/gopkg.in/yaml.v1/suite_test.go @@ -1,7 +1,7 @@ package yaml_test import ( - . "gopkg.in/check.v1" + . 
"bosun.org/_third_party/gopkg.in/check.v1" "testing" ) diff --git a/cmd/bosun/expr/influx.go b/cmd/bosun/expr/influx.go index bc5e5e4af7..ff2d9661e0 100644 --- a/cmd/bosun/expr/influx.go +++ b/cmd/bosun/expr/influx.go @@ -9,6 +9,7 @@ import ( "bosun.org/_third_party/github.com/MiniProfiler/go/miniprofiler" "bosun.org/_third_party/github.com/influxdb/influxdb/client" "bosun.org/_third_party/github.com/influxdb/influxdb/influxql" + "bosun.org/_third_party/github.com/influxdb/influxdb/models" "bosun.org/cmd/bosun/expr/parse" "bosun.org/opentsdb" ) @@ -185,7 +186,7 @@ func influxQueryDuration(now time.Time, query, start, end, groupByInterval strin return s.String(), nil } -func timeInfluxRequest(e *State, T miniprofiler.Timer, db, query, startDuration, endDuration, groupByInterval string) (s []influxql.Row, err error) { +func timeInfluxRequest(e *State, T miniprofiler.Timer, db, query, startDuration, endDuration, groupByInterval string) (s []models.Row, err error) { q, err := influxQueryDuration(e.now, query, startDuration, endDuration, groupByInterval) if err != nil { return nil, err @@ -215,7 +216,7 @@ func timeInfluxRequest(e *State, T miniprofiler.Timer, db, query, startDuration, var val interface{} var ok bool val, err = e.cache.Get(q, getFn) - if s, ok = val.([]influxql.Row); !ok { + if s, ok = val.([]models.Row); !ok { err = fmt.Errorf("influx: did not get a valid result from InfluxDB") } })