diff --git a/go.mod b/go.mod index 0846edb0d0..ebb70b77b9 100644 --- a/go.mod +++ b/go.mod @@ -57,7 +57,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/mna/pigeon v1.3.0 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 - github.com/nats-io/nats-server/v2 v2.10.25 + github.com/nats-io/nats-server/v2 v2.10.26 github.com/nats-io/nats.go v1.39.1 github.com/oklog/run v1.1.0 github.com/olekukonko/tablewriter v0.0.5 @@ -65,7 +65,7 @@ require ( github.com/onsi/ginkgo/v2 v2.22.2 github.com/onsi/gomega v1.36.2 github.com/open-policy-agent/opa v1.1.0 - github.com/opencloud-eu/reva/v2 v2.27.3-0.20250227091157-0f6d58900e83 + github.com/opencloud-eu/reva/v2 v2.27.3-0.20250228155248-34dee069adce github.com/orcaman/concurrent-map v1.0.0 github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea github.com/pkg/errors v0.9.1 @@ -98,7 +98,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 go.opentelemetry.io/otel/sdk v1.34.0 go.opentelemetry.io/otel/trace v1.34.0 - golang.org/x/crypto v0.33.0 + golang.org/x/crypto v0.34.0 golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c golang.org/x/image v0.24.0 golang.org/x/net v0.35.0 @@ -200,7 +200,7 @@ require ( github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-redis/redis/v8 v8.11.5 // indirect github.com/go-resty/resty/v2 v2.7.0 // indirect - github.com/go-sql-driver/mysql v1.8.1 // indirect + github.com/go-sql-driver/mysql v1.9.0 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/go-test/deep v1.1.0 // indirect @@ -238,7 +238,7 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/juliangruber/go-intersect v1.1.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/klauspost/cpuid/v2 v2.2.9 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/libregraph/oidc-go v1.1.0 // indirect @@ -264,7 +264,7 @@ require ( github.com/mschoch/smat v0.2.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nats-io/jwt/v2 v2.7.3 // indirect - github.com/nats-io/nkeys v0.4.9 // indirect + github.com/nats-io/nkeys v0.4.10 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect @@ -323,7 +323,7 @@ require ( go.uber.org/zap v1.23.0 // indirect golang.org/x/mod v0.22.0 // indirect golang.org/x/sys v0.30.0 // indirect - golang.org/x/time v0.9.0 // indirect + golang.org/x/time v0.10.0 // indirect golang.org/x/tools v0.28.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect diff --git a/go.sum b/go.sum index 6e12be9673..e6ebfc4127 100644 --- a/go.sum +++ b/go.sum @@ -419,8 +419,8 @@ github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8= github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= -github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= -github.com/go-sql-driver/mysql 
v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-sql-driver/mysql v1.9.0 h1:Y0zIbQXhQKmQgTp44Y1dp3wTXcn804QoTptLZT1vtvo= +github.com/go-sql-driver/mysql v1.9.0/go.mod h1:pDetrLJeA3oMujJuvXc8RJoasr589B6A9fwzD3QMrqw= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= @@ -684,8 +684,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= @@ -825,12 +825,12 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/namedotcom/go v0.0.0-20180403034216-08470befbe04/go.mod h1:5sN+Lt1CaY4wsPvgQH/jsuJi4XO2ssZbdsIizr4CVC8= github.com/nats-io/jwt/v2 v2.7.3 h1:6bNPK+FXgBeAqdj4cYQ0F8ViHRbi7woQLq4W29nUAzE= github.com/nats-io/jwt/v2 v2.7.3/go.mod h1:GvkcbHhKquj3pkioy5put1wvPxs78UlZ7D/pY+BgZk4= -github.com/nats-io/nats-server/v2 v2.10.25 h1:J0GWLDDXo5HId7ti/lTmBfs+lzhmu8RPkoKl0eSCqwc= -github.com/nats-io/nats-server/v2 v2.10.25/go.mod h1:/YYYQO7cuoOBt+A7/8cVjuhWTaTUEAlZbJT+3sMAfFU= +github.com/nats-io/nats-server/v2 v2.10.26 h1:2i3rAsn4x5/2eOt2NEmuI/iSb8zfHpIUI7yiaOWbo2c= +github.com/nats-io/nats-server/v2 v2.10.26/go.mod h1:SGzoWGU8wUVnMr/HJhEMv4R8U4f7hF4zDygmRxpNsvg= github.com/nats-io/nats.go v1.39.1 h1:oTkfKBmz7W047vRxV762M67ZdXeOtUgvbBaNoQ+3PPk= github.com/nats-io/nats.go v1.39.1/go.mod h1:MgRb8oOdigA6cYpEPhXJuRVH6UE/V4jblJ2jQ27IXYM= -github.com/nats-io/nkeys v0.4.9 h1:qe9Faq2Gxwi6RZnZMXfmGMZkg3afLLOtrU+gDZJ35b0= -github.com/nats-io/nkeys v0.4.9/go.mod h1:jcMqs+FLG+W5YO36OX6wFIFcmpdAns+w1Wm6D3I/evE= +github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc= +github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= @@ -863,8 +863,8 @@ github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/open-policy-agent/opa v1.1.0 h1:HMz2evdEMTyNqtdLjmu3Vyx06BmhNYAx67Yz3Ll9q2s= github.com/open-policy-agent/opa v1.1.0/go.mod h1:T1pASQ1/vwfTa+e2fYcfpLCvWgYtqtiUv+IuA/dLPQs= -github.com/opencloud-eu/reva/v2 
v2.27.3-0.20250227091157-0f6d58900e83 h1:oaOoTDZboll2njzvnEhF4/K76IhrJA9wLr7Ii6WcPGY= -github.com/opencloud-eu/reva/v2 v2.27.3-0.20250227091157-0f6d58900e83/go.mod h1:BQdl4BybewOQRtKtmM57qg05IsWXETCItuHPsnYvhZg= +github.com/opencloud-eu/reva/v2 v2.27.3-0.20250228155248-34dee069adce h1:Ovd0LG1qpsPUXLqE/XY03qfymsqe5OpWlHU3QCSMmTY= +github.com/opencloud-eu/reva/v2 v2.27.3-0.20250228155248-34dee069adce/go.mod h1:Zi6h/WupAKzY/umvteGJCY3q4GOvTyrlm4JZfiuHeds= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= @@ -1224,8 +1224,8 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= -golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/crypto v0.34.0 h1:+/C6tk6rf/+t5DhUketUbD1aNGqiSX3j15Z6xuIDlBA= +golang.org/x/crypto v0.34.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1477,8 +1477,8 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= -golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= +golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS index 4021b96cc0..123b5dc50e 100644 --- a/vendor/github.com/go-sql-driver/mysql/AUTHORS +++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS @@ -20,7 +20,10 @@ Andrew Reid Animesh Ray Arne Hormann Ariel Mashraki +Artur Melanchyk Asta Xie +B Lamarche +Bes Dollma Brian Hendriks Bulat Gaifullin Caine Jette @@ -33,6 +36,7 @@ Daniel Montoya Daniel Nichter Daniël van Eeden Dave Protasowski +Dirkjan Bussink DisposaBoy Egor Smolyakov Erwan Martin @@ -50,6 +54,7 @@ ICHINOSE Shogo Ilia Cimpoes INADA Naoki Jacek Szwec +Jakub Adamus James Harr Janek Vedock Jason Ng @@ -60,6 +65,7 @@ Jennifer Purevsuren Jerome Meyer Jiajia Zhong Jian Zhen 
+Joe Mann Joshua Prunier Julien Lefevre Julien Schmidt @@ -80,6 +86,7 @@ Lunny Xiao Luke Scott Maciej Zimnoch Michael Woolnough +Nao Yokotsuka Nathanial Murphy Nicola Peduzzi Oliver Bone @@ -89,6 +96,7 @@ Paul Bonser Paulius Lozys Peter Schultz Phil Porada +Minh Quang Rebecca Chin Reed Allman Richard Wilkes @@ -139,4 +147,5 @@ PingCAP Inc. Pivotal Inc. Shattered Silicon Ltd. Stripe Inc. +ThousandEyes Zendesk Inc. diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md index 0c9bd9b10a..d8c3aac1e4 100644 --- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md +++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -1,3 +1,28 @@ +# Changelog + +## v1.9.0 (2025-02-18) + +### Major Changes + +- Implement zlib compression. (#1487) +- Supported Go version is updated to Go 1.21+. (#1639) +- Add support for VECTOR type introduced in MySQL 9.0. (#1609) +- Config object can have custom dial function. (#1527) + +### Bugfixes + +- Fix auth errors when username/password are too long. (#1625) +- Check if MySQL supports CLIENT_CONNECT_ATTRS before sending client attributes. (#1640) +- Fix auth switch request handling. (#1666) + +### Other changes + +- Add "filename:line" prefix to log in go-mysql. Custom loggers now show it. (#1589) +- Improve error handling. It reduces the "busy buffer" errors. (#1595, #1601, #1641) +- Use `strconv.Atoi` to parse max_allowed_packet. (#1661) +- `rejectReadOnly` option now handles ER_READ_ONLY_MODE (1290) error too. (#1660) + + ## Version 1.8.1 (2024-03-26) Bugfixes: diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md index 4968cb0606..da4593ccf8 100644 --- a/vendor/github.com/go-sql-driver/mysql/README.md +++ b/vendor/github.com/go-sql-driver/mysql/README.md @@ -38,11 +38,12 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac * Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support * Optional `time.Time` parsing * Optional placeholder interpolation + * Supports zlib compression. ## Requirements -* Go 1.19 or higher. We aim to support the 3 latest versions of Go. -* MySQL (5.7+) and MariaDB (10.3+) are supported. +* Go 1.21 or higher. We aim to support the 3 latest versions of Go. +* MySQL (5.7+) and MariaDB (10.5+) are supported. * [TiDB](https://github.com/pingcap/tidb) is supported by PingCAP. * Do not ask questions about TiDB in our issue tracker or forum. * [Document](https://docs.pingcap.com/tidb/v6.1/dev-guide-sample-application-golang) @@ -267,6 +268,16 @@ SELECT u.id FROM users as u will return `u.id` instead of just `id` if `columnsWithAlias=true`. +##### `compress` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +Toggles zlib compression. false by default. + ##### `interpolateParams` ``` @@ -519,6 +530,9 @@ This driver supports the [`ColumnType` interface](https://golang.org/pkg/databas Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts. See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details. +> [!IMPORTANT] +> The `QueryContext`, `ExecContext`, etc. variants provided by `database/sql` will cause the connection to be closed if the provided context is cancelled or timed out before the result is received by the driver. + ### `LOAD DATA LOCAL INFILE` support For this feature you need direct access to the package. 
Therefore you must change the import path (no `_`): diff --git a/vendor/github.com/go-sql-driver/mysql/atomic_bool.go b/vendor/github.com/go-sql-driver/mysql/atomic_bool.go deleted file mode 100644 index 1b7e19f3e3..0000000000 --- a/vendor/github.com/go-sql-driver/mysql/atomic_bool.go +++ /dev/null @@ -1,19 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package. -// -// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. -//go:build go1.19 -// +build go1.19 - -package mysql - -import "sync/atomic" - -/****************************************************************************** -* Sync utils * -******************************************************************************/ - -type atomicBool = atomic.Bool diff --git a/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go b/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go deleted file mode 100644 index 2e9a7f0b61..0000000000 --- a/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go +++ /dev/null @@ -1,47 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package. -// -// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. -//go:build !go1.19 -// +build !go1.19 - -package mysql - -import "sync/atomic" - -/****************************************************************************** -* Sync utils * -******************************************************************************/ - -// atomicBool is an implementation of atomic.Bool for older version of Go. -// it is a wrapper around uint32 for usage as a boolean value with -// atomic access. -type atomicBool struct { - _ noCopy - value uint32 -} - -// Load returns whether the current boolean value is true -func (ab *atomicBool) Load() bool { - return atomic.LoadUint32(&ab.value) > 0 -} - -// Store sets the value of the bool regardless of the previous value -func (ab *atomicBool) Store(value bool) { - if value { - atomic.StoreUint32(&ab.value, 1) - } else { - atomic.StoreUint32(&ab.value, 0) - } -} - -// Swap sets the value of the bool and returns the old value. -func (ab *atomicBool) Swap(value bool) bool { - if value { - return atomic.SwapUint32(&ab.value, 1) > 0 - } - return atomic.SwapUint32(&ab.value, 0) > 0 -} diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go index 0774c5c8c2..a653243159 100644 --- a/vendor/github.com/go-sql-driver/mysql/buffer.go +++ b/vendor/github.com/go-sql-driver/mysql/buffer.go @@ -10,54 +10,42 @@ package mysql import ( "io" - "net" - "time" ) const defaultBufSize = 4096 const maxCachedBufSize = 256 * 1024 +// readerFunc is a function that compatible with io.Reader. +// We use this function type instead of io.Reader because we want to +// just pass mc.readWithTimeout. +type readerFunc func([]byte) (int, error) + // A buffer which is used for both reading and writing. // This is possible since communication on each connection is synchronous. // In other words, we can't write and read simultaneously on the same connection. 
// The buffer is similar to bufio.Reader / Writer but zero-copy-ish // Also highly optimized for this particular use case. -// This buffer is backed by two byte slices in a double-buffering scheme type buffer struct { - buf []byte // buf is a byte buffer who's length and capacity are equal. - nc net.Conn - idx int - length int - timeout time.Duration - dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer - flipcnt uint // flipccnt is the current buffer counter for double-buffering + buf []byte // read buffer. + cachedBuf []byte // buffer that will be reused. len(cachedBuf) <= maxCachedBufSize. } // newBuffer allocates and returns a new buffer. -func newBuffer(nc net.Conn) buffer { - fg := make([]byte, defaultBufSize) +func newBuffer() buffer { return buffer{ - buf: fg, - nc: nc, - dbuf: [2][]byte{fg, nil}, + cachedBuf: make([]byte, defaultBufSize), } } -// flip replaces the active buffer with the background buffer -// this is a delayed flip that simply increases the buffer counter; -// the actual flip will be performed the next time we call `buffer.fill` -func (b *buffer) flip() { - b.flipcnt += 1 +// busy returns true if the read buffer is not empty. +func (b *buffer) busy() bool { + return len(b.buf) > 0 } -// fill reads into the buffer until at least _need_ bytes are in it -func (b *buffer) fill(need int) error { - n := b.length - // fill data into its double-buffering target: if we've called - // flip on this buffer, we'll be copying to the background buffer, - // and then filling it with network data; otherwise we'll just move - // the contents of the current buffer to the front before filling it - dest := b.dbuf[b.flipcnt&1] +// fill reads into the read buffer until at least _need_ bytes are in it. +func (b *buffer) fill(need int, r readerFunc) error { + // we'll move the contents of the current buffer to dest before filling it. + dest := b.cachedBuf // grow buffer if necessary to fit the whole packet. if need > len(dest) { @@ -67,64 +55,48 @@ func (b *buffer) fill(need int) error { // if the allocated buffer is not too large, move it to backing storage // to prevent extra allocations on applications that perform large reads if len(dest) <= maxCachedBufSize { - b.dbuf[b.flipcnt&1] = dest + b.cachedBuf = dest } } - // if we're filling the fg buffer, move the existing data to the start of it. - // if we're filling the bg buffer, copy over the data - if n > 0 { - copy(dest[:n], b.buf[b.idx:]) - } - - b.buf = dest - b.idx = 0 + // move the existing data to the start of the buffer. + n := len(b.buf) + copy(dest[:n], b.buf) for { - if b.timeout > 0 { - if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil { - return err - } - } - - nn, err := b.nc.Read(b.buf[n:]) + nn, err := r(dest[n:]) n += nn - switch err { - case nil: - if n < need { - continue - } - b.length = n - return nil - - case io.EOF: - if n >= need { - b.length = n - return nil - } - return io.ErrUnexpectedEOF - - default: - return err + if err == nil && n < need { + continue } + + b.buf = dest[:n] + + if err == io.EOF { + if n < need { + err = io.ErrUnexpectedEOF + } else { + err = nil + } + } + return err } } // returns next N bytes from buffer. 
// The returned slice is only guaranteed to be valid until the next read -func (b *buffer) readNext(need int) ([]byte, error) { - if b.length < need { +func (b *buffer) readNext(need int, r readerFunc) ([]byte, error) { + if len(b.buf) < need { // refill - if err := b.fill(need); err != nil { + if err := b.fill(need, r); err != nil { return nil, err } } - offset := b.idx - b.idx += need - b.length -= need - return b.buf[offset:b.idx], nil + data := b.buf[:need] + b.buf = b.buf[need:] + return data, nil } // takeBuffer returns a buffer with the requested size. @@ -132,18 +104,18 @@ func (b *buffer) readNext(need int) ([]byte, error) { // Otherwise a bigger buffer is made. // Only one buffer (total) can be used at a time. func (b *buffer) takeBuffer(length int) ([]byte, error) { - if b.length > 0 { + if b.busy() { return nil, ErrBusyBuffer } // test (cheap) general case first - if length <= cap(b.buf) { - return b.buf[:length], nil + if length <= len(b.cachedBuf) { + return b.cachedBuf[:length], nil } - if length < maxPacketSize { - b.buf = make([]byte, length) - return b.buf, nil + if length < maxCachedBufSize { + b.cachedBuf = make([]byte, length) + return b.cachedBuf, nil } // buffer is larger than we want to store. @@ -154,10 +126,10 @@ func (b *buffer) takeBuffer(length int) ([]byte, error) { // known to be smaller than defaultBufSize. // Only one buffer (total) can be used at a time. func (b *buffer) takeSmallBuffer(length int) ([]byte, error) { - if b.length > 0 { + if b.busy() { return nil, ErrBusyBuffer } - return b.buf[:length], nil + return b.cachedBuf[:length], nil } // takeCompleteBuffer returns the complete existing buffer. @@ -165,18 +137,15 @@ func (b *buffer) takeSmallBuffer(length int) ([]byte, error) { // cap and len of the returned buffer will be equal. // Only one buffer (total) can be used at a time. func (b *buffer) takeCompleteBuffer() ([]byte, error) { - if b.length > 0 { + if b.busy() { return nil, ErrBusyBuffer } - return b.buf, nil + return b.cachedBuf, nil } // store stores buf, an updated buffer, if its suitable to do so. -func (b *buffer) store(buf []byte) error { - if b.length > 0 { - return ErrBusyBuffer - } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) { - b.buf = buf[:cap(buf)] +func (b *buffer) store(buf []byte) { + if cap(buf) <= maxCachedBufSize && cap(buf) > cap(b.cachedBuf) { + b.cachedBuf = buf[:cap(buf)] } - return nil } diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go index 1cdf97b67e..29b1aa43f2 100644 --- a/vendor/github.com/go-sql-driver/mysql/collations.go +++ b/vendor/github.com/go-sql-driver/mysql/collations.go @@ -8,7 +8,7 @@ package mysql -const defaultCollation = "utf8mb4_general_ci" +const defaultCollationID = 45 // utf8mb4_general_ci const binaryCollationID = 63 // A list of available collations mapped to the internal ID. diff --git a/vendor/github.com/go-sql-driver/mysql/compress.go b/vendor/github.com/go-sql-driver/mysql/compress.go new file mode 100644 index 0000000000..fa42772acb --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/compress.go @@ -0,0 +1,214 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2024 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "compress/zlib" + "fmt" + "io" + "sync" +) + +var ( + zrPool *sync.Pool // Do not use directly. Use zDecompress() instead. + zwPool *sync.Pool // Do not use directly. Use zCompress() instead. +) + +func init() { + zrPool = &sync.Pool{ + New: func() any { return nil }, + } + zwPool = &sync.Pool{ + New: func() any { + zw, err := zlib.NewWriterLevel(new(bytes.Buffer), 2) + if err != nil { + panic(err) // compress/zlib return non-nil error only if level is invalid + } + return zw + }, + } +} + +func zDecompress(src []byte, dst *bytes.Buffer) (int, error) { + br := bytes.NewReader(src) + var zr io.ReadCloser + var err error + + if a := zrPool.Get(); a == nil { + if zr, err = zlib.NewReader(br); err != nil { + return 0, err + } + } else { + zr = a.(io.ReadCloser) + if err := zr.(zlib.Resetter).Reset(br, nil); err != nil { + return 0, err + } + } + + n, _ := dst.ReadFrom(zr) // ignore err because zr.Close() will return it again. + err = zr.Close() // zr.Close() may return chuecksum error. + zrPool.Put(zr) + return int(n), err +} + +func zCompress(src []byte, dst io.Writer) error { + zw := zwPool.Get().(*zlib.Writer) + zw.Reset(dst) + if _, err := zw.Write(src); err != nil { + return err + } + err := zw.Close() + zwPool.Put(zw) + return err +} + +type compIO struct { + mc *mysqlConn + buff bytes.Buffer +} + +func newCompIO(mc *mysqlConn) *compIO { + return &compIO{ + mc: mc, + } +} + +func (c *compIO) reset() { + c.buff.Reset() +} + +func (c *compIO) readNext(need int, r readerFunc) ([]byte, error) { + for c.buff.Len() < need { + if err := c.readCompressedPacket(r); err != nil { + return nil, err + } + } + data := c.buff.Next(need) + return data[:need:need], nil // prevent caller writes into c.buff +} + +func (c *compIO) readCompressedPacket(r readerFunc) error { + header, err := c.mc.buf.readNext(7, r) // size of compressed header + if err != nil { + return err + } + _ = header[6] // bounds check hint to compiler; guaranteed by readNext + + // compressed header structure + comprLength := getUint24(header[0:3]) + compressionSequence := uint8(header[3]) + uncompressedLength := getUint24(header[4:7]) + if debug { + fmt.Printf("uncompress cmplen=%v uncomplen=%v pkt_cmp_seq=%v expected_cmp_seq=%v\n", + comprLength, uncompressedLength, compressionSequence, c.mc.sequence) + } + // Do not return ErrPktSync here. + // Server may return error packet (e.g. 1153 Got a packet bigger than 'max_allowed_packet' bytes) + // before receiving all packets from client. In this case, seqnr is younger than expected. + // NOTE: Both of mariadbclient and mysqlclient do not check seqnr. Only server checks it. 
+ if debug && compressionSequence != c.mc.sequence { + fmt.Printf("WARN: unexpected cmpress seq nr: expected %v, got %v", + c.mc.sequence, compressionSequence) + } + c.mc.sequence = compressionSequence + 1 + c.mc.compressSequence = c.mc.sequence + + comprData, err := c.mc.buf.readNext(comprLength, r) + if err != nil { + return err + } + + // if payload is uncompressed, its length will be specified as zero, and its + // true length is contained in comprLength + if uncompressedLength == 0 { + c.buff.Write(comprData) + return nil + } + + // use existing capacity in bytesBuf if possible + c.buff.Grow(uncompressedLength) + nread, err := zDecompress(comprData, &c.buff) + if err != nil { + return err + } + if nread != uncompressedLength { + return fmt.Errorf("invalid compressed packet: uncompressed length in header is %d, actual %d", + uncompressedLength, nread) + } + return nil +} + +const minCompressLength = 150 +const maxPayloadLen = maxPacketSize - 4 + +// writePackets sends one or some packets with compression. +// Use this instead of mc.netConn.Write() when mc.compress is true. +func (c *compIO) writePackets(packets []byte) (int, error) { + totalBytes := len(packets) + blankHeader := make([]byte, 7) + buf := &c.buff + + for len(packets) > 0 { + payloadLen := min(maxPayloadLen, len(packets)) + payload := packets[:payloadLen] + uncompressedLen := payloadLen + + buf.Reset() + buf.Write(blankHeader) // Buffer.Write() never returns error + + // If payload is less than minCompressLength, don't compress. + if uncompressedLen < minCompressLength { + buf.Write(payload) + uncompressedLen = 0 + } else { + err := zCompress(payload, buf) + if debug && err != nil { + fmt.Printf("zCompress error: %v", err) + } + // do not compress if compressed data is larger than uncompressed data + // I intentionally miss 7 byte header in the buf; zCompress must compress more than 7 bytes. + if err != nil || buf.Len() >= uncompressedLen { + buf.Reset() + buf.Write(blankHeader) + buf.Write(payload) + uncompressedLen = 0 + } + } + + if n, err := c.writeCompressedPacket(buf.Bytes(), uncompressedLen); err != nil { + // To allow returning ErrBadConn when sending really 0 bytes, we sum + // up compressed bytes that is returned by underlying Write(). + return totalBytes - len(packets) + n, err + } + packets = packets[payloadLen:] + } + + return totalBytes, nil +} + +// writeCompressedPacket writes a compressed packet with header. +// data should start with 7 size space for header followed by payload. 
+func (c *compIO) writeCompressedPacket(data []byte, uncompressedLen int) (int, error) { + mc := c.mc + comprLength := len(data) - 7 + if debug { + fmt.Printf( + "writeCompressedPacket: comprLength=%v, uncompressedLen=%v, seq=%v", + comprLength, uncompressedLen, mc.compressSequence) + } + + // compression header + putUint24(data[0:3], comprLength) + data[3] = mc.compressSequence + putUint24(data[4:7], uncompressedLen) + + mc.compressSequence++ + return mc.writeWithTimeout(data) +} diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go index eff978d930..3e455a3ff0 100644 --- a/vendor/github.com/go-sql-driver/mysql/connection.go +++ b/vendor/github.com/go-sql-driver/mysql/connection.go @@ -13,10 +13,13 @@ import ( "database/sql" "database/sql/driver" "encoding/json" + "fmt" "io" "net" + "runtime" "strconv" "strings" + "sync/atomic" "time" ) @@ -25,15 +28,17 @@ type mysqlConn struct { netConn net.Conn rawConn net.Conn // underlying connection when netConn is TLS connection. result mysqlResult // managed by clearResult() and handleOkPacket(). + compIO *compIO cfg *Config connector *connector maxAllowedPacket int maxWriteSize int - writeTimeout time.Duration flags clientFlag status statusFlag sequence uint8 + compressSequence uint8 parseTime bool + compress bool // for context support (Go 1.8+) watching bool @@ -41,71 +46,92 @@ type mysqlConn struct { closech chan struct{} finished chan<- struct{} canceled atomicError // set non-nil if conn is canceled - closed atomicBool // set when conn is closed, before closech is closed + closed atomic.Bool // set when conn is closed, before closech is closed } // Helper function to call per-connection logger. func (mc *mysqlConn) log(v ...any) { + _, filename, lineno, ok := runtime.Caller(1) + if ok { + pos := strings.LastIndexByte(filename, '/') + if pos != -1 { + filename = filename[pos+1:] + } + prefix := fmt.Sprintf("%s:%d ", filename, lineno) + v = append([]any{prefix}, v...) + } + mc.cfg.Logger.Print(v...) } +func (mc *mysqlConn) readWithTimeout(b []byte) (int, error) { + to := mc.cfg.ReadTimeout + if to > 0 { + if err := mc.netConn.SetReadDeadline(time.Now().Add(to)); err != nil { + return 0, err + } + } + return mc.netConn.Read(b) +} + +func (mc *mysqlConn) writeWithTimeout(b []byte) (int, error) { + to := mc.cfg.WriteTimeout + if to > 0 { + if err := mc.netConn.SetWriteDeadline(time.Now().Add(to)); err != nil { + return 0, err + } + } + return mc.netConn.Write(b) +} + +func (mc *mysqlConn) resetSequence() { + mc.sequence = 0 + mc.compressSequence = 0 +} + +// syncSequence must be called when finished writing some packet and before start reading. +func (mc *mysqlConn) syncSequence() { + // Syncs compressionSequence to sequence. + // This is not documented but done in `net_flush()` in MySQL and MariaDB. 
+ // https://github.com/mariadb-corporation/mariadb-connector-c/blob/8228164f850b12353da24df1b93a1e53cc5e85e9/libmariadb/ma_net.c#L170-L171 + // https://github.com/mysql/mysql-server/blob/824e2b4064053f7daf17d7f3f84b7a3ed92e5fb4/sql-common/net_serv.cc#L293 + if mc.compress { + mc.sequence = mc.compressSequence + mc.compIO.reset() + } +} + // Handles parameters set in DSN after the connection is established func (mc *mysqlConn) handleParams() (err error) { var cmdSet strings.Builder for param, val := range mc.cfg.Params { - switch param { - // Charset: character_set_connection, character_set_client, character_set_results - case "charset": - charsets := strings.Split(val, ",") - for _, cs := range charsets { - // ignore errors here - a charset may not exist - if mc.cfg.Collation != "" { - err = mc.exec("SET NAMES " + cs + " COLLATE " + mc.cfg.Collation) - } else { - err = mc.exec("SET NAMES " + cs) - } - if err == nil { - break - } - } - if err != nil { - return - } - - // Other system vars accumulated in a single SET command - default: - if cmdSet.Len() == 0 { - // Heuristic: 29 chars for each other key=value to reduce reallocations - cmdSet.Grow(4 + len(param) + 3 + len(val) + 30*(len(mc.cfg.Params)-1)) - cmdSet.WriteString("SET ") - } else { - cmdSet.WriteString(", ") - } - cmdSet.WriteString(param) - cmdSet.WriteString(" = ") - cmdSet.WriteString(val) + if cmdSet.Len() == 0 { + // Heuristic: 29 chars for each other key=value to reduce reallocations + cmdSet.Grow(4 + len(param) + 3 + len(val) + 30*(len(mc.cfg.Params)-1)) + cmdSet.WriteString("SET ") + } else { + cmdSet.WriteString(", ") } + cmdSet.WriteString(param) + cmdSet.WriteString(" = ") + cmdSet.WriteString(val) } if cmdSet.Len() > 0 { err = mc.exec(cmdSet.String()) - if err != nil { - return - } } return } +// markBadConn replaces errBadConnNoWrite with driver.ErrBadConn. +// This function is used to return driver.ErrBadConn only when safe to retry. func (mc *mysqlConn) markBadConn(err error) error { - if mc == nil { - return err + if err == errBadConnNoWrite { + return driver.ErrBadConn } - if err != errBadConnNoWrite { - return err - } - return driver.ErrBadConn + return err } func (mc *mysqlConn) Begin() (driver.Tx, error) { @@ -114,7 +140,6 @@ func (mc *mysqlConn) Begin() (driver.Tx, error) { func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) { if mc.closed.Load() { - mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } var q string @@ -135,10 +160,14 @@ func (mc *mysqlConn) Close() (err error) { if !mc.closed.Load() { err = mc.writeCommandPacket(comQuit) } + mc.close() + return +} +// close closes the network connection and clear results without sending COM_QUIT. +func (mc *mysqlConn) close() { mc.cleanup() mc.clearResult() - return } // Closes the network connection and unsets internal variables. Do not call this @@ -157,7 +186,7 @@ func (mc *mysqlConn) cleanup() { return } if err := conn.Close(); err != nil { - mc.log(err) + mc.log("closing connection:", err) } // This function can be called from multiple goroutines. // So we can not mc.clearResult() here. @@ -176,7 +205,6 @@ func (mc *mysqlConn) error() error { func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { if mc.closed.Load() { - mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } // Send command @@ -217,8 +245,10 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin buf, err := mc.buf.takeCompleteBuffer() if err != nil { // can not take the buffer. 
Something must be wrong with the connection - mc.log(err) - return "", ErrInvalidConn + mc.cleanup() + // interpolateParams would be called before sending any query. + // So its safe to retry. + return "", driver.ErrBadConn } buf = buf[:0] argPos := 0 @@ -309,7 +339,6 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { if mc.closed.Load() { - mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } if len(args) != 0 { @@ -369,7 +398,6 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) handleOk := mc.clearResult() if mc.closed.Load() { - mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } if len(args) != 0 { @@ -385,31 +413,34 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) } // Send command err := mc.writeCommandPacketStr(comQuery, query) - if err == nil { - // Read Result - var resLen int - resLen, err = handleOk.readResultSetHeaderPacket() - if err == nil { - rows := new(textRows) - rows.mc = mc + if err != nil { + return nil, mc.markBadConn(err) + } - if resLen == 0 { - rows.rs.done = true + // Read Result + var resLen int + resLen, err = handleOk.readResultSetHeaderPacket() + if err != nil { + return nil, err + } - switch err := rows.NextResultSet(); err { - case nil, io.EOF: - return rows, nil - default: - return nil, err - } - } + rows := new(textRows) + rows.mc = mc - // Columns - rows.rs.columns, err = mc.readColumns(resLen) - return rows, err + if resLen == 0 { + rows.rs.done = true + + switch err := rows.NextResultSet(); err { + case nil, io.EOF: + return rows, nil + default: + return nil, err } } - return nil, mc.markBadConn(err) + + // Columns + rows.rs.columns, err = mc.readColumns(resLen) + return rows, err } // Gets the value of the given MySQL System Variable @@ -443,7 +474,7 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { return nil, err } -// finish is called when the query has canceled. +// cancel is called when the query has canceled. func (mc *mysqlConn) cancel(err error) { mc.canceled.Set(err) mc.cleanup() @@ -464,7 +495,6 @@ func (mc *mysqlConn) finish() { // Ping implements driver.Pinger interface func (mc *mysqlConn) Ping(ctx context.Context) (err error) { if mc.closed.Load() { - mc.log(ErrInvalidConn) return driver.ErrBadConn } @@ -650,7 +680,7 @@ func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) { // ResetSession implements driver.SessionResetter. 
// (From Go 1.10) func (mc *mysqlConn) ResetSession(ctx context.Context) error { - if mc.closed.Load() { + if mc.closed.Load() || mc.buf.busy() { return driver.ErrBadConn } @@ -684,5 +714,8 @@ func (mc *mysqlConn) ResetSession(ctx context.Context) error { // IsValid implements driver.Validator interface // (From Go 1.15) func (mc *mysqlConn) IsValid() bool { - return !mc.closed.Load() + return !mc.closed.Load() && !mc.buf.busy() } + +var _ driver.SessionResetter = &mysqlConn{} +var _ driver.Validator = &mysqlConn{} diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go index b670775969..bc1d46afc6 100644 --- a/vendor/github.com/go-sql-driver/mysql/connector.go +++ b/vendor/github.com/go-sql-driver/mysql/connector.go @@ -11,6 +11,7 @@ package mysql import ( "context" "database/sql/driver" + "fmt" "net" "os" "strconv" @@ -87,20 +88,25 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { mc.parseTime = mc.cfg.ParseTime // Connect to Server - dialsLock.RLock() - dial, ok := dials[mc.cfg.Net] - dialsLock.RUnlock() - if ok { - dctx := ctx - if mc.cfg.Timeout > 0 { - var cancel context.CancelFunc - dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout) - defer cancel() - } - mc.netConn, err = dial(dctx, mc.cfg.Addr) + dctx := ctx + if mc.cfg.Timeout > 0 { + var cancel context.CancelFunc + dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout) + defer cancel() + } + + if c.cfg.DialFunc != nil { + mc.netConn, err = c.cfg.DialFunc(dctx, mc.cfg.Net, mc.cfg.Addr) } else { - nd := net.Dialer{Timeout: mc.cfg.Timeout} - mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr) + dialsLock.RLock() + dial, ok := dials[mc.cfg.Net] + dialsLock.RUnlock() + if ok { + mc.netConn, err = dial(dctx, mc.cfg.Addr) + } else { + nd := net.Dialer{} + mc.netConn, err = nd.DialContext(dctx, mc.cfg.Net, mc.cfg.Addr) + } } if err != nil { return nil, err @@ -122,11 +128,7 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { } defer mc.finish() - mc.buf = newBuffer(mc.netConn) - - // Set I/O timeouts - mc.buf.timeout = mc.cfg.ReadTimeout - mc.writeTimeout = mc.cfg.WriteTimeout + mc.buf = newBuffer() // Reading Handshake Initialization Packet authData, plugin, err := mc.readHandshakePacket() @@ -165,6 +167,10 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { return nil, err } + if mc.cfg.compress && mc.flags&clientCompress == clientCompress { + mc.compress = true + mc.compIO = newCompIO(mc) + } if mc.cfg.MaxAllowedPacket > 0 { mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket } else { @@ -174,12 +180,36 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { mc.Close() return nil, err } - mc.maxAllowedPacket = stringToInt(maxap) - 1 + n, err := strconv.Atoi(string(maxap)) + if err != nil { + mc.Close() + return nil, fmt.Errorf("invalid max_allowed_packet value (%q): %w", maxap, err) + } + mc.maxAllowedPacket = n - 1 } if mc.maxAllowedPacket < maxPacketSize { mc.maxWriteSize = mc.maxAllowedPacket } + // Charset: character_set_connection, character_set_client, character_set_results + if len(mc.cfg.charsets) > 0 { + for _, cs := range mc.cfg.charsets { + // ignore errors here - a charset may not exist + if mc.cfg.Collation != "" { + err = mc.exec("SET NAMES " + cs + " COLLATE " + mc.cfg.Collation) + } else { + err = mc.exec("SET NAMES " + cs) + } + if err == nil { + break + } + } + if err != nil { + mc.Close() + return nil, err + } + } + // Handle DSN Params err = 
mc.handleParams() if err != nil { diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go index 22526e0317..4aadcd6422 100644 --- a/vendor/github.com/go-sql-driver/mysql/const.go +++ b/vendor/github.com/go-sql-driver/mysql/const.go @@ -11,6 +11,8 @@ package mysql import "runtime" const ( + debug = false // for debugging. Set true only in development. + defaultAuthPlugin = "mysql_native_password" defaultMaxAllowedPacket = 64 << 20 // 64 MiB. See https://github.com/go-sql-driver/mysql/issues/1355 minProtocolVersion = 10 @@ -125,7 +127,10 @@ const ( fieldTypeBit ) const ( - fieldTypeJSON fieldType = iota + 0xf5 + fieldTypeVector fieldType = iota + 0xf2 + fieldTypeInvalid + fieldTypeBool + fieldTypeJSON fieldTypeNewDecimal fieldTypeEnum fieldTypeSet diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go index 65f5a0242f..9b560b7357 100644 --- a/vendor/github.com/go-sql-driver/mysql/dsn.go +++ b/vendor/github.com/go-sql-driver/mysql/dsn.go @@ -44,7 +44,8 @@ type Config struct { DBName string // Database name Params map[string]string // Connection parameters ConnectionAttributes string // Connection Attributes, comma-delimited string of user-defined "key:value" pairs - Collation string // Connection collation + charsets []string // Connection charset. When set, this will be set in SET NAMES query + Collation string // Connection collation. When set, this will be set in SET NAMES COLLATE query Loc *time.Location // Location for time.Time values MaxAllowedPacket int // Max packet size allowed ServerPubKey string // Server public key name @@ -54,6 +55,8 @@ type Config struct { ReadTimeout time.Duration // I/O read timeout WriteTimeout time.Duration // I/O write timeout Logger Logger // Logger + // DialFunc specifies the dial function for creating connections + DialFunc func(ctx context.Context, network, addr string) (net.Conn, error) // boolean fields @@ -70,7 +73,10 @@ type Config struct { ParseTime bool // Parse time values to time.Time RejectReadOnly bool // Reject read-only connections - // unexported fields. new options should be come here + // unexported fields. new options should be come here. + // boolean first. alphabetical order. + + compress bool // Enable zlib compression beforeConnect func(context.Context, *Config) error // Invoked before a connection is established pubKey *rsa.PublicKey // Server public key @@ -90,7 +96,6 @@ func NewConfig() *Config { AllowNativePasswords: true, CheckConnLiveness: true, } - return cfg } @@ -122,6 +127,14 @@ func BeforeConnect(fn func(context.Context, *Config) error) Option { } } +// EnableCompress sets the compression mode. 
+func EnableCompression(yes bool) Option { + return func(cfg *Config) error { + cfg.compress = yes + return nil + } +} + func (cfg *Config) Clone() *Config { cp := *cfg if cp.TLS != nil { @@ -282,6 +295,10 @@ func (cfg *Config) FormatDSN() string { writeDSNParam(&buf, &hasParam, "clientFoundRows", "true") } + if charsets := cfg.charsets; len(charsets) > 0 { + writeDSNParam(&buf, &hasParam, "charset", strings.Join(charsets, ",")) + } + if col := cfg.Collation; col != "" { writeDSNParam(&buf, &hasParam, "collation", col) } @@ -290,6 +307,10 @@ func (cfg *Config) FormatDSN() string { writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true") } + if cfg.compress { + writeDSNParam(&buf, &hasParam, "compress", "true") + } + if cfg.InterpolateParams { writeDSNParam(&buf, &hasParam, "interpolateParams", "true") } @@ -501,6 +522,10 @@ func parseDSNParams(cfg *Config, params string) (err error) { return errors.New("invalid bool value: " + value) } + // charset + case "charset": + cfg.charsets = strings.Split(value, ",") + // Collation case "collation": cfg.Collation = value @@ -514,7 +539,11 @@ func parseDSNParams(cfg *Config, params string) (err error) { // Compression case "compress": - return errors.New("compression not implemented yet") + var isBool bool + cfg.compress, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } // Enable client side placeholder substitution case "interpolateParams": diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go index a7ef88909a..584617b118 100644 --- a/vendor/github.com/go-sql-driver/mysql/errors.go +++ b/vendor/github.com/go-sql-driver/mysql/errors.go @@ -32,12 +32,12 @@ var ( // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet. // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn - // to trigger a resend. + // to trigger a resend. Use mc.markBadConn(err) to do this. // See https://github.com/go-sql-driver/mysql/pull/302 errBadConnNoWrite = errors.New("bad connection") ) -var defaultLogger = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile)) +var defaultLogger = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime)) // Logger is used to log critical error messages. 
type Logger interface { diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go index 2860842474..be5cd809a6 100644 --- a/vendor/github.com/go-sql-driver/mysql/fields.go +++ b/vendor/github.com/go-sql-driver/mysql/fields.go @@ -112,6 +112,8 @@ func (mf *mysqlField) typeDatabaseName() string { return "VARCHAR" case fieldTypeYear: return "YEAR" + case fieldTypeVector: + return "VECTOR" default: return "" } @@ -198,7 +200,7 @@ func (mf *mysqlField) scanType() reflect.Type { return scanTypeNullFloat case fieldTypeBit, fieldTypeTinyBLOB, fieldTypeMediumBLOB, fieldTypeLongBLOB, - fieldTypeBLOB, fieldTypeVarString, fieldTypeString, fieldTypeGeometry: + fieldTypeBLOB, fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeVector: if mf.charSet == binaryCollationID { return scanTypeBytes } diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go index 0c8af9f110..453ae091e5 100644 --- a/vendor/github.com/go-sql-driver/mysql/infile.go +++ b/vendor/github.com/go-sql-driver/mysql/infile.go @@ -17,7 +17,7 @@ import ( ) var ( - fileRegister map[string]bool + fileRegister map[string]struct{} fileRegisterLock sync.RWMutex readerRegister map[string]func() io.Reader readerRegisterLock sync.RWMutex @@ -37,10 +37,10 @@ func RegisterLocalFile(filePath string) { fileRegisterLock.Lock() // lazy map init if fileRegister == nil { - fileRegister = make(map[string]bool) + fileRegister = make(map[string]struct{}) } - fileRegister[strings.Trim(filePath, `"`)] = true + fileRegister[strings.Trim(filePath, `"`)] = struct{}{} fileRegisterLock.Unlock() } @@ -95,7 +95,6 @@ const defaultPacketSize = 16 * 1024 // 16KB is small enough for disk readahead a func (mc *okHandler) handleInFileRequest(name string) (err error) { var rdr io.Reader - var data []byte packetSize := defaultPacketSize if mc.maxWriteSize < packetSize { packetSize = mc.maxWriteSize @@ -124,9 +123,9 @@ func (mc *okHandler) handleInFileRequest(name string) (err error) { } else { // File name = strings.Trim(name, `"`) fileRegisterLock.RLock() - fr := fileRegister[name] + _, exists := fileRegister[name] fileRegisterLock.RUnlock() - if mc.cfg.AllowAllFiles || fr { + if mc.cfg.AllowAllFiles || exists { var file *os.File var fi os.FileInfo @@ -147,9 +146,11 @@ func (mc *okHandler) handleInFileRequest(name string) (err error) { } // send content packets + var data []byte + // if packetSize == 0, the Reader contains no data if err == nil && packetSize > 0 { - data := make([]byte, 4+packetSize) + data = make([]byte, 4+packetSize) var n int for err == nil { n, err = rdr.Read(data[4:]) @@ -171,6 +172,7 @@ func (mc *okHandler) handleInFileRequest(name string) (err error) { if ioErr := mc.conn().writePacket(data[:4]); ioErr != nil { return ioErr } + mc.conn().syncSequence() // read OK packet if err == nil { diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go index 90a34728b5..4b83621600 100644 --- a/vendor/github.com/go-sql-driver/mysql/packets.go +++ b/vendor/github.com/go-sql-driver/mysql/packets.go @@ -21,36 +21,56 @@ import ( "time" ) -// Packets documentation: -// http://dev.mysql.com/doc/internals/en/client-server-protocol.html +// MySQL client/server protocol documentations. 
+// https://dev.mysql.com/doc/dev/mysql-server/latest/PAGE_PROTOCOL.html +// https://mariadb.com/kb/en/clientserver-protocol/ // Read packet to buffer 'data' func (mc *mysqlConn) readPacket() ([]byte, error) { var prevData []byte + invalidSequence := false + + readNext := mc.buf.readNext + if mc.compress { + readNext = mc.compIO.readNext + } + for { // read packet header - data, err := mc.buf.readNext(4) + data, err := readNext(4, mc.readWithTimeout) if err != nil { + mc.close() if cerr := mc.canceled.Value(); cerr != nil { return nil, cerr } mc.log(err) - mc.Close() return nil, ErrInvalidConn } // packet length [24 bit] - pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) + pktLen := getUint24(data[:3]) + seq := data[3] - // check packet sync [8 bit] - if data[3] != mc.sequence { - mc.Close() - if data[3] > mc.sequence { - return nil, ErrPktSyncMul + if mc.compress { + // MySQL and MariaDB doesn't check packet nr in compressed packet. + if debug && seq != mc.compressSequence { + fmt.Printf("[debug] mismatched compression sequence nr: expected: %v, got %v", + mc.compressSequence, seq) } - return nil, ErrPktSync + mc.compressSequence = seq + 1 + } else { + // check packet sync [8 bit] + if seq != mc.sequence { + mc.log(fmt.Sprintf("[warn] unexpected seq nr: expected %v, got %v", mc.sequence, seq)) + // For large packets, we stop reading as soon as sync error. + if len(prevData) > 0 { + mc.close() + return nil, ErrPktSyncMul + } + invalidSequence = true + } + mc.sequence++ } - mc.sequence++ // packets with length 0 terminate a previous packet which is a // multiple of (2^24)-1 bytes long @@ -58,32 +78,38 @@ func (mc *mysqlConn) readPacket() ([]byte, error) { // there was no previous packet if prevData == nil { mc.log(ErrMalformPkt) - mc.Close() + mc.close() return nil, ErrInvalidConn } - return prevData, nil } // read packet body [pktLen bytes] - data, err = mc.buf.readNext(pktLen) + data, err = readNext(pktLen, mc.readWithTimeout) if err != nil { + mc.close() if cerr := mc.canceled.Value(); cerr != nil { return nil, cerr } mc.log(err) - mc.Close() return nil, ErrInvalidConn } // return data if this was the last packet if pktLen < maxPacketSize { // zero allocations for non-split packets - if prevData == nil { - return data, nil + if prevData != nil { + data = append(prevData, data...) } - - return append(prevData, data...), nil + if invalidSequence { + mc.close() + // return sync error only for regular packet. + // error packets may have wrong sequence number. + if data[0] != iERR { + return nil, ErrPktSync + } + } + return data, nil } prevData = append(prevData, data...) 
@@ -93,60 +119,52 @@ func (mc *mysqlConn) readPacket() ([]byte, error) { // Write packet buffer 'data' func (mc *mysqlConn) writePacket(data []byte) error { pktLen := len(data) - 4 - if pktLen > mc.maxAllowedPacket { return ErrPktTooLarge } + writeFunc := mc.writeWithTimeout + if mc.compress { + writeFunc = mc.compIO.writePackets + } + for { - var size int - if pktLen >= maxPacketSize { - data[0] = 0xff - data[1] = 0xff - data[2] = 0xff - size = maxPacketSize - } else { - data[0] = byte(pktLen) - data[1] = byte(pktLen >> 8) - data[2] = byte(pktLen >> 16) - size = pktLen - } + size := min(maxPacketSize, pktLen) + putUint24(data[:3], size) data[3] = mc.sequence // Write packet - if mc.writeTimeout > 0 { - if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil { - return err - } + if debug { + fmt.Printf("writePacket: size=%v seq=%v", size, mc.sequence) } - n, err := mc.netConn.Write(data[:4+size]) - if err == nil && n == 4+size { - mc.sequence++ - if size != maxPacketSize { - return nil - } - pktLen -= size - data = data[size:] - continue - } - - // Handle error - if err == nil { // n != len(data) + n, err := writeFunc(data[:4+size]) + if err != nil { mc.cleanup() - mc.log(ErrMalformPkt) - } else { if cerr := mc.canceled.Value(); cerr != nil { return cerr } if n == 0 && pktLen == len(data)-4 { // only for the first loop iteration when nothing was written yet + mc.log(err) return errBadConnNoWrite + } else { + return err } - mc.cleanup() - mc.log(err) } - return ErrInvalidConn + if n != 4+size { + // io.Writer(b) must return a non-nil error if it cannot write len(b) bytes. + // The io.ErrShortWrite error is used to indicate that this rule has not been followed. + mc.cleanup() + return io.ErrShortWrite + } + + mc.sequence++ + if size != maxPacketSize { + return nil + } + pktLen -= size + data = data[size:] } } @@ -159,11 +177,6 @@ func (mc *mysqlConn) writePacket(data []byte) error { func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) { data, err = mc.readPacket() if err != nil { - // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since - // in connection initialization we don't risk retrying non-idempotent actions. 
- if err == ErrInvalidConn { - return nil, "", driver.ErrBadConn - } return } @@ -207,10 +220,13 @@ func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err erro if len(data) > pos { // character set [1 byte] // status flags [2 bytes] + pos += 3 // capability flags (upper 2 bytes) [2 bytes] + mc.flags |= clientFlag(binary.LittleEndian.Uint16(data[pos:pos+2])) << 16 + pos += 2 // length of auth-plugin-data [1 byte] // reserved (all [00]) [10 bytes] - pos += 1 + 2 + 2 + 1 + 10 + pos += 11 // second part of the password cipher [minimum 13 bytes], // where len=MAX(13, length of auth-plugin-data - 8) @@ -258,13 +274,17 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string clientLocalFiles | clientPluginAuth | clientMultiResults | - clientConnectAttrs | + mc.flags&clientConnectAttrs | mc.flags&clientLongFlag + sendConnectAttrs := mc.flags&clientConnectAttrs != 0 + if mc.cfg.ClientFoundRows { clientFlags |= clientFoundRows } - + if mc.cfg.compress && mc.flags&clientCompress == clientCompress { + clientFlags |= clientCompress + } // To enable TLS / SSL if mc.cfg.TLS != nil { clientFlags |= clientSSL @@ -293,43 +313,37 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string } // encode length of the connection attributes - var connAttrsLEIBuf [9]byte - connAttrsLen := len(mc.connector.encodedAttributes) - connAttrsLEI := appendLengthEncodedInteger(connAttrsLEIBuf[:0], uint64(connAttrsLen)) - pktLen += len(connAttrsLEI) + len(mc.connector.encodedAttributes) + var connAttrsLEI []byte + if sendConnectAttrs { + var connAttrsLEIBuf [9]byte + connAttrsLen := len(mc.connector.encodedAttributes) + connAttrsLEI = appendLengthEncodedInteger(connAttrsLEIBuf[:0], uint64(connAttrsLen)) + pktLen += len(connAttrsLEI) + len(mc.connector.encodedAttributes) + } // Calculate packet length and get buffer with that size data, err := mc.buf.takeBuffer(pktLen + 4) if err != nil { - // cannot take the buffer. Something must be wrong with the connection - mc.log(err) - return errBadConnNoWrite + mc.cleanup() + return err } // ClientFlags [32 bit] - data[4] = byte(clientFlags) - data[5] = byte(clientFlags >> 8) - data[6] = byte(clientFlags >> 16) - data[7] = byte(clientFlags >> 24) + binary.LittleEndian.PutUint32(data[4:], uint32(clientFlags)) // MaxPacketSize [32 bit] (none) - data[8] = 0x00 - data[9] = 0x00 - data[10] = 0x00 - data[11] = 0x00 + binary.LittleEndian.PutUint32(data[8:], 0) // Collation ID [1 byte] - cname := mc.cfg.Collation - if cname == "" { - cname = defaultCollation - } - var found bool - data[12], found = collations[cname] - if !found { - // Note possibility for false negatives: - // could be triggered although the collation is valid if the - // collations map does not contain entries the server supports. - return fmt.Errorf("unknown collation: %q", cname) + data[12] = defaultCollationID + if cname := mc.cfg.Collation; cname != "" { + colID, ok := collations[cname] + if ok { + data[12] = colID + } else if len(mc.cfg.charsets) > 0 { + // When cfg.charset is set, the collation is set by `SET NAMES COLLATE `. 
+ return fmt.Errorf("unknown collation: %q", cname) + } } // Filler [23 bytes] (all 0x00) @@ -349,10 +363,12 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string // Switch to TLS tlsConn := tls.Client(mc.netConn, mc.cfg.TLS) if err := tlsConn.Handshake(); err != nil { + if cerr := mc.canceled.Value(); cerr != nil { + return cerr + } return err } mc.netConn = tlsConn - mc.buf.nc = tlsConn } // User [null terminated string] @@ -378,8 +394,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string pos++ // Connection Attributes - pos += copy(data[pos:], connAttrsLEI) - pos += copy(data[pos:], []byte(mc.connector.encodedAttributes)) + if sendConnectAttrs { + pos += copy(data[pos:], connAttrsLEI) + pos += copy(data[pos:], []byte(mc.connector.encodedAttributes)) + } // Send Auth packet return mc.writePacket(data[:pos]) @@ -388,11 +406,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error { pktLen := 4 + len(authData) - data, err := mc.buf.takeSmallBuffer(pktLen) + data, err := mc.buf.takeBuffer(pktLen) if err != nil { - // cannot take the buffer. Something must be wrong with the connection - mc.log(err) - return errBadConnNoWrite + mc.cleanup() + return err } // Add the auth data [EOF] @@ -406,13 +423,11 @@ func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error { func (mc *mysqlConn) writeCommandPacket(command byte) error { // Reset Packet Sequence - mc.sequence = 0 + mc.resetSequence() data, err := mc.buf.takeSmallBuffer(4 + 1) if err != nil { - // cannot take the buffer. Something must be wrong with the connection - mc.log(err) - return errBadConnNoWrite + return err } // Add command byte @@ -424,14 +439,12 @@ func (mc *mysqlConn) writeCommandPacket(command byte) error { func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { // Reset Packet Sequence - mc.sequence = 0 + mc.resetSequence() pktLen := 1 + len(arg) data, err := mc.buf.takeBuffer(pktLen + 4) if err != nil { - // cannot take the buffer. Something must be wrong with the connection - mc.log(err) - return errBadConnNoWrite + return err } // Add command byte @@ -441,28 +454,25 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { copy(data[5:], arg) // Send CMD packet - return mc.writePacket(data) + err = mc.writePacket(data) + mc.syncSequence() + return err } func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { // Reset Packet Sequence - mc.sequence = 0 + mc.resetSequence() data, err := mc.buf.takeSmallBuffer(4 + 1 + 4) if err != nil { - // cannot take the buffer. 
Something must be wrong with the connection - mc.log(err) - return errBadConnNoWrite + return err } // Add command byte data[4] = command // Add arg [32 bit] - data[5] = byte(arg) - data[6] = byte(arg >> 8) - data[7] = byte(arg >> 16) - data[8] = byte(arg >> 24) + binary.LittleEndian.PutUint32(data[5:], arg) // Send CMD packet return mc.writePacket(data) @@ -500,6 +510,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) { } plugin := string(data[1:pluginEndIndex]) authData := data[pluginEndIndex+1:] + if len(authData) > 0 && authData[len(authData)-1] == 0 { + authData = authData[:len(authData)-1] + } return authData, plugin, nil default: // Error otherwise @@ -521,32 +534,33 @@ func (mc *okHandler) readResultOK() error { } // Result Set Header Packet -// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset +// https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_com_query_response.html func (mc *okHandler) readResultSetHeaderPacket() (int, error) { // handleOkPacket replaces both values; other cases leave the values unchanged. mc.result.affectedRows = append(mc.result.affectedRows, 0) mc.result.insertIds = append(mc.result.insertIds, 0) data, err := mc.conn().readPacket() - if err == nil { - switch data[0] { - - case iOK: - return 0, mc.handleOkPacket(data) - - case iERR: - return 0, mc.conn().handleErrorPacket(data) - - case iLocalInFile: - return 0, mc.handleInFileRequest(string(data[1:])) - } - - // column count - num, _, _ := readLengthEncodedInteger(data) - // ignore remaining data in the packet. see #1478. - return int(num), nil + if err != nil { + return 0, err } - return 0, err + + switch data[0] { + case iOK: + return 0, mc.handleOkPacket(data) + + case iERR: + return 0, mc.conn().handleErrorPacket(data) + + case iLocalInFile: + return 0, mc.handleInFileRequest(string(data[1:])) + } + + // column count + // https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_com_query_response_text_resultset.html + num, _, _ := readLengthEncodedInteger(data) + // ignore remaining data in the packet. see #1478. + return int(num), nil } // Error Packet @@ -563,7 +577,8 @@ func (mc *mysqlConn) handleErrorPacket(data []byte) error { // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover) - if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly { + // 1836: ER_READ_ONLY_MODE + if (errno == 1792 || errno == 1290 || errno == 1836) && mc.cfg.RejectReadOnly { // Oops; we are connected to a read-only connection, and won't be able // to issue any write statements. 
Since RejectReadOnly is configured, // we throw away this connection hoping this one would have write @@ -930,19 +945,15 @@ func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { pktLen = dataOffset + argLen } - stmt.mc.sequence = 0 + stmt.mc.resetSequence() // Add command byte [1 byte] data[4] = comStmtSendLongData // Add stmtID [32 bit] - data[5] = byte(stmt.id) - data[6] = byte(stmt.id >> 8) - data[7] = byte(stmt.id >> 16) - data[8] = byte(stmt.id >> 24) + binary.LittleEndian.PutUint32(data[5:], stmt.id) // Add paramID [16 bit] - data[9] = byte(paramID) - data[10] = byte(paramID >> 8) + binary.LittleEndian.PutUint16(data[9:], uint16(paramID)) // Send CMD packet err := stmt.mc.writePacket(data[:4+pktLen]) @@ -951,11 +962,10 @@ func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { continue } return err - } // Reset Packet Sequence - stmt.mc.sequence = 0 + stmt.mc.resetSequence() return nil } @@ -980,7 +990,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { } // Reset packet-sequence - mc.sequence = 0 + mc.resetSequence() var data []byte var err error @@ -992,28 +1002,20 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { // In this case the len(data) == cap(data) which is used to optimise the flow below. } if err != nil { - // cannot take the buffer. Something must be wrong with the connection - mc.log(err) - return errBadConnNoWrite + return err } // command [1 byte] data[4] = comStmtExecute // statement_id [4 bytes] - data[5] = byte(stmt.id) - data[6] = byte(stmt.id >> 8) - data[7] = byte(stmt.id >> 16) - data[8] = byte(stmt.id >> 24) + binary.LittleEndian.PutUint32(data[5:], stmt.id) // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] data[9] = 0x00 // iteration_count (uint32(1)) [4 bytes] - data[10] = 0x01 - data[11] = 0x00 - data[12] = 0x00 - data[13] = 0x00 + binary.LittleEndian.PutUint32(data[10:], 1) if len(args) > 0 { pos := minPktLen @@ -1067,50 +1069,17 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { case int64: paramTypes[i+i] = byte(fieldTypeLongLong) paramTypes[i+i+1] = 0x00 - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - uint64(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(uint64(v))..., - ) - } + paramValues = binary.LittleEndian.AppendUint64(paramValues, uint64(v)) case uint64: paramTypes[i+i] = byte(fieldTypeLongLong) paramTypes[i+i+1] = 0x80 // type is unsigned - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - uint64(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(uint64(v))..., - ) - } + paramValues = binary.LittleEndian.AppendUint64(paramValues, uint64(v)) case float64: paramTypes[i+i] = byte(fieldTypeDouble) paramTypes[i+i+1] = 0x00 - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - math.Float64bits(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(math.Float64bits(v))..., - ) - } + paramValues = binary.LittleEndian.AppendUint64(paramValues, math.Float64bits(v)) case bool: paramTypes[i+i] = byte(fieldTypeTiny) @@ -1191,17 +1160,16 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { // In that case we must build the 
data packet with the new values buffer if valuesCap != cap(paramValues) { data = append(data[:pos], paramValues...) - if err = mc.buf.store(data); err != nil { - mc.log(err) - return errBadConnNoWrite - } + mc.buf.store(data) // allow this buffer to be reused } pos += len(paramValues) data = data[:pos] } - return mc.writePacket(data) + err = mc.writePacket(data) + mc.syncSequence() + return err } // For each remaining resultset in the stream, discards its rows and updates @@ -1325,7 +1293,8 @@ func (rows *binaryRows) readRow(dest []driver.Value) error { case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, - fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON: + fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON, + fieldTypeVector: var isNull bool var n int dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go index 81fa6062cd..df98417b8d 100644 --- a/vendor/github.com/go-sql-driver/mysql/rows.go +++ b/vendor/github.com/go-sql-driver/mysql/rows.go @@ -111,13 +111,6 @@ func (rows *mysqlRows) Close() (err error) { return err } - // flip the buffer for this connection if we need to drain it. - // note that for a successful query (i.e. one where rows.next() - // has been called until it returns false), `rows.mc` will be nil - // by the time the user calls `(*Rows).Close`, so we won't reach this - // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47 - mc.buf.flip() - // Remove unread packets from stream if !rows.rs.done { err = mc.readUntilEOF() diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go index 0436f2240d..35df854570 100644 --- a/vendor/github.com/go-sql-driver/mysql/statement.go +++ b/vendor/github.com/go-sql-driver/mysql/statement.go @@ -24,11 +24,12 @@ type mysqlStmt struct { func (stmt *mysqlStmt) Close() error { if stmt.mc == nil || stmt.mc.closed.Load() { - // driver.Stmt.Close can be called more than once, thus this function - // has to be idempotent. - // See also Issue #450 and golang/go#16019. - //errLog.Print(ErrInvalidConn) - return driver.ErrBadConn + // driver.Stmt.Close could be called more than once, thus this function + // had to be idempotent. See also Issue #450 and golang/go#16019. + // This bug has been fixed in Go 1.8. + // https://github.com/golang/go/commit/90b8a0ca2d0b565c7c7199ffcf77b15ea6b6db3a + // But we keep this function idempotent because it is safer. 
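// Illustrative sketch, not part of the vendored patch: the resultset hunk
// above reads the column count as a MySQL length-encoded integer, and the
// utils.go hunk further below rewrites the encoder using
// binary.LittleEndian.AppendUint16/AppendUint64. This standalone pair shows
// the format: values up to 250 are a single byte, larger values get a
// 0xfc/0xfd/0xfe prefix followed by 2, 3, or 8 little-endian bytes
// (the NULL marker 0xfb is omitted here).
package main

import (
	"encoding/binary"
	"fmt"
)

func appendLenEncInt(b []byte, n uint64) []byte {
	switch {
	case n <= 250:
		return append(b, byte(n))
	case n <= 0xffff:
		return binary.LittleEndian.AppendUint16(append(b, 0xfc), uint16(n))
	case n <= 0xffffff:
		return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
	default:
		return binary.LittleEndian.AppendUint64(append(b, 0xfe), n)
	}
}

func readLenEncInt(b []byte) (value uint64, size int) {
	switch b[0] {
	case 0xfc:
		return uint64(binary.LittleEndian.Uint16(b[1:])), 3
	case 0xfd:
		return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, 4
	case 0xfe:
		return binary.LittleEndian.Uint64(b[1:]), 9
	default: // 0-250: the first byte is the value itself
		return uint64(b[0]), 1
	}
}

func main() {
	for _, n := range []uint64{3, 251, 70000, 1 << 40} {
		enc := appendLenEncInt(nil, n)
		dec, size := readLenEncInt(enc)
		fmt.Printf("n=%d encoded=% x decoded=%d size=%d\n", n, enc, dec, size)
	}
}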
+ return nil } err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) @@ -51,7 +52,6 @@ func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) { func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { if stmt.mc.closed.Load() { - stmt.mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } // Send command @@ -95,7 +95,6 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) { if stmt.mc.closed.Load() { - stmt.mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } // Send command diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go index cda24fe744..8716c26c52 100644 --- a/vendor/github.com/go-sql-driver/mysql/utils.go +++ b/vendor/github.com/go-sql-driver/mysql/utils.go @@ -490,17 +490,16 @@ func formatBinaryTime(src []byte, length uint8) (driver.Value, error) { * Convert from and to bytes * ******************************************************************************/ -func uint64ToBytes(n uint64) []byte { - return []byte{ - byte(n), - byte(n >> 8), - byte(n >> 16), - byte(n >> 24), - byte(n >> 32), - byte(n >> 40), - byte(n >> 48), - byte(n >> 56), - } +// 24bit integer: used for packet headers. + +func putUint24(data []byte, n int) { + data[2] = byte(n >> 16) + data[1] = byte(n >> 8) + data[0] = byte(n) +} + +func getUint24(data []byte) int { + return int(data[2])<<16 | int(data[1])<<8 | int(data[0]) } func uint64ToString(n uint64) []byte { @@ -525,16 +524,6 @@ func uint64ToString(n uint64) []byte { return a[i:] } -// treats string value as unsigned integer representation -func stringToInt(b []byte) int { - val := 0 - for i := range b { - val *= 10 - val += int(b[i] - 0x30) - } - return val -} - // returns the string read as a bytes slice, whether the value is NULL, // the number of bytes read and an error, in case the string is longer than // the input slice @@ -586,18 +575,15 @@ func readLengthEncodedInteger(b []byte) (uint64, bool, int) { // 252: value of following 2 case 0xfc: - return uint64(b[1]) | uint64(b[2])<<8, false, 3 + return uint64(binary.LittleEndian.Uint16(b[1:])), false, 3 // 253: value of following 3 case 0xfd: - return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 + return uint64(getUint24(b[1:])), false, 4 // 254: value of following 8 case 0xfe: - return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | - uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | - uint64(b[7])<<48 | uint64(b[8])<<56, - false, 9 + return uint64(binary.LittleEndian.Uint64(b[1:])), false, 9 } // 0-250: value of first byte @@ -611,13 +597,14 @@ func appendLengthEncodedInteger(b []byte, n uint64) []byte { return append(b, byte(n)) case n <= 0xffff: - return append(b, 0xfc, byte(n), byte(n>>8)) + b = append(b, 0xfc) + return binary.LittleEndian.AppendUint16(b, uint16(n)) case n <= 0xffffff: return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) } - return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), - byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) + b = append(b, 0xfe) + return binary.LittleEndian.AppendUint64(b, n) } func appendLengthEncodedString(b []byte, s string) []byte { diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index de264c85a5..244ee19c4b 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -14,8 +14,34 @@ This package 
provides various compression algorithms. [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) [![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) +# package usage + +Use `go get github.com/klauspost/compress@latest` to add it to your project. + +This package will support the current Go version and 2 versions back. + +* Use the `nounsafe` tag to disable all use of the "unsafe" package. +* Use the `noasm` tag to disable all assembly across packages. + +Use the links above for more information on each. + # changelog +* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) + * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 + * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 + * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043 + * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045 + * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048 + * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 + * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 + +* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11) + * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017 + * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014 + * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011 + * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013 + * Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 @@ -65,9 +91,9 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Fix rare *CORRUPTION* output in "best" mode. 
See https://github.com/klauspost/compress/pull/876 * Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) - * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871 * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 - * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867 * Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 @@ -124,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp See changes to v1.15.x * Jan 21st, 2023 (v1.15.15) - * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739 * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 @@ -167,7 +193,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 - * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643 * July 13, 2022 (v1.15.8) @@ -209,7 +235,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 * huff0: Do not check max size when reading table. 
https://github.com/klauspost/compress/pull/586 - * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + * flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590 * May 11, 2022 (v1.15.4) @@ -236,12 +262,12 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) * Mar 3, 2022 (v1.15.0) - * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) - * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505) * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) - * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) - * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) - * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + * flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510) Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. @@ -258,7 +284,7 @@ While the release has been extensively tested, it is recommended to testing when * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 - * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) * Feb 17, 2022 (v1.14.3) * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) @@ -565,12 +591,14 @@ While the release has been extensively tested, it is recommended to testing when The packages are drop-in replacements for standard libraries. 
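// Illustrative sketch, not part of the vendored README: swapping the standard
// library import for the klauspost equivalent (listed in the table below) is
// the only change a caller needs, as in this gzip round-trip.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/gzip" // was: "compress/gzip"
)

func main() {
	var buf bytes.Buffer

	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("hello, drop-in gzip")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer zr.Close()

	plain, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain))
}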
Simply replace the import path to use them: -| old import | new import | Documentation -|--------------------|-----------------------------------------|--------------------| -| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) -| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) -| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) -| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) +Typical speed is about 2x of the standard library packages. + +| old import | new import | Documentation | +|------------------|---------------------------------------|-------------------------------------------------------------------------| +| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) | +| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) | +| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) | +| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) | * Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). @@ -625,84 +653,6 @@ This will only use up to 4KB in memory when the writer is idle. Compression is almost always worse than the fastest compression level and each write will allocate (a little) memory. -# Performance Update 2018 - -It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. - -The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. - -The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. - -The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. 
[`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). - - -## Overall differences. - -There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. - -The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library. - -This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression. - -There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. - -## Web Content - -This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. - -Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big. - -Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. - -## Object files - -This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. - -The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression. - -The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively. - -## Highly Compressible File - -This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. - -It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression. - -So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". - -## Medium-High Compressible - -This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams. - -We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. 
- -## Medium Compressible - -I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. - -The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. - - -## Un-compressible Content - -This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. - - -## Huffman only compression - -This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. - -This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). - -Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. - -The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). - -The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup. - -For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). - -This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. # Other packages diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index c8124b5c49..0e8b1630c0 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -6,8 +6,10 @@ package flate import ( - "encoding/binary" "fmt" + "math/bits" + + "github.com/klauspost/compress/internal/le" ) type fastEnc interface { @@ -58,11 +60,11 @@ const ( ) func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) + return le.Load32(b, i) } func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) + return le.Load64(b, i) } type tableEntry struct { @@ -134,8 +136,8 @@ func hashLen(u uint64, length, mls uint8) uint32 { // matchlen will return the match length between offsets and t in src. // The maximum length returned is maxMatchLength - 4. // It is assumed that s > t, that t >=0 and s < len(src). 
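// Illustrative sketch, not part of the vendored patch: the matchlen rewrite in
// the hunk that follows compares eight bytes at a time by XOR-ing two
// little-endian loads and, on the first non-zero result, counting trailing
// zero bits to locate the first differing byte. This standalone version uses
// encoding/binary in place of the internal le package.
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns the length of the common prefix of a and b,
// assuming len(a) <= len(b).
func matchLen(a, b []byte) int {
	n := 0
	for len(a) >= 8 {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			// The lowest set bit of diff marks the first mismatching byte.
			return n + bits.TrailingZeros64(diff)>>3
		}
		a, b = a[8:], b[8:]
		n += 8
	}
	for i := range a {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("compression!"), []byte("compressible"))) // 9
}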
-func (e *fastGen) matchlen(s, t int32, src []byte) int32 { - if debugDecode { +func (e *fastGen) matchlen(s, t int, src []byte) int32 { + if debugDeflate { if t >= s { panic(fmt.Sprint("t >=s:", t, s)) } @@ -149,18 +151,34 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) + s1 := min(s+maxMatchLength-4, len(src)) + left := s1 - s + n := int32(0) + for left >= 8 { + diff := le.Load64(src, s) ^ le.Load64(src, t) + if diff != 0 { + return n + int32(bits.TrailingZeros64(diff)>>3) + } + s += 8 + t += 8 + n += 8 + left -= 8 } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:s1], src[t:])) + a := src[s:s1] + b := src[t:] + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n } // matchlenLong will return the match length between offsets and t in src. // It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { +func (e *fastGen) matchlenLong(s, t int, src []byte) int32 { if debugDeflate { if t >= s { panic(fmt.Sprint("t >=s:", t, s)) @@ -176,7 +194,28 @@ func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { } } // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) + left := len(src) - s + n := int32(0) + for left >= 8 { + diff := le.Load64(src, s) ^ le.Load64(src, t) + if diff != 0 { + return n + int32(bits.TrailingZeros64(diff)>>3) + } + s += 8 + t += 8 + n += 8 + left -= 8 + } + + a := src[s:] + b := src[t:] + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n } // Reset the encoding table. diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index f70594c34e..afdc8c053a 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -5,10 +5,11 @@ package flate import ( - "encoding/binary" "fmt" "io" "math" + + "github.com/klauspost/compress/internal/le" ) const ( @@ -438,7 +439,7 @@ func (w *huffmanBitWriter) writeOutBits() { n := w.nbytes // We over-write, but faster... 
- binary.LittleEndian.PutUint64(w.bytes[n:], bits) + le.Store64(w.bytes[n:], bits) n += 6 if n >= bufferFlushSize { @@ -854,7 +855,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -882,7 +883,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -905,7 +906,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= uint64(extraLength) << (nbits & 63) nbits += extraLengthBits if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -931,7 +932,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -953,7 +954,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) nbits += uint8(offsetComb) if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -1107,7 +1108,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // We must have at least 48 bits free. if nbits >= 8 { n := nbits >> 3 - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) bits >>= (n * 8) & 63 nbits -= n * 8 nbytes += n @@ -1136,7 +1137,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Remaining... 
for _, t := range input { if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go index 703b9a89aa..c3581a3420 100644 --- a/vendor/github.com/klauspost/compress/flate/level1.go +++ b/vendor/github.com/klauspost/compress/flate/level1.go @@ -1,9 +1,9 @@ package flate import ( - "encoding/binary" "fmt" - "math/bits" + + "github.com/klauspost/compress/internal/le" ) // fastGen maintains the table for matches, @@ -77,6 +77,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { nextS := s var candidate tableEntry + var t int32 for { nextHash := hashLen(cv, tableBits, hashBytes) candidate = e.table[nextHash] @@ -88,9 +89,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { now := load6432(src, nextS) e.table[nextHash] = tableEntry{offset: s + e.cur} nextHash = hashLen(now, tableBits, hashBytes) - - offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + t = candidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } @@ -103,8 +103,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { now >>= 8 e.table[nextHash] = tableEntry{offset: s + e.cur} - offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + t = candidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } @@ -120,36 +120,10 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { // literal bytes prior to s. // Extend the 4-byte match as long as possible. - t := candidate.offset - e.cur - var l = int32(4) - if false { - l = e.matchlenLong(s+4, t+4, src) + 4 - } else { - // inlined: - a := src[s+4:] - b := src[t+4:] - for len(a) >= 8 { - if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { - l += int32(bits.TrailingZeros64(diff) >> 3) - break - } - l += 8 - a = a[8:] - b = b[8:] - } - if len(a) < 8 { - b = b[:len(a)] - for i := range a { - if a[i] != b[i] { - break - } - l++ - } - } - } + l := e.matchlenLong(int(s+4), int(t+4), src) + 4 // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + for t > 0 && s > nextEmit && le.Load8(src, t-1) == le.Load8(src, s-1) { s-- t-- l++ @@ -221,8 +195,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { candidate = e.table[currHash] e.table[currHash] = tableEntry{offset: o + 2} - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { + t = candidate.offset - e.cur + if s-t > maxMatchOffset || uint32(x) != load3232(src, t) { cv = x >> 8 s++ break diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go index 876dfbe305..c8d047f2d9 100644 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -126,7 +126,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible. 
t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 + l := e.matchlenLong(int(s+4), int(t+4), src) + 4 // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go index 7aa2b72a12..33f9fb1525 100644 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -135,7 +135,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible. // t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 + l := e.matchlenLong(int(s+4), int(t+4), src) + 4 // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go index 23c08b325c..88509e1973 100644 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -98,19 +98,19 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { e.bTable[nextHashL] = entry t = lCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // We got a long match. Use that. break } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Found a 4 match... lCandidate = e.bTable[hash7(next, tableBits)] // If the next long is a candidate, check if we should use that instead... - lOff := nextS - (lCandidate.offset - e.cur) - if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { + lOff := lCandidate.offset - e.cur + if nextS-lOff < maxMatchOffset && load3232(src, lOff) == uint32(next) { l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) if l2 > l1 { s = nextS @@ -127,7 +127,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { // them as literal bytes. // Extend the 4-byte match as long as possible. 
- l := e.matchlenLong(s+4, t+4, src) + 4 + l := e.matchlenLong(int(s+4), int(t+4), src) + 4 // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 1f61ec1829..6e5c21502f 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -111,16 +111,16 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { t = lCandidate.Cur.offset - e.cur if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + if uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) { + l = e.matchlen(int(s+4), int(t+4), src) + 4 + ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4 if ml1 > l { t = t2 l = ml1 @@ -130,7 +130,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { break } t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] @@ -140,9 +140,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 + l = e.matchlen(int(s+4), int(t+4), src) + 4 lCandidate = e.bTable[nextHashL] // Store the next match @@ -153,8 +153,8 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { // If the next long is a candidate, use that... t2 := lCandidate.Cur.offset - e.cur if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 + if load3232(src, t2) == uint32(next) { + ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4 if ml > l { t = t2 s = nextS @@ -164,8 +164,8 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { } // If the previous long is a candidate, use that... t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 + if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) { + ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4 if ml > l { t = t2 s = nextS @@ -185,9 +185,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { if l == 0 { // Extend the 4-byte match as long as possible. - l = e.matchlenLong(s+4, t+4, src) + 4 + l = e.matchlenLong(int(s+4), int(t+4), src) + 4 } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) + l += e.matchlenLong(int(s+l), int(t+l), src) } // Try to locate a better match by checking the end of best match... 
@@ -203,7 +203,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { s2 := s + skipBeginning off := s2 - t2 if t2 >= 0 && off < maxMatchOffset && off > 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { + if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l { t = t2 l = l2 s = s2 @@ -423,14 +423,14 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { t = lCandidate.Cur.offset - e.cur if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + if uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) { l = e.matchlen(s+4, t+4, src) + 4 ml1 := e.matchlen(s+4, t2+4, src) + 4 if ml1 > l { @@ -442,7 +442,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { break } t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] @@ -452,7 +452,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Found a 4 match... l = e.matchlen(s+4, t+4, src) + 4 lCandidate = e.bTable[nextHashL] @@ -465,7 +465,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { // If the next long is a candidate, use that... t2 := lCandidate.Cur.offset - e.cur if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + if load3232(src, t2) == uint32(next) { ml := e.matchlen(nextS+4, t2+4, src) + 4 if ml > l { t = t2 @@ -476,7 +476,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { } // If the previous long is a candidate, use that... t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) { ml := e.matchlen(nextS+4, t2+4, src) + 4 if ml > l { t = t2 diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go index f1e9d98fa5..96f5bb430a 100644 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -113,7 +113,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { t = lCandidate.Cur.offset - e.cur if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + if uint32(cv) == load3232(src, t) { // Long candidate matches at least 4 bytes. // Store the next match @@ -123,9 +123,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // Check the previous long candidate as well. 
t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) { + l = e.matchlen(int(s+4), int(t+4), src) + 4 + ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4 if ml1 > l { t = t2 l = ml1 @@ -136,7 +136,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { } // Current value did not match, but check if previous long value does. t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] @@ -146,9 +146,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 + l = e.matchlen(int(s+4), int(t+4), src) + 4 // Look up next long candidate (at nextS) lCandidate = e.bTable[nextHashL] @@ -162,7 +162,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { const repOff = 1 t2 := s - repeat + repOff if load3232(src, t2) == uint32(cv>>(8*repOff)) { - ml := e.matchlen(s+4+repOff, t2+4, src) + 4 + ml := e.matchlen(int(s+4+repOff), int(t2+4), src) + 4 if ml > l { t = t2 l = ml @@ -175,8 +175,8 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // If the next long is a candidate, use that... t2 = lCandidate.Cur.offset - e.cur if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 + if load3232(src, t2) == uint32(next) { + ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4 if ml > l { t = t2 s = nextS @@ -186,8 +186,8 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { } // If the previous long is a candidate, use that... t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 + if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) { + ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4 if ml > l { t = t2 s = nextS @@ -207,9 +207,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible. if l == 0 { - l = e.matchlenLong(s+4, t+4, src) + 4 + l = e.matchlenLong(int(s+4), int(t+4), src) + 4 } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) + l += e.matchlenLong(int(s+l), int(t+l), src) } // Try to locate a better match by checking the end-of-match... 
@@ -227,7 +227,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { off := s2 - t2 if off < maxMatchOffset { if off > 0 && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { + if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l { t = t2 l = l2 s = s2 @@ -237,7 +237,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { t2 = eLong.Prev.offset - e.cur - l + skipBeginning off := s2 - t2 if off > 0 && off < maxMatchOffset && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { + if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l { t = t2 l = l2 s = s2 diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go deleted file mode 100644 index 4bd3885841..0000000000 --- a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package flate - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) and len(a) > 0 -// -//go:noescape -func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s deleted file mode 100644 index 0782b86e3d..0000000000 --- a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s +++ /dev/null @@ -1,66 +0,0 @@ -// Copied from S2 implementation. - -//go:build !appengine && !noasm && gc && !noasm - -#include "textflag.h" - -// func matchLen(a []byte, b []byte) int -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX - - // matchLen - XORL SI, SI - CMPL DX, $0x08 - JB matchlen_match4_standalone - -matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - JZ matchlen_loop_standalone - -#ifdef GOAMD64_v3 - TZCNTQ BX, BX -#else - BSFQ BX, BX -#endif - SHRL $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end - -matchlen_loop_standalone: - LEAL -8(DX), DX - LEAL 8(SI), SI - CMPL DX, $0x08 - JAE matchlen_loopback_standalone - -matchlen_match4_standalone: - CMPL DX, $0x04 - JB matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - LEAL -4(DX), DX - LEAL 4(SI), SI - -matchlen_match2_standalone: - CMPL DX, $0x02 - JB matchlen_match1_standalone - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - LEAL -2(DX), DX - LEAL 2(SI), SI - -matchlen_match1_standalone: - CMPL DX, $0x01 - JB gen_match_len_end - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - INCL SI - -gen_match_len_end: - MOVQ SI, ret+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go index ad5cd814b9..6149384aaf 100644 --- a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go +++ b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go @@ -1,27 +1,29 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - // Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. package flate import ( - "encoding/binary" "math/bits" + + "github.com/klauspost/compress/internal/le" ) // matchLen returns the maximum common prefix length of a and b. 
// a must be the shortest of the two. func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + left := len(a) + for left >= 8 { + diff := le.Load64(a, n) ^ le.Load64(b, n) if diff != 0 { return n + bits.TrailingZeros64(diff)>>3 } n += 8 + left -= 8 } + a = a[n:] + b = b[n:] for i := range a { if a[i] != b[i] { break @@ -29,5 +31,4 @@ func matchLen(a, b []byte) (n int) { n++ } return n - } diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index f3d4139ef3..13b9b100db 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -4,6 +4,8 @@ import ( "io" "math" "sync" + + "github.com/klauspost/compress/internal/le" ) const ( @@ -152,18 +154,11 @@ func hashSL(u uint32) uint32 { } func load3216(b []byte, i int16) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + return le.Load32(b, i) } func load6416(b []byte, i int16) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return le.Load64(b, i) } func statelessEnc(dst *tokens, src []byte, startAt int16) { diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index e36d9742f9..bfc7a523de 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -6,10 +6,11 @@ package huff0 import ( - "encoding/binary" "errors" "fmt" "io" + + "github.com/klauspost/compress/internal/le" ) // bitReader reads a bitstream in reverse. @@ -46,7 +47,7 @@ func (b *bitReaderBytes) init(in []byte) error { return nil } -// peekBitsFast requires that at least one bit is requested every time. +// peekByteFast requires that at least one byte is requested every time. // There are no checks if the buffer is filled. func (b *bitReaderBytes) peekByteFast() uint8 { got := uint8(b.value >> 56) @@ -66,8 +67,7 @@ func (b *bitReaderBytes) fillFast() { } // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 b.off -= 4 @@ -76,7 +76,7 @@ func (b *bitReaderBytes) fillFast() { // fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. func (b *bitReaderBytes) fillFastStart() { // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.value = le.Load64(b.in, b.off-8) b.bitsRead = 0 b.off -= 8 } @@ -86,9 +86,8 @@ func (b *bitReaderBytes) fill() { if b.bitsRead < 32 { return } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + if b.off >= 4 { + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 b.off -= 4 @@ -175,9 +174,7 @@ func (b *bitReaderShifted) fillFast() { return } - // 2 bounds checks. 
- v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 b.off -= 4 @@ -185,8 +182,7 @@ func (b *bitReaderShifted) fillFast() { // fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. func (b *bitReaderShifted) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.value = le.Load64(b.in, b.off-8) b.bitsRead = 0 b.off -= 8 } @@ -197,8 +193,7 @@ func (b *bitReaderShifted) fill() { return } if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 b.off -= 4 diff --git a/vendor/github.com/klauspost/compress/internal/le/le.go b/vendor/github.com/klauspost/compress/internal/le/le.go new file mode 100644 index 0000000000..e54909e16f --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/le.go @@ -0,0 +1,5 @@ +package le + +type Indexer interface { + int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 +} diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go new file mode 100644 index 0000000000..0cfb5c0e27 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go @@ -0,0 +1,42 @@ +//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine + +package le + +import ( + "encoding/binary" +) + +// Load8 will load from b at index i. +func Load8[I Indexer](b []byte, i I) byte { + return b[i] +} + +// Load16 will load from b at index i. +func Load16[I Indexer](b []byte, i I) uint16 { + return binary.LittleEndian.Uint16(b[i:]) +} + +// Load32 will load from b at index i. +func Load32[I Indexer](b []byte, i I) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +// Load64 will load from b at index i. +func Load64[I Indexer](b []byte, i I) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +// Store16 will store v at b. +func Store16(b []byte, v uint16) { + binary.LittleEndian.PutUint16(b, v) +} + +// Store32 will store v at b. +func Store32(b []byte, v uint32) { + binary.LittleEndian.PutUint32(b, v) +} + +// Store64 will store v at b. +func Store64(b []byte, v uint64) { + binary.LittleEndian.PutUint64(b, v) +} diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go new file mode 100644 index 0000000000..ada45cd909 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go @@ -0,0 +1,55 @@ +// We enable 64 bit LE platforms: + +//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine + +package le + +import ( + "unsafe" +) + +// Load8 will load from b at index i. +func Load8[I Indexer](b []byte, i I) byte { + //return binary.LittleEndian.Uint16(b[i:]) + //return *(*uint16)(unsafe.Pointer(&b[i])) + return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load16 will load from b at index i. 
+func Load16[I Indexer](b []byte, i I) uint16 { + //return binary.LittleEndian.Uint16(b[i:]) + //return *(*uint16)(unsafe.Pointer(&b[i])) + return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load32 will load from b at index i. +func Load32[I Indexer](b []byte, i I) uint32 { + //return binary.LittleEndian.Uint32(b[i:]) + //return *(*uint32)(unsafe.Pointer(&b[i])) + return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load64 will load from b at index i. +func Load64[I Indexer](b []byte, i I) uint64 { + //return binary.LittleEndian.Uint64(b[i:]) + //return *(*uint64)(unsafe.Pointer(&b[i])) + return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Store16 will store v at b. +func Store16(b []byte, v uint16) { + //binary.LittleEndian.PutUint16(b, v) + *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v +} + +// Store32 will store v at b. +func Store32(b []byte, v uint32) { + //binary.LittleEndian.PutUint32(b, v) + *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v +} + +// Store64 will store v at b. +func Store64(b []byte, v uint64) { + //binary.LittleEndian.PutUint64(b, v) + *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v +} diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md index 8284bb0810..1d9220cbf5 100644 --- a/vendor/github.com/klauspost/compress/s2/README.md +++ b/vendor/github.com/klauspost/compress/s2/README.md @@ -79,7 +79,7 @@ This will take ownership of the buffer until the stream is closed. func EncodeStream(src []byte, dst io.Writer) error { enc := s2.NewWriter(dst) // The encoder owns the buffer until Flush or Close is called. - err := enc.EncodeBuffer(buf) + err := enc.EncodeBuffer(src) if err != nil { enc.Close() return err diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go index 2cb55c2c77..c99d40b69d 100644 --- a/vendor/github.com/klauspost/compress/s2/decode_other.go +++ b/vendor/github.com/klauspost/compress/s2/decode_other.go @@ -11,6 +11,8 @@ package s2 import ( "fmt" "strconv" + + "github.com/klauspost/compress/internal/le" ) // decode writes the decoding of src to dst. It assumes that the varint-encoded @@ -38,21 +40,18 @@ func s2Decode(dst, src []byte) int { case x < 60: s++ case x == 60: + x = uint32(src[s+1]) s += 2 - x = uint32(src[s-1]) case x == 61: - in := src[s : s+3] - x = uint32(in[1]) | uint32(in[2])<<8 + x = uint32(le.Load16(src, s+1)) s += 3 case x == 62: - in := src[s : s+4] // Load as 32 bit and shift down. 
- x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x = le.Load32(src, s) x >>= 8 s += 4 case x == 63: - in := src[s : s+5] - x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24 + x = le.Load32(src, s+1) s += 5 } length = int(x) + 1 @@ -85,8 +84,7 @@ func s2Decode(dst, src []byte) int { length = int(src[s]) + 4 s += 1 case 6: - in := src[s : s+2] - length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8) + length = int(le.Load16(src, s)) + 1<<8 s += 2 case 7: in := src[s : s+3] @@ -99,15 +97,13 @@ func s2Decode(dst, src []byte) int { } length += 4 case tagCopy2: - in := src[s : s+3] - offset = int(uint32(in[1]) | uint32(in[2])<<8) - length = 1 + int(in[0])>>2 + offset = int(le.Load16(src, s+1)) + length = 1 + int(src[s])>>2 s += 3 case tagCopy4: - in := src[s : s+5] - offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24) - length = 1 + int(in[0])>>2 + offset = int(le.Load32(src, s+1)) + length = 1 + int(src[s])>>2 s += 5 } diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go index 9977045696..a473b64529 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_all.go +++ b/vendor/github.com/klauspost/compress/s2/encode_all.go @@ -10,14 +10,16 @@ import ( "encoding/binary" "fmt" "math/bits" + + "github.com/klauspost/compress/internal/le" ) func load32(b []byte, i int) uint32 { - return binary.LittleEndian.Uint32(b[i:]) + return le.Load32(b, i) } func load64(b []byte, i int) uint64 { - return binary.LittleEndian.Uint64(b[i:]) + return le.Load64(b, i) } // hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. @@ -44,7 +46,12 @@ func encodeGo(dst, src []byte) []byte { d += emitLiteral(dst[d:], src) return dst[:d] } - n := encodeBlockGo(dst[d:], src) + var n int + if len(src) < 64<<10 { + n = encodeBlockGo64K(dst[d:], src) + } else { + n = encodeBlockGo(dst[d:], src) + } if n > 0 { d += n return dst[:d] @@ -70,7 +77,6 @@ func encodeBlockGo(dst, src []byte) (d int) { debug = false ) - var table [maxTableSize]uint32 // sLimit is when to stop looking for offset/length copies. The inputMargin @@ -277,13 +283,228 @@ emitRemainder: return d } +// encodeBlockGo64K is a specialized version for compressing blocks <= 64KB +func encodeBlockGo64K(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + + debug = false + ) + + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. 
+ s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>5 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint16(s) + table[hash1] = uint16(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint16(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint16(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. 
+ if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint16(s - 2) + table[currHash] = uint16(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + func encodeBlockSnappyGo(dst, src []byte) (d int) { // Initialize the hash table. const ( tableBits = 14 maxTableSize = 1 << tableBits ) - var table [maxTableSize]uint32 // sLimit is when to stop looking for offset/length copies. The inputMargin @@ -467,6 +688,197 @@ emitRemainder: return d } +// encodeBlockSnappyGo64K is a special version of encodeBlockSnappyGo for sizes <64KB +func encodeBlockSnappyGo64K(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>5 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint16(s) + table[hash1] = uint16(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + // Bail if we exceed the maximum size. 
+ if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint16(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint16(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + if false { + // Validate match. + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint16(s - 2) + table[currHash] = uint16(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + // encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It // assumes that the varint-encoded length of the decompressed bytes has already // been written. 
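The encodeBlockGo64K and encodeBlockSnappyGo64K paths added above keep their hash tables as [maxTableSize]uint16 rather than uint32: every position in a block of at most 64 KiB fits in 16 bits, so the per-block table is half the size and cheaper to clear. A rough, self-contained sketch of that idea follows; findPrevMatch is a hypothetical helper (not part of the s2 API), and hash6 is written from the same "hash the lowest 6 bytes into h bits" idea the diff's doc comment describes:

package main

import (
	"encoding/binary"
	"fmt"
)

const tableBits = 14 // same table size as the 64K encoders above

// hash6 maps the low 6 bytes of u into a tableBits-wide bucket.
func hash6(u uint64, h uint8) uint32 {
	const prime6bytes = 227718039650203
	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
}

// findPrevMatch records, for each position, an earlier candidate position
// whose 6-byte hash collides -- the starting point for match extension.
// Requires len(src) <= 64<<10 so positions fit in uint16.
// (A zero entry means no earlier candidate has been seen yet.)
func findPrevMatch(src []byte) []int {
	var table [1 << tableBits]uint16
	prev := make([]int, len(src))
	for s := 0; s+8 <= len(src); s++ {
		cv := binary.LittleEndian.Uint64(src[s:])
		h := hash6(cv, tableBits)
		prev[s] = int(table[h])
		table[h] = uint16(s)
	}
	return prev
}

func main() {
	src := []byte("abcdefgh--abcdefgh")
	// Expected 0 (the first "abcdefgh"), barring a rare hash collision.
	fmt.Println(findPrevMatch(src)[10])
}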
diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go index 544cb1e17b..90ebf89c20 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_better.go +++ b/vendor/github.com/klauspost/compress/s2/encode_better.go @@ -348,12 +348,7 @@ func encodeBlockBetterSnappyGo(dst, src []byte) (d int) { nextS := 0 for { // Next src position to check - nextS = (s-nextEmit)>>7 + 1 - if nextS > maxSkip { - nextS = s + maxSkip - } else { - nextS += s - } + nextS = min(s+(s-nextEmit)>>7+1, s+maxSkip) if nextS > sLimit { goto emitRemainder @@ -483,6 +478,415 @@ emitRemainder: return d } +func encodeBlockBetterGo64K(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + // Initialize the hash tables. + // Use smaller tables for smaller blocks + const ( + // Long hash matches. + lTableBits = 16 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 13 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint16 + var sTable [maxSTableSize]uint16 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := 0 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>6 + 1 + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint16(s) + sTable[hashS] = uint16(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if cv == valLong { + break + } + if cv == valShort { + candidateL = candidateS + break + } + + // Check repeat at offset checkRep. + const checkRep = 1 + // Minimum length of a repeat. Tested with various values. + // While 4-5 offers improvements in some, 6 reduces + // regressions significantly. + const wantRepeatBytes = 6 + const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) + if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + wantRepeatBytes + checkRep + s += wantRepeatBytes + checkRep + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidate] { + s++ + candidate++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. 
+ d += emitRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint16(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint16(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint16(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint16(index1 + 1) + index0 += 2 + index1 -= 2 + } + + cv = load64(src, s) + continue + } + + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { + break + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint16(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if repeat == offset { + d += emitRepeat(dst[d:], offset, s-base) + } else { + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint16(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint16(index0 + 1) + + // lTable could be postponed, but very minor difference. + lTable[hash7(cv1, lTableBits)] = uint16(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint16(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint16(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint16(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterSnappyGo64K(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. 
+ sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + // Initialize the hash tables. + // Use smaller tables for smaller blocks + const ( + // Long hash matches. + lTableBits = 15 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 13 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint16 + var sTable [maxSTableSize]uint16 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + const maxSkip = 100 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = min(s+(s-nextEmit)>>6+1, s+maxSkip) + + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint16(s) + sTable[hashS] = uint16(s) + + if uint32(cv) == load32(src, candidateL) { + break + } + + // Check our short candidate + if uint32(cv) == load32(src, candidateS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint16(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + d += emitCopyNoRepeat(dst[d:], offset, s-base) + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint16(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint16(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint16(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint16(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint16(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint16(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + // encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. 
It // assumes that the varint-encoded length of the decompressed bytes has already // been written. diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go index dd1c973ca5..e25b78445d 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_go.go +++ b/vendor/github.com/klauspost/compress/s2/encode_go.go @@ -21,6 +21,9 @@ func encodeBlock(dst, src []byte) (d int) { if len(src) < minNonLiteralBlockSize { return 0 } + if len(src) <= 64<<10 { + return encodeBlockGo64K(dst, src) + } return encodeBlockGo(dst, src) } @@ -32,6 +35,9 @@ func encodeBlock(dst, src []byte) (d int) { // // len(dst) >= MaxEncodedLen(len(src)) func encodeBlockBetter(dst, src []byte) (d int) { + if len(src) <= 64<<10 { + return encodeBlockBetterGo64K(dst, src) + } return encodeBlockBetterGo(dst, src) } @@ -43,6 +49,9 @@ func encodeBlockBetter(dst, src []byte) (d int) { // // len(dst) >= MaxEncodedLen(len(src)) func encodeBlockBetterSnappy(dst, src []byte) (d int) { + if len(src) <= 64<<10 { + return encodeBlockBetterSnappyGo64K(dst, src) + } return encodeBlockBetterSnappyGo(dst, src) } @@ -57,6 +66,9 @@ func encodeBlockSnappy(dst, src []byte) (d int) { if len(src) < minNonLiteralBlockSize { return 0 } + if len(src) <= 64<<10 { + return encodeBlockSnappyGo64K(dst, src) + } return encodeBlockSnappyGo(dst, src) } diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod index 5a4412f907..81bda5e294 100644 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -1,4 +1,3 @@ module github.com/klauspost/compress -go 1.19 - +go 1.22 diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 92e2347bbc..c11d7fa28e 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -6,7 +6,7 @@ A high performance compression algorithm is implemented. For now focused on spee This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. -This package is pure Go and without use of "unsafe". +This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features. The `zstd` package is provided as open source software using a Go standard license. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 25ca983941..d41e3e1709 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -5,11 +5,12 @@ package zstd import ( - "encoding/binary" "errors" "fmt" "io" "math/bits" + + "github.com/klauspost/compress/internal/le" ) // bitReader reads a bitstream in reverse. @@ -18,6 +19,7 @@ import ( type bitReader struct { in []byte value uint64 // Maybe use [16]byte, but shifting is awkward. 
+ cursor int // offset where next read should end bitsRead uint8 } @@ -32,6 +34,7 @@ func (b *bitReader) init(in []byte) error { if v == 0 { return errors.New("corrupt stream, did not find end of stream") } + b.cursor = len(in) b.bitsRead = 64 b.value = 0 if len(in) >= 8 { @@ -67,18 +70,15 @@ func (b *bitReader) fillFast() { if b.bitsRead < 32 { return } - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) + b.cursor -= 4 + b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor)) b.bitsRead -= 32 } // fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. func (b *bitReader) fillFastStart() { - v := b.in[len(b.in)-8:] - b.in = b.in[:len(b.in)-8] - b.value = binary.LittleEndian.Uint64(v) + b.cursor -= 8 + b.value = le.Load64(b.in, b.cursor) b.bitsRead = 0 } @@ -87,25 +87,23 @@ func (b *bitReader) fill() { if b.bitsRead < 32 { return } - if len(b.in) >= 4 { - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) + if b.cursor >= 4 { + b.cursor -= 4 + b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor)) b.bitsRead -= 32 return } - b.bitsRead -= uint8(8 * len(b.in)) - for len(b.in) > 0 { - b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) - b.in = b.in[:len(b.in)-1] + b.bitsRead -= uint8(8 * b.cursor) + for b.cursor > 0 { + b.cursor -= 1 + b.value = (b.value << 8) | uint64(b.in[b.cursor]) } } // finished returns true if all bits have been read from the bit stream. func (b *bitReader) finished() bool { - return len(b.in) == 0 && b.bitsRead >= 64 + return b.cursor == 0 && b.bitsRead >= 64 } // overread returns true if more bits have been requested than is on the stream. @@ -115,13 +113,14 @@ func (b *bitReader) overread() bool { // remain returns the number of bits remaining. func (b *bitReader) remain() uint { - return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) + return 8*uint(b.cursor) + 64 - uint(b.bitsRead) } // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReader) close() error { // Release reference. b.in = nil + b.cursor = 0 if !b.finished() { return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) } diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 9c28840c3b..0dd742fd2a 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -5,14 +5,10 @@ package zstd import ( - "bytes" - "encoding/binary" "errors" "fmt" "hash/crc32" "io" - "os" - "path/filepath" "sync" "github.com/klauspost/compress/huff0" @@ -648,21 +644,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { println("initializing sequences:", err) return err } - // Extract blocks... 
- if false && hist.dict == nil { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) - var buf bytes.Buffer - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) - buf.Write(in) - os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) - } return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 32a7f401d5..fd35ea1480 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -9,6 +9,7 @@ import ( "fmt" "math" "math/bits" + "slices" "github.com/klauspost/compress/huff0" ) @@ -457,16 +458,7 @@ func fuzzFseEncoder(data []byte) int { // All 0 return 0 } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - cnt := maxCount(hist[:maxSym]) + cnt := int(slices.Max(hist[:maxSym])) if cnt == len(data) { // RLE return 0 @@ -884,15 +876,6 @@ func (b *blockEnc) genCodes() { } } } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } if debugAsserts && mlMax > maxMatchLengthSymbol { panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) } @@ -903,7 +886,7 @@ func (b *blockEnc) genCodes() { panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) } - b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) - b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) - b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) + b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1]))) + b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1]))) + b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1]))) } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index bbca17234a..ea2a19376c 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -123,7 +123,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { } // Read bytes from the decompressed stream into p. -// Returns the number of bytes written and any error that occurred. +// Returns the number of bytes read and any error that occurred. // When the stream is done, io.EOF will be returned. 
func (d *Decoder) Read(p []byte) (int, error) { var n int @@ -323,6 +323,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { frame.bBuf = nil if frame.history.decoders.br != nil { frame.history.decoders.br.in = nil + frame.history.decoders.br.cursor = 0 } d.decoders <- block }() diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 5ca46038ad..7d250c67f5 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -116,7 +116,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 { panic(err) } if t < 0 { - err := fmt.Sprintf("s (%d) < 0", s) + err := fmt.Sprintf("t (%d) < 0", t) panic(err) } if s-t > e.maxMatchOff { diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go index 57b9c31c02..bea1779e97 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -7,20 +7,25 @@ package zstd import ( - "encoding/binary" "math/bits" + + "github.com/klauspost/compress/internal/le" ) // matchLen returns the maximum common prefix length of a and b. // a must be the shortest of the two. func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + left := len(a) + for left >= 8 { + diff := le.Load64(a, n) ^ le.Load64(b, n) if diff != 0 { return n + bits.TrailingZeros64(diff)>>3 } n += 8 + left -= 8 } + a = a[n:] + b = b[n:] for i := range a { if a[i] != b[i] { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index d7fe6d82d9..9a7de82f9e 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { return io.ErrUnexpectedEOF } var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + if br.cursor > 4+((maxOffsetBits+16+16)>>3) { // inlined function: // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index f5591fa1e8..a708ca6d3d 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -7,9 +7,9 @@ TEXT ·sequenceDecs_decode_amd64(SB), $8-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -299,8 +299,8 @@ sequenceDecs_decode_amd64_match_len_ofs_ok: MOVQ R13, 160(AX) MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -335,9 +335,9 @@ error_overread: TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -598,8 +598,8 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok: MOVQ R13, 160(AX) MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -634,9 +634,9 @@ 
error_overread: TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -884,8 +884,8 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok: MOVQ R12, 160(CX) MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -920,9 +920,9 @@ error_overread: TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -1141,8 +1141,8 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok: MOVQ R12, 160(CX) MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -1787,9 +1787,9 @@ empty_seqs: TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -2281,8 +2281,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Update the context MOVQ ctx+16(FP), AX @@ -2349,9 +2349,9 @@ error_not_enough_space: TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -2801,8 +2801,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Update the context MOVQ ctx+16(FP), AX @@ -2869,9 +2869,9 @@ error_not_enough_space: TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -3465,8 +3465,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Update the context MOVQ ctx+16(FP), AX @@ -3533,9 +3533,9 @@ error_not_enough_space: TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -4087,8 +4087,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Update the context MOVQ ctx+16(FP), AX diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go index 2fb35b788c..7cec2197cd 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { } for i := range seqs { var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + if br.cursor > 4+((maxOffsetBits+16+16)>>3) { // inlined function: // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go index 8014174a77..65045eabdd 100644 --- 
a/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -69,7 +69,6 @@ var llBitsTable = [maxLLCode + 1]byte{ func llCode(litLength uint32) uint8 { const llDeltaCode = 19 if litLength <= 63 { - // Compiler insists on bounds check (Go 1.12) return llCodeTable[litLength&63] } return uint8(highBit(litLength)) + llDeltaCode @@ -102,7 +101,6 @@ var mlBitsTable = [maxMLCode + 1]byte{ func mlCode(mlBase uint32) uint8 { const mlDeltaCode = 36 if mlBase <= 127 { - // Compiler insists on bounds check (Go 1.12) return mlCodeTable[mlBase&127] } return uint8(highBit(mlBase)) + mlDeltaCode diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index ec13594e89..a17381b8f8 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -197,7 +197,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { n, r.err = w.Write(r.block.output) if r.err != nil { - return written, err + return written, r.err } written += int64(n) continue @@ -239,7 +239,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { } n, r.err = w.Write(r.block.output) if r.err != nil { - return written, err + return written, r.err } written += int64(n) continue diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 066bef2a4f..6252b46ae6 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -5,10 +5,11 @@ package zstd import ( "bytes" - "encoding/binary" "errors" "log" "math" + + "github.com/klauspost/compress/internal/le" ) // enable debug printing @@ -110,11 +111,11 @@ func printf(format string, a ...interface{}) { } func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) + return le.Load32(b, i) } func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) + return le.Load64(b, i) } type byter interface { diff --git a/vendor/github.com/nats-io/nats-server/v2/conf/fuzz.go b/vendor/github.com/nats-io/nats-server/v2/conf/fuzz.go index 99faf999fe..3aba1551eb 100644 --- a/vendor/github.com/nats-io/nats-server/v2/conf/fuzz.go +++ b/vendor/github.com/nats-io/nats-server/v2/conf/fuzz.go @@ -1,4 +1,4 @@ -// Copyright 2020 The NATS Authors +// Copyright 2020-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/conf/parse.go b/vendor/github.com/nats-io/nats-server/v2/conf/parse.go index bf5953c1b2..4e91c6667b 100644 --- a/vendor/github.com/nats-io/nats-server/v2/conf/parse.go +++ b/vendor/github.com/nats-io/nats-server/v2/conf/parse.go @@ -1,4 +1,4 @@ -// Copyright 2013-2018 The NATS Authors +// Copyright 2013-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/logger/log.go b/vendor/github.com/nats-io/nats-server/v2/logger/log.go index c59bffef66..38473fd08a 100644 --- a/vendor/github.com/nats-io/nats-server/v2/logger/log.go +++ b/vendor/github.com/nats-io/nats-server/v2/logger/log.go @@ -1,4 +1,4 @@ -// Copyright 2012-2019 The NATS Authors +// Copyright 2012-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/logger/syslog.go b/vendor/github.com/nats-io/nats-server/v2/logger/syslog.go index 04b71d264b..d1c9fea2cc 100644 --- a/vendor/github.com/nats-io/nats-server/v2/logger/syslog.go +++ b/vendor/github.com/nats-io/nats-server/v2/logger/syslog.go @@ -1,4 +1,4 @@ -// Copyright 2012-2019 The NATS Authors +// Copyright 2012-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/logger/syslog_windows.go b/vendor/github.com/nats-io/nats-server/v2/logger/syslog_windows.go index f3c58266bf..c341a5d969 100644 --- a/vendor/github.com/nats-io/nats-server/v2/logger/syslog_windows.go +++ b/vendor/github.com/nats-io/nats-server/v2/logger/syslog_windows.go @@ -1,4 +1,4 @@ -// Copyright 2012-2018 The NATS Authors +// Copyright 2012-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/accounts.go b/vendor/github.com/nats-io/nats-server/v2/server/accounts.go index 08112f67a5..a821b9d8e5 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/accounts.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/accounts.go @@ -1,4 +1,4 @@ -// Copyright 2018-2023 The NATS Authors +// Copyright 2018-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -858,9 +858,14 @@ func (a *Account) Interest(subject string) int { func (a *Account) addClient(c *client) int { a.mu.Lock() n := len(a.clients) - if a.clients != nil { - a.clients[c] = struct{}{} + + // Could come here earlier than the account is registered with the server. + // Make sure we can still track clients. 
+ if a.clients == nil { + a.clients = make(map[*client]struct{}) } + a.clients[c] = struct{}{} + // If we did not add it, we are done if n == len(a.clients) { a.mu.Unlock() @@ -2021,7 +2026,7 @@ func (a *Account) addServiceImportSub(si *serviceImport) error { a.mu.Unlock() cb := func(sub *subscription, c *client, acc *Account, subject, reply string, msg []byte) { - c.processServiceImport(si, acc, msg) + c.pa.delivered = c.processServiceImport(si, acc, msg) } sub, err := c.processSubEx([]byte(subject), nil, []byte(sid), cb, true, true, false) if err != nil { diff --git a/vendor/github.com/nats-io/nats-server/v2/server/auth_callout.go b/vendor/github.com/nats-io/nats-server/v2/server/auth_callout.go index 318114ebae..3801d9eaa5 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/auth_callout.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/auth_callout.go @@ -1,4 +1,4 @@ -// Copyright 2022-2023 The NATS Authors +// Copyright 2022-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/avl/seqset.go b/vendor/github.com/nats-io/nats-server/v2/server/avl/seqset.go index 0b6aca67cd..96ff376736 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/avl/seqset.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/avl/seqset.go @@ -1,4 +1,4 @@ -// Copyright 2023 The NATS Authors +// Copyright 2023-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore.go b/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore.go index 110ea85a7d..42e228e806 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore.go @@ -1,4 +1,4 @@ -// Copyright 2022-2023 The NATS Authors +// Copyright 2022-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore_other.go b/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore_other.go index 459b8db64a..18d62f8f55 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore_other.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore_other.go @@ -1,4 +1,4 @@ -// Copyright 2022-2024 The NATS Authors +// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/ciphersuites.go b/vendor/github.com/nats-io/nats-server/v2/server/ciphersuites.go index 55dbc8bf07..bc594c51f5 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/ciphersuites.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/ciphersuites.go @@ -1,4 +1,4 @@ -// Copyright 2016-2018 The NATS Authors +// Copyright 2016-2020 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/client.go b/vendor/github.com/nats-io/nats-server/v2/server/client.go index d7a3ccb125..8cbf98e517 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/client.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/client.go @@ -1,4 +1,4 @@ -// Copyright 2012-2023 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -113,8 +113,9 @@ const ( maxNoRTTPingBeforeFirstPong = 2 * time.Second // For stalling fast producers - stallClientMinDuration = 100 * time.Millisecond - stallClientMaxDuration = time.Second + stallClientMinDuration = 2 * time.Millisecond + stallClientMaxDuration = 5 * time.Millisecond + stallTotalAllowed = 10 * time.Millisecond ) var readLoopReportThreshold = readLoopReport @@ -462,6 +463,9 @@ type readCache struct { // Capture the time we started processing our readLoop. start time.Time + + // Total time stalled so far for readLoop processing. + tst time.Duration } // set the flag (would be equivalent to set the boolean to true) @@ -1414,6 +1418,11 @@ func (c *client) readLoop(pre []byte) { } return } + // Clear total stalled time here. + if c.in.tst >= stallClientMaxDuration { + c.rateLimitFormatWarnf("Producer was stalled for a total of %v", c.in.tst.Round(time.Millisecond)) + } + c.in.tst = 0 } // If we are a ROUTER/LEAF and have processed an INFO, it is possible that @@ -1640,8 +1649,10 @@ func (c *client) flushOutbound() bool { } consumed := len(wnb) - // Actual write to the socket. - nc.SetWriteDeadline(start.Add(wdl)) + // Actual write to the socket. The deadline applies to each batch + // rather than the total write, such that the configured deadline + // can be tuned to a known maximum quantity (64MB). + nc.SetWriteDeadline(time.Now().Add(wdl)) wn, err = wnb.WriteTo(nc) nc.SetWriteDeadline(time.Time{}) @@ -1728,7 +1739,7 @@ func (c *client) flushOutbound() bool { // Check if we have a stalled gate and if so and we are recovering release // any stalled producers. Only kind==CLIENT will stall. - if c.out.stc != nil && (n == attempted || c.out.pb < c.out.mp/2) { + if c.out.stc != nil && (n == attempted || c.out.pb < c.out.mp/4*3) { close(c.out.stc) c.out.stc = nil } @@ -2290,7 +2301,8 @@ func (c *client) queueOutbound(data []byte) { // Check here if we should create a stall channel if we are falling behind. // We do this here since if we wait for consumer's writeLoop it could be // too late with large number of fan in producers. - if c.out.pb > c.out.mp/2 && c.out.stc == nil { + // If the outbound connection is > 75% of maximum pending allowed, create a stall gate. 
+ if c.out.pb > c.out.mp/4*3 && c.out.stc == nil { c.out.stc = make(chan struct{}) } } @@ -3335,31 +3347,37 @@ func (c *client) msgHeader(subj, reply []byte, sub *subscription) []byte { } func (c *client) stalledWait(producer *client) { + // Check to see if we have exceeded our total wait time per readLoop invocation. + if producer.in.tst > stallTotalAllowed { + return + } + + // Grab stall channel which the slow consumer will close when caught up. stall := c.out.stc - ttl := stallDuration(c.out.pb, c.out.mp) + + // Calculate stall time. + ttl := stallClientMinDuration + if c.out.pb >= c.out.mp { + ttl = stallClientMaxDuration + } + c.mu.Unlock() defer c.mu.Lock() + // Now check if we are close to total allowed. + if producer.in.tst+ttl > stallTotalAllowed { + ttl = stallTotalAllowed - producer.in.tst + } delay := time.NewTimer(ttl) defer delay.Stop() + start := time.Now() select { case <-stall: case <-delay.C: producer.Debugf("Timed out of fast producer stall (%v)", ttl) } -} - -func stallDuration(pb, mp int64) time.Duration { - ttl := stallClientMinDuration - if pb >= mp { - ttl = stallClientMaxDuration - } else if hmp := mp / 2; pb > hmp { - bsz := hmp / 10 - additional := int64(ttl) * ((pb - hmp) / bsz) - ttl += time.Duration(additional) - } - return ttl + producer.in.tst += time.Since(start) } // Used to treat maps as efficient set @@ -3451,10 +3469,15 @@ func (c *client) deliverMsg(prodIsMQTT bool, sub *subscription, acc *Account, su msgSize -= int64(LEN_CR_LF) } - // No atomic needed since accessed under client lock. - // Monitor is reading those also under client's lock. - client.outMsgs++ - client.outBytes += msgSize + // We do not update the outbound stats if we are doing trace only since + // this message will not be sent out. + // Also do not update on internal callbacks. + if sub.icb == nil { + // No atomic needed since accessed under client lock. + // Monitor is reading those also under client's lock. + client.outMsgs++ + client.outBytes += msgSize + } // Check for internal subscriptions. if sub.icb != nil && !c.noIcb { @@ -3465,23 +3488,35 @@ func (c *client) deliverMsg(prodIsMQTT bool, sub *subscription, acc *Account, su } client.mu.Unlock() + // For service imports, track if we delivered. + didDeliver := true + // Internal account clients are for service imports and need the '\r\n'. start := time.Now() if client.kind == ACCOUNT { sub.icb(sub, c, acc, string(subject), string(reply), msg) + // If we are a service import check to make sure we delivered the message somewhere. + if sub.si { + didDeliver = c.pa.delivered + } } else { sub.icb(sub, c, acc, string(subject), string(reply), msg[:msgSize]) } if dur := time.Since(start); dur >= readLoopReportThreshold { srv.Warnf("Internal subscription on %q took too long: %v", subject, dur) } - return true + + return didDeliver } // If we are a client and we detect that the consumer we are // sending to is in a stalled state, go ahead and wait here // with a limit. if c.kind == CLIENT && client.out.stc != nil { + if srv.getOpts().NoFastProducerStall { + client.mu.Unlock() + return false + } client.stalledWait(c) } @@ -3959,7 +3994,7 @@ func (c *client) processInboundClientMsg(msg []byte) (bool, bool) { reply = append(reply, '@') reply = append(reply, c.pa.deliver...) 
} - didDeliver = c.sendMsgToGateways(acc, msg, c.pa.subject, reply, qnames) || didDeliver + didDeliver = c.sendMsgToGateways(acc, msg, c.pa.subject, reply, qnames, false) || didDeliver } // Check to see if we did not deliver to anyone and the client has a reply subject set @@ -4006,7 +4041,7 @@ func (c *client) handleGWReplyMap(msg []byte) bool { reply = append(reply, '@') reply = append(reply, c.pa.deliver...) } - c.sendMsgToGateways(c.acc, msg, c.pa.subject, reply, nil) + c.sendMsgToGateways(c.acc, msg, c.pa.subject, reply, nil, false) } return true } @@ -4129,9 +4164,20 @@ func (c *client) setHeader(key, value string, msg []byte) []byte { return bb.Bytes() } -// Will return the value for the header denoted by key or nil if it does not exists. -// This function ignores errors and tries to achieve speed and no additional allocations. +// Will return a copy of the value for the header denoted by key or nil if it does not exist. +// If you know that it is safe to refer to the underlying hdr slice for the period that the +// return value is used, then sliceHeader() will be faster. func getHeader(key string, hdr []byte) []byte { + v := sliceHeader(key, hdr) + if v == nil { + return nil + } + return append(make([]byte, 0, len(v)), v...) +} + +// Will return the sliced value for the header denoted by key or nil if it does not exists. +// This function ignores errors and tries to achieve speed and no additional allocations. +func sliceHeader(key string, hdr []byte) []byte { if len(hdr) == 0 { return nil } @@ -4156,15 +4202,14 @@ func getHeader(key string, hdr []byte) []byte { index++ } // Collect together the rest of the value until we hit a CRLF. - var value []byte + start := index for index < hdrLen { if hdr[index] == '\r' && index < hdrLen-1 && hdr[index+1] == '\n' { break } - value = append(value, hdr[index]) index++ } - return value + return hdr[start:index:index] } // For bytes.HasPrefix below. @@ -4175,17 +4220,17 @@ var ( // processServiceImport is an internal callback when a subscription matches an imported service // from another account. This includes response mappings as well. -func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byte) { +func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byte) bool { // If we are a GW and this is not a direct serviceImport ignore. isResponse := si.isRespServiceImport() if (c.kind == GATEWAY || c.kind == ROUTER) && !isResponse { - return + return false } // Detect cycles and ignore (return) when we detect one. if len(c.pa.psi) > 0 { for i := len(c.pa.psi) - 1; i >= 0; i-- { if psi := c.pa.psi[i]; psi.se == si.se { - return + return false } } } @@ -4206,7 +4251,7 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt // response service imports and rrMap entries which all will need to simply expire. // TODO(dlc) - Come up with something better. 
if shouldReturn || (checkJS && si.se != nil && si.se.acc == c.srv.SystemAccount()) { - return + return false } var nrr []byte @@ -4278,7 +4323,7 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt var ci *ClientInfo if hadPrevSi && c.pa.hdr >= 0 { var cis ClientInfo - if err := json.Unmarshal(getHeader(ClientInfoHdr, msg[:c.pa.hdr]), &cis); err == nil { + if err := json.Unmarshal(sliceHeader(ClientInfoHdr, msg[:c.pa.hdr]), &cis); err == nil { ci = &cis ci.Service = acc.Name // Check if we are moving into a share details account from a non-shared @@ -4287,7 +4332,7 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt c.addServerAndClusterInfo(ci) } } - } else if c.kind != LEAF || c.pa.hdr < 0 || len(getHeader(ClientInfoHdr, msg[:c.pa.hdr])) == 0 { + } else if c.kind != LEAF || c.pa.hdr < 0 || len(sliceHeader(ClientInfoHdr, msg[:c.pa.hdr])) == 0 { ci = c.getClientInfo(share) // If we did not share but the imports destination is the system account add in the server and cluster info. if !share && isSysImport { @@ -4345,7 +4390,7 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt flags |= pmrCollectQueueNames var queues [][]byte didDeliver, queues = c.processMsgResults(siAcc, rr, msg, c.pa.deliver, []byte(to), nrr, flags) - didDeliver = c.sendMsgToGateways(siAcc, msg, []byte(to), nrr, queues) || didDeliver + didDeliver = c.sendMsgToGateways(siAcc, msg, []byte(to), nrr, queues, false) || didDeliver } else { didDeliver, _ = c.processMsgResults(siAcc, rr, msg, c.pa.deliver, []byte(to), nrr, flags) } @@ -4354,6 +4399,10 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt c.in.rts = orts c.pa = pacopy + // Before we undo didDeliver based on tracing and last mile, mark in the c.pa which informs us of no responders status. + // If we override due to tracing and traceOnly we do not want to send back a no responders. + c.pa.delivered = didDeliver + // Determine if we should remove this service import. This is for response service imports. // We will remove if we did not deliver, or if we are a response service import and we are // a singleton, or we have an EOF message. @@ -4383,6 +4432,8 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt siAcc.removeRespServiceImport(rsi, reason) } } + + return didDeliver } func (c *client) addSubToRouteTargets(sub *subscription) { @@ -4846,7 +4897,7 @@ func (c *client) checkLeafClientInfoHeader(msg []byte) (dmsg []byte, setHdr bool if c.pa.hdr < 0 || len(msg) < c.pa.hdr { return msg, false } - cir := getHeader(ClientInfoHdr, msg[:c.pa.hdr]) + cir := sliceHeader(ClientInfoHdr, msg[:c.pa.hdr]) if len(cir) == 0 { return msg, false } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/const.go b/vendor/github.com/nats-io/nats-server/v2/server/const.go index 01c951326f..1a6be0b2f6 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/const.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/const.go @@ -1,4 +1,4 @@ -// Copyright 2012-2023 The NATS Authors +// Copyright 2012-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -55,7 +55,7 @@ func init() { const ( // VERSION is the current version for the server. - VERSION = "2.10.25" + VERSION = "2.10.26" // PROTO is the currently supported protocol. 
// 0 was the original diff --git a/vendor/github.com/nats-io/nats-server/v2/server/consumer.go b/vendor/github.com/nats-io/nats-server/v2/server/consumer.go index 4d66b23b97..83f2f3ce83 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/consumer.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/consumer.go @@ -1,4 +1,4 @@ -// Copyright 2019-2024 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -60,7 +60,6 @@ type ConsumerInfo struct { } type ConsumerConfig struct { - // Durable is deprecated. All consumers should have names, picked by clients. Durable string `json:"durable_name,omitempty"` Name string `json:"name,omitempty"` Description string `json:"description,omitempty"` @@ -395,7 +394,7 @@ type consumer struct { ackMsgs *ipQueue[*jsAckMsg] // for stream signaling when multiple filters are set. - sigSubs []*subscription + sigSubs []string } // A single subject filter. @@ -1583,6 +1582,12 @@ var ( // deleteNotActive must only be called from time.AfterFunc or in its own // goroutine, as it can block on clean-up. func (o *consumer) deleteNotActive() { + // Take a copy of these when the goroutine starts, mostly it avoids a + // race condition with tests that modify these consts, such as + // TestJetStreamClusterGhostEphemeralsAfterRestart. + cnaMax := consumerNotActiveMaxInterval + cnaStart := consumerNotActiveStartInterval + o.mu.Lock() if o.mset == nil { o.mu.Unlock() @@ -1626,10 +1631,10 @@ func (o *consumer) deleteNotActive() { if o.srv != nil { qch = o.srv.quitCh } - if o.js != nil { - cqch = o.js.clusterQuitC() - } o.mu.Unlock() + if js != nil { + cqch = js.clusterQuitC() + } // Useful for pprof. setGoRoutineLabels(pprofLabels{ @@ -1663,8 +1668,8 @@ func (o *consumer) deleteNotActive() { if ca != nil && cc != nil { // Check to make sure we went away. // Don't think this needs to be a monitored go routine. - jitter := time.Duration(rand.Int63n(int64(consumerNotActiveStartInterval))) - interval := consumerNotActiveStartInterval + jitter + jitter := time.Duration(rand.Int63n(int64(cnaStart))) + interval := cnaStart + jitter ticker := time.NewTicker(interval) defer ticker.Stop() for { @@ -1686,7 +1691,7 @@ func (o *consumer) deleteNotActive() { if nca != nil && nca == ca { s.Warnf("Consumer assignment for '%s > %s > %s' not cleaned up, retrying", acc, stream, name) meta.ForwardProposal(removeEntry) - if interval < consumerNotActiveMaxInterval { + if interval < cnaMax { interval *= 2 ticker.Reset(interval) } @@ -1744,11 +1749,16 @@ func (o *consumer) hasMaxDeliveries(seq uint64) bool { if o.maxp > 0 && len(o.pending) >= o.maxp { o.signalNewMessages() } - // Cleanup our tracking. - delete(o.pending, seq) - if o.rdc != nil { - delete(o.rdc, seq) + // Make sure to remove from pending. + if p, ok := o.pending[seq]; ok && p != nil { + delete(o.pending, seq) + o.updateDelivered(p.Sequence, seq, dc, p.Timestamp) } + // Ensure redelivered state is set, if not already. 
+ if o.rdc == nil { + o.rdc = make(map[uint64]uint64) + } + o.rdc[seq] = dc return true } return false @@ -1859,9 +1869,6 @@ func (acc *Account) checkNewConsumerConfig(cfg, ncfg *ConsumerConfig) error { if cfg.FlowControl != ncfg.FlowControl { return errors.New("flow control can not be updated") } - if cfg.MaxWaiting != ncfg.MaxWaiting { - return errors.New("max waiting can not be updated") - } // Deliver Subject is conditional on if its bound. if cfg.DeliverSubject != ncfg.DeliverSubject { @@ -1876,6 +1883,10 @@ func (acc *Account) checkNewConsumerConfig(cfg, ncfg *ConsumerConfig) error { } } + if cfg.MaxWaiting != ncfg.MaxWaiting { + return errors.New("max waiting can not be updated") + } + // Check if BackOff is defined, MaxDeliver is within range. if lbo := len(ncfg.BackOff); lbo > 0 && ncfg.MaxDeliver != -1 && lbo > ncfg.MaxDeliver { return NewJSConsumerMaxDeliverBackoffError() @@ -2806,17 +2817,20 @@ func (o *consumer) processAckMsg(sseq, dseq, dc uint64, reply string, doSample b if sseq >= o.sseq { // Let's make sure this is valid. // This is only received on the consumer leader, so should never be higher - // than the last stream sequence. + // than the last stream sequence. But could happen if we've just become + // consumer leader, and we are not up-to-date on the stream yet. var ss StreamState mset.store.FastState(&ss) if sseq > ss.LastSeq { o.srv.Warnf("JetStream consumer '%s > %s > %s' ACK sequence %d past last stream sequence of %d", o.acc.Name, o.stream, o.name, sseq, ss.LastSeq) // FIXME(dlc) - For 2.11 onwards should we return an error here to the caller? - o.mu.Unlock() - return false } - o.sseq = sseq + 1 + // Even though another leader must have delivered a message with this sequence, we must not adjust + // the current pointer. This could otherwise result in a stuck consumer, where messages below this + // sequence can't be redelivered, and we'll have incorrect pending state and ack floors. + o.mu.Unlock() + return false } // Let the owning stream know if we are interest or workqueue retention based. @@ -2979,6 +2993,7 @@ func (o *consumer) needAck(sseq uint64, subj string) bool { var needAck bool var asflr, osseq uint64 var pending map[uint64]*Pending + var rdc map[uint64]uint64 o.mu.RLock() defer o.mu.RUnlock() @@ -3003,7 +3018,7 @@ func (o *consumer) needAck(sseq uint64, subj string) bool { } if o.isLeader() { asflr, osseq = o.asflr, o.sseq - pending = o.pending + pending, rdc = o.pending, o.rdc } else { if o.store == nil { return false @@ -3014,7 +3029,7 @@ func (o *consumer) needAck(sseq uint64, subj string) bool { return sseq > o.asflr && !o.isFiltered() } // If loading state as here, the osseq is +1. - asflr, osseq, pending = state.AckFloor.Stream, state.Delivered.Stream+1, state.Pending + asflr, osseq, pending, rdc = state.AckFloor.Stream, state.Delivered.Stream+1, state.Pending, state.Redelivered } switch o.cfg.AckPolicy { @@ -3030,6 +3045,12 @@ func (o *consumer) needAck(sseq uint64, subj string) bool { } } + // Finally check if redelivery of this message is tracked. + // If the message is not pending, it should be preserved if it reached max delivery. + if !needAck { + _, needAck = rdc[sseq] + } + return needAck } @@ -3488,7 +3509,10 @@ func (o *consumer) deliveryCount(seq uint64) uint64 { if o.rdc == nil { return 1 } - return o.rdc[seq] + if dc := o.rdc[seq]; dc >= 1 { + return dc + } + return 1 } // Increase the delivery count for this message. @@ -3802,10 +3826,7 @@ func (o *consumer) checkAckFloor() { // Check if this message was pending. 
o.mu.RLock() p, isPending := o.pending[seq] - var rdc uint64 = 1 - if o.rdc != nil { - rdc = o.rdc[seq] - } + rdc := o.deliveryCount(seq) o.mu.RUnlock() // If it was pending for us, get rid of it. if isPending { @@ -3823,10 +3844,7 @@ func (o *consumer) checkAckFloor() { if p != nil { dseq = p.Sequence } - var rdc uint64 = 1 - if o.rdc != nil { - rdc = o.rdc[seq] - } + rdc := o.deliveryCount(seq) toTerm = append(toTerm, seq, dseq, rdc) } } @@ -5400,10 +5418,7 @@ func (o *consumer) decStreamPending(sseq uint64, subj string) { // Check if this message was pending. p, wasPending := o.pending[sseq] - var rdc uint64 = 1 - if o.rdc != nil { - rdc = o.rdc[sseq] - } + rdc := o.deliveryCount(sseq) o.mu.Unlock() @@ -5424,7 +5439,7 @@ func (o *consumer) account() *Account { // Creates a sublist for consumer. // All subjects share the same callback. -func (o *consumer) signalSubs() []*subscription { +func (o *consumer) signalSubs() []string { o.mu.Lock() defer o.mu.Unlock() @@ -5432,15 +5447,15 @@ func (o *consumer) signalSubs() []*subscription { return o.sigSubs } - subs := []*subscription{} - if o.subjf == nil { - subs = append(subs, &subscription{subject: []byte(fwcs), icb: o.processStreamSignal}) + if len(o.subjf) == 0 { + subs := []string{fwcs} o.sigSubs = subs return subs } + subs := make([]string, 0, len(o.subjf)) for _, filter := range o.subjf { - subs = append(subs, &subscription{subject: []byte(filter.subject), icb: o.processStreamSignal}) + subs = append(subs, filter.subject) } o.sigSubs = subs return subs @@ -5450,7 +5465,7 @@ func (o *consumer) signalSubs() []*subscription { // We know that this subject matches us by how the parent handles registering us with the signaling sublist, // but we must check if we are leader. // We do need the sequence of the message however and we use the msg as the encoded seq. -func (o *consumer) processStreamSignal(_ *subscription, _ *client, _ *Account, subject, _ string, seqb []byte) { +func (o *consumer) processStreamSignal(seq uint64) { // We can get called here now when not leader, so bail fast // and without acquiring any locks. if !o.leader.Load() { @@ -5461,10 +5476,6 @@ func (o *consumer) processStreamSignal(_ *subscription, _ *client, _ *Account, s if o.mset == nil { return } - - var le = binary.LittleEndian - seq := le.Uint64(seqb) - if seq > o.npf { o.npc++ } @@ -5539,6 +5550,7 @@ func (o *consumer) isMonitorRunning() bool { // If we detect that our ackfloor is higher than the stream's last sequence, return this error. var errAckFloorHigherThanLastSeq = errors.New("consumer ack floor is higher than streams last sequence") +var errAckFloorInvalid = errors.New("consumer ack floor is invalid") // If we are a consumer of an interest or workqueue policy stream, process that state and make sure consistent. func (o *consumer) checkStateForInterestStream(ss *StreamState) error { @@ -5568,7 +5580,7 @@ func (o *consumer) checkStateForInterestStream(ss *StreamState) error { asflr := state.AckFloor.Stream // Protect ourselves against rolling backwards. if asflr&(1<<63) != 0 { - return nil + return errAckFloorInvalid } // Check if the underlying stream's last sequence is less than our floor. 
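A quick aside on the new errAckFloorInvalid return above: the asflr&(1<<63) test catches an ack floor that has rolled backwards past zero, since a uint64 underflow wraps around and sets the top bit. A minimal, self-contained sketch of that check (plain Go, outside the server; the variable name is only illustrative):

package main

import "fmt"

func main() {
	// Simulate an ack floor that "rolled backwards" past zero.
	var asflr uint64 = 0
	asflr-- // wraps to 1<<64 - 1, so the top bit is now set

	// Same test as in checkStateForInterestStream above; such a floor
	// is now surfaced as an error instead of being silently accepted.
	fmt.Println(asflr&(1<<63) != 0) // true
}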
@@ -5587,6 +5599,7 @@ func (o *consumer) checkStateForInterestStream(ss *StreamState) error { fseq = chkfloor } + var retryAsflr uint64 for seq = fseq; asflr > 0 && seq <= asflr; seq++ { if filters != nil { _, nseq, err = store.LoadNextMsgMulti(filters, seq, &smv) @@ -5599,15 +5612,24 @@ func (o *consumer) checkStateForInterestStream(ss *StreamState) error { } // Only ack though if no error and seq <= ack floor. if err == nil && seq <= asflr { - mset.ackMsg(o, seq) + didRemove := mset.ackMsg(o, seq) + // Removing the message could fail. For example if we're behind on stream applies. + // Overwrite retry floor (only the first time) to allow us to check next time if the removal was successful. + if didRemove && retryAsflr == 0 { + retryAsflr = seq + } } } + // If retry floor was not overwritten, set to ack floor+1, we don't need to account for any retries below it. + if retryAsflr == 0 { + retryAsflr = asflr + 1 + } o.mu.Lock() // Update our check floor. // Check floor must never be greater than ack floor+1, otherwise subsequent calls to this function would skip work. - if asflr+1 > o.chkflr { - o.chkflr = asflr + 1 + if retryAsflr > o.chkflr { + o.chkflr = retryAsflr } // See if we need to process this update if our parent stream is not a limits policy stream. state, _ = o.store.State() diff --git a/vendor/github.com/nats-io/nats-server/v2/server/dirstore.go b/vendor/github.com/nats-io/nats-server/v2/server/dirstore.go index 66ca52599e..9d229bc3d6 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/dirstore.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/dirstore.go @@ -1,4 +1,4 @@ -// Copyright 2012-2021 The NATS Authors +// Copyright 2012-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/disk_avail_wasm.go b/vendor/github.com/nats-io/nats-server/v2/server/disk_avail_wasm.go index 39a88fa4b0..c668243439 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/disk_avail_wasm.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/disk_avail_wasm.go @@ -1,4 +1,4 @@ -// Copyright 2022 The NATS Authors +// Copyright 2022-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/disk_avail_windows.go b/vendor/github.com/nats-io/nats-server/v2/server/disk_avail_windows.go index 4c7cf74932..b7f95f313a 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/disk_avail_windows.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/disk_avail_windows.go @@ -1,4 +1,4 @@ -// Copyright 2020 The NATS Authors +// Copyright 2020-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/errors.go b/vendor/github.com/nats-io/nats-server/v2/server/errors.go index c096bbef92..ff718648a0 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/errors.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/errors.go @@ -1,4 +1,4 @@ -// Copyright 2012-2021 The NATS Authors +// Copyright 2012-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/events.go b/vendor/github.com/nats-io/nats-server/v2/server/events.go index 7cb9feb6a7..c050e6525d 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/events.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/events.go @@ -1,4 +1,4 @@ -// Copyright 2018-2023 The NATS Authors +// Copyright 2018-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -1215,6 +1215,14 @@ func (s *Server) initEventTracking() { optz := &ExpvarzEventOptions{} s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (any, error) { return s.expvarz(optz), nil }) }, + "IPQUEUESZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { + optz := &IpqueueszEventOptions{} + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (any, error) { return s.Ipqueuesz(&optz.IpqueueszOptions), nil }) + }, + "RAFTZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { + optz := &RaftzEventOptions{} + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (any, error) { return s.Raftz(&optz.RaftzOptions), nil }) + }, } profilez := func(_ *subscription, c *client, _ *Account, _, rply string, rmsg []byte) { hdr, msg := c.msgParts(rmsg) @@ -1921,6 +1929,18 @@ type ExpvarzEventOptions struct { EventFilterOptions } +// In the context of system events, IpqueueszEventOptions are options passed to Ipqueuesz +type IpqueueszEventOptions struct { + EventFilterOptions + IpqueueszOptions +} + +// In the context of system events, RaftzEventOptions are options passed to Raftz +type RaftzEventOptions struct { + EventFilterOptions + RaftzOptions +} + // returns true if the request does NOT apply to this server and can be ignored. // DO NOT hold the server lock when func (s *Server) filterRequest(fOpts *EventFilterOptions) bool { @@ -2043,6 +2063,20 @@ type ServerAPIExpvarzResponse struct { Error *ApiError `json:"error,omitempty"` } +// ServerAPIpqueueszResponse is the response type for ipqueuesz +type ServerAPIpqueueszResponse struct { + Server *ServerInfo `json:"server"` + Data *IpqueueszStatus `json:"data,omitempty"` + Error *ApiError `json:"error,omitempty"` +} + +// ServerAPIRaftzResponse is the response type for raftz +type ServerAPIRaftzResponse struct { + Server *ServerInfo `json:"server"` + Data *RaftzStatus `json:"data,omitempty"` + Error *ApiError `json:"error,omitempty"` +} + // statszReq is a request for us to respond with current statsz. 
func (s *Server) statszReq(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { if !s.EventsEnabled() { diff --git a/vendor/github.com/nats-io/nats-server/v2/server/filestore.go b/vendor/github.com/nats-io/nats-server/v2/server/filestore.go index 4e0c28ca4a..7168e76a45 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/filestore.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/filestore.go @@ -1,4 +1,4 @@ -// Copyright 2019-2024 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -33,6 +33,7 @@ import ( "net" "os" "path/filepath" + "runtime" "slices" "sort" "strings" @@ -1431,15 +1432,14 @@ func (mb *msgBlock) rebuildStateLocked() (*LostStreamData, []uint64, error) { if seq == 0 || seq&ebit != 0 || seq < fseq { seq = seq &^ ebit if seq >= fseq { - // Only add to dmap if past recorded first seq and non-zero. - if seq != 0 { - addToDmap(seq) - } atomic.StoreUint64(&mb.last.seq, seq) mb.last.ts = ts if mb.msgs == 0 { atomic.StoreUint64(&mb.first.seq, seq+1) mb.first.ts = 0 + } else if seq != 0 { + // Only add to dmap if past recorded first seq and non-zero. + addToDmap(seq) } } index += rl @@ -2004,7 +2004,7 @@ func (fs *fileStore) expireMsgsOnRecover() error { } // Make sure we do subject cleanup as well. mb.ensurePerSubjectInfoLoaded() - mb.fss.Iter(func(bsubj []byte, ss *SimpleState) bool { + mb.fss.IterOrdered(func(bsubj []byte, ss *SimpleState) bool { subj := bytesToString(bsubj) for i := uint64(0); i < ss.Msgs; i++ { fs.removePerSubject(subj) @@ -2207,12 +2207,15 @@ func (fs *fileStore) GetSeqFromTime(t time.Time) uint64 { // Find the first matching message against a sublist. func (mb *msgBlock) firstMatchingMulti(sl *Sublist, start uint64, sm *StoreMsg) (*StoreMsg, bool, error) { mb.mu.Lock() - defer mb.mu.Unlock() - - // Will just do linear walk for now. - // TODO(dlc) - Be better at skipping blocks that will not match us regardless. - var didLoad bool + var updateLLTS bool + defer func() { + if updateLLTS { + mb.llts = time.Now().UnixNano() + } + mb.mu.Unlock() + }() + // Need messages loaded from here on out. if mb.cacheNotLoaded() { if err := mb.loadMsgsWithLock(); err != nil { @@ -2231,20 +2234,88 @@ func (mb *msgBlock) firstMatchingMulti(sl *Sublist, start uint64, sm *StoreMsg) sm = new(StoreMsg) } - for seq := start; seq <= lseq; seq++ { - llseq := mb.llseq - fsm, err := mb.cacheLookup(seq, sm) - if err != nil { - continue + // If the FSS state has fewer entries than sequences in the linear scan, + // then use intersection instead as likely going to be cheaper. This will + // often be the case with high numbers of deletes, as well as a smaller + // number of subjects in the block. + if uint64(mb.fss.Size()) < lseq-start { + // If there are no subject matches then this is effectively no-op. + hseq := uint64(math.MaxUint64) + IntersectStree(mb.fss, sl, func(subj []byte, ss *SimpleState) { + if ss.firstNeedsUpdate || ss.lastNeedsUpdate { + // mb is already loaded into the cache so should be fast-ish. + mb.recalculateForSubj(bytesToString(subj), ss) + } + first := ss.First + if start > first { + first = start + } + if first > ss.Last || first >= hseq { + // The start cutoff is after the last sequence for this subject, + // or we think we already know of a subject with an earlier msg + // than our first seq for this subject. 
+ return + } + if first == ss.First { + // If the start floor is below where this subject starts then we can + // short-circuit, avoiding needing to scan for the next message. + if fsm, err := mb.cacheLookup(ss.First, sm); err == nil { + sm = fsm + hseq = ss.First + } + return + } + for seq := first; seq <= ss.Last; seq++ { + // Otherwise we have a start floor that intersects where this subject + // has messages in the block, so we need to walk up until we find a + // message matching the subject. + if mb.dmap.Exists(seq) { + // Optimisation to avoid calling cacheLookup which hits time.Now(). + // Instead we will update it only once in a defer. + updateLLTS = true + continue + } + llseq := mb.llseq + fsm, err := mb.cacheLookup(seq, sm) + if err != nil { + continue + } + updateLLTS = false // cacheLookup already updated it. + if sl.HasInterest(fsm.subj) { + hseq = seq + sm = fsm + break + } + // If we are here we did not match, so put the llseq back. + mb.llseq = llseq + } + }) + if hseq < uint64(math.MaxUint64) && sm != nil { + return sm, didLoad, nil } - expireOk := seq == lseq && mb.llseq == seq - - if sl.HasInterest(fsm.subj) { - return fsm, expireOk, nil + } else { + for seq := start; seq <= lseq; seq++ { + if mb.dmap.Exists(seq) { + // Optimisation to avoid calling cacheLookup which hits time.Now(). + // Instead we will update it only once in a defer. + updateLLTS = true + continue + } + llseq := mb.llseq + fsm, err := mb.cacheLookup(seq, sm) + if err != nil { + continue + } + expireOk := seq == lseq && mb.llseq == seq + updateLLTS = false // cacheLookup already updated it. + if sl.HasInterest(fsm.subj) { + return fsm, expireOk, nil + } + // If we are here we did not match, so put the llseq back. + mb.llseq = llseq } - // If we are here we did not match, so put the llseq back. - mb.llseq = llseq } + return nil, didLoad, ErrStoreMsgNotFound } @@ -2252,7 +2323,13 @@ func (mb *msgBlock) firstMatchingMulti(sl *Sublist, start uint64, sm *StoreMsg) // fs lock should be held. func (mb *msgBlock) firstMatching(filter string, wc bool, start uint64, sm *StoreMsg) (*StoreMsg, bool, error) { mb.mu.Lock() - defer mb.mu.Unlock() + var updateLLTS bool + defer func() { + if updateLLTS { + mb.llts = time.Now().UnixNano() + } + mb.mu.Unlock() + }() fseq, isAll, subs := start, filter == _EMPTY_ || filter == fwcs, []string{filter} @@ -2364,6 +2441,12 @@ func (mb *msgBlock) firstMatching(filter string, wc bool, start uint64, sm *Stor } for seq := fseq; seq <= lseq; seq++ { + if mb.dmap.Exists(seq) { + // Optimisation to avoid calling cacheLookup which hits time.Now(). + // Instead we will update it only once in a defer. + updateLLTS = true + continue + } llseq := mb.llseq fsm, err := mb.cacheLookup(seq, sm) if err != nil { @@ -2372,6 +2455,7 @@ func (mb *msgBlock) firstMatching(filter string, wc bool, start uint64, sm *Stor } continue } + updateLLTS = false // cacheLookup already updated it. expireOk := seq == lseq && mb.llseq == seq if isAll { return fsm, expireOk, nil @@ -2876,6 +2960,7 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) // Now check if we need to inspect the seqStart block. // Grab write lock in case we need to load in msgs. mb.mu.Lock() + var updateLLTS bool var shouldExpire bool // We need to walk this block to correct accounting from above. 
if sseq > mb.first.seq { @@ -2889,10 +2974,16 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) } var smv StoreMsg for seq, lseq := atomic.LoadUint64(&mb.first.seq), atomic.LoadUint64(&mb.last.seq); seq <= lseq; seq++ { + if mb.dmap.Exists(seq) { + // Optimisation to avoid calling cacheLookup which hits time.Now(). + updateLLTS = true + continue + } sm, _ := mb.cacheLookup(seq, &smv) if sm == nil || sm.subj == _EMPTY_ || !lbm[sm.subj] { continue } + updateLLTS = false // cacheLookup already updated it. if isMatch(sm.subj) { // If less than sseq adjust off of total as long as this subject matched the last block. if seq < sseq { @@ -2913,6 +3004,9 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) if shouldExpire { mb.tryForceExpireCacheLocked() } + if updateLLTS { + mb.llts = time.Now().UnixNano() + } mb.mu.Unlock() return total, validThrough } @@ -3023,6 +3117,7 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) } // We need to scan this block. var shouldExpire bool + var updateLLTS bool mb.mu.Lock() // Check if we should include all of this block in adjusting. If so work with metadata. if sseq > atomic.LoadUint64(&mb.last.seq) { @@ -3055,10 +3150,16 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) // We need to walk all messages in this block var smv StoreMsg for seq := atomic.LoadUint64(&mb.first.seq); seq < last; seq++ { + if mb.dmap.Exists(seq) { + // Optimisation to avoid calling cacheLookup which hits time.Now(). + updateLLTS = true + continue + } sm, _ := mb.cacheLookup(seq, &smv) if sm == nil || sm.subj == _EMPTY_ { continue } + updateLLTS = false // cacheLookup already updated it. // Check if it matches our filter. if sm.seq < sseq && isMatch(sm.subj) { adjust++ @@ -3069,6 +3170,9 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) if shouldExpire { mb.tryForceExpireCacheLocked() } + if updateLLTS { + mb.llts = time.Now().UnixNano() + } mb.mu.Unlock() } // Make final adjustment. @@ -3109,7 +3213,7 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *Sublist, lastPerSubject bo // See if filter was provided but its the only subject. if !isAll && fs.psim.Size() == 1 { - fs.psim.Iter(func(subject []byte, _ *psi) bool { + fs.psim.IterFast(func(subject []byte, _ *psi) bool { isAll = sl.HasInterest(bytesToString(subject)) return true }) @@ -3166,6 +3270,7 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *Sublist, lastPerSubject bo // Grab write lock in case we need to load in msgs. mb.mu.Lock() var shouldExpire bool + var updateLLTS bool // We need to walk this block to correct accounting from above. if sseq > mb.first.seq { // Track the ones we add back in case more than one. @@ -3178,10 +3283,16 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *Sublist, lastPerSubject bo } var smv StoreMsg for seq, lseq := atomic.LoadUint64(&mb.first.seq), atomic.LoadUint64(&mb.last.seq); seq <= lseq; seq++ { + if mb.dmap.Exists(seq) { + // Optimisation to avoid calling cacheLookup which hits time.Now(). + updateLLTS = true + continue + } sm, _ := mb.cacheLookup(seq, &smv) if sm == nil || sm.subj == _EMPTY_ || !lbm[sm.subj] { continue } + updateLLTS = false // cacheLookup already updated it. if isMatch(sm.subj) { // If less than sseq adjust off of total as long as this subject matched the last block. 
if seq < sseq { @@ -3202,6 +3313,9 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *Sublist, lastPerSubject bo if shouldExpire { mb.tryForceExpireCacheLocked() } + if updateLLTS { + mb.llts = time.Now().UnixNano() + } mb.mu.Unlock() return total, validThrough } @@ -3229,6 +3343,7 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *Sublist, lastPerSubject bo var t uint64 var havePartial bool + var updateLLTS bool IntersectStree[SimpleState](mb.fss, sl, func(bsubj []byte, ss *SimpleState) { subj := bytesToString(bsubj) if havePartial { @@ -3261,8 +3376,14 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *Sublist, lastPerSubject bo } var smv StoreMsg for seq, lseq := start, atomic.LoadUint64(&mb.last.seq); seq <= lseq; seq++ { + if mb.dmap.Exists(seq) { + // Optimisation to avoid calling cacheLookup which hits time.Now(). + updateLLTS = true + continue + } if sm, _ := mb.cacheLookup(seq, &smv); sm != nil && isMatch(sm.subj) { t++ + updateLLTS = false // cacheLookup already updated it. } } } @@ -3270,6 +3391,9 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *Sublist, lastPerSubject bo if shouldExpire { mb.tryForceExpireCacheLocked() } + if updateLLTS { + mb.llts = time.Now().UnixNano() + } mb.mu.Unlock() total += t } @@ -3314,6 +3438,7 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *Sublist, lastPerSubject bo } // We need to scan this block. var shouldExpire bool + var updateLLTS bool mb.mu.Lock() // Check if we should include all of this block in adjusting. If so work with metadata. if sseq > atomic.LoadUint64(&mb.last.seq) { @@ -3345,10 +3470,16 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *Sublist, lastPerSubject bo // We need to walk all messages in this block var smv StoreMsg for seq := atomic.LoadUint64(&mb.first.seq); seq < last; seq++ { + if mb.dmap.Exists(seq) { + // Optimisation to avoid calling cacheLookup which hits time.Now(). + updateLLTS = true + continue + } sm, _ := mb.cacheLookup(seq, &smv) if sm == nil || sm.subj == _EMPTY_ { continue } + updateLLTS = false // cacheLookup already updated it. // Check if it matches our filter. if sm.seq < sseq && isMatch(sm.subj) { adjust++ @@ -3359,6 +3490,9 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *Sublist, lastPerSubject bo if shouldExpire { mb.tryForceExpireCacheLocked() } + if updateLLTS { + mb.llts = time.Now().UnixNano() + } mb.mu.Unlock() } // Make final adjustment. @@ -3992,7 +4126,7 @@ func (fs *fileStore) enforceMsgPerSubjectLimit(fireCallback bool) { // collect all that are not correct. needAttention := make(map[string]*psi) - fs.psim.Iter(func(subj []byte, psi *psi) bool { + fs.psim.IterFast(func(subj []byte, psi *psi) bool { numMsgs += psi.total if psi.total > maxMsgsPer { needAttention[string(subj)] = psi @@ -4017,7 +4151,7 @@ func (fs *fileStore) enforceMsgPerSubjectLimit(fireCallback bool) { fs.rebuildStateLocked(nil) // Need to redo blocks that need attention. 
needAttention = make(map[string]*psi) - fs.psim.Iter(func(subj []byte, psi *psi) bool { + fs.psim.IterFast(func(subj []byte, psi *psi) bool { if psi.total > maxMsgsPer { needAttention[string(subj)] = psi } @@ -5174,6 +5308,7 @@ func (mb *msgBlock) writeMsgRecord(rl, seq uint64, subj string, mhdr, msg []byte if ss, ok := mb.fss.Find(stringToBytes(subj)); ok && ss != nil { ss.Msgs++ ss.Last = seq + ss.lastNeedsUpdate = false } else { mb.fss.Insert(stringToBytes(subj), SimpleState{Msgs: 1, First: seq, Last: seq}) } @@ -5188,9 +5323,7 @@ func (mb *msgBlock) writeMsgRecord(rl, seq uint64, subj string, mhdr, msg []byte // With headers, high bit on total length will be set. // total_len(4) sequence(8) timestamp(8) subj_len(2) subj hdr_len(4) hdr msg hash(8) - // First write header, etc. var le = binary.LittleEndian - var hdr [msgHdrSize]byte l := uint32(rl) hasHeaders := len(mhdr) > 0 @@ -5198,13 +5331,15 @@ func (mb *msgBlock) writeMsgRecord(rl, seq uint64, subj string, mhdr, msg []byte l |= hbit } + // Reserve space for the header on the underlying buffer. + mb.cache.buf = append(mb.cache.buf, make([]byte, msgHdrSize)...) + hdr := mb.cache.buf[len(mb.cache.buf)-msgHdrSize : len(mb.cache.buf)] le.PutUint32(hdr[0:], l) le.PutUint64(hdr[4:], seq) le.PutUint64(hdr[12:], uint64(ts)) le.PutUint16(hdr[20:], uint16(len(subj))) // Now write to underlying buffer. - mb.cache.buf = append(mb.cache.buf, hdr[:]...) mb.cache.buf = append(mb.cache.buf, subj...) if hasHeaders { @@ -5218,13 +5353,12 @@ func (mb *msgBlock) writeMsgRecord(rl, seq uint64, subj string, mhdr, msg []byte // Calculate hash. mb.hh.Reset() mb.hh.Write(hdr[4:20]) - mb.hh.Write([]byte(subj)) + mb.hh.Write(stringToBytes(subj)) if hasHeaders { mb.hh.Write(mhdr) } mb.hh.Write(msg) - checksum := mb.hh.Sum(nil) - // Grab last checksum + checksum := mb.hh.Sum(mb.lchk[:0:highwayhash.Size64]) copy(mb.lchk[0:], checksum) // Update write through cache. @@ -5896,6 +6030,7 @@ func (mb *msgBlock) indexCacheBuf(buf []byte) error { if ss, ok := mb.fss.Find(bsubj); ok && ss != nil { ss.Msgs++ ss.Last = seq + ss.lastNeedsUpdate = false } else { mb.fss.Insert(bsubj, SimpleState{ Msgs: 1, @@ -6763,6 +6898,57 @@ func (fs *fileStore) LoadNextMsg(filter string, wc bool, start uint64, sm *Store return nil, fs.state.LastSeq, ErrStoreEOF } +// Will load the next non-deleted msg starting at the start sequence and walking backwards. +func (fs *fileStore) LoadPrevMsg(start uint64, smp *StoreMsg) (sm *StoreMsg, err error) { + fs.mu.RLock() + defer fs.mu.RUnlock() + + if fs.closed { + return nil, ErrStoreClosed + } + if fs.state.Msgs == 0 || start < fs.state.FirstSeq { + return nil, ErrStoreEOF + } + + if start > fs.state.LastSeq { + start = fs.state.LastSeq + } + if smp == nil { + smp = new(StoreMsg) + } + + if bi, _ := fs.selectMsgBlockWithIndex(start); bi >= 0 { + for i := bi; i >= 0; i-- { + mb := fs.blks[i] + mb.mu.Lock() + // Need messages loaded from here on out. + if mb.cacheNotLoaded() { + if err := mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + return nil, err + } + } + + lseq, fseq := atomic.LoadUint64(&mb.last.seq), atomic.LoadUint64(&mb.first.seq) + if start > lseq { + start = lseq + } + for seq := start; seq >= fseq; seq-- { + if mb.dmap.Exists(seq) { + continue + } + if sm, err := mb.cacheLookup(seq, smp); err == nil { + mb.mu.Unlock() + return sm, nil + } + } + mb.mu.Unlock() + } + } + + return nil, ErrStoreEOF +} + // Type returns the type of the underlying store. 
func (fs *fileStore) Type() StorageType { return FileStorage @@ -6831,11 +7017,7 @@ func (fs *fileStore) State() StreamState { } // Add in deleted. mb.dmap.Range(func(seq uint64) bool { - if seq < fseq { - mb.dmap.Delete(seq) - } else { - state.Deleted = append(state.Deleted, seq) - } + state.Deleted = append(state.Deleted, seq) return true }) mb.mu.Unlock() @@ -7353,7 +7535,7 @@ func (fs *fileStore) Compact(seq uint64) (uint64, error) { bytes += mb.bytes // Make sure we do subject cleanup as well. mb.ensurePerSubjectInfoLoaded() - mb.fss.Iter(func(bsubj []byte, ss *SimpleState) bool { + mb.fss.IterOrdered(func(bsubj []byte, ss *SimpleState) bool { subj := bytesToString(bsubj) for i := uint64(0); i < ss.Msgs; i++ { fs.removePerSubject(subj) @@ -7387,7 +7569,7 @@ func (fs *fileStore) Compact(seq uint64) (uint64, error) { if err == errDeletedMsg { // Update dmap. if !smb.dmap.IsEmpty() { - smb.dmap.Delete(seq) + smb.dmap.Delete(mseq) } } else if sm != nil { sz := fileStoreMsgSize(sm.subj, sm.hdr, sm.msg) @@ -7876,8 +8058,11 @@ func (mb *msgBlock) recalculateForSubj(subj string, ss *SimpleState) { } if startSlot >= len(mb.cache.idx) { ss.First = ss.Last + ss.firstNeedsUpdate = false + ss.lastNeedsUpdate = false return } + endSlot := int(ss.Last - mb.cache.fseq) if endSlot < 0 { endSlot = 0 @@ -7904,6 +8089,8 @@ func (mb *msgBlock) recalculateForSubj(subj string, ss *SimpleState) { li := int(bi) - mb.cache.off if li >= len(mb.cache.buf) { ss.First = ss.Last + // Only need to reset ss.lastNeedsUpdate, ss.firstNeedsUpdate is already reset above. + ss.lastNeedsUpdate = false return } buf := mb.cache.buf[li:] @@ -8007,6 +8194,11 @@ func (mb *msgBlock) generatePerSubjectInfo() error { var smv StoreMsg fseq, lseq := atomic.LoadUint64(&mb.first.seq), atomic.LoadUint64(&mb.last.seq) for seq := fseq; seq <= lseq; seq++ { + if mb.dmap.Exists(seq) { + // Optimisation to avoid calling cacheLookup which hits time.Now(). + // It gets set later on if the fss is non-empty anyway. + continue + } sm, err := mb.cacheLookup(seq, &smv) if err != nil { // Since we are walking by sequence we can ignore some errors that are benign to rebuilding our state. @@ -8022,6 +8214,7 @@ func (mb *msgBlock) generatePerSubjectInfo() error { if ss, ok := mb.fss.Find(stringToBytes(sm.subj)); ok && ss != nil { ss.Msgs++ ss.Last = seq + ss.lastNeedsUpdate = false } else { mb.fss.Insert(stringToBytes(sm.subj), SimpleState{Msgs: 1, First: seq, Last: seq}) } @@ -8066,7 +8259,7 @@ func (fs *fileStore) populateGlobalPerSubjectInfo(mb *msgBlock) { } // Now populate psim. - mb.fss.Iter(func(bsubj []byte, ss *SimpleState) bool { + mb.fss.IterFast(func(bsubj []byte, ss *SimpleState) bool { if len(bsubj) > 0 { if info, ok := fs.psim.Find(bsubj); ok { info.total += ss.Msgs @@ -9482,8 +9675,15 @@ var dios chan struct{} // Used to setup our simplistic counting semaphore using buffered channels. // golang.org's semaphore seemed a bit heavy. func init() { - // Limit ourselves to a max of 4 blocking IO calls. - const nIO = 4 + // Limit ourselves to a sensible number of blocking I/O calls. Range between + // 4-16 concurrent disk I/Os based on CPU cores, or 50% of cores if greater + // than 32 cores. + mp := runtime.GOMAXPROCS(-1) + nIO := min(16, max(4, mp)) + if mp > 32 { + // If the system has more than 32 cores then limit dios to 50% of cores. + nIO = max(16, min(mp, mp/2)) + } dios = make(chan struct{}, nIO) // Fill it up to start. 
for i := 0; i < nIO; i++ { diff --git a/vendor/github.com/nats-io/nats-server/v2/server/fuzz.go b/vendor/github.com/nats-io/nats-server/v2/server/fuzz.go index ad378eb793..88f30350e9 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/fuzz.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/fuzz.go @@ -1,4 +1,4 @@ -// Copyright 2020 The NATS Authors +// Copyright 2020-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/gateway.go b/vendor/github.com/nats-io/nats-server/v2/server/gateway.go index 46dd7260ec..22f0e417bd 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/gateway.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/gateway.go @@ -1,4 +1,4 @@ -// Copyright 2018-2023 The NATS Authors +// Copyright 2018-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -2499,8 +2499,13 @@ var subPool = &sync.Pool{ // that the message is not sent to a given gateway if for instance // it is known that this gateway has no interest in the account or // subject, etc.. +// When invoked from a LEAF connection, `checkLeafQF` should be passed as `true` +// so that we skip any queue subscription interest that is not part of the +// `c.pa.queues` filter (similar to what we do in `processMsgResults`). However, +// when processing service imports, then this boolean should be passed as `false`, +// regardless if it is a LEAF connection or not. // -func (c *client) sendMsgToGateways(acc *Account, msg, subject, reply []byte, qgroups [][]byte) bool { +func (c *client) sendMsgToGateways(acc *Account, msg, subject, reply []byte, qgroups [][]byte, checkLeafQF bool) bool { // We had some times when we were sending across a GW with no subject, and the other side would break // due to parser error. These need to be fixed upstream but also double check here. if len(subject) == 0 { @@ -2577,6 +2582,21 @@ qsubs := qr.qsubs[i] if len(qsubs) > 0 { queue := qsubs[0].queue + if checkLeafQF { + // Skip any queue that is not in the leaf's queue filter. + skip := true + for _, qn := range c.pa.queues { + if bytes.Equal(queue, qn) { + skip = false + break + } + } + if skip { + continue + } + // Now we still need to check that it was not delivered + // locally by checking the given `qgroups`. + } add := true for _, qn := range qgroups { if bytes.Equal(queue, qn) { @@ -2969,7 +2989,7 @@ func (c *client) handleGatewayReply(msg []byte) (processed bool) { // we now need to send the message with the real subject to // gateways in case they have interest on that reply subject.
if !isServiceReply { - c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, queues) + c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, queues, false) } } else if c.kind == GATEWAY { // Only if we are a gateway connection should we try to route diff --git a/vendor/github.com/nats-io/nats-server/v2/server/gsl/gsl.go b/vendor/github.com/nats-io/nats-server/v2/server/gsl/gsl.go new file mode 100644 index 0000000000..377fabe271 --- /dev/null +++ b/vendor/github.com/nats-io/nats-server/v2/server/gsl/gsl.go @@ -0,0 +1,532 @@ +// Copyright 2025 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gsl + +import ( + "errors" + "sync" + + "github.com/nats-io/nats-server/v2/server/stree" +) + +// Sublist is a routing mechanism to handle subject distribution and +// provides a facility to match subjects from published messages to +// interested subscribers. Subscribers can have wildcard subjects to +// match multiple published subjects. + +// Common byte variables for wildcards and token separator. +const ( + pwc = '*' + pwcs = "*" + fwc = '>' + fwcs = ">" + tsep = "." + btsep = '.' + _EMPTY_ = "" +) + +// Sublist related errors +var ( + ErrInvalidSubject = errors.New("gsl: invalid subject") + ErrNotFound = errors.New("gsl: no matches found") + ErrNilChan = errors.New("gsl: nil channel") + ErrAlreadyRegistered = errors.New("gsl: notification already registered") +) + +// A GenericSublist stores and efficiently retrieves subscriptions. +type GenericSublist[T comparable] struct { + sync.RWMutex + root *level[T] + count uint32 +} + +// A node contains subscriptions and a pointer to the next level. +type node[T comparable] struct { + next *level[T] + subs map[T]string // value -> subject +} + +// A level represents a group of nodes and special pointers to +// wildcard nodes. +type level[T comparable] struct { + nodes map[string]*node[T] + pwc, fwc *node[T] +} + +// Create a new default node. +func newNode[T comparable]() *node[T] { + return &node[T]{subs: make(map[T]string)} +} + +// Create a new default level. +func newLevel[T comparable]() *level[T] { + return &level[T]{nodes: make(map[string]*node[T])} +} + +// NewSublist will create a default sublist with caching enabled per the flag. 
+func NewSublist[T comparable]() *GenericSublist[T] { + return &GenericSublist[T]{root: newLevel[T]()} +} + +// Insert adds a subscription into the sublist +func (s *GenericSublist[T]) Insert(subject string, value T) error { + tsa := [32]string{} + tokens := tsa[:0] + start := 0 + for i := 0; i < len(subject); i++ { + if subject[i] == btsep { + tokens = append(tokens, subject[start:i]) + start = i + 1 + } + } + tokens = append(tokens, subject[start:]) + + s.Lock() + + var sfwc bool + var n *node[T] + l := s.root + + for _, t := range tokens { + lt := len(t) + if lt == 0 || sfwc { + s.Unlock() + return ErrInvalidSubject + } + + if lt > 1 { + n = l.nodes[t] + } else { + switch t[0] { + case pwc: + n = l.pwc + case fwc: + n = l.fwc + sfwc = true + default: + n = l.nodes[t] + } + } + if n == nil { + n = newNode[T]() + if lt > 1 { + l.nodes[t] = n + } else { + switch t[0] { + case pwc: + l.pwc = n + case fwc: + l.fwc = n + default: + l.nodes[t] = n + } + } + } + if n.next == nil { + n.next = newLevel[T]() + } + l = n.next + } + + n.subs[value] = subject + + s.count++ + s.Unlock() + + return nil +} + +// Match will match all entries to the literal subject. +// It will return a set of results for both normal and queue subscribers. +func (s *GenericSublist[T]) Match(subject string, cb func(T)) { + s.match(subject, cb, true) +} + +// MatchBytes will match all entries to the literal subject. +// It will return a set of results for both normal and queue subscribers. +func (s *GenericSublist[T]) MatchBytes(subject []byte, cb func(T)) { + s.match(string(subject), cb, true) +} + +// HasInterest will return whether or not there is any interest in the subject. +// In cases where more detail is not required, this may be faster than Match. +func (s *GenericSublist[T]) HasInterest(subject string) bool { + return s.hasInterest(subject, true, nil) +} + +// NumInterest will return the number of subs interested in the subject. +// In cases where more detail is not required, this may be faster than Match. 
+func (s *GenericSublist[T]) NumInterest(subject string) (np int) { + s.hasInterest(subject, true, &np) + return +} + +func (s *GenericSublist[T]) match(subject string, cb func(T), doLock bool) { + tsa := [32]string{} + tokens := tsa[:0] + start := 0 + for i := 0; i < len(subject); i++ { + if subject[i] == btsep { + if i-start == 0 { + return + } + tokens = append(tokens, subject[start:i]) + start = i + 1 + } + } + if start >= len(subject) { + return + } + tokens = append(tokens, subject[start:]) + + if doLock { + s.RLock() + defer s.RUnlock() + } + matchLevel(s.root, tokens, cb) +} + +func (s *GenericSublist[T]) hasInterest(subject string, doLock bool, np *int) bool { + tsa := [32]string{} + tokens := tsa[:0] + start := 0 + for i := 0; i < len(subject); i++ { + if subject[i] == btsep { + if i-start == 0 { + return false + } + tokens = append(tokens, subject[start:i]) + start = i + 1 + } + } + if start >= len(subject) { + return false + } + tokens = append(tokens, subject[start:]) + + if doLock { + s.RLock() + defer s.RUnlock() + } + return matchLevelForAny(s.root, tokens, np) +} + +func matchLevelForAny[T comparable](l *level[T], toks []string, np *int) bool { + var pwc, n *node[T] + for i, t := range toks { + if l == nil { + return false + } + if l.fwc != nil { + if np != nil { + *np += len(l.fwc.subs) + } + return true + } + if pwc = l.pwc; pwc != nil { + if match := matchLevelForAny(pwc.next, toks[i+1:], np); match { + return true + } + } + n = l.nodes[t] + if n != nil { + l = n.next + } else { + l = nil + } + } + if n != nil { + if np != nil { + *np += len(n.subs) + } + return len(n.subs) > 0 + } + if pwc != nil { + if np != nil { + *np += len(pwc.subs) + } + return len(pwc.subs) > 0 + } + return false +} + +// callbacksForResults will make the necessary callbacks for each +// result in this node. +func callbacksForResults[T comparable](n *node[T], cb func(T)) { + for sub := range n.subs { + cb(sub) + } +} + +// matchLevel is used to recursively descend into the trie. +func matchLevel[T comparable](l *level[T], toks []string, cb func(T)) { + var pwc, n *node[T] + for i, t := range toks { + if l == nil { + return + } + if l.fwc != nil { + callbacksForResults(l.fwc, cb) + } + if pwc = l.pwc; pwc != nil { + matchLevel(pwc.next, toks[i+1:], cb) + } + n = l.nodes[t] + if n != nil { + l = n.next + } else { + l = nil + } + } + if n != nil { + callbacksForResults(n, cb) + } + if pwc != nil { + callbacksForResults(pwc, cb) + } +} + +// lnt is used to track descent into levels for a removal for pruning. +type lnt[T comparable] struct { + l *level[T] + n *node[T] + t string +} + +// Raw low level remove, can do batches with lock held outside. 
+func (s *GenericSublist[T]) remove(subject string, value T, shouldLock bool) error { + tsa := [32]string{} + tokens := tsa[:0] + start := 0 + for i := 0; i < len(subject); i++ { + if subject[i] == btsep { + tokens = append(tokens, subject[start:i]) + start = i + 1 + } + } + tokens = append(tokens, subject[start:]) + + if shouldLock { + s.Lock() + defer s.Unlock() + } + + var sfwc bool + var n *node[T] + l := s.root + + // Track levels for pruning + var lnts [32]lnt[T] + levels := lnts[:0] + + for _, t := range tokens { + lt := len(t) + if lt == 0 || sfwc { + return ErrInvalidSubject + } + if l == nil { + return ErrNotFound + } + if lt > 1 { + n = l.nodes[t] + } else { + switch t[0] { + case pwc: + n = l.pwc + case fwc: + n = l.fwc + sfwc = true + default: + n = l.nodes[t] + } + } + if n != nil { + levels = append(levels, lnt[T]{l, n, t}) + l = n.next + } else { + l = nil + } + } + + if !s.removeFromNode(n, value) { + return ErrNotFound + } + + s.count-- + + for i := len(levels) - 1; i >= 0; i-- { + l, n, t := levels[i].l, levels[i].n, levels[i].t + if n.isEmpty() { + l.pruneNode(n, t) + } + } + + return nil +} + +// Remove will remove a subscription. +func (s *GenericSublist[T]) Remove(subject string, value T) error { + return s.remove(subject, value, true) +} + +// pruneNode is used to prune an empty node from the tree. +func (l *level[T]) pruneNode(n *node[T], t string) { + if n == nil { + return + } + if n == l.fwc { + l.fwc = nil + } else if n == l.pwc { + l.pwc = nil + } else { + delete(l.nodes, t) + } +} + +// isEmpty will test if the node has any entries. Used +// in pruning. +func (n *node[T]) isEmpty() bool { + return len(n.subs) == 0 && (n.next == nil || n.next.numNodes() == 0) +} + +// Return the number of nodes for the given level. +func (l *level[T]) numNodes() int { + num := len(l.nodes) + if l.pwc != nil { + num++ + } + if l.fwc != nil { + num++ + } + return num +} + +// Remove the sub for the given node. +func (s *GenericSublist[T]) removeFromNode(n *node[T], value T) (found bool) { + if n == nil { + return false + } + if _, found = n.subs[value]; found { + delete(n.subs, value) + } + return found +} + +// Count returns the number of subscriptions. +func (s *GenericSublist[T]) Count() uint32 { + s.RLock() + defer s.RUnlock() + return s.count +} + +// numLevels will return the maximum number of levels +// contained in the Sublist tree. +func (s *GenericSublist[T]) numLevels() int { + return visitLevel(s.root, 0) +} + +// visitLevel is used to descend the Sublist tree structure +// recursively. +func visitLevel[T comparable](l *level[T], depth int) int { + if l == nil || l.numNodes() == 0 { + return depth + } + + depth++ + maxDepth := depth + + for _, n := range l.nodes { + if n == nil { + continue + } + newDepth := visitLevel(n.next, depth) + if newDepth > maxDepth { + maxDepth = newDepth + } + } + if l.pwc != nil { + pwcDepth := visitLevel(l.pwc.next, depth) + if pwcDepth > maxDepth { + maxDepth = pwcDepth + } + } + if l.fwc != nil { + fwcDepth := visitLevel(l.fwc.next, depth) + if fwcDepth > maxDepth { + maxDepth = fwcDepth + } + } + return maxDepth +} + +// IntersectStree will match all items in the given subject tree that +// have interest expressed in the given sublist. The callback will only be called +// once for each subject, regardless of overlapping subscriptions in the sublist. 
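Before the IntersectStree implementation that follows, here is a brief usage sketch (editorial, not part of the vendored change) of the generic sublist API introduced above; the import path is assumed from the location of the new file.

// Usage sketch for gsl.GenericSublist; values may be any comparable type.
// The import path below is assumed from the vendored package location.
package main

import (
	"fmt"

	"github.com/nats-io/nats-server/v2/server/gsl"
)

func main() {
	// Strings stand in for subscriptions here; any comparable type works.
	sl := gsl.NewSublist[string]()
	_ = sl.Insert("events.user.created", "sub-A")
	_ = sl.Insert("events.*.created", "sub-B")
	_ = sl.Insert("events.>", "sub-C")

	// Match invokes the callback for every value whose subject pattern matches,
	// so sub-A, sub-B and sub-C are all delivered for this literal subject.
	sl.Match("events.user.created", func(v string) {
		fmt.Println("delivering to", v)
	})

	// HasInterest answers the cheaper "is anyone listening?" question.
	fmt.Println(sl.HasInterest("events.order.deleted")) // true, via "events.>"

	// Entries are removed by subject and value; Count reflects the change.
	_ = sl.Remove("events.*.created", "sub-B")
	fmt.Println(sl.Count()) // 2
}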
+func IntersectStree[T1 any, T2 comparable](st *stree.SubjectTree[T1], sl *GenericSublist[T2], cb func(subj []byte, entry *T1)) { + var _subj [255]byte + intersectStree(st, sl.root, _subj[:0], cb) +} + +func intersectStree[T1 any, T2 comparable](st *stree.SubjectTree[T1], r *level[T2], subj []byte, cb func(subj []byte, entry *T1)) { + if r.numNodes() == 0 { + // For wildcards we can't avoid Match, but if it's a literal subject at + // this point, using Find is considerably cheaper. + if subjectHasWildcard(string(subj)) { + st.Match(subj, cb) + } else if e, ok := st.Find(subj); ok { + cb(subj, e) + } + return + } + nsubj := subj + if len(nsubj) > 0 { + nsubj = append(subj, '.') + } + switch { + case r.fwc != nil: + // We've reached a full wildcard, do a FWC match on the stree at this point + // and don't keep iterating downward. + nsubj := append(nsubj, '>') + st.Match(nsubj, cb) + case r.pwc != nil: + // We've found a partial wildcard. We'll keep iterating downwards, but first + // check whether there's interest at this level (without triggering dupes) and + // match if so. + nsubj := append(nsubj, '*') + if len(r.pwc.subs) > 0 && r.pwc.next != nil && r.pwc.next.numNodes() > 0 { + st.Match(nsubj, cb) + } + intersectStree(st, r.pwc.next, nsubj, cb) + case r.numNodes() > 0: + // Normal node with subject literals, keep iterating. + for t, n := range r.nodes { + nsubj := append(nsubj, t...) + intersectStree(st, n.next, nsubj, cb) + } + } +} + +// Determine if a subject has any wildcard tokens. +func subjectHasWildcard(subject string) bool { + // This one exits earlier then !subjectIsLiteral(subject) + for i, c := range subject { + if c == pwc || c == fwc { + if (i == 0 || subject[i-1] == btsep) && + (i+1 == len(subject) || subject[i+1] == btsep) { + return true + } + } + } + return false +} diff --git a/vendor/github.com/nats-io/nats-server/v2/server/ipqueue.go b/vendor/github.com/nats-io/nats-server/v2/server/ipqueue.go index 95bf27457e..b362631b56 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/ipqueue.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/ipqueue.go @@ -1,4 +1,4 @@ -// Copyright 2021-2023 The NATS Authors +// Copyright 2021-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go b/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go index c1e709a19e..ceb14663db 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go @@ -1,4 +1,4 @@ -// Copyright 2019-2024 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -1501,12 +1501,14 @@ func (a *Account) filteredStreams(filter string) []*stream { var msets []*stream for _, mset := range jsa.streams { if filter != _EMPTY_ { + mset.cfgMu.RLock() for _, subj := range mset.cfg.Subjects { if SubjectsCollide(filter, subj) { msets = append(msets, mset) break } } + mset.cfgMu.RUnlock() } else { msets = append(msets, mset) } @@ -2147,14 +2149,11 @@ func (jsa *jsAccount) selectLimits(replicas int) (JetStreamAccountLimits, string } // Lock should be held. 
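The hunk that follows rewrites countStreams so that a stream carrying the same name as the config being applied no longer counts against the account's MaxStreams limit; tieredStreamAndReservationCount gets the same treatment further down. A minimal sketch of that counting rule, using a hypothetical stand-in type rather than the server's StreamConfig:

// Editorial sketch of the adjusted counting rule; streamCfg is a hypothetical
// stand-in for StreamConfig, reduced to the fields that matter here.
package main

import "fmt"

type streamCfg struct {
	Name string
	Tier string
}

// countTowardLimit mirrors the new behavior: same-tier streams are counted,
// except the one that carries the same name as the config being applied.
func countTowardLimit(existing []streamCfg, cfg streamCfg) (n int) {
	for _, sc := range existing {
		if (cfg.Tier == "" || sc.Tier == cfg.Tier) && sc.Name != cfg.Name {
			n++
		}
	}
	return n
}

func main() {
	existing := []streamCfg{{Name: "ORDERS", Tier: "R3"}, {Name: "EVENTS", Tier: "R3"}}
	// Updating ORDERS no longer counts ORDERS itself toward MaxStreams.
	fmt.Println(countTowardLimit(existing, streamCfg{Name: "ORDERS", Tier: "R3"})) // 1
}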
-func (jsa *jsAccount) countStreams(tier string, cfg *StreamConfig) int { - streams := len(jsa.streams) - if tier != _EMPTY_ { - streams = 0 - for _, sa := range jsa.streams { - if isSameTier(&sa.cfg, cfg) { - streams++ - } +func (jsa *jsAccount) countStreams(tier string, cfg *StreamConfig) (streams int) { + for _, sa := range jsa.streams { + // Don't count the stream toward the limit if it already exists. + if (tier == _EMPTY_ || isSameTier(&sa.cfg, cfg)) && sa.cfg.Name != cfg.Name { + streams++ } } return streams diff --git a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go index e7fb21c1a1..4edc99bbd3 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go @@ -1,4 +1,4 @@ -// Copyright 2020-2023 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -765,7 +765,7 @@ func (js *jetStream) apiDispatch(sub *subscription, c *client, acc *Account, sub s, rr := js.srv, js.apiSubs.Match(subject) hdr, msg := c.msgParts(rmsg) - if len(getHeader(ClientInfoHdr, hdr)) == 0 { + if len(sliceHeader(ClientInfoHdr, hdr)) == 0 { // Check if this is the system account. We will let these through for the account info only. sacc := s.SystemAccount() if sacc != acc { @@ -1008,7 +1008,7 @@ func (s *Server) getRequestInfo(c *client, raw []byte) (pci *ClientInfo, acc *Ac var ci ClientInfo if len(hdr) > 0 { - if err := json.Unmarshal(getHeader(ClientInfoHdr, hdr), &ci); err != nil { + if err := json.Unmarshal(sliceHeader(ClientInfoHdr, hdr), &ci); err != nil { return nil, nil, nil, nil, err } } @@ -1873,13 +1873,14 @@ func (s *Server) jsStreamInfoRequest(sub *subscription, c *client, a *Account, s if cc.meta != nil { ourID = cc.meta.ID() } - // We have seen cases where rg or rg.node is nil at this point, - // so check explicitly on those conditions and bail if that is - // the case. - bail := rg == nil || rg.node == nil || !rg.isMember(ourID) + // We have seen cases where rg is nil at this point, + // so check explicitly and bail if that is the case. + bail := rg == nil || !rg.isMember(ourID) if !bail { // We know we are a member here, if this group is new and we are preferred allow us to answer. - bail = rg.Preferred != ourID || time.Since(rg.node.Created()) > lostQuorumIntervalDefault + // Also, we have seen cases where rg.node is nil at this point, + // so check explicitly and bail if that is the case. + bail = rg.Preferred != ourID || (rg.node != nil && time.Since(rg.node.Created()) > lostQuorumIntervalDefault) } js.mu.RUnlock() if bail { @@ -4271,7 +4272,7 @@ func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, _ *Account, // Since these could wait on the Raft group lock, don't do so under the JS lock. ourID := meta.ID() - groupLeader := meta.GroupLeader() + groupLeaderless := meta.Leaderless() groupCreated := meta.Created() js.mu.RLock() @@ -4289,7 +4290,7 @@ func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, _ *Account, // Also capture if we think there is no meta leader. 
var isLeaderLess bool if !isLeader { - isLeaderLess = groupLeader == _EMPTY_ && time.Since(groupCreated) > lostQuorumIntervalDefault + isLeaderLess = groupLeaderless && time.Since(groupCreated) > lostQuorumIntervalDefault } js.mu.RUnlock() @@ -4376,7 +4377,7 @@ func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, _ *Account, return } // If we are a member and we have a group leader or we had a previous leader consider bailing out. - if node.GroupLeader() != _EMPTY_ || node.HadPreviousLeader() { + if !node.Leaderless() || node.HadPreviousLeader() { if leaderNotPartOfGroup { resp.Error = NewJSConsumerOfflineError() s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), nil) diff --git a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go index 57cec3873c..c1d7d255d9 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -142,6 +142,7 @@ type streamAssignment struct { responded bool recovering bool reassigning bool // i.e. due to placement issues, lack of resources, etc. + resetting bool // i.e. there was an error, and we're stopping and starting the stream err error } @@ -444,108 +445,113 @@ func (cc *jetStreamCluster) isStreamCurrent(account, stream string) bool { // isStreamHealthy will determine if the stream is up to date or very close. // For R1 it will make sure the stream is present on this server. -func (js *jetStream) isStreamHealthy(acc *Account, sa *streamAssignment) bool { +func (js *jetStream) isStreamHealthy(acc *Account, sa *streamAssignment) error { js.mu.RLock() s, cc := js.srv, js.cluster if cc == nil { // Non-clustered mode js.mu.RUnlock() - return true + return nil } - - // Pull the group out. - rg := sa.Group - if rg == nil { + if sa == nil || sa.Group == nil { js.mu.RUnlock() - return false + return errors.New("stream assignment or group missing") } - streamName := sa.Config.Name - node := rg.node + node := sa.Group.node js.mu.RUnlock() // First lookup stream and make sure its there. mset, err := acc.lookupStream(streamName) if err != nil { - return false + return errors.New("stream not found") } - // If R1 we are good. - if node == nil { - return true - } + switch { + case mset.cfg.Replicas <= 1: + return nil // No further checks for R=1 streams - // Here we are a replicated stream. - // First make sure our monitor routine is running. - if !mset.isMonitorRunning() { - return false - } + case node == nil: + return errors.New("group node missing") - if node.Healthy() { - // Check if we are processing a snapshot and are catching up. 
- if !mset.isCatchingUp() { - return true - } - } else { // node != nil - if node != mset.raftNode() { - s.Warnf("Detected stream cluster node skew '%s > %s'", acc.GetName(), streamName) - node.Delete() - mset.resetClusteredState(nil) - } + case node != mset.raftNode(): + s.Warnf("Detected stream cluster node skew '%s > %s'", acc.GetName(), streamName) + node.Delete() + mset.resetClusteredState(nil) + return errors.New("cluster node skew detected") + + case !mset.isMonitorRunning(): + return errors.New("monitor goroutine not running") + + case !node.Healthy(): + return errors.New("group node unhealthy") + + case mset.isCatchingUp(): + return errors.New("stream catching up") + + default: + return nil } - return false } // isConsumerHealthy will determine if the consumer is up to date. // For R1 it will make sure the consunmer is present on this server. -func (js *jetStream) isConsumerHealthy(mset *stream, consumer string, ca *consumerAssignment) bool { +func (js *jetStream) isConsumerHealthy(mset *stream, consumer string, ca *consumerAssignment) error { if mset == nil { - return false + return errors.New("stream missing") } - js.mu.RLock() - cc := js.cluster + s, cc := js.srv, js.cluster if cc == nil { // Non-clustered mode js.mu.RUnlock() - return true + return nil } - // These are required. if ca == nil || ca.Group == nil { js.mu.RUnlock() - return false + return errors.New("consumer assignment or group missing") } - s := js.srv - // Capture RAFT node from assignment. node := ca.Group.node js.mu.RUnlock() // Check if not running at all. o := mset.lookupConsumer(consumer) if o == nil { - return false + return errors.New("consumer not found") } - // Check RAFT node state. - if node == nil || node.Healthy() { - return true - } else if node != nil { - if node != o.raftNode() { - mset.mu.RLock() - accName, streamName := mset.acc.GetName(), mset.cfg.Name - mset.mu.RUnlock() - s.Warnf("Detected consumer cluster node skew '%s > %s > %s'", accName, streamName, consumer) - node.Delete() - o.deleteWithoutAdvisory() + rc, _ := o.replica() + switch { + case rc <= 1: + return nil // No further checks for R=1 consumers - // When we try to restart we nil out the node and reprocess the consumer assignment. - js.mu.Lock() - ca.Group.node = nil - js.mu.Unlock() - js.processConsumerAssignment(ca) - } + case node == nil: + return errors.New("group node missing") + + case node != o.raftNode(): + mset.mu.RLock() + accName, streamName := mset.acc.GetName(), mset.cfg.Name + mset.mu.RUnlock() + s.Warnf("Detected consumer cluster node skew '%s > %s > %s'", accName, streamName, consumer) + node.Delete() + o.deleteWithoutAdvisory() + + // When we try to restart we nil out the node and reprocess the consumer assignment. + js.mu.Lock() + ca.Group.node = nil + js.mu.Unlock() + js.processConsumerAssignment(ca) + return errors.New("cluster node skew detected") + + case !o.isMonitorRunning(): + return errors.New("monitor goroutine not running") + + case !node.Healthy(): + return errors.New("group node unhealthy") + + default: + return nil } - return false } // subjectsOverlap checks all existing stream assignments for the account cross-cluster for subject overlap @@ -819,7 +825,7 @@ func (js *jetStream) isLeaderless() bool { // If we don't have a leader. // Make sure we have been running for enough time. 
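The isStreamHealthy and isConsumerHealthy rewrites above return a reason instead of a bare bool, and the healthz hunks later in monitor.go interpolate that reason into the reported error. A small sketch of the resulting message text, reusing the format string from that hunk; the account and stream names are hypothetical:

// Editorial sketch only: shows how a healthz error now carries the reason.
package main

import (
	"errors"
	"fmt"
)

func main() {
	// One of the reasons returned by the health checks above.
	reason := errors.New("group node unhealthy")
	// "ACME" and "ORDERS" are hypothetical account/stream names.
	msg := fmt.Sprintf("JetStream stream '%s > %s' is not current: %s", "ACME", "ORDERS", reason)
	fmt.Println(msg)
}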
- if meta.GroupLeader() == _EMPTY_ && time.Since(meta.Created()) > lostQuorumIntervalDefault { + if meta.Leaderless() && time.Since(meta.Created()) > lostQuorumIntervalDefault { return true } return false @@ -851,7 +857,7 @@ func (js *jetStream) isGroupLeaderless(rg *raftGroup) bool { node := rg.node js.mu.RUnlock() // If we don't have a leader. - if node.GroupLeader() == _EMPTY_ { + if node.Leaderless() { // Threshold for jetstream startup. const startupThreshold = 10 * time.Second @@ -1067,7 +1073,7 @@ func (js *jetStream) checkForOrphans() { // We only want to cleanup any orphans if we know we are current with the meta-leader. meta := cc.meta - if meta == nil || meta.GroupLeader() == _EMPTY_ { + if meta == nil || meta.Leaderless() { js.mu.Unlock() s.Debugf("JetStream cluster skipping check for orphans, no meta-leader") return @@ -1366,7 +1372,7 @@ func (js *jetStream) monitorCluster() { // If we have a current leader or had one in the past we can cancel this here since the metaleader // will be in charge of all peer state changes. // For cold boot only. - if n.GroupLeader() != _EMPTY_ || n.HadPreviousLeader() { + if !n.Leaderless() || n.HadPreviousLeader() { lt.Stop() continue } @@ -1581,10 +1587,11 @@ func (js *jetStream) applyMetaSnapshot(buf []byte, ru *recoveryUpdates, isRecove } if osa := js.streamAssignment(sa.Client.serviceAccount(), sa.Config.Name); osa != nil { for _, ca := range osa.consumers { - if sa.consumers[ca.Name] == nil { + // Consumer was either removed, or recreated with a different raft group. + if nca := sa.consumers[ca.Name]; nca == nil { + caDel = append(caDel, ca) + } else if nca.Group != nil && ca.Group != nil && nca.Group.Name != ca.Group.Name { caDel = append(caDel, ca) - } else { - caAdd = append(caAdd, ca) } } } @@ -2503,7 +2510,8 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps ce.ReturnToPool() } else { // Our stream was closed out from underneath of us, simply return here. - if err == errStreamClosed { + if err == errStreamClosed || err == errCatchupStreamStopped || err == ErrServerNotRunning { + aq.recycle(&ces) return } s.Warnf("Error applying entries to '%s > %s': %v", accName, sa.Config.Name, err) @@ -2549,7 +2557,7 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps // Always cancel if this was running. stopDirectMonitoring() - } else if n.GroupLeader() != noLeader { + } else if !n.Leaderless() { js.setStreamAssignmentRecovering(sa) } @@ -2913,6 +2921,9 @@ func (mset *stream) resetClusteredState(err error) bool { } s.Warnf("Resetting stream cluster state for '%s > %s'", sa.Client.serviceAccount(), sa.Config.Name) + // Mark stream assignment as resetting, so we don't double-account reserved resources. + // But only if we're not also releasing the resources as part of the delete. + sa.resetting = !shouldDelete // Now wipe groups from assignments. sa.Group.node = nil var consumers []*consumerAssignment @@ -3146,7 +3157,7 @@ func (js *jetStream) applyStreamEntries(mset *stream, ce *CommittedEntry, isReco } } else if e.Type == EntrySnapshot { if mset == nil { - return nil + continue } // Everything operates on new replicated state. Will convert legacy snapshots to this for processing. 
@@ -3216,7 +3227,6 @@ func (js *jetStream) applyStreamEntries(mset *stream, ce *CommittedEntry, isReco mset.stop(true, false) } } - return nil } } return nil @@ -4046,7 +4056,7 @@ func (js *jetStream) processClusterDeleteStream(sa *streamAssignment, isMember, js.mu.RLock() s := js.srv node := sa.Group.node - hadLeader := node == nil || node.GroupLeader() != noLeader + hadLeader := node == nil || !node.Leaderless() offline := s.allPeersOffline(sa.Group) var isMetaLeader bool if cc := js.cluster; cc != nil { @@ -5034,7 +5044,6 @@ func (js *jetStream) applyConsumerEntries(o *consumer, ce *CommittedEntry, isLea o.stopWithFlags(true, false, false, false) } } - return nil } else if e.Type == EntryAddPeer { // Ignore for now. } else { @@ -5994,15 +6003,16 @@ func groupName(prefix string, peers []string, storage StorageType) string { return fmt.Sprintf("%s-R%d%s-%s", prefix, len(peers), storage.String()[:1], gns) } -// returns stream count for this tier as well as applicable reservation size (not including reservations for cfg) +// returns stream count for this tier as well as applicable reservation size (not including cfg) // jetStream read lock should be held func tieredStreamAndReservationCount(asa map[string]*streamAssignment, tier string, cfg *StreamConfig) (int, int64) { var numStreams int var reservation int64 for _, sa := range asa { - if tier == _EMPTY_ || isSameTier(sa.Config, cfg) { + // Don't count the stream toward the limit if it already exists. + if (tier == _EMPTY_ || isSameTier(sa.Config, cfg)) && sa.Config.Name != cfg.Name { numStreams++ - if sa.Config.MaxBytes > 0 && sa.Config.Storage == cfg.Storage && sa.Config.Name != cfg.Name { + if sa.Config.MaxBytes > 0 && sa.Config.Storage == cfg.Storage { // If tier is empty, all storage is flat and we should adjust for replicas. // Otherwise if tiered, storage replication already taken into consideration. if tier == _EMPTY_ && cfg.Replicas > 1 { @@ -6084,7 +6094,14 @@ func (js *jetStream) jsClusteredStreamLimitsCheck(acc *Account, cfg *StreamConfi numStreams, reservations := tieredStreamAndReservationCount(asa, tier, cfg) // Check for inflight proposals... if cc := js.cluster; cc != nil && cc.inflight != nil { - numStreams += len(cc.inflight[acc.Name]) + streams := cc.inflight[acc.Name] + numStreams += len(streams) + // If inflight contains the same stream, don't count toward exceeding maximum. + if cfg != nil { + if _, ok := streams[cfg.Name]; ok { + numStreams-- + } + } } if selectedLimits.MaxStreams > 0 && numStreams >= selectedLimits.MaxStreams { return NewJSMaximumStreamsLimitError() @@ -6192,7 +6209,7 @@ func (s *Server) jsClusteredStreamRequest(ci *ClientInfo, acc *Account, subject, // On success, add this as an inflight proposal so we can apply limits // on concurrent create requests while this stream assignment has // possibly not been processed yet. - if streams, ok := cc.inflight[acc.Name]; ok { + if streams, ok := cc.inflight[acc.Name]; ok && self == nil { streams[cfg.Name] = &inflightInfo{rg, syncSubject} } } @@ -7328,13 +7345,11 @@ func (s *Server) jsClusteredConsumerRequest(ci *ClientInfo, acc *Account, subjec // Don't count DIRECTS. total := 0 for cn, ca := range sa.consumers { - if action == ActionCreateOrUpdate { - // If the consumer name is specified and we think it already exists, then - // we're likely updating an existing consumer, so don't count it. Otherwise - // we will incorrectly return NewJSMaximumConsumersLimitError for an update. 
- if oname != _EMPTY_ && cn == oname && sa.consumers[oname] != nil { - continue - } + // If the consumer name is specified and we think it already exists, then + // we're likely updating an existing consumer, so don't count it. Otherwise + // we will incorrectly return NewJSMaximumConsumersLimitError for an update. + if oname != _EMPTY_ && cn == oname && sa.consumers[oname] != nil { + continue } if ca.Config != nil && !ca.Config.Direct { total++ @@ -8035,6 +8050,12 @@ func (mset *stream) processClusteredInboundMsg(subject, reply string, hdr, msg [ err := node.Propose(esm) if err == nil { mset.clseq++ + // If we are using the system account for NRG, add in the extra sent msgs and bytes to our account + // so that the end user / account owner has visibility. + if node.IsSystemAccount() && mset.acc != nil && r > 1 { + atomic.AddInt64(&mset.acc.outMsgs, int64(r-1)) + atomic.AddInt64(&mset.acc.outBytes, int64(len(esm)*(r-1))) + } } // Check to see if we are being overrun. @@ -8316,13 +8337,13 @@ RETRY: // the semaphore. releaseSyncOutSem() - if n.GroupLeader() == _EMPTY_ { + if n.Leaderless() { // Prevent us from spinning if we've installed a snapshot from a leader but there's no leader online. // We wait a bit to check if a leader has come online in the meantime, if so we can continue. var canContinue bool if numRetries == 0 { time.Sleep(startInterval) - canContinue = n.GroupLeader() != _EMPTY_ + canContinue = !n.Leaderless() } if !canContinue { return fmt.Errorf("%w for stream '%s > %s'", errCatchupAbortedNoLeader, mset.account(), mset.name()) diff --git a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_events.go b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_events.go index 8c099c7ad8..f813efc483 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_events.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_events.go @@ -1,4 +1,4 @@ -// Copyright 2020-2021 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go b/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go index 6cd4b3c02f..1ec4cc1849 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go @@ -1,4 +1,4 @@ -// Copyright 2019-2024 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -855,9 +855,18 @@ func (c *client) sendLeafConnect(clusterName string, headers bool) error { pkey, _ := kp.PublicKey() cinfo.Nkey = pkey cinfo.Sig = sig - } else if userInfo := c.leaf.remote.curURL.User; userInfo != nil { + } + // In addition, and this is to allow auth callout, set user/password or + // token if applicable. + if userInfo := c.leaf.remote.curURL.User; userInfo != nil { + // For backward compatibility, if only username is provided, set both + // Token and User, not just Token. 
cinfo.User = userInfo.Username() - cinfo.Pass, _ = userInfo.Password() + var ok bool + cinfo.Pass, ok = userInfo.Password() + if !ok { + cinfo.Token = cinfo.User + } } else if c.leaf.remote.username != _EMPTY_ { cinfo.User = c.leaf.remote.username cinfo.Pass = c.leaf.remote.password @@ -988,6 +997,7 @@ func (s *Server) createLeafNode(conn net.Conn, rURL *url.URL, remote *leafNodeCf c.Noticef("Leafnode connection created%s %s", remoteSuffix, c.opts.Name) var tlsFirst bool + var infoTimeout time.Duration if remote != nil { solicited = true remote.Lock() @@ -997,6 +1007,7 @@ func (s *Server) createLeafNode(conn net.Conn, rURL *url.URL, remote *leafNodeCf c.leaf.isSpoke = true } tlsFirst = remote.TLSHandshakeFirst + infoTimeout = remote.FirstInfoTimeout remote.Unlock() c.acc = acc } else { @@ -1054,7 +1065,7 @@ func (s *Server) createLeafNode(conn net.Conn, rURL *url.URL, remote *leafNodeCf } } // We need to wait for the info, but not for too long. - c.nc.SetReadDeadline(time.Now().Add(DEFAULT_LEAFNODE_INFO_WAIT)) + c.nc.SetReadDeadline(time.Now().Add(infoTimeout)) } // We will process the INFO from the readloop and finish by @@ -1725,6 +1736,7 @@ type leafConnectInfo struct { Sig string `json:"sig,omitempty"` User string `json:"user,omitempty"` Pass string `json:"pass,omitempty"` + Token string `json:"auth_token,omitempty"` ID string `json:"server_id,omitempty"` Domain string `json:"domain,omitempty"` Name string `json:"name,omitempty"` @@ -2771,7 +2783,7 @@ func (c *client) processInboundLeafMsg(msg []byte) { // Now deal with gateways if c.srv.gateway.enabled { - c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, qnames) + c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, qnames, true) } } @@ -2887,6 +2899,7 @@ func (c *client) leafNodeSolicitWSConnection(opts *Options, rURL *url.URL, remot compress := remote.Websocket.Compression // By default the server will mask outbound frames, but it can be disabled with this option. noMasking := remote.Websocket.NoMasking + infoTimeout := remote.FirstInfoTimeout remote.RUnlock() // Will do the client-side TLS handshake if needed. tlsRequired, err := c.leafClientHandshakeIfNeeded(remote, opts) @@ -2939,6 +2952,7 @@ func (c *client) leafNodeSolicitWSConnection(opts *Options, rURL *url.URL, remot if noMasking { req.Header.Add(wsNoMaskingHeader, wsNoMaskingValue) } + c.nc.SetDeadline(time.Now().Add(infoTimeout)) if err := req.Write(c.nc); err != nil { return nil, WriteError, err } @@ -2946,7 +2960,6 @@ func (c *client) leafNodeSolicitWSConnection(opts *Options, rURL *url.URL, remot var resp *http.Response br := bufio.NewReaderSize(c.nc, MAX_CONTROL_LINE_SIZE) - c.nc.SetReadDeadline(time.Now().Add(DEFAULT_LEAFNODE_INFO_WAIT)) resp, err = http.ReadResponse(br, req) if err == nil && (resp.StatusCode != 101 || diff --git a/vendor/github.com/nats-io/nats-server/v2/server/log.go b/vendor/github.com/nats-io/nats-server/v2/server/log.go index 6822265823..9a4b7ed4bb 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/log.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/log.go @@ -1,4 +1,4 @@ -// Copyright 2012-2020 The NATS Authors +// Copyright 2012-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/memstore.go b/vendor/github.com/nats-io/nats-server/v2/server/memstore.go index 350cfa388e..a72e1a4249 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/memstore.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/memstore.go @@ -1,4 +1,4 @@ -// Copyright 2019-2024 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -92,7 +92,7 @@ func (ms *memStore) UpdateConfig(cfg *StreamConfig) error { // If the value is smaller, or was unset before, we need to enforce that. if ms.maxp > 0 && (maxp == 0 || ms.maxp < maxp) { lm := uint64(ms.maxp) - ms.fss.Iter(func(subj []byte, ss *SimpleState) bool { + ms.fss.IterFast(func(subj []byte, ss *SimpleState) bool { if ss.Msgs > lm { ms.enforcePerSubjectLimit(bytesToString(subj), ss) } @@ -196,6 +196,7 @@ func (ms *memStore) storeRawMsg(subj string, hdr, msg []byte, seq uint64, ts int if ss != nil { ss.Msgs++ ss.Last = seq + ss.lastNeedsUpdate = false // Check per subject limits. if ms.maxp > 0 && ss.Msgs > uint64(ms.maxp) { ms.enforcePerSubjectLimit(subj, ss) @@ -1012,6 +1013,8 @@ func (ms *memStore) Compact(seq uint64) (uint64, error) { ms.removeSeqPerSubject(sm.subj, seq) // Must delete message after updating per-subject info, to be consistent with file store. delete(ms.msgs, seq) + } else if !ms.dmap.IsEmpty() { + ms.dmap.Delete(seq) } } if purged > ms.state.Msgs { @@ -1032,9 +1035,10 @@ func (ms *memStore) Compact(seq uint64) (uint64, error) { ms.state.FirstSeq = seq ms.state.FirstTime = time.Time{} ms.state.LastSeq = seq - 1 - // Reset msgs and fss. + // Reset msgs, fss and dmap. ms.msgs = make(map[uint64]*StoreMsg) ms.fss = stree.NewSubjectTree[SimpleState]() + ms.dmap.Empty() } ms.mu.Unlock() @@ -1066,9 +1070,10 @@ func (ms *memStore) reset() error { // Update msgs and bytes. ms.state.Msgs = 0 ms.state.Bytes = 0 - // Reset msgs and fss. + // Reset msgs, fss and dmap. ms.msgs = make(map[uint64]*StoreMsg) ms.fss = stree.NewSubjectTree[SimpleState]() + ms.dmap.Empty() ms.mu.Unlock() @@ -1102,6 +1107,8 @@ func (ms *memStore) Truncate(seq uint64) error { ms.removeSeqPerSubject(sm.subj, i) // Must delete message after updating per-subject info, to be consistent with file store. delete(ms.msgs, i) + } else if !ms.dmap.IsEmpty() { + ms.dmap.Delete(i) } } // Reset last. @@ -1299,6 +1306,33 @@ func (ms *memStore) LoadNextMsg(filter string, wc bool, start uint64, smp *Store return nil, ms.state.LastSeq, ErrStoreEOF } +// Will load the next non-deleted msg starting at the start sequence and walking backwards. +func (ms *memStore) LoadPrevMsg(start uint64, smp *StoreMsg) (sm *StoreMsg, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() + + if ms.msgs == nil { + return nil, ErrStoreClosed + } + if ms.state.Msgs == 0 || start < ms.state.FirstSeq { + return nil, ErrStoreEOF + } + if start > ms.state.LastSeq { + start = ms.state.LastSeq + } + + for seq := start; seq >= ms.state.FirstSeq; seq-- { + if sm, ok := ms.msgs[seq]; ok { + if smp == nil { + smp = new(StoreMsg) + } + sm.copy(smp) + return smp, nil + } + } + return nil, ErrStoreEOF +} + // RemoveMsg will remove the message from this store. // Will return the number of bytes removed. 
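The LoadPrevMsg method added to the memory store above clamps the requested start to the last sequence and walks backwards to the newest message that still exists. A small editorial sketch of the same backward walk, with hypothetical stand-in types in place of memStore and StoreMsg:

// Editorial sketch of the LoadPrevMsg walk; msg and store are hypothetical
// stand-ins, keeping only what the walk needs.
package main

import (
	"errors"
	"fmt"
)

type msg struct{ subj string }

type store struct {
	msgs  map[uint64]*msg
	first uint64
	last  uint64
}

var errEOF = errors.New("store EOF")

// loadPrev returns the newest message at or before start, skipping sequences
// that have been deleted (i.e. are absent from the map).
func (s *store) loadPrev(start uint64) (*msg, uint64, error) {
	if len(s.msgs) == 0 || start < s.first {
		return nil, 0, errEOF
	}
	if start > s.last {
		start = s.last
	}
	for seq := start; seq >= s.first; seq-- {
		if m, ok := s.msgs[seq]; ok {
			return m, seq, nil
		}
	}
	return nil, 0, errEOF
}

func main() {
	s := &store{msgs: map[uint64]*msg{1: {"a"}, 2: {"b"}, 4: {"d"}}, first: 1, last: 4}
	m, seq, _ := s.loadPrev(3) // seq 3 was deleted, so this returns seq 2
	fmt.Println(seq, m.subj)   // 2 b
}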
func (ms *memStore) RemoveMsg(seq uint64) (bool, error) { diff --git a/vendor/github.com/nats-io/nats-server/v2/server/monitor.go b/vendor/github.com/nats-io/nats-server/v2/server/monitor.go index 77a6c1fe71..7de9703b03 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/monitor.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/monitor.go @@ -1,4 +1,4 @@ -// Copyright 2013-2024 The NATS Authors +// Copyright 2013-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -831,6 +831,7 @@ func (s *Server) Routez(routezOpts *RoutezOptions) (*Routez, error) { OutBytes: r.outBytes, NumSubs: uint32(len(r.subs)), Import: r.opts.Import, + Pending: int(r.out.pb), Export: r.opts.Export, RTT: r.getRTT().String(), Start: r.start, @@ -1122,20 +1123,16 @@ func (s *Server) HandleStacksz(w http.ResponseWriter, r *http.Request) { ResponseHandler(w, r, buf[:n]) } -type monitorIPQueue struct { +type IpqueueszStatusIPQ struct { Pending int `json:"pending"` InProgress int `json:"in_progress,omitempty"` } -func (s *Server) HandleIPQueuesz(w http.ResponseWriter, r *http.Request) { - all, err := decodeBool(w, r, "all") - if err != nil { - return - } - qfilter := r.URL.Query().Get("queues") - - queues := map[string]monitorIPQueue{} +type IpqueueszStatus map[string]IpqueueszStatusIPQ +func (s *Server) Ipqueuesz(opts *IpqueueszOptions) *IpqueueszStatus { + all, qfilter := opts.All, opts.Filter + queues := IpqueueszStatus{} s.ipQueues.Range(func(k, v any) bool { var pending, inProgress int name := k.(string) @@ -1152,9 +1149,23 @@ func (s *Server) HandleIPQueuesz(w http.ResponseWriter, r *http.Request) { } else if qfilter != _EMPTY_ && !strings.Contains(name, qfilter) { return true } - queues[name] = monitorIPQueue{Pending: pending, InProgress: inProgress} + queues[name] = IpqueueszStatusIPQ{Pending: pending, InProgress: inProgress} return true }) + return &queues +} + +func (s *Server) HandleIPQueuesz(w http.ResponseWriter, r *http.Request) { + all, err := decodeBool(w, r, "all") + if err != nil { + return + } + qfilter := r.URL.Query().Get("queues") + + queues := s.Ipqueuesz(&IpqueueszOptions{ + All: all, + Filter: qfilter, + }) b, _ := json.MarshalIndent(queues, "", " ") ResponseHandler(w, r, b) @@ -1858,6 +1869,14 @@ type GatewayzOptions struct { // AccountName will limit the list of accounts to that account name (makes Accounts implicit) AccountName string `json:"account_name"` + + // AccountSubscriptions indicates if subscriptions should be included in the results. + // Note: This is used only if `Accounts` or `AccountName` are specified. + AccountSubscriptions bool `json:"subscriptions"` + + // AccountSubscriptionsDetail indicates if subscription details should be included in the results. + // Note: This is used only if `Accounts` or `AccountName` are specified. 
+ AccountSubscriptionsDetail bool `json:"subscriptions_detail"` } // Gatewayz represents detailed information on Gateways @@ -1880,12 +1899,14 @@ type RemoteGatewayz struct { // AccountGatewayz represents interest mode for this account type AccountGatewayz struct { - Name string `json:"name"` - InterestMode string `json:"interest_mode"` - NoInterestCount int `json:"no_interest_count,omitempty"` - InterestOnlyThreshold int `json:"interest_only_threshold,omitempty"` - TotalSubscriptions int `json:"num_subs,omitempty"` - NumQueueSubscriptions int `json:"num_queue_subs,omitempty"` + Name string `json:"name"` + InterestMode string `json:"interest_mode"` + NoInterestCount int `json:"no_interest_count,omitempty"` + InterestOnlyThreshold int `json:"interest_only_threshold,omitempty"` + TotalSubscriptions int `json:"num_subs,omitempty"` + NumQueueSubscriptions int `json:"num_queue_subs,omitempty"` + Subs []string `json:"subscriptions_list,omitempty"` + SubsDetail []SubDetail `json:"subscriptions_list_detail,omitempty"` } // Gatewayz returns a Gatewayz struct containing information about gateways. @@ -2011,14 +2032,14 @@ func createOutboundAccountsGatewayz(opts *GatewayzOptions, gw *gateway) []*Accou if !ok { return nil } - a := createAccountOutboundGatewayz(accName, ei) + a := createAccountOutboundGatewayz(opts, accName, ei) return []*AccountGatewayz{a} } accs := make([]*AccountGatewayz, 0, 4) gw.outsim.Range(func(k, v any) bool { name := k.(string) - a := createAccountOutboundGatewayz(name, v) + a := createAccountOutboundGatewayz(opts, name, v) accs = append(accs, a) return true }) @@ -2026,7 +2047,7 @@ func createOutboundAccountsGatewayz(opts *GatewayzOptions, gw *gateway) []*Accou } // Returns an AccountGatewayz for this gateway outbound connection -func createAccountOutboundGatewayz(name string, ei any) *AccountGatewayz { +func createAccountOutboundGatewayz(opts *GatewayzOptions, name string, ei any) *AccountGatewayz { a := &AccountGatewayz{ Name: name, InterestOnlyThreshold: gatewayMaxRUnsubBeforeSwitch, @@ -2038,6 +2059,23 @@ func createAccountOutboundGatewayz(name string, ei any) *AccountGatewayz { a.NoInterestCount = len(e.ni) a.NumQueueSubscriptions = e.qsubs a.TotalSubscriptions = int(e.sl.Count()) + if opts.AccountSubscriptions || opts.AccountSubscriptionsDetail { + var subsa [4096]*subscription + subs := subsa[:0] + e.sl.All(&subs) + if opts.AccountSubscriptions { + a.Subs = make([]string, 0, len(subs)) + } else { + a.SubsDetail = make([]SubDetail, 0, len(subs)) + } + for _, sub := range subs { + if opts.AccountSubscriptions { + a.Subs = append(a.Subs, string(sub.subject)) + } else { + a.SubsDetail = append(a.SubsDetail, newClientSubDetail(sub)) + } + } + } e.RUnlock() } else { a.InterestMode = Optimistic.String() @@ -2129,6 +2167,10 @@ func (s *Server) HandleGatewayz(w http.ResponseWriter, r *http.Request) { s.httpReqStats[GatewayzPath]++ s.mu.Unlock() + subs, subsDet, err := decodeSubs(w, r) + if err != nil { + return + } accs, err := decodeBool(w, r, "accs") if err != nil { return @@ -2140,9 +2182,11 @@ func (s *Server) HandleGatewayz(w http.ResponseWriter, r *http.Request) { } opts := &GatewayzOptions{ - Name: gwName, - Accounts: accs, - AccountName: accName, + Name: gwName, + Accounts: accs, + AccountName: accName, + AccountSubscriptions: subs, + AccountSubscriptionsDetail: subsDet, } gw, err := s.Gatewayz(opts) if err != nil { @@ -2282,7 +2326,7 @@ type AccountStatz struct { Accounts []*AccountStat `json:"account_statz"` } -// LeafzOptions are options passed to Leafz +// 
AccountStatzOptions are options passed to account stats requests. type AccountStatzOptions struct { Accounts []string `json:"accounts"` IncludeUnused bool `json:"include_unused"` @@ -2760,6 +2804,18 @@ type ProfilezOptions struct { Duration time.Duration `json:"duration,omitempty"` } +// IpqueueszOptions are options passed to Ipqueuesz +type IpqueueszOptions struct { + All bool `json:"all"` + Filter string `json:"filter"` +} + +// RaftzOptions are options passed to Raftz +type RaftzOptions struct { + AccountFilter string `json:"account"` + GroupFilter string `json:"group"` +} + // StreamDetail shows information about the stream state and its consumers. type StreamDetail struct { Name string `json:"name"` @@ -3676,27 +3732,27 @@ func (s *Server) healthz(opts *HealthzOptions) *HealthStatus { for stream, sa := range asa { // Make sure we can look up - if !js.isStreamHealthy(acc, sa) { + if err := js.isStreamHealthy(acc, sa); err != nil { if !details { health.Status = na - health.Error = fmt.Sprintf("JetStream stream '%s > %s' is not current", accName, stream) + health.Error = fmt.Sprintf("JetStream stream '%s > %s' is not current: %s", accName, stream, err) return health } health.Errors = append(health.Errors, HealthzError{ Type: HealthzErrorStream, Account: accName, Stream: stream, - Error: fmt.Sprintf("JetStream stream '%s > %s' is not current", accName, stream), + Error: fmt.Sprintf("JetStream stream '%s > %s' is not current: %s", accName, stream, err), }) continue } mset, _ := acc.lookupStream(stream) // Now check consumers. for consumer, ca := range sa.consumers { - if !js.isConsumerHealthy(mset, consumer, ca) { + if err := js.isConsumerHealthy(mset, consumer, ca); err != nil { if !details { health.Status = na - health.Error = fmt.Sprintf("JetStream consumer '%s > %s > %s' is not current", acc, stream, consumer) + health.Error = fmt.Sprintf("JetStream consumer '%s > %s > %s' is not current: %s", acc, stream, consumer, err) return health } health.Errors = append(health.Errors, HealthzError{ @@ -3704,7 +3760,7 @@ func (s *Server) healthz(opts *HealthzOptions) *HealthStatus { Account: accName, Stream: stream, Consumer: consumer, - Error: fmt.Sprintf("JetStream consumer '%s > %s > %s' is not current", acc, stream, consumer), + Error: fmt.Sprintf("JetStream consumer '%s > %s > %s' is not current: %s", acc, stream, consumer, err), }) } } @@ -3813,6 +3869,8 @@ type RaftzGroupPeer struct { LastSeen string `json:"last_seen,omitempty"` } +type RaftzStatus map[string]map[string]RaftzGroup + func (s *Server) HandleRaftz(w http.ResponseWriter, r *http.Request) { if s.raftNodes == nil { w.WriteHeader(404) @@ -3820,20 +3878,34 @@ func (s *Server) HandleRaftz(w http.ResponseWriter, r *http.Request) { return } - gfilter := r.URL.Query().Get("group") - afilter := r.URL.Query().Get("acc") + groups := s.Raftz(&RaftzOptions{ + AccountFilter: r.URL.Query().Get("acc"), + GroupFilter: r.URL.Query().Get("group"), + }) + + if groups == nil { + w.WriteHeader(404) + w.Write([]byte("No Raft nodes returned, check supplied filters")) + return + } + + b, _ := json.MarshalIndent(groups, "", " ") + ResponseHandler(w, r, b) +} + +func (s *Server) Raftz(opts *RaftzOptions) *RaftzStatus { + afilter, gfilter := opts.AccountFilter, opts.GroupFilter + if afilter == _EMPTY_ { if sys := s.SystemAccount(); sys != nil { afilter = sys.Name } else { - w.WriteHeader(404) - w.Write([]byte("System account not found, the server may be shutting down")) - return + return nil } } groups := map[string]RaftNode{} - infos := 
map[string]map[string]RaftzGroup{} // account -> group ID + infos := RaftzStatus{} // account -> group ID s.rnMu.RLock() if gfilter != _EMPTY_ { @@ -3859,12 +3931,6 @@ func (s *Server) HandleRaftz(w http.ResponseWriter, r *http.Request) { } s.rnMu.RUnlock() - if len(groups) == 0 { - w.WriteHeader(404) - w.Write([]byte("No Raft nodes found, does the specified account/group exist?")) - return - } - for name, rg := range groups { n, ok := rg.(*raft) if n == nil || !ok { @@ -3887,7 +3953,7 @@ func (s *Server) HandleRaftz(w http.ResponseWriter, r *http.Request) { Applied: n.applied, CatchingUp: n.catchup != nil, Leader: n.leader, - EverHadLeader: n.pleader, + EverHadLeader: n.pleader.Load(), Term: n.term, Vote: n.vote, PTerm: n.pterm, @@ -3918,6 +3984,5 @@ func (s *Server) HandleRaftz(w http.ResponseWriter, r *http.Request) { infos[n.accName][name] = info } - b, _ := json.MarshalIndent(infos, "", " ") - ResponseHandler(w, r, b) + return &infos } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/monitor_sort_opts.go b/vendor/github.com/nats-io/nats-server/v2/server/monitor_sort_opts.go index 2fcaf2e9f3..6ab1095b2e 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/monitor_sort_opts.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/monitor_sort_opts.go @@ -1,4 +1,4 @@ -// Copyright 2013-2018 The NATS Authors +// Copyright 2013-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/mqtt.go b/vendor/github.com/nats-io/nats-server/v2/server/mqtt.go index 35c18ba154..a511c4f514 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/mqtt.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/mqtt.go @@ -1,4 +1,4 @@ -// Copyright 2020-2023 The NATS Authors +// Copyright 2020-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/nkey.go b/vendor/github.com/nats-io/nats-server/v2/server/nkey.go index c0d168a5ba..0e5d0ee08b 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/nkey.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/nkey.go @@ -1,4 +1,4 @@ -// Copyright 2018 The NATS Authors +// Copyright 2018-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/ocsp.go b/vendor/github.com/nats-io/nats-server/v2/server/ocsp.go index 0f8efeb51a..cf3b150414 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/ocsp.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/ocsp.go @@ -1,4 +1,4 @@ -// Copyright 2021-2023 The NATS Authors +// Copyright 2021-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
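The monitor.go changes above expose Ipqueuesz, Raftz and the extended Gatewayz options as programmatic APIs, so an embedding process can read the same data the HTTP handlers serve. A hedged sketch of calling them from an embedded server; the option values and listen port are illustrative only:

// Editorial sketch of the programmatic monitoring calls added above.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"time"

	"github.com/nats-io/nats-server/v2/server"
)

func dump(label string, v any) {
	b, _ := json.MarshalIndent(v, "", "  ")
	fmt.Printf("%s:\n%s\n", label, b)
}

func main() {
	// Port -1 asks for a random port; all values here are illustrative.
	s, err := server.NewServer(&server.Options{Port: -1})
	if err != nil {
		log.Fatal(err)
	}
	go s.Start()
	if !s.ReadyForConnections(5 * time.Second) {
		log.Fatal("server not ready")
	}
	defer s.Shutdown()

	// Internal queue depths, optionally filtered by queue-name substring.
	dump("ipqueues", s.Ipqueuesz(&server.IpqueueszOptions{All: true}))

	// Raftz returns nil when no system account is available
	// (or nothing matches the supplied filters).
	if groups := s.Raftz(&server.RaftzOptions{}); groups != nil {
		dump("raft groups", groups)
	}

	// Gateway accounts, now including their subscription lists.
	gwz, err := s.Gatewayz(&server.GatewayzOptions{
		Accounts:             true,
		AccountSubscriptions: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	dump("gatewayz", gwz)
}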
// You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/ocsp_peer.go b/vendor/github.com/nats-io/nats-server/v2/server/ocsp_peer.go index 44b822f560..fd73509487 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/ocsp_peer.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/ocsp_peer.go @@ -1,4 +1,4 @@ -// Copyright 2023 The NATS Authors +// Copyright 2023-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/ocsp_responsecache.go b/vendor/github.com/nats-io/nats-server/v2/server/ocsp_responsecache.go index 455fdd3a27..c384812901 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/ocsp_responsecache.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/ocsp_responsecache.go @@ -1,4 +1,4 @@ -// Copyright 2023 The NATS Authors +// Copyright 2023-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/opts.go b/vendor/github.com/nats-io/nats-server/v2/server/opts.go index c73127e530..abd6d5774b 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/opts.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/opts.go @@ -1,4 +1,4 @@ -// Copyright 2012-2024 The NATS Authors +// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -205,6 +205,11 @@ type RemoteLeafOpts struct { DenyImports []string `json:"-"` DenyExports []string `json:"-"` + // FirstInfoTimeout is the amount of time the server will wait for the + // initial INFO protocol from the remote server before closing the + // connection. + FirstInfoTimeout time.Duration `json:"-"` + // Compression options for this remote. Each remote could have a different // setting and also be different from the LeafNode options. Compression CompressionOpts `json:"-"` @@ -290,6 +295,7 @@ type Options struct { MaxControlLine int32 `json:"max_control_line"` MaxPayload int32 `json:"max_payload"` MaxPending int64 `json:"max_pending"` + NoFastProducerStall bool `json:"-"` Cluster ClusterOpts `json:"cluster,omitempty"` Gateway GatewayOpts `json:"gateway,omitempty"` LeafNode LeafNodeOpts `json:"leaf,omitempty"` @@ -1570,6 +1576,10 @@ func (o *Options) processConfigFileLine(k string, v any, errors *[]error, warnin *errors = append(*errors, err) return } + case "no_fast_producer_stall": + o.NoFastProducerStall = v.(bool) + case "max_closed_clients": + o.MaxClosedClients = int(v.(int64)) default: if au := atomic.LoadInt32(&allowUnknownTopLevelField); au == 0 && !tk.IsUsedVariable() { err := &unknownConfigFieldErr{ @@ -2607,6 +2617,8 @@ func parseRemoteLeafNodes(v any, errors *[]error, warnings *[]error) ([]*RemoteL *errors = append(*errors, err) continue } + case "first_info_timeout": + remote.FirstInfoTimeout = parseDuration(k, tk, v, errors, warnings) default: if !tk.IsUsedVariable() { err := &unknownConfigFieldErr{ @@ -5193,6 +5205,10 @@ func setBaselineOptions(opts *Options) { c.Mode = CompressionS2Auto } } + // Set default first info timeout value if not set. 
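Taken together, the opts.go hunks above introduce three user-facing knobs: no_fast_producer_stall and max_closed_clients at the top level, and first_info_timeout on leafnode remotes (whose default is applied just below). An illustrative configuration fragment; the host name and values are assumptions, and the duration is quoted so it parses as a duration string:

# Illustrative server.conf fragment exercising the new options.

# Do not stall fast producers when a consumer cannot drain quickly enough.
no_fast_producer_stall: true

# Number of closed client connections kept for the connz state=closed view.
max_closed_clients: 10000

leafnodes {
  remotes [
    {
      # A URL carrying only a username is now also sent as auth_token
      # (see the leafnode.go hunk above); the token value is made up.
      url: "nats://s3cr3t-token@hub.example.net:7422"
      # Close the connection if the remote's INFO does not arrive in time.
      first_info_timeout: "5s"
    }
  ]
}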
+ if r.FirstInfoTimeout <= 0 { + r.FirstInfoTimeout = DEFAULT_LEAFNODE_INFO_WAIT + } } } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/parser.go b/vendor/github.com/nats-io/nats-server/v2/server/parser.go index 663a1dc126..50b504b7f6 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/parser.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/parser.go @@ -1,4 +1,4 @@ -// Copyright 2012-2020 The NATS Authors +// Copyright 2012-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -35,20 +35,21 @@ type parseState struct { } type pubArg struct { - arg []byte - pacache []byte - origin []byte - account []byte - subject []byte - deliver []byte - mapped []byte - reply []byte - szb []byte - hdb []byte - queues [][]byte - size int - hdr int - psi []*serviceImport + arg []byte + pacache []byte + origin []byte + account []byte + subject []byte + deliver []byte + mapped []byte + reply []byte + szb []byte + hdb []byte + queues [][]byte + size int + hdr int + psi []*serviceImport + delivered bool // Only used for service imports } // Parser constants @@ -500,6 +501,7 @@ func (c *client) parse(buf []byte) error { // Drop all pub args c.pa.arg, c.pa.pacache, c.pa.origin, c.pa.account, c.pa.subject, c.pa.mapped = nil, nil, nil, nil, nil, nil c.pa.reply, c.pa.hdr, c.pa.size, c.pa.szb, c.pa.hdb, c.pa.queues = nil, -1, 0, nil, nil, nil + c.pa.delivered = false lmsg = false case OP_A: switch b { diff --git a/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_freebsd.go b/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_freebsd.go index 8884e5299b..952ccb2dfd 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_freebsd.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_freebsd.go @@ -1,4 +1,4 @@ -// Copyright 2015-2018 The NATS Authors +// Copyright 2015-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_linux.go b/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_linux.go index 77eb8f6ac9..5414b327d6 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_linux.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_linux.go @@ -1,4 +1,4 @@ -// Copyright 2015-2018 The NATS Authors +// Copyright 2015-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_rumprun.go b/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_rumprun.go index 9f0fb01d2f..93f53a0775 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_rumprun.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_rumprun.go @@ -1,4 +1,4 @@ -// Copyright 2015-2018 The NATS Authors +// Copyright 2015-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_windows.go b/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_windows.go index 04e3ae8bb9..88d7fb0763 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_windows.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_windows.go @@ -1,4 +1,4 @@ -// Copyright 2015-2018 The NATS Authors +// Copyright 2015-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/raft.go b/vendor/github.com/nats-io/nats-server/v2/server/raft.go index 427e8ce677..245e419492 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/raft.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/raft.go @@ -1,4 +1,4 @@ -// Copyright 2020-2024 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -52,6 +52,7 @@ type RaftNode interface { Current() bool Healthy() bool Term() uint64 + Leaderless() bool GroupLeader() string HadPreviousLeader() bool StepDown(preferred ...string) error @@ -77,6 +78,7 @@ type RaftNode interface { Stop() WaitForStop() Delete() + IsSystemAccount() bool } type WAL interface { @@ -174,9 +176,10 @@ type raft struct { c *client // Internal client for subscriptions js *jetStream // JetStream, if running, to see if we are out of resources - dflag bool // Debug flag - pleader bool // Has the group ever had a leader? - observer bool // The node is observing, i.e. not participating in voting + dflag bool // Debug flag + hasleader atomic.Bool // Is there a group leader right now? + pleader atomic.Bool // Has the group ever had a leader? + observer bool // The node is observing, i.e. not participating in voting extSt extensionState // Extension state @@ -542,6 +545,12 @@ func (s *Server) startRaftNode(accName string, cfg *RaftConfig, labels pprofLabe return n, nil } +// Whether we are using the system account or not. +// In 2.10.x this is always true as there is no account NRG like in 2.11.x. +func (n *raft) IsSystemAccount() bool { + return true +} + // outOfResources checks to see if we are out of resources. func (n *raft) outOfResources() bool { js := n.js @@ -830,7 +839,7 @@ func (n *raft) AdjustBootClusterSize(csz int) error { n.Lock() defer n.Unlock() - if n.leader != noLeader || n.pleader { + if n.leader != noLeader || n.pleader.Load() { return errAdjustBootCluster } // Same floor as bootstrap. @@ -1386,9 +1395,7 @@ func (n *raft) Healthy() bool { // HadPreviousLeader indicates if this group ever had a leader. func (n *raft) HadPreviousLeader() bool { - n.RLock() - defer n.RUnlock() - return n.pleader + return n.pleader.Load() } // GroupLeader returns the current leader of the group. @@ -1401,6 +1408,17 @@ func (n *raft) GroupLeader() string { return n.leader } +// Leaderless is a lockless way of finding out if the group has a +// leader or not. Use instead of GroupLeader in hot paths. +func (n *raft) Leaderless() bool { + if n == nil { + return true + } + // Negated because we want the default state of hasLeader to be + // false until the first setLeader() call. + return !n.hasleader.Load() +} + // Guess the best next leader. Stepdown will check more thoroughly. 
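The raft.go hunks above replace the lock-guarded pleader flag and the GroupLeader() == _EMPTY_ checks with atomic booleans plus a lockless Leaderless() accessor; updateLeader, a little further down, publishes the new state. An editorial sketch of that pattern in isolation, with a hypothetical group type standing in for the raft node:

// Editorial sketch of the lockless leader-state pattern adopted above;
// group is a hypothetical stand-in, not the server's raft type.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type group struct {
	mu        sync.Mutex
	leader    string      // still guarded by mu, as n.leader is by the raft lock
	hasleader atomic.Bool // readable on hot paths without taking mu
	pleader   atomic.Bool // has this group ever had a leader?
}

// updateLeader keeps the string under the lock but publishes the boolean
// state atomically, so Leaderless never needs the lock.
func (g *group) updateLeader(newLeader string) {
	g.mu.Lock()
	g.leader = newLeader
	g.mu.Unlock()
	g.hasleader.Store(newLeader != "")
	if newLeader != "" {
		g.pleader.Store(true)
	}
}

// Leaderless is the lockless counterpart of GroupLeader() == "".
func (g *group) Leaderless() bool { return !g.hasleader.Load() }

func main() {
	g := &group{}
	fmt.Println(g.Leaderless()) // true: never had a leader
	g.updateLeader("S1")
	fmt.Println(g.Leaderless(), g.pleader.Load()) // false true
	g.updateLeader("")                            // leader lost
	fmt.Println(g.Leaderless(), g.pleader.Load()) // true true
}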
// Lock should be held. func (n *raft) selectNextLeader() string { @@ -3146,8 +3164,9 @@ func (n *raft) resetWAL() { // Lock should be held func (n *raft) updateLeader(newLeader string) { n.leader = newLeader - if !n.pleader && newLeader != noLeader { - n.pleader = true + n.hasleader.Store(newLeader != _EMPTY_) + if !n.pleader.Load() && newLeader != noLeader { + n.pleader.Store(true) } } @@ -3424,8 +3443,13 @@ CONTINUE: if l > paeWarnThreshold && l%paeWarnModulo == 0 { n.warn("%d append entries pending", len(n.pae)) } - } else if l%paeWarnModulo == 0 { - n.debug("Not saving to append entries pending") + } else { + // Invalidate cache entry at this index, we might have + // stored it previously with a different value. + delete(n.pae, n.pindex) + if l%paeWarnModulo == 0 { + n.debug("Not saving to append entries pending") + } } } else { // This is a replay on startup so just take the appendEntry version. @@ -4000,11 +4024,10 @@ func (n *raft) processVoteRequest(vr *voteRequest) error { n.vote = vr.candidate n.writeTermVote() n.resetElectionTimeout() - } else { - if vr.term >= n.term && n.vote == noVote { - n.term = vr.term - n.resetElect(randCampaignTimeout()) - } + } else if n.vote == noVote && n.State() != Candidate { + // We have a more up-to-date log, and haven't voted yet. + // Start campaigning earlier, but only if not candidate already, as that would short-circuit us. + n.resetElect(randCampaignTimeout()) } // Term might have changed, make sure response has the most current diff --git a/vendor/github.com/nats-io/nats-server/v2/server/rate_counter.go b/vendor/github.com/nats-io/nats-server/v2/server/rate_counter.go index 37b47dc7d4..247793744d 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/rate_counter.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/rate_counter.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 The NATS Authors +// Copyright 2021-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/reload.go b/vendor/github.com/nats-io/nats-server/v2/server/reload.go index 07e5d021ad..aea3348429 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/reload.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/reload.go @@ -1,4 +1,4 @@ -// Copyright 2017-2023 The NATS Authors +// Copyright 2017-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -916,6 +916,19 @@ func (l *leafNodeOption) Apply(s *Server) { } } +type noFastProdStallReload struct { + noopOption + noStall bool +} + +func (l *noFastProdStallReload) Apply(s *Server) { + var not string + if l.noStall { + not = "not " + } + s.Noticef("Reloaded: fast producers will %sbe stalled", not) +} + // Compares options and disconnects clients that are no longer listed in pinned certs. Lock must not be held. func (s *Server) recheckPinnedCerts(curOpts *Options, newOpts *Options) { s.mu.Lock() @@ -1623,6 +1636,8 @@ func (s *Server) diffOptions(newOpts *Options) ([]option, error) { if new != old { diffOpts = append(diffOpts, &profBlockRateReload{newValue: new}) } + case "nofastproducerstall": + diffOpts = append(diffOpts, &noFastProdStallReload{noStall: newValue.(bool)}) default: // TODO(ik): Implement String() on those options to have a nice print. 
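The reload.go hunk above follows the server's usual reload pattern: each changed setting becomes a small value with an Apply method, collected by diffOptions and then run against the server. A condensed sketch of that pattern, with hypothetical `server` and `option` types standing in for the real ones:

package main

import "fmt"

type server struct{}

func (s *server) Noticef(format string, args ...any) { fmt.Printf(format+"\n", args...) }

// option is the minimal shape a reloadable setting needs.
type option interface{ Apply(s *server) }

// noopOption gives options a do-nothing default to embed.
type noopOption struct{}

func (noopOption) Apply(*server) {}

type noFastProdStallReload struct {
	noopOption
	noStall bool
}

func (l *noFastProdStallReload) Apply(s *server) {
	var not string
	if l.noStall {
		not = "not "
	}
	s.Noticef("Reloaded: fast producers will %sbe stalled", not)
}

func main() {
	// Pretend the config diff found no_fast_producer_stall flipped to true.
	var diffOpts []option
	diffOpts = append(diffOpts, &noFastProdStallReload{noStall: true})

	srv := &server{}
	for _, o := range diffOpts {
		o.Apply(srv)
	}
}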
// %v is difficult to figure what's what, %+v print private fields and diff --git a/vendor/github.com/nats-io/nats-server/v2/server/ring.go b/vendor/github.com/nats-io/nats-server/v2/server/ring.go index 2673a170ec..1db3961382 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/ring.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/ring.go @@ -1,4 +1,4 @@ -// Copyright 2018 The NATS Authors +// Copyright 2018-2020 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/route.go b/vendor/github.com/nats-io/nats-server/v2/server/route.go index a865122e61..51e7712352 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/route.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/route.go @@ -1,4 +1,4 @@ -// Copyright 2013-2023 The NATS Authors +// Copyright 2013-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/sendq.go b/vendor/github.com/nats-io/nats-server/v2/server/sendq.go index e567d7aeee..178ec5d76c 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/sendq.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/sendq.go @@ -1,4 +1,4 @@ -// Copyright 2020-2023 The NATS Authors +// Copyright 2020-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/service.go b/vendor/github.com/nats-io/nats-server/v2/server/service.go index dd01c970d6..ab3239b380 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/service.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/service.go @@ -1,4 +1,4 @@ -// Copyright 2012-2018 The NATS Authors +// Copyright 2012-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/service_windows.go b/vendor/github.com/nats-io/nats-server/v2/server/service_windows.go index 8f49ef6956..eed399f68c 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/service_windows.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/service_windows.go @@ -1,4 +1,4 @@ -// Copyright 2012-2019 The NATS Authors +// Copyright 2012-2022 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/signal.go b/vendor/github.com/nats-io/nats-server/v2/server/signal.go index aad65e828f..18b37a0222 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/signal.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/signal.go @@ -1,4 +1,4 @@ -// Copyright 2012-2019 The NATS Authors +// Copyright 2012-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/signal_windows.go b/vendor/github.com/nats-io/nats-server/v2/server/signal_windows.go index b262bc0bd1..2f5a27c51d 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/signal_windows.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/signal_windows.go @@ -1,4 +1,4 @@ -// Copyright 2012-2019 The NATS Authors +// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/store.go b/vendor/github.com/nats-io/nats-server/v2/server/store.go index 2d72f69474..03ef7b29cd 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/store.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/store.go @@ -1,4 +1,4 @@ -// Copyright 2019-2024 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -91,6 +91,7 @@ type StreamStore interface { LoadNextMsg(filter string, wc bool, start uint64, smp *StoreMsg) (sm *StoreMsg, skip uint64, err error) LoadNextMsgMulti(sl *Sublist, start uint64, smp *StoreMsg) (sm *StoreMsg, skip uint64, err error) LoadLastMsg(subject string, sm *StoreMsg) (*StoreMsg, error) + LoadPrevMsg(start uint64, smp *StoreMsg) (sm *StoreMsg, err error) RemoveMsg(seq uint64) (bool, error) EraseMsg(seq uint64) (bool, error) Purge() (uint64, error) diff --git a/vendor/github.com/nats-io/nats-server/v2/server/stream.go b/vendor/github.com/nats-io/nats-server/v2/server/stream.go index a2883631d4..e7d7512e42 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/stream.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/stream.go @@ -1,4 +1,4 @@ -// Copyright 2019-2024 The NATS Authors +// Copyright 2019-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -16,7 +16,6 @@ package server import ( "archive/tar" "bytes" - "encoding/binary" "encoding/json" "errors" "fmt" @@ -33,6 +32,7 @@ import ( "time" "github.com/klauspost/compress/s2" + "github.com/nats-io/nats-server/v2/server/gsl" "github.com/nats-io/nuid" ) @@ -271,10 +271,10 @@ type stream struct { // For processing consumers without main stream lock. clsMu sync.RWMutex - cList []*consumer // Consumer list. - sch chan struct{} // Channel to signal consumers. - sigq *ipQueue[*cMsg] // Intra-process queue for the messages to signal to the consumers. - csl *Sublist // Consumer subscription list. + cList []*consumer // Consumer list. + sch chan struct{} // Channel to signal consumers. + sigq *ipQueue[*cMsg] // Intra-process queue for the messages to signal to the consumers. + csl *gsl.GenericSublist[*consumer] // Consumer subscription list. // For non limits policy streams when they process an ack before the actual msg. // Can happen in stretch clusters, multi-cloud, or during catchup for a restarted server. @@ -660,15 +660,25 @@ func (a *Account) addStreamWithAssignment(config *StreamConfig, fsConfig *FileSt } // Set our stream assignment if in clustered mode. 
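store.go gains a `LoadPrevMsg` accessor, which the source-scan loops further down use to walk a stream backwards without probing every sequence number. An illustrative sketch with a toy map-backed store (not the real `StreamStore`), assuming sequences start at 1:

package main

import (
	"errors"
	"fmt"
)

var errStoreEOF = errors.New("store EOF")

type storeMsg struct {
	seq  uint64
	data string
}

type toyStore struct{ msgs map[uint64]string }

// LoadPrevMsg returns the newest message with seq <= start, or errStoreEOF.
// With sparse sequences (interior deletes) this jumps straight to the next
// existing message instead of checking every sequence one by one.
func (s *toyStore) LoadPrevMsg(start uint64) (*storeMsg, error) {
	var best uint64
	for seq := range s.msgs {
		if seq <= start && seq > best {
			best = seq
		}
	}
	if best == 0 {
		return nil, errStoreEOF
	}
	return &storeMsg{seq: best, data: s.msgs[best]}, nil
}

func main() {
	st := &toyStore{msgs: map[uint64]string{3: "c", 7: "g", 42: "z"}}

	// Walk backwards from the last known sequence, as the rewritten loops do.
	for seq := uint64(100); ; {
		sm, err := st.LoadPrevMsg(seq)
		if err != nil {
			break
		}
		fmt.Println(sm.seq, sm.data) // 42 z, then 7 g, then 3 c
		seq = sm.seq - 1
	}
}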
+ reserveResources := true if sa != nil { mset.setStreamAssignment(sa) + + // If the stream is resetting we must not double-account resources, they were already accounted for. + js.mu.Lock() + if sa.resetting { + reserveResources, sa.resetting = false, false + } + js.mu.Unlock() } // Setup our internal send go routine. mset.setupSendCapabilities() // Reserve resources if MaxBytes present. - mset.js.reserveStreamResources(&mset.cfg) + if reserveResources { + mset.js.reserveStreamResources(&mset.cfg) + } // Call directly to set leader if not in clustered mode. // This can be called though before we actually setup clustering, so check both. @@ -3488,16 +3498,21 @@ func (mset *stream) setStartingSequenceForSources(iNames map[string]struct{}) { } var smv StoreMsg - for seq := state.LastSeq; seq >= state.FirstSeq; seq-- { - sm, err := mset.store.LoadMsg(seq, &smv) - if err != nil || len(sm.hdr) == 0 { + for seq := state.LastSeq; seq >= state.FirstSeq; { + sm, err := mset.store.LoadPrevMsg(seq, &smv) + if err == ErrStoreEOF || err != nil { + break + } + seq = sm.seq - 1 + if len(sm.hdr) == 0 { continue } + ss := getHeader(JSStreamSource, sm.hdr) if len(ss) == 0 { continue } - streamName, indexName, sseq := streamAndSeq(string(ss)) + streamName, indexName, sseq := streamAndSeq(bytesToString(ss)) if _, ok := iNames[indexName]; ok { si := mset.sources[indexName] @@ -3603,9 +3618,13 @@ func (mset *stream) startingSequenceForSources() { } var smv StoreMsg - for seq := state.LastSeq; seq >= state.FirstSeq; seq-- { - sm, err := mset.store.LoadMsg(seq, &smv) - if err != nil || sm == nil || len(sm.hdr) == 0 { + for seq := state.LastSeq; ; { + sm, err := mset.store.LoadPrevMsg(seq, &smv) + if err == ErrStoreEOF || err != nil { + break + } + seq = sm.seq - 1 + if len(sm.hdr) == 0 { continue } ss := getHeader(JSStreamSource, sm.hdr) @@ -3613,7 +3632,7 @@ func (mset *stream) startingSequenceForSources() { continue } - streamName, iName, sseq := streamAndSeq(string(ss)) + streamName, iName, sseq := streamAndSeq(bytesToString(ss)) if iName == _EMPTY_ { // Pre-2.10 message header means it's a match for any source using that stream name for _, ssi := range mset.cfg.Sources { if streamName == ssi.Name || (ssi.External != nil && streamName == ssi.Name+":"+getHash(ssi.External.ApiPrefix)) { @@ -3932,9 +3951,14 @@ func (mset *stream) storeUpdates(md, bd int64, seq uint64, subj string) { if md == -1 && seq > 0 && subj != _EMPTY_ { // We use our consumer list mutex here instead of the main stream lock since it may be held already. mset.clsMu.RLock() - // TODO(dlc) - Do sublist like signaling so we do not have to match? - for _, o := range mset.cList { - o.decStreamPending(seq, subj) + if mset.csl != nil { + mset.csl.Match(subj, func(o *consumer) { + o.decStreamPending(seq, subj) + }) + } else { + for _, o := range mset.cList { + o.decStreamPending(seq, subj) + } } mset.clsMu.RUnlock() } else if md < 0 { @@ -4806,24 +4830,14 @@ func (mset *stream) signalConsumersLoop() { // This will update and signal all consumers that match. func (mset *stream) signalConsumers(subj string, seq uint64) { mset.clsMu.RLock() - if mset.csl == nil { - mset.clsMu.RUnlock() + defer mset.clsMu.RUnlock() + csl := mset.csl + if csl == nil { return } - r := mset.csl.Match(subj) - mset.clsMu.RUnlock() - - if len(r.psubs) == 0 { - return - } - // Encode the sequence here. 
- var eseq [8]byte - var le = binary.LittleEndian - le.PutUint64(eseq[:], seq) - msg := eseq[:] - for _, sub := range r.psubs { - sub.icb(sub, nil, nil, subj, _EMPTY_, msg) - } + csl.Match(subj, func(o *consumer) { + o.processStreamSignal(seq) + }) } // Internal message for use by jetstream subsystem. @@ -5367,10 +5381,10 @@ func (mset *stream) setConsumer(o *consumer) { mset.clsMu.Lock() mset.cList = append(mset.cList, o) if mset.csl == nil { - mset.csl = NewSublistWithCache() + mset.csl = gsl.NewSublist[*consumer]() } for _, sub := range o.signalSubs() { - mset.csl.Insert(sub) + mset.csl.Insert(sub, o) } mset.clsMu.Unlock() } @@ -5396,7 +5410,7 @@ func (mset *stream) removeConsumer(o *consumer) { // Always remove from the leader sublist. if mset.csl != nil { for _, sub := range o.signalSubs() { - mset.csl.Remove(sub) + mset.csl.Remove(sub, o) } } mset.clsMu.Unlock() @@ -5418,7 +5432,7 @@ func (mset *stream) swapSigSubs(o *consumer, newFilters []string) { if o.sigSubs != nil { if mset.csl != nil { for _, sub := range o.sigSubs { - mset.csl.Remove(sub) + mset.csl.Remove(sub, o) } } o.sigSubs = nil @@ -5426,19 +5440,17 @@ func (mset *stream) swapSigSubs(o *consumer, newFilters []string) { if o.isLeader() { if mset.csl == nil { - mset.csl = NewSublistWithCache() + mset.csl = gsl.NewSublist[*consumer]() } // If no filters are preset, add fwcs to sublist for that consumer. if newFilters == nil { - sub := &subscription{subject: []byte(fwcs), icb: o.processStreamSignal} - mset.csl.Insert(sub) - o.sigSubs = append(o.sigSubs, sub) + mset.csl.Insert(fwcs, o) + o.sigSubs = append(o.sigSubs, fwcs) // If there are filters, add their subjects to sublist. } else { for _, filter := range newFilters { - sub := &subscription{subject: []byte(filter), icb: o.processStreamSignal} - mset.csl.Insert(sub) - o.sigSubs = append(o.sigSubs, sub) + mset.csl.Insert(filter, o) + o.sigSubs = append(o.sigSubs, filter) } } } @@ -5671,16 +5683,17 @@ func (mset *stream) clearPreAck(o *consumer, seq uint64) { } // ackMsg is called into from a consumer when we have a WorkQueue or Interest Retention Policy. -func (mset *stream) ackMsg(o *consumer, seq uint64) { +// Returns whether the message at seq was removed as a result of the ACK. +func (mset *stream) ackMsg(o *consumer, seq uint64) bool { if seq == 0 { - return + return false } // Don't make this RLock(). We need to have only 1 running at a time to gauge interest across all consumers. mset.mu.Lock() if mset.closed.Load() || mset.cfg.Retention == LimitsPolicy { mset.mu.Unlock() - return + return false } store := mset.store @@ -5691,7 +5704,9 @@ func (mset *stream) ackMsg(o *consumer, seq uint64) { if seq > state.LastSeq { mset.registerPreAck(o, seq) mset.mu.Unlock() - return + // We have not removed the message, but should still signal so we could retry later + // since we potentially need to remove it then. + return true } // Always clear pre-ack if here. @@ -5700,7 +5715,7 @@ func (mset *stream) ackMsg(o *consumer, seq uint64) { // Make sure this sequence is not below our first sequence. if seq < state.FirstSeq { mset.mu.Unlock() - return + return false } var shouldRemove bool @@ -5716,7 +5731,7 @@ func (mset *stream) ackMsg(o *consumer, seq uint64) { // If nothing else to do. if !shouldRemove { - return + return false } // If we are here we should attempt to remove. @@ -5724,6 +5739,7 @@ func (mset *stream) ackMsg(o *consumer, seq uint64) { // This should not happen, but being pedantic. 
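The stream.go changes above replace the consumer-signalling `Sublist` (subscriptions whose callback received the sequence encoded into an 8-byte payload) with `gsl.GenericSublist[*consumer]`, so a match hands the typed consumer straight to a callback. A deliberately tiny stand-in for that idea, exact-match only and without the real package's wildcard support:

package main

import "fmt"

type consumer struct{ name string }

func (o *consumer) processStreamSignal(seq uint64) {
	fmt.Printf("%s signalled at seq %d\n", o.name, seq)
}

// genericSublist maps a subject to the values inserted under it.
// The real gsl.GenericSublist supports NATS wildcards; this sketch does not.
type genericSublist[T any] struct{ subs map[string][]T }

func newSublist[T any]() *genericSublist[T] {
	return &genericSublist[T]{subs: make(map[string][]T)}
}

func (sl *genericSublist[T]) Insert(subject string, v T) {
	sl.subs[subject] = append(sl.subs[subject], v)
}

func (sl *genericSublist[T]) Match(subject string, cb func(T)) {
	for _, v := range sl.subs[subject] {
		cb(v)
	}
}

func main() {
	csl := newSublist[*consumer]()
	c := &consumer{name: "C1"}
	csl.Insert("orders.eu", c)

	// Mirrors signalConsumers: match the published subject, then hand the
	// sequence to each interested consumer directly, no byte encoding needed.
	csl.Match("orders.eu", func(o *consumer) { o.processStreamSignal(22) })
}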
mset.registerPreAckLock(o, seq) } + return true } // Snapshot creates a snapshot for the stream and possibly consumers. diff --git a/vendor/github.com/nats-io/nats-server/v2/server/stree/stree.go b/vendor/github.com/nats-io/nats-server/v2/server/stree/stree.go index 828631888f..7cb23a56eb 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/stree/stree.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/stree/stree.go @@ -1,4 +1,4 @@ -// Copyright 2023-2024 The NATS Authors +// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -124,13 +124,22 @@ func (t *SubjectTree[T]) Match(filter []byte, cb func(subject []byte, val *T)) { t.match(t.root, parts, _pre[:0], cb) } -// Iter will walk all entries in the SubjectTree lexographically. The callback can return false to terminate the walk. -func (t *SubjectTree[T]) Iter(cb func(subject []byte, val *T) bool) { +// IterOrdered will walk all entries in the SubjectTree lexographically. The callback can return false to terminate the walk. +func (t *SubjectTree[T]) IterOrdered(cb func(subject []byte, val *T) bool) { if t == nil || t.root == nil { return } var _pre [256]byte - t.iter(t.root, _pre[:0], cb) + t.iter(t.root, _pre[:0], true, cb) +} + +// IterFast will walk all entries in the SubjectTree with no guarantees of ordering. The callback can return false to terminate the walk. +func (t *SubjectTree[T]) IterFast(cb func(subject []byte, val *T) bool) { + if t == nil || t.root == nil { + return + } + var _pre [256]byte + t.iter(t.root, _pre[:0], false, cb) } // Internal methods @@ -369,7 +378,7 @@ func (t *SubjectTree[T]) match(n node, parts [][]byte, pre []byte, cb func(subje } // Interal iter function to walk nodes in lexigraphical order. -func (t *SubjectTree[T]) iter(n node, pre []byte, cb func(subject []byte, val *T) bool) bool { +func (t *SubjectTree[T]) iter(n node, pre []byte, ordered bool, cb func(subject []byte, val *T) bool) bool { if n.isLeaf() { ln := n.(*leaf[T]) return cb(append(pre, ln.suffix...), &ln.value) @@ -378,6 +387,19 @@ func (t *SubjectTree[T]) iter(n node, pre []byte, cb func(subject []byte, val *T bn := n.base() // Note that this append may reallocate, but it doesn't modify "pre" at the "iter" callsite. pre = append(pre, bn.prefix...) + // Not everything requires lexicographical sorting, so support a fast path for iterating in + // whatever order the stree has things stored instead. + if !ordered { + for _, cn := range n.children() { + if cn == nil { + continue + } + if !t.iter(cn, pre, false, cb) { + return false + } + } + return true + } // Collect nodes since unsorted. var _nodes [256]node nodes := _nodes[:0] @@ -390,7 +412,7 @@ func (t *SubjectTree[T]) iter(n node, pre []byte, cb func(subject []byte, val *T slices.SortStableFunc(nodes, func(a, b node) int { return bytes.Compare(a.path(), b.path()) }) // Now walk the nodes in order and call into next iter. 
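The stree hunk above splits `Iter` into `IterOrdered` and `IterFast`: both visit every entry and honour the callback's stop signal, but only the ordered walk pays for collecting and sorting child nodes. The same trade-off in miniature, on a plain map rather than the adaptive radix tree:

package main

import (
	"fmt"
	"sort"
)

type subjectIndex struct{ entries map[string]int }

// IterFast visits entries in whatever order the map yields them; cheap, but unordered.
func (si *subjectIndex) IterFast(cb func(subject string, val int) bool) {
	for subj, v := range si.entries {
		if !cb(subj, v) {
			return
		}
	}
}

// IterOrdered visits entries lexicographically, sorting the keys up front.
func (si *subjectIndex) IterOrdered(cb func(subject string, val int) bool) {
	keys := make([]string, 0, len(si.entries))
	for subj := range si.entries {
		keys = append(keys, subj)
	}
	sort.Strings(keys)
	for _, subj := range keys {
		if !cb(subj, si.entries[subj]) {
			return
		}
	}
}

func main() {
	si := &subjectIndex{entries: map[string]int{"foo.bar": 1, "foo.baz": 2, "orders.eu": 3}}

	si.IterOrdered(func(s string, v int) bool {
		fmt.Println("ordered:", s, v)
		return true
	})

	// Counting entries does not care about order, so the fast walk is enough.
	n := 0
	si.IterFast(func(string, int) bool { n++; return true })
	fmt.Println("count:", n)
}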
for i := range nodes { - if !t.iter(nodes[i], pre, cb) { + if !t.iter(nodes[i], pre, true, cb) { return false } } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/stree/util.go b/vendor/github.com/nats-io/nats-server/v2/server/stree/util.go index 108f78fda9..8cb6224fec 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/stree/util.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/stree/util.go @@ -1,4 +1,4 @@ -// Copyright 2023-2024 The NATS Authors +// Copyright 2023-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -55,11 +55,3 @@ func pivot[N position](subject []byte, pos N) byte { } return subject[pos] } - -// TODO(dlc) - Can be removed with Go 1.21 once server is on Go 1.22. -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/github.com/nats-io/nats-server/v2/server/subject_transform.go b/vendor/github.com/nats-io/nats-server/v2/server/subject_transform.go index 8292d48732..41e42722d1 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/subject_transform.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/subject_transform.go @@ -1,4 +1,4 @@ -// Copyright 2023 The NATS Authors +// Copyright 2023-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/sublist.go b/vendor/github.com/nats-io/nats-server/v2/server/sublist.go index b7650ede6f..9f79dfe18b 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/sublist.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/sublist.go @@ -1,4 +1,4 @@ -// Copyright 2016-2024 The NATS Authors +// Copyright 2016-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -1744,7 +1744,13 @@ func IntersectStree[T any](st *stree.SubjectTree[T], sl *Sublist, cb func(subj [ func intersectStree[T any](st *stree.SubjectTree[T], r *level, subj []byte, cb func(subj []byte, entry *T)) { if r.numNodes() == 0 { - st.Match(subj, cb) + // For wildcards we can't avoid Match, but if it's a literal subject at + // this point, using Find is considerably cheaper. + if subjectHasWildcard(bytesToString(subj)) { + st.Match(subj, cb) + } else if e, ok := st.Find(subj); ok { + cb(subj, e) + } return } nsubj := subj diff --git a/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_bsd.go b/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_bsd.go index ddab47ba54..341b31a72d 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_bsd.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_bsd.go @@ -1,4 +1,4 @@ -// Copyright 2019 The NATS Authors +// Copyright 2019-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_darwin.go b/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_darwin.go index 28944b335f..ae078443d8 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_darwin.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_darwin.go @@ -1,4 +1,4 @@ -// Copyright 2019 The NATS Authors +// Copyright 2019-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_linux.go b/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_linux.go index f4ed058744..6bfa73a0be 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_linux.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_linux.go @@ -1,4 +1,4 @@ -// Copyright 2019 The NATS Authors +// Copyright 2019-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_windows.go b/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_windows.go index f557c0b8ee..bf02133e8f 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_windows.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_windows.go @@ -1,4 +1,4 @@ -// Copyright 2019 The NATS Authors +// Copyright 2019-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_zos.go b/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_zos.go index 2435943314..e798a80bfd 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_zos.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/sysmem/mem_zos.go @@ -1,4 +1,4 @@ -// Copyright 2022 The NATS Authors +// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/util.go b/vendor/github.com/nats-io/nats-server/v2/server/util.go index aea3dcf17e..10a8d8d67b 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/util.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/util.go @@ -1,4 +1,4 @@ -// Copyright 2012-2019 The NATS Authors +// Copyright 2012-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/nats-io/nats-server/v2/server/websocket.go b/vendor/github.com/nats-io/nats-server/v2/server/websocket.go index 69e6e1a9a7..90aa3d82f4 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/websocket.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/websocket.go @@ -1,4 +1,4 @@ -// Copyright 2020-2023 The NATS Authors +// Copyright 2020-2025 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -1311,6 +1311,9 @@ func (c *client) wsCollapsePtoNB() (net.Buffers, int64) { } if usz <= wsCompressThreshold { compress = false + if cp := c.ws.compressor; cp != nil { + cp.Reset(nil) + } } } if compress && len(nb) > 0 { @@ -1331,13 +1334,11 @@ func (c *client) wsCollapsePtoNB() (net.Buffers, int64) { for len(b) > 0 { n, err := cp.Write(b) if err != nil { - if err == io.EOF { - break - } - c.Errorf("Error during compression: %v", err) - c.markConnAsClosed(WriteError) - nbPoolPut(b) - return nil, 0 + // Whatever this error is, it'll be handled by the cp.Flush() + // call below, as the same error will be returned there. + // Let the outer loop return all the buffers back to the pool + // and fall through naturally. + break } b = b[n:] } @@ -1346,6 +1347,7 @@ func (c *client) wsCollapsePtoNB() (net.Buffers, int64) { if err := cp.Flush(); err != nil { c.Errorf("Error during compression: %v", err) c.markConnAsClosed(WriteError) + cp.Reset(nil) return nil, 0 } b := buf.Bytes() @@ -1461,6 +1463,7 @@ func (c *client) wsCollapsePtoNB() (net.Buffers, int64) { bufs = append(bufs, c.ws.closeMsg) c.ws.fs += int64(len(c.ws.closeMsg)) c.ws.closeMsg = nil + c.ws.compressor = nil } c.ws.frames = nil return bufs, c.ws.fs diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin/trashbin.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin/trashbin.go index 5db0418f34..8dfa296a32 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin/trashbin.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin/trashbin.go @@ -46,6 +46,25 @@ type Trashbin struct { log *zerolog.Logger } +// trashNode is a helper struct to make trash items available for manipulation in the metadata backend +type trashNode struct { + spaceID string + id string + path string +} + +func (tn *trashNode) GetSpaceID() string { + return tn.spaceID +} + +func (tn *trashNode) GetID() string { + return tn.id +} + +func (tn *trashNode) InternalPath() string { + return tn.path +} + const ( trashHeader = `[Trash Info]` timeFormat = "2006-01-02T15:04:05" @@ -254,7 +273,7 @@ func (tb *Trashbin) RestoreRecycleItem(ctx context.Context, ref *provider.Refere return fmt.Errorf("trashbin: parent id not found for %s", restorePath) } - trashNode := node.NewBaseNode(spaceID, id, tb.lu) + trashNode := &trashNode{spaceID: spaceID, id: id, path: trashPath} err = tb.lu.MetadataBackend().Set(ctx, trashNode, prefixes.ParentidAttr, []byte(parentID)) if err != nil { return err diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go index 8c9566a628..42295ba1e0 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go @@ 
-701,7 +701,7 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error { sizes := make(map[string]int64) err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { // skip lock and upload files - if isInternal(path) || isLockFile(path) { + if t.isInternal(path) || isLockFile(path) { return nil } if isTrash(path) || t.isUpload(path) { diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go index e5dea69136..d2a4876b78 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go @@ -325,6 +325,12 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) if newNode.ID == "" { newNode.ID = oldNode.ID } + // invalidate old tree + err = t.lookup.IDCache.DeleteByPath(ctx, filepath.Join(oldNode.ParentPath(), oldNode.Name)) + if err != nil { + return err + } + if err := t.lookup.CacheID(ctx, newNode.SpaceID, newNode.ID, filepath.Join(newNode.ParentPath(), newNode.Name)); err != nil { t.log.Error().Err(err).Str("spaceID", newNode.SpaceID).Str("id", newNode.ID).Str("path", filepath.Join(newNode.ParentPath(), newNode.Name)).Msg("could not cache id") } @@ -408,7 +414,7 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro g.Go(func() error { defer close(work) for _, name := range names { - if isInternal(name) || isLockFile(name) || isTrash(name) { + if t.isInternal(name) || isLockFile(name) || isTrash(name) { continue } @@ -649,15 +655,15 @@ func (t *Tree) createDirNode(ctx context.Context, n *node.Node) (err error) { var nodeIDRegep = regexp.MustCompile(`.*/nodes/([^.]*).*`) func (t *Tree) isIgnored(path string) bool { - return isLockFile(path) || isTrash(path) || t.isUpload(path) || isInternal(path) + return isLockFile(path) || isTrash(path) || t.isUpload(path) || t.isInternal(path) } func (t *Tree) isUpload(path string) bool { return strings.HasPrefix(path, t.options.UploadDirectory) } -func isInternal(path string) bool { - return strings.Contains(path, lookup.RevisionsDir) +func (t *Tree) isInternal(path string) bool { + return path == t.options.Root || strings.HasPrefix(path, filepath.Join(t.options.Root, "indexes")) || strings.Contains(path, lookup.RevisionsDir) } func isLockFile(path string) bool { diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/recycle.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/recycle.go index 4e4c6c66c6..8810a52085 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/recycle.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/recycle.go @@ -475,6 +475,5 @@ func (tb *DecomposedfsTrashbin) EmptyRecycle(ctx context.Context, ref *provider. 
} func (tb *DecomposedfsTrashbin) getRecycleRoot(spaceID string) string { - rootNode := node.NewBaseNode(spaceID, spaceID, tb.fs.lu) - return filepath.Join(rootNode.InternalPath(), "trash") + return filepath.Join(tb.fs.o.Root, "spaces", lookup.Pathify(spaceID, 1, 2), "trash") } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator/async.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator/async.go index b9015bb81d..3762930c94 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator/async.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator/async.go @@ -33,8 +33,6 @@ import ( "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options" - "github.com/pkg/errors" - "github.com/rogpeppe/go-internal/lockedfile" "github.com/rs/zerolog" "github.com/shamaton/msgpack/v2" ) @@ -283,28 +281,18 @@ func (p AsyncPropagator) propagate(ctx context.Context, pn PropagationNode, reca attrs := node.Attributes{} - var f *lockedfile.File // lock parent before reading treesize or tree time - _, subspan = tracer.Start(ctx, "lockedfile.OpenFile") - lockFilepath := p.lookup.MetadataBackend().LockfilePath(pn) - f, err = lockedfile.OpenFile(lockFilepath, os.O_RDWR|os.O_CREATE, 0600) + unlock, err := p.lookup.MetadataBackend().Lock(pn) subspan.End() if err != nil { log.Error().Err(err). - Str("lock filepath", lockFilepath). + Str("lock filepath", p.lookup.MetadataBackend().LockfilePath(pn)). Msg("Propagation failed. Could not open metadata for node with lock.") cleanup() return } - // always log error if closing node fails - defer func() { - // ignore already closed error - cerr := f.Close() - if err == nil && cerr != nil && !errors.Is(cerr, os.ErrClosed) { - err = cerr // only overwrite err with en error from close if the former was nil - } - }() + defer func() { _ = unlock() }() _, subspan = tracer.Start(ctx, "node.ReadNode") n, err := node.ReadNode(ctx, p.lookup, pn.GetSpaceID(), pn.GetID(), false, nil, false) @@ -410,11 +398,8 @@ func (p AsyncPropagator) propagate(ctx context.Context, pn PropagationNode, reca // Release node lock early, ignore already closed error _, subspan = tracer.Start(ctx, "f.Close") - cerr := f.Close() + _ = unlock() subspan.End() - if cerr != nil && !errors.Is(cerr, os.ErrClosed) { - log.Error().Err(cerr).Msg("Failed to close node and release lock") - } log.Info().Msg("Propagation done. 
cleaning up") cleanup() diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator/sync.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator/sync.go index a30f22939b..9ffde5e037 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator/sync.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator/sync.go @@ -20,8 +20,6 @@ package propagator import ( "context" - "errors" - "os" "strconv" "time" @@ -29,7 +27,6 @@ import ( "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node" - "github.com/rogpeppe/go-internal/lockedfile" "github.com/rs/zerolog" ) @@ -94,28 +91,19 @@ func (p SyncPropagator) propagateItem(ctx context.Context, n *node.Node, sTime t attrs := node.Attributes{} - var f *lockedfile.File // lock parent before reading treesize or tree time _, subspan := tracer.Start(ctx, "lockedfile.OpenFile") parentNode := node.NewBaseNode(n.SpaceID, n.ParentID, p.lookup) - parentFilename := p.lookup.MetadataBackend().LockfilePath(parentNode) - f, err := lockedfile.OpenFile(parentFilename, os.O_RDWR|os.O_CREATE, 0600) + unlock, err := p.lookup.MetadataBackend().Lock(parentNode) subspan.End() if err != nil { log.Error().Err(err). - Str("parent filename", parentFilename). + Str("parent filename", parentNode.InternalPath()). Msg("Propagation failed. Could not open metadata for parent with lock.") return nil, true, err } - // always log error if closing node fails - defer func() { - // ignore already closed error - cerr := f.Close() - if err == nil && cerr != nil && !errors.Is(cerr, os.ErrClosed) { - err = cerr // only overwrite err with en error from close if the former was nil - } - }() + defer func() { _ = unlock() }() if n, err = n.Parent(ctx); err != nil { log.Error().Err(err). diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 93a798ab63..ec5f0cdd0c 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -405,8 +405,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration { if limit <= 0 { return InfDuration } - seconds := tokens / float64(limit) - return time.Duration(float64(time.Second) * seconds) + + duration := (tokens / float64(limit)) * float64(time.Second) + + // Cap the duration to the maximum representable int64 value, to avoid overflow. 
+ if duration > float64(math.MaxInt64) { + return InfDuration + } + + return time.Duration(duration) } // tokensFromDuration is a unit conversion function from a time duration to the number of tokens diff --git a/vendor/modules.txt b/vendor/modules.txt index 71c200f6a7..c76c393736 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -600,8 +600,8 @@ github.com/go-redis/redis/v8/internal/util # github.com/go-resty/resty/v2 v2.7.0 ## explicit; go 1.11 github.com/go-resty/resty/v2 -# github.com/go-sql-driver/mysql v1.8.1 -## explicit; go 1.18 +# github.com/go-sql-driver/mysql v1.9.0 +## explicit; go 1.21 github.com/go-sql-driver/mysql # github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 ## explicit; go 1.13 @@ -827,14 +827,15 @@ github.com/justinas/alice # github.com/kevinburke/ssh_config v1.2.0 ## explicit github.com/kevinburke/ssh_config -# github.com/klauspost/compress v1.17.11 -## explicit; go 1.21 +# github.com/klauspost/compress v1.18.0 +## explicit; go 1.22 github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/gzip github.com/klauspost/compress/huff0 github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/le github.com/klauspost/compress/internal/race github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/s2 @@ -990,8 +991,8 @@ github.com/munnerz/goautoneg # github.com/nats-io/jwt/v2 v2.7.3 ## explicit; go 1.22 github.com/nats-io/jwt/v2 -# github.com/nats-io/nats-server/v2 v2.10.25 -## explicit; go 1.22 +# github.com/nats-io/nats-server/v2 v2.10.26 +## explicit; go 1.23.0 github.com/nats-io/nats-server/v2/conf github.com/nats-io/nats-server/v2/internal/fastrand github.com/nats-io/nats-server/v2/internal/ldap @@ -1000,6 +1001,7 @@ github.com/nats-io/nats-server/v2/server github.com/nats-io/nats-server/v2/server/avl github.com/nats-io/nats-server/v2/server/certidp github.com/nats-io/nats-server/v2/server/certstore +github.com/nats-io/nats-server/v2/server/gsl github.com/nats-io/nats-server/v2/server/pse github.com/nats-io/nats-server/v2/server/stree github.com/nats-io/nats-server/v2/server/sysmem @@ -1009,7 +1011,7 @@ github.com/nats-io/nats.go github.com/nats-io/nats.go/encoders/builtin github.com/nats-io/nats.go/internal/parser github.com/nats-io/nats.go/util -# github.com/nats-io/nkeys v0.4.9 +# github.com/nats-io/nkeys v0.4.10 ## explicit; go 1.20 github.com/nats-io/nkeys # github.com/nats-io/nuid v1.0.1 @@ -1192,7 +1194,7 @@ github.com/open-policy-agent/opa/v1/types github.com/open-policy-agent/opa/v1/util github.com/open-policy-agent/opa/v1/util/decoding github.com/open-policy-agent/opa/v1/version -# github.com/opencloud-eu/reva/v2 v2.27.3-0.20250227091157-0f6d58900e83 +# github.com/opencloud-eu/reva/v2 v2.27.3-0.20250228155248-34dee069adce ## explicit; go 1.23.1 github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace github.com/opencloud-eu/reva/v2/cmd/revad/runtime @@ -2094,8 +2096,8 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.33.0 -## explicit; go 1.20 +# golang.org/x/crypto v0.34.0 +## explicit; go 1.23.0 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt golang.org/x/crypto/blake2b @@ -2220,7 +2222,7 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.9.0 +# golang.org/x/time v0.10.0 ## explicit; go 1.18 golang.org/x/time/rate # 
golang.org/x/tools v0.28.0
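Referring back to the golang.org/x/time/rate hunk above: at very small limits the float64 token-to-duration conversion can exceed what `time.Duration` (int64 nanoseconds) can represent, so the new code caps the result at `InfDuration` instead of letting it wrap. A standalone sketch of that guard, written as a free function rather than the package's `Limit` method:

package main

import (
	"fmt"
	"math"
	"time"
)

const infDuration = time.Duration(math.MaxInt64)

func durationFromTokens(limit, tokens float64) time.Duration {
	if limit <= 0 {
		return infDuration
	}
	duration := (tokens / limit) * float64(time.Second)
	// Cap the duration to the maximum representable int64 value, to avoid overflow.
	if duration > float64(math.MaxInt64) {
		return infDuration
	}
	return time.Duration(duration)
}

func main() {
	fmt.Println(durationFromTokens(100, 50))     // 500ms
	fmt.Println(durationFromTokens(1e-18, 1000)) // capped: prints the max ~292y duration
}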