diff --git a/go.mod b/go.mod index b43e0f55b..e8db3ec6c 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,7 @@ module github.com/opencloud-eu/opencloud -go 1.22.7 +go 1.23.1 + toolchain go1.23.6 require ( @@ -64,7 +65,7 @@ require ( github.com/onsi/ginkgo/v2 v2.22.2 github.com/onsi/gomega v1.36.2 github.com/open-policy-agent/opa v1.1.0 - github.com/opencloud-eu/reva/v2 v2.27.3-0.20250127153848-a84e6c39c206 + github.com/opencloud-eu/reva/v2 v2.27.3-0.20250213085913-418bbbbeed54 github.com/orcaman/concurrent-map v1.0.0 github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea github.com/pkg/errors v0.9.1 @@ -122,7 +123,7 @@ require ( github.com/Masterminds/sprig v2.22.0+incompatible // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/OneOfOne/xxhash v1.2.8 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect + github.com/ProtonMail/go-crypto v1.1.5 // indirect github.com/RoaringBitmap/roaring v1.9.3 // indirect github.com/agnivade/levenshtein v1.2.0 // indirect github.com/ajg/form v1.5.1 // indirect @@ -164,7 +165,7 @@ require ( github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect github.com/crewjam/httperr v0.2.0 // indirect github.com/crewjam/saml v0.4.14 // indirect - github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/cyphar/filepath-securejoin v0.3.6 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/deckarep/golang-set v1.8.0 // indirect github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect @@ -183,8 +184,8 @@ require ( github.com/go-acme/lego/v4 v4.4.0 // indirect github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.5.0 // indirect - github.com/go-git/go-git/v5 v5.11.0 // indirect + github.com/go-git/go-billy/v5 v5.6.2 // indirect + github.com/go-git/go-git/v5 v5.13.2 // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-jose/go-jose/v3 v3.0.3 // indirect github.com/go-jose/go-jose/v4 v4.0.2 // indirect @@ -267,11 +268,11 @@ require ( github.com/nxadm/tail v1.4.8 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c // indirect - github.com/pablodz/inotifywaitgo v0.0.7 // indirect + github.com/pablodz/inotifywaitgo v0.0.9 // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pierrec/lz4/v4 v4.1.15 // indirect - github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pjbgf/sha1cd v0.3.2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pquerna/cachecontrol v0.2.0 // indirect github.com/prometheus/alertmanager v0.27.0 // indirect @@ -287,11 +288,11 @@ require ( github.com/segmentio/kafka-go v0.4.47 // indirect github.com/segmentio/ksuid v1.0.4 // indirect github.com/sercand/kuberesolver/v5 v5.1.1 // indirect - github.com/sergi/go-diff v1.3.1 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/sethvargo/go-password v0.3.1 // indirect github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect - github.com/skeema/knownhosts v1.2.1 // indirect + github.com/skeema/knownhosts v1.3.0 // indirect github.com/spacewander/go-suffix-tree 
v0.0.0-20191010040751-0865e368c784 // indirect github.com/spf13/pflag v1.0.6 // indirect github.com/stretchr/objx v0.5.2 // indirect diff --git a/go.sum b/go.sum index adb1da2c1..445ba0667 100644 --- a/go.sum +++ b/go.sum @@ -87,8 +87,8 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/OpenDNS/vegadns2client v0.0.0-20180418235048-a3fa4a771d87/go.mod h1:iGLljf5n9GjT6kc0HBvyI1nOKnGQbNB66VzSNbK5iks= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v1.1.5 h1:eoAQfK2dwL+tFSFpr7TbOaPNUbPiJj4fLYwwGE1FQO4= +github.com/ProtonMail/go-crypto v1.1.5/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM= github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= @@ -194,7 +194,6 @@ github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZ github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/butonic/go-micro/v4 v4.11.1-0.20241115112658-b5d4de5ed9b3 h1:h8Z0hBv5tg/uZMKu8V47+DKWYVQg0lYP8lXDQq7uRpE= github.com/butonic/go-micro/v4 v4.11.1-0.20241115112658-b5d4de5ed9b3/go.mod h1:eE/tD53n3KbVrzrWxKLxdkGw45Fg1qaNLWjpJMvIUF4= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= github.com/c-bata/go-prompt v0.2.5/go.mod h1:vFnjEGDIIA/Lib7giyE4E9c50Lvl8j0S+7FVlAwDAVw= @@ -218,7 +217,6 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= @@ -249,8 +247,8 @@ github.com/crewjam/saml v0.4.14/go.mod h1:UVSZCf18jJkk6GpWNVqcyQJMD5HsRugBPf4I1n github.com/cs3org/go-cs3apis v0.0.0-20241105092511-3ad35d174fc1 h1:RU6LT6mkD16xZs011+8foU7T3LrPvTTSWeTQ9OgfhkA= github.com/cs3org/go-cs3apis v0.0.0-20241105092511-3ad35d174fc1/go.mod h1:DedpcqXl193qF/08Y04IO0PpxyyMu8+GrkD6kWK2MEQ= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod 
h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM= +github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -292,8 +290,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/egirna/icap v0.0.0-20181108071049-d5ee18bd70bc h1:6IxmRbXV8WXVkcYcTzkU219A3UZeNMX/e6X2sve1wXA= github.com/egirna/icap v0.0.0-20181108071049-d5ee18bd70bc/go.mod h1:FdVN2WHg7zOHhJ7kZQdDorfFhIfqZaHttjAzDDvAXHE= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/elazarl/goproxy v1.4.0 h1:4GyuSbFa+s26+3rmYNSuUVsx+HgPrV1bk1jXI0l9wjM= +github.com/elazarl/goproxy v1.4.0/go.mod h1:X/5W/t+gzDyLfHW4DrMdpjqYjpXsURlBt9lpBDxZZZQ= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/emvi/iso-639-1 v1.1.0 h1:EhZiYVA+ysa/b7+0T2DD9hcX7E/5sh4o1KyDAIPu7VE= @@ -331,8 +329,8 @@ github.com/getkin/kin-openapi v0.13.0/go.mod h1:WGRs2ZMM1Q8LR1QBEwUxC6RJEfaBcD0s github.com/ggwhite/go-masker v1.1.0 h1:kN/KIvktu2U+hd3KWrSlLj7xBGD1iBfc9/xdbVgFbRc= github.com/ggwhite/go-masker v1.1.0/go.mod h1:xnTRHwrIU9FtBADwEjUC5Dy/BVedvoTxyOE7/d3CNwY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= -github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= +github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= +github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-acme/lego/v4 v4.4.0 h1:uHhU5LpOYQOdp3aDU+XY2bajseu8fuExphTL1Ss6/Fc= github.com/go-acme/lego/v4 v4.4.0/go.mod h1:l3+tFUFZb590dWcqhWZegynUthtaHJbG2fevUpoOOE0= github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= @@ -348,12 +346,12 @@ github.com/go-cmd/cmd v1.0.5/go.mod h1:y8q8qlK5wQibcw63djSl/ntiHUHXHGdCkPk0j4QeW github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod 
h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/go-git/go-git/v5 v5.13.2 h1:7O7xvsK7K+rZPKW6AQR1YyNhfywkv7B8/FsP3ki6Zv0= +github.com/go-git/go-git/v5 v5.13.2/go.mod h1:hWdW5P4YZRjmpGHwRH2v3zkWcNl6HeXaXQEMGb3NJ9A= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -862,8 +860,8 @@ github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/open-policy-agent/opa v1.1.0 h1:HMz2evdEMTyNqtdLjmu3Vyx06BmhNYAx67Yz3Ll9q2s= github.com/open-policy-agent/opa v1.1.0/go.mod h1:T1pASQ1/vwfTa+e2fYcfpLCvWgYtqtiUv+IuA/dLPQs= -github.com/opencloud-eu/reva/v2 v2.27.3-0.20250127153848-a84e6c39c206 h1:sTbtA2hU40r6eh24aswG0oP7NiJrVyEiqM1nn72TrHA= -github.com/opencloud-eu/reva/v2 v2.27.3-0.20250127153848-a84e6c39c206/go.mod h1:lk0GfBt0cLaOcc1nWJikinTK5ibFtKRxp10ATxtCalU= +github.com/opencloud-eu/reva/v2 v2.27.3-0.20250213085913-418bbbbeed54 h1:uxSuQctr5AewEerqsCx9GwdzpEv4PESwkg9nyd30X6U= +github.com/opencloud-eu/reva/v2 v2.27.3-0.20250213085913-418bbbbeed54/go.mod h1:kAdY6yRvrlyhiqLaOCYOz+fHPCl7X522eonD8cIiTVE= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= @@ -876,8 +874,8 @@ github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea h1:C github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea/go.mod h1:yXI+rmE8yYx+ZsGVrnCpprw/gZMcxjwntnX2y2+VKxY= github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c h1:rp5dCmg/yLR3mgFuSOe4oEnDDmGLROTvMragMUXpTQw= github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c/go.mod h1:X07ZCGwUbLaax7L0S3Tw4hpejzu63ZrrQiUe6W0hcy0= -github.com/pablodz/inotifywaitgo v0.0.7 h1:1ii49dGBnRn0t1Sz7RGZS6/NberPEDQprwKHN49Bv6U= -github.com/pablodz/inotifywaitgo v0.0.7/go.mod h1:OtzRCsYTJlIr+vAzlOtauTkfQ1c25ebFuXq8tbbf8cw= +github.com/pablodz/inotifywaitgo v0.0.9 h1:njquRbBU7fuwIe5rEvtaniVBjwWzcpdUVptSgzFqZsw= +github.com/pablodz/inotifywaitgo v0.0.9/go.mod h1:hAfx2oN+WKg8miwUKPs52trySpPignlRBRxWcXVHku0= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= @@ -890,8 +888,8 @@ github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rK github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod 
h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= +github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -999,8 +997,8 @@ github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sethvargo/go-password v0.3.1 h1:WqrLTjo7X6AcVYfC6R7GtSyuUQR9hGyAj/f1PYQZCJU= github.com/sethvargo/go-password v0.3.1/go.mod h1:rXofC1zT54N7R8K/h1WDUdkf9BOx5OptoxrMBcrXzvs= github.com/shamaton/msgpack/v2 v2.2.2 h1:GOIg0c9LV04VwzOOqZSrmsv/JzjNOOMxnS/HvOHGdgs= @@ -1018,8 +1016,8 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= +github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= @@ -1216,9 +1214,7 @@ golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.19.0/go.mod 
h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= @@ -1316,10 +1312,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= @@ -1428,8 +1422,6 @@ golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1445,9 +1437,7 @@ golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXct golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= @@ -1465,9 +1455,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod 
h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= diff --git a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go index affb74a76..d558b9bd8 100644 --- a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go +++ b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go @@ -49,16 +49,16 @@ func ShiftNBytesLeft(dst, x []byte, n int) { dst = append(dst, make([]byte, n/8)...) } -// XorBytesMut assumes equal input length, replaces X with X XOR Y +// XorBytesMut replaces X with X XOR Y. len(X) must be >= len(Y). func XorBytesMut(X, Y []byte) { - for i := 0; i < len(X); i++ { + for i := 0; i < len(Y); i++ { X[i] ^= Y[i] } } -// XorBytes assumes equal input length, puts X XOR Y into Z +// XorBytes puts X XOR Y into Z. len(Z) and len(X) must be >= len(Y). func XorBytes(Z, X, Y []byte) { - for i := 0; i < len(X); i++ { + for i := 0; i < len(Y); i++ { Z[i] = X[i] ^ Y[i] } } diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go index 1a6f73502..24f893017 100644 --- a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go @@ -18,8 +18,9 @@ import ( "crypto/cipher" "crypto/subtle" "errors" - "github.com/ProtonMail/go-crypto/internal/byteutil" "math/bits" + + "github.com/ProtonMail/go-crypto/internal/byteutil" ) type ocb struct { @@ -108,8 +109,10 @@ func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte { if len(nonce) > o.nonceSize { panic("crypto/ocb: Incorrect nonce length given to OCB") } - ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize) - o.crypt(enc, out, nonce, adata, plaintext) + sep := len(plaintext) + ret, out := byteutil.SliceForAppend(dst, sep+o.tagSize) + tag := o.crypt(enc, out[:sep], nonce, adata, plaintext) + copy(out[sep:], tag) return ret } @@ -121,12 +124,10 @@ func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { return nil, ocbError("Ciphertext shorter than tag length") } sep := len(ciphertext) - o.tagSize - ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) + ret, out := byteutil.SliceForAppend(dst, sep) ciphertextData := ciphertext[:sep] - tag := ciphertext[sep:] - o.crypt(dec, out, nonce, adata, ciphertextData) - if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 { - ret = ret[:sep] + tag := o.crypt(dec, out, nonce, adata, ciphertextData) + if subtle.ConstantTimeCompare(tag, ciphertext[sep:]) == 1 { return ret, nil } for i := range out { @@ -136,7 +137,8 @@ func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { } // On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt) -// function. It returns the resulting plain/ciphertext with the tag appended. +// function. It writes the resulting plain/ciphertext into Y and returns +// the tag. 
func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { // // Consider X as a sequence of 128-bit blocks @@ -153,7 +155,7 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { truncatedNonce := make([]byte, len(nonce)) copy(truncatedNonce, nonce) truncatedNonce[len(truncatedNonce)-1] &= 192 - Ktop := make([]byte, blockSize) + var Ktop []byte if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) { Ktop = o.reusableKtop.Ktop } else { @@ -193,13 +195,14 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))]) blockX := X[i*blockSize : (i+1)*blockSize] blockY := Y[i*blockSize : (i+1)*blockSize] - byteutil.XorBytes(blockY, blockX, offset) switch instruction { case enc: + byteutil.XorBytesMut(checksum, blockX) + byteutil.XorBytes(blockY, blockX, offset) o.block.Encrypt(blockY, blockY) byteutil.XorBytesMut(blockY, offset) - byteutil.XorBytesMut(checksum, blockX) case dec: + byteutil.XorBytes(blockY, blockX, offset) o.block.Decrypt(blockY, blockY) byteutil.XorBytesMut(blockY, offset) byteutil.XorBytesMut(checksum, blockY) @@ -215,31 +218,24 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { o.block.Encrypt(pad, offset) chunkX := X[blockSize*m:] chunkY := Y[blockSize*m : len(X)] - byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) - // P_* || bit(1) || zeroes(127) - len(P_*) switch instruction { case enc: - paddedY := append(chunkX, byte(128)) - paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...) - byteutil.XorBytesMut(checksum, paddedY) + byteutil.XorBytesMut(checksum, chunkX) + checksum[len(chunkX)] ^= 128 + byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) + // P_* || bit(1) || zeroes(127) - len(P_*) case dec: - paddedX := append(chunkY, byte(128)) - paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...) - byteutil.XorBytesMut(checksum, paddedX) + byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) + // P_* || bit(1) || zeroes(127) - len(P_*) + byteutil.XorBytesMut(checksum, chunkY) + checksum[len(chunkY)] ^= 128 } - byteutil.XorBytes(tag, checksum, offset) - byteutil.XorBytesMut(tag, o.mask.lDol) - o.block.Encrypt(tag, tag) - byteutil.XorBytesMut(tag, o.hash(adata)) - copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize]) - } else { - byteutil.XorBytes(tag, checksum, offset) - byteutil.XorBytesMut(tag, o.mask.lDol) - o.block.Encrypt(tag, tag) - byteutil.XorBytesMut(tag, o.hash(adata)) - copy(Y[blockSize*m:], tag[:o.tagSize]) } - return Y + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + return tag[:o.tagSize] } // This hash function is used to compute the tag. Per design, on empty input it diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go index d7af9141e..e0a677f28 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go @@ -23,7 +23,7 @@ import ( // Headers // // base64-encoded Bytes -// '=' base64 encoded checksum +// '=' base64 encoded checksum (optional) not checked anymore // -----END Type----- // // where Headers is a possibly empty sequence of Key: Value lines. 
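The armor.go and encode.go hunks below stop verifying the CRC-24 footer on decode and make it optional on encode: armor.Encode still emits the legacy "=XXXX" line, while the new EncodeWithChecksumOption (added in encode.go further down) lets callers omit it, as the crypto refresh recommends. A minimal usage sketch, not part of the patch, using only the API introduced by this diff:

package main

import (
	"bytes"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer
	// doChecksum=false omits the CRC-24 "=XXXX" footer; armor.Encode keeps it for compatibility.
	w, err := armor.EncodeWithChecksumOption(&buf, "PGP MESSAGE", nil, false)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // BEGIN/END lines and base64 body, no "=" checksum line
}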
@@ -40,36 +40,15 @@ type Block struct { var ArmorCorrupt error = errors.StructuralError("armor invalid") -const crc24Init = 0xb704ce -const crc24Poly = 0x1864cfb -const crc24Mask = 0xffffff - -// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 -func crc24(crc uint32, d []byte) uint32 { - for _, b := range d { - crc ^= uint32(b) << 16 - for i := 0; i < 8; i++ { - crc <<= 1 - if crc&0x1000000 != 0 { - crc ^= crc24Poly - } - } - } - return crc -} - var armorStart = []byte("-----BEGIN ") var armorEnd = []byte("-----END ") var armorEndOfLine = []byte("-----") -// lineReader wraps a line based reader. It watches for the end of an armor -// block and records the expected CRC value. +// lineReader wraps a line based reader. It watches for the end of an armor block type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 - crcSet bool + in *bufio.Reader + buf []byte + eof bool } func (l *lineReader) Read(p []byte) (n int, err error) { @@ -98,26 +77,9 @@ func (l *lineReader) Read(p []byte) (n int, err error) { if len(line) == 5 && line[0] == '=' { // This is the checksum line - var expectedBytes [3]byte - var m int - m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) - if m != 3 || err != nil { - return - } - l.crc = uint32(expectedBytes[0])<<16 | - uint32(expectedBytes[1])<<8 | - uint32(expectedBytes[2]) - - line, _, err = l.in.ReadLine() - if err != nil && err != io.EOF { - return - } - if !bytes.HasPrefix(line, armorEnd) { - return 0, ArmorCorrupt - } + // Don't check the checksum l.eof = true - l.crcSet = true return 0, io.EOF } @@ -138,23 +100,14 @@ func (l *lineReader) Read(p []byte) (n int, err error) { return } -// openpgpReader passes Read calls to the underlying base64 decoder, but keeps -// a running CRC of the resulting data and checks the CRC against the value -// found by the lineReader at EOF. +// openpgpReader passes Read calls to the underlying base64 decoder. type openpgpReader struct { - lReader *lineReader - b64Reader io.Reader - currentCRC uint32 + lReader *lineReader + b64Reader io.Reader } func (r *openpgpReader) Read(p []byte) (n int, err error) { n, err = r.b64Reader.Read(p) - r.currentCRC = crc24(r.currentCRC, p[:n]) - - if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { - return 0, ArmorCorrupt - } - return } @@ -222,7 +175,6 @@ TryNextBlock: } p.lReader.in = r - p.oReader.currentCRC = crc24Init p.oReader.lReader = &p.lReader p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) p.Body = &p.oReader diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go index 5b6e16c19..550efddf0 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go @@ -7,6 +7,7 @@ package armor import ( "encoding/base64" "io" + "sort" ) var armorHeaderSep = []byte(": ") @@ -14,6 +15,23 @@ var blockEnd = []byte("\n=") var newline = []byte("\n") var armorEndOfLineOut = []byte("-----\n") +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + // writeSlices writes its arguments to the given Writer. 
func writeSlices(out io.Writer, slices ...[]byte) (err error) { for _, s := range slices { @@ -99,15 +117,18 @@ func (l *lineBreaker) Close() (err error) { // // encoding -> base64 encoder -> lineBreaker -> out type encoding struct { - out io.Writer - breaker *lineBreaker - b64 io.WriteCloser - crc uint32 - blockType []byte + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + crcEnabled bool + blockType []byte } func (e *encoding) Write(data []byte) (n int, err error) { - e.crc = crc24(e.crc, data) + if e.crcEnabled { + e.crc = crc24(e.crc, data) + } return e.b64.Write(data) } @@ -118,28 +139,36 @@ func (e *encoding) Close() (err error) { } e.breaker.Close() - var checksumBytes [3]byte - checksumBytes[0] = byte(e.crc >> 16) - checksumBytes[1] = byte(e.crc >> 8) - checksumBytes[2] = byte(e.crc) + if e.crcEnabled { + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) - var b64ChecksumBytes [4]byte - base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) - return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) + } + return writeSlices(e.out, newline, armorEnd, e.blockType, armorEndOfLine) } -// Encode returns a WriteCloser which will encode the data written to it in -// OpenPGP armor. -func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { +func encode(out io.Writer, blockType string, headers map[string]string, checksum bool) (w io.WriteCloser, err error) { bType := []byte(blockType) err = writeSlices(out, armorStart, bType, armorEndOfLineOut) if err != nil { return } - for k, v := range headers { - err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + keys := make([]string, len(headers)) + i := 0 + for k := range headers { + keys[i] = k + i++ + } + sort.Strings(keys) + for _, k := range keys { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(headers[k]), newline) if err != nil { return } @@ -151,11 +180,27 @@ func Encode(out io.Writer, blockType string, headers map[string]string) (w io.Wr } e := &encoding{ - out: out, - breaker: newLineBreaker(out, 64), - crc: crc24Init, - blockType: bType, + out: out, + breaker: newLineBreaker(out, 64), + blockType: bType, + crc: crc24Init, + crcEnabled: checksum, } e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) return e, nil } + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + return encode(out, blockType, headers, true) +} + +// EncodeWithChecksumOption returns a WriteCloser which will encode the data written to it in +// OpenPGP armor and provides the option to include a checksum. +// When forming ASCII Armor, the CRC24 footer SHOULD NOT be generated, +// unless interoperability with implementations that require the CRC24 footer +// to be present is a concern. 
+func EncodeWithChecksumOption(out io.Writer, blockType string, headers map[string]string, doChecksum bool) (w io.WriteCloser, err error) { + return encode(out, blockType, headers, doChecksum) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go index a94f6150c..5b40e1375 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go @@ -30,8 +30,12 @@ func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { if c == '\r' { *s = 1 } else if c == '\n' { - cw.Write(buf[start:i]) - cw.Write(newline) + if _, err := cw.Write(buf[start:i]); err != nil { + return 0, err + } + if _, err := cw.Write(newline); err != nil { + return 0, err + } start = i + 1 } case 1: @@ -39,7 +43,9 @@ func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { } } - cw.Write(buf[start:]) + if _, err := cw.Write(buf[start:]); err != nil { + return 0, err + } return len(buf), nil } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go index c895bad6b..db8fb163b 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go @@ -163,13 +163,9 @@ func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLead if _, err := param.Write([]byte("Anonymous Sender ")); err != nil { return nil, err } - // For v5 keys, the 20 leftmost octets of the fingerprint are used. - if _, err := param.Write(fingerprint[:20]); err != nil { + if _, err := param.Write(fingerprint[:]); err != nil { return nil, err } - if param.Len()-len(curveOID) != 45 { - return nil, errors.New("ecdh: malformed KDF Param") - } // MB = Hash ( 00 || 00 || 00 || 01 || ZB || Param ); h := pub.KDF.Hash.New() diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go new file mode 100644 index 000000000..6abdf7c44 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go @@ -0,0 +1,115 @@ +// Package ed25519 implements the ed25519 signature algorithm for OpenPGP +// as defined in the Open PGP crypto refresh. +package ed25519 + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed25519lib "github.com/cloudflare/circl/sign/ed25519" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys in this package. + PublicKeySize = ed25519lib.PublicKeySize + // SeedSize is the size, in bytes, of private key seeds. + // The private key representation used by RFC 8032. + SeedSize = ed25519lib.SeedSize + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = ed25519lib.SignatureSize +) + +type PublicKey struct { + // Point represents the elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Key the private key representation by RFC 8032, + // encoded as seed | pub key point. + Key []byte +} + +// NewPublicKey creates a new empty ed25519 public key. +func NewPublicKey() *PublicKey { + return &PublicKey{} +} + +// NewPrivateKey creates a new empty private key referencing the public key. +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Seed returns the ed25519 private key secret seed. 
+// The private key representation by RFC 8032. +func (pk *PrivateKey) Seed() []byte { + return pk.Key[:SeedSize] +} + +// MarshalByteSecret returns the underlying 32 byte seed of the private key. +func (pk *PrivateKey) MarshalByteSecret() []byte { + return pk.Seed() +} + +// UnmarshalByteSecret computes the private key from the secret seed +// and stores it in the private key object. +func (sk *PrivateKey) UnmarshalByteSecret(seed []byte) error { + sk.Key = ed25519lib.NewKeyFromSeed(seed) + return nil +} + +// GenerateKey generates a fresh private key with the provided randomness source. +func GenerateKey(rand io.Reader) (*PrivateKey, error) { + publicKey, privateKey, err := ed25519lib.GenerateKey(rand) + if err != nil { + return nil, err + } + privateKeyOut := new(PrivateKey) + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Key = privateKey[:] + return privateKeyOut, nil +} + +// Sign signs a message with the ed25519 algorithm. +// priv MUST be a valid key! Check this with Validate() before use. +func Sign(priv *PrivateKey, message []byte) ([]byte, error) { + return ed25519lib.Sign(priv.Key, message), nil +} + +// Verify verifies an ed25519 signature. +func Verify(pub *PublicKey, message []byte, signature []byte) bool { + return ed25519lib.Verify(pub.Point, message, signature) +} + +// Validate checks if the ed25519 private key is valid. +func Validate(priv *PrivateKey) error { + expectedPrivateKey := ed25519lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv.Key, expectedPrivateKey) == 0 { + return errors.KeyInvalidError("ed25519: invalid ed25519 secret") + } + if subtle.ConstantTimeCompare(priv.PublicKey.Point, expectedPrivateKey[SeedSize:]) == 0 { + return errors.KeyInvalidError("ed25519: invalid ed25519 public key") + } + return nil +} + +// ENCODING/DECODING signature: + +// WriteSignature encodes and writes an ed25519 signature to writer. +func WriteSignature(writer io.Writer, signature []byte) error { + _, err := writer.Write(signature) + return err +} + +// ReadSignature decodes an ed25519 signature from a reader. +func ReadSignature(reader io.Reader) ([]byte, error) { + signature := make([]byte, SignatureSize) + if _, err := io.ReadFull(reader, signature); err != nil { + return nil, err + } + return signature, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go new file mode 100644 index 000000000..b11fb4fb1 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go @@ -0,0 +1,119 @@ +// Package ed448 implements the ed448 signature algorithm for OpenPGP +// as defined in the Open PGP crypto refresh. +package ed448 + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed448lib "github.com/cloudflare/circl/sign/ed448" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys in this package. + PublicKeySize = ed448lib.PublicKeySize + // SeedSize is the size, in bytes, of private key seeds. + // The private key representation used by RFC 8032. + SeedSize = ed448lib.SeedSize + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = ed448lib.SignatureSize +) + +type PublicKey struct { + // Point represents the elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Key the private key representation by RFC 8032, + // encoded as seed | public key point. 
+ Key []byte +} + +// NewPublicKey creates a new empty ed448 public key. +func NewPublicKey() *PublicKey { + return &PublicKey{} +} + +// NewPrivateKey creates a new empty private key referencing the public key. +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Seed returns the ed448 private key secret seed. +// The private key representation by RFC 8032. +func (pk *PrivateKey) Seed() []byte { + return pk.Key[:SeedSize] +} + +// MarshalByteSecret returns the underlying seed of the private key. +func (pk *PrivateKey) MarshalByteSecret() []byte { + return pk.Seed() +} + +// UnmarshalByteSecret computes the private key from the secret seed +// and stores it in the private key object. +func (sk *PrivateKey) UnmarshalByteSecret(seed []byte) error { + sk.Key = ed448lib.NewKeyFromSeed(seed) + return nil +} + +// GenerateKey generates a fresh private key with the provided randomness source. +func GenerateKey(rand io.Reader) (*PrivateKey, error) { + publicKey, privateKey, err := ed448lib.GenerateKey(rand) + if err != nil { + return nil, err + } + privateKeyOut := new(PrivateKey) + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Key = privateKey[:] + return privateKeyOut, nil +} + +// Sign signs a message with the ed448 algorithm. +// priv MUST be a valid key! Check this with Validate() before use. +func Sign(priv *PrivateKey, message []byte) ([]byte, error) { + // Ed448 is used with the empty string as a context string. + // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-08#section-13.7 + return ed448lib.Sign(priv.Key, message, ""), nil +} + +// Verify verifies a ed448 signature +func Verify(pub *PublicKey, message []byte, signature []byte) bool { + // Ed448 is used with the empty string as a context string. + // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-08#section-13.7 + return ed448lib.Verify(pub.Point, message, signature, "") +} + +// Validate checks if the ed448 private key is valid +func Validate(priv *PrivateKey) error { + expectedPrivateKey := ed448lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv.Key, expectedPrivateKey) == 0 { + return errors.KeyInvalidError("ed448: invalid ed448 secret") + } + if subtle.ConstantTimeCompare(priv.PublicKey.Point, expectedPrivateKey[SeedSize:]) == 0 { + return errors.KeyInvalidError("ed448: invalid ed448 public key") + } + return nil +} + +// ENCODING/DECODING signature: + +// WriteSignature encodes and writes an ed448 signature to writer. +func WriteSignature(writer io.Writer, signature []byte) error { + _, err := writer.Write(signature) + return err +} + +// ReadSignature decodes an ed448 signature from a reader. +func ReadSignature(reader io.Reader) ([]byte, error) { + signature := make([]byte, SignatureSize) + if _, err := io.ReadFull(reader, signature); err != nil { + return nil, err + } + return signature, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go index 17e2bcfed..0eb3937b3 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go @@ -9,6 +9,18 @@ import ( "strconv" ) +var ( + // ErrDecryptSessionKeyParsing is a generic error message for parsing errors in decrypted data + // to reduce the risk of oracle attacks. 
+ ErrDecryptSessionKeyParsing = DecryptWithSessionKeyError("parsing error") + // ErrAEADTagVerification is returned if one of the tag verifications in SEIPDv2 fails + ErrAEADTagVerification error = DecryptWithSessionKeyError("AEAD tag verification failed") + // ErrMDCHashMismatch + ErrMDCHashMismatch error = SignatureError("MDC hash mismatch") + // ErrMDCMissing + ErrMDCMissing error = SignatureError("MDC packet not found") +) + // A StructuralError is returned when OpenPGP data is found to be syntactically // invalid. type StructuralError string @@ -17,6 +29,34 @@ func (s StructuralError) Error() string { return "openpgp: invalid data: " + string(s) } +// A DecryptWithSessionKeyError is returned when a failure occurs when reading from symmetrically decrypted data or +// an authentication tag verification fails. +// Such an error indicates that the supplied session key is likely wrong or the data got corrupted. +type DecryptWithSessionKeyError string + +func (s DecryptWithSessionKeyError) Error() string { + return "openpgp: decryption with session key failed: " + string(s) +} + +// HandleSensitiveParsingError handles parsing errors when reading data from potentially decrypted data. +// The function makes parsing errors generic to reduce the risk of oracle attacks in SEIPDv1. +func HandleSensitiveParsingError(err error, decrypted bool) error { + if !decrypted { + // Data was not encrypted so we return the inner error. + return err + } + // The data is read from a stream that decrypts using a session key; + // therefore, we need to handle parsing errors appropriately. + // This is essential to mitigate the risk of oracle attacks. + if decError, ok := err.(*DecryptWithSessionKeyError); ok { + return decError + } + if decError, ok := err.(DecryptWithSessionKeyError); ok { + return decError + } + return ErrDecryptSessionKeyParsing +} + // UnsupportedError indicates that, although the OpenPGP data is valid, it // makes use of currently unimplemented features. type UnsupportedError string @@ -41,9 +81,6 @@ func (b SignatureError) Error() string { return "openpgp: invalid signature: " + string(b) } -var ErrMDCHashMismatch error = SignatureError("MDC hash mismatch") -var ErrMDCMissing error = SignatureError("MDC packet not found") - type signatureExpiredError int func (se signatureExpiredError) Error() string { @@ -58,6 +95,14 @@ func (ke keyExpiredError) Error() string { return "openpgp: key expired" } +var ErrSignatureOlderThanKey error = signatureOlderThanKeyError(0) + +type signatureOlderThanKeyError int + +func (ske signatureOlderThanKeyError) Error() string { + return "openpgp: signature is older than the key" +} + var ErrKeyExpired error = keyExpiredError(0) type keyIncorrectError int @@ -92,12 +137,24 @@ func (keyRevokedError) Error() string { var ErrKeyRevoked error = keyRevokedError(0) +type WeakAlgorithmError string + +func (e WeakAlgorithmError) Error() string { + return "openpgp: weak algorithms are rejected: " + string(e) +} + type UnknownPacketTypeError uint8 func (upte UnknownPacketTypeError) Error() string { return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) } +type CriticalUnknownPacketTypeError uint8 + +func (upte CriticalUnknownPacketTypeError) Error() string { + return "openpgp: unknown critical packet type: " + strconv.Itoa(int(upte)) +} + // AEADError indicates that there is a problem when initializing or using a // AEAD instance, configuration struct, nonces or index values. 
type AEADError string @@ -114,3 +171,10 @@ type ErrDummyPrivateKey string func (dke ErrDummyPrivateKey) Error() string { return "openpgp: s2k GNU dummy key: " + string(dke) } + +// ErrMalformedMessage results when the packet sequence is incorrect +type ErrMalformedMessage string + +func (dke ErrMalformedMessage) Error() string { + return "openpgp: malformed message " + string(dke) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go index 5760cff80..c76a75bcd 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go @@ -51,24 +51,14 @@ func (sk CipherFunction) Id() uint8 { return uint8(sk) } -var keySizeByID = map[uint8]int{ - TripleDES.Id(): 24, - CAST5.Id(): cast5.KeySize, - AES128.Id(): 16, - AES192.Id(): 24, - AES256.Id(): 32, -} - // KeySize returns the key size, in bytes, of cipher. func (cipher CipherFunction) KeySize() int { switch cipher { - case TripleDES: - return 24 case CAST5: return cast5.KeySize case AES128: return 16 - case AES192: + case AES192, TripleDES: return 24 case AES256: return 32 diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go index 35751034d..0da2d0d85 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go @@ -4,11 +4,14 @@ package ecc import ( "bytes" "crypto/elliptic" + "github.com/ProtonMail/go-crypto/bitcurves" "github.com/ProtonMail/go-crypto/brainpool" "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" ) +const Curve25519GenName = "Curve25519" + type CurveInfo struct { GenName string Oid *encoding.OID @@ -42,19 +45,19 @@ var Curves = []CurveInfo{ }, { // Curve25519 - GenName: "Curve25519", + GenName: Curve25519GenName, Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}), Curve: NewCurve25519(), }, { - // X448 + // x448 GenName: "Curve448", Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x6F}), Curve: NewX448(), }, { // Ed25519 - GenName: "Curve25519", + GenName: Curve25519GenName, Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}), Curve: NewEd25519(), }, diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go index 54a08a8a3..5a4c3a859 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go @@ -2,6 +2,7 @@ package ecc import ( + "bytes" "crypto/subtle" "io" @@ -90,7 +91,14 @@ func (c *ed25519) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { } func getEd25519Sk(publicKey, privateKey []byte) ed25519lib.PrivateKey { - return append(privateKey, publicKey...) + privateKeyCap, privateKeyLen, publicKeyLen := cap(privateKey), len(privateKey), len(publicKey) + + if privateKeyCap >= privateKeyLen+publicKeyLen && + bytes.Equal(privateKey[privateKeyLen:privateKeyLen+publicKeyLen], publicKey) { + return privateKey[:privateKeyLen+publicKeyLen] + } + + return append(privateKey[:privateKeyLen:privateKeyLen], publicKey...) 
} func (c *ed25519) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go index 18cd80434..b6edda748 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go @@ -2,6 +2,7 @@ package ecc import ( + "bytes" "crypto/subtle" "io" @@ -84,7 +85,14 @@ func (c *ed448) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { } func getEd448Sk(publicKey, privateKey []byte) ed448lib.PrivateKey { - return append(privateKey, publicKey...) + privateKeyCap, privateKeyLen, publicKeyLen := cap(privateKey), len(privateKey), len(publicKey) + + if privateKeyCap >= privateKeyLen+publicKeyLen && + bytes.Equal(privateKey[privateKeyLen:privateKeyLen+publicKeyLen], publicKey) { + return privateKey[:privateKeyLen+publicKeyLen] + } + + return append(privateKey[:privateKeyLen:privateKeyLen], publicKey...) } func (c *ed448) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go index ffdd51513..df04262e9 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go @@ -73,7 +73,9 @@ func (c *x448) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err er func (c *x448) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) { var pk, ss x448lib.Key seed, e, err := c.generateKeyPairBytes(rand) - + if err != nil { + return nil, nil, err + } copy(pk[:], point) x448lib.Shared(&ss, &seed, &pk) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go index 0e71934cd..77213f66b 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go @@ -15,11 +15,15 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/ecdh" "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" "github.com/ProtonMail/go-crypto/openpgp/eddsa" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" ) // NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a @@ -36,8 +40,10 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err return nil, err } primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw) - if config != nil && config.V5Keys { - primary.UpgradeToV5() + if config.V6() { + if err := primary.UpgradeToV6(); err != nil { + return nil, err + } } e := &Entity{ @@ -45,9 +51,25 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err PrivateKey: primary, Identities: make(map[string]*Identity), Subkeys: []Subkey{}, + Signatures: []*packet.Signature{}, } - err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs) + if config.V6() { + // In v6 keys algorithm preferences should 
be stored in direct key signatures + selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypeDirectSignature, config) + err = writeKeyProperties(selfSignature, creationTime, keyLifetimeSecs, config) + if err != nil { + return nil, err + } + err = selfSignature.SignDirectKeyBinding(&primary.PublicKey, primary, config) + if err != nil { + return nil, err + } + e.Signatures = append(e.Signatures, selfSignature) + e.SelfSignature = selfSignature + } + + err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6()) if err != nil { return nil, err } @@ -65,32 +87,19 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err func (t *Entity) AddUserId(name, comment, email string, config *packet.Config) error { creationTime := config.Now() keyLifetimeSecs := config.KeyLifetime() - return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs) + return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6()) } -func (t *Entity) addUserId(name, comment, email string, config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error { - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return errors.InvalidArgumentError("user id field contained invalid characters") - } +func writeKeyProperties(selfSignature *packet.Signature, creationTime time.Time, keyLifetimeSecs uint32, config *packet.Config) error { + advertiseAead := config.AEAD() != nil - if _, ok := t.Identities[uid.Id]; ok { - return errors.InvalidArgumentError("user id exist") - } - - primary := t.PrivateKey - - isPrimaryId := len(t.Identities) == 0 - - selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config) selfSignature.CreationTime = creationTime selfSignature.KeyLifetimeSecs = &keyLifetimeSecs - selfSignature.IsPrimaryId = &isPrimaryId selfSignature.FlagsValid = true selfSignature.FlagSign = true selfSignature.FlagCertify = true selfSignature.SEIPDv1 = true // true by default, see 5.8 vs. 5.14 - selfSignature.SEIPDv2 = config.AEAD() != nil + selfSignature.SEIPDv2 = advertiseAead // Set the PreferredHash for the SelfSignature from the packet.Config. // If it is not the must-implement algorithm from rfc4880bis, append that. @@ -119,18 +128,44 @@ func (t *Entity) addUserId(name, comment, email string, config *packet.Config, c selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression())) } - // And for DefaultMode. - modes := []uint8{uint8(config.AEAD().Mode())} - if config.AEAD().Mode() != packet.AEADModeOCB { - modes = append(modes, uint8(packet.AEADModeOCB)) - } + if advertiseAead { + // Get the preferred AEAD mode from the packet.Config. + // If it is not the must-implement algorithm from rfc9580, append that. 
+ modes := []uint8{uint8(config.AEAD().Mode())} + if config.AEAD().Mode() != packet.AEADModeOCB { + modes = append(modes, uint8(packet.AEADModeOCB)) + } - // For preferred (AES256, GCM), we'll generate (AES256, GCM), (AES256, OCB), (AES128, GCM), (AES128, OCB) - for _, cipher := range selfSignature.PreferredSymmetric { - for _, mode := range modes { - selfSignature.PreferredCipherSuites = append(selfSignature.PreferredCipherSuites, [2]uint8{cipher, mode}) + // For preferred (AES256, GCM), we'll generate (AES256, GCM), (AES256, OCB), (AES128, GCM), (AES128, OCB) + for _, cipher := range selfSignature.PreferredSymmetric { + for _, mode := range modes { + selfSignature.PreferredCipherSuites = append(selfSignature.PreferredCipherSuites, [2]uint8{cipher, mode}) + } } } + return nil +} + +func (t *Entity) addUserId(name, comment, email string, config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32, writeProperties bool) error { + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return errors.InvalidArgumentError("user id field contained invalid characters") + } + + if _, ok := t.Identities[uid.Id]; ok { + return errors.InvalidArgumentError("user id exist") + } + + primary := t.PrivateKey + isPrimaryId := len(t.Identities) == 0 + selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config) + if writeProperties { + err := writeKeyProperties(selfSignature, creationTime, keyLifetimeSecs, config) + if err != nil { + return err + } + } + selfSignature.IsPrimaryId = &isPrimaryId // User ID binding signature err := selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config) @@ -158,8 +193,10 @@ func (e *Entity) AddSigningSubkey(config *packet.Config) error { } sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw) sub.IsSubkey = true - if config != nil && config.V5Keys { - sub.UpgradeToV5() + if config.V6() { + if err := sub.UpgradeToV6(); err != nil { + return err + } } subkey := Subkey{ @@ -203,8 +240,10 @@ func (e *Entity) addEncryptionSubkey(config *packet.Config, creationTime time.Ti } sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) sub.IsSubkey = true - if config != nil && config.V5Keys { - sub.UpgradeToV5() + if config.V6() { + if err := sub.UpgradeToV6(); err != nil { + return err + } } subkey := Subkey{ @@ -242,6 +281,11 @@ func newSigner(config *packet.Config) (signer interface{}, err error) { } return rsa.GenerateKey(config.Random(), bits) case packet.PubKeyAlgoEdDSA: + if config.V6() { + // Implementations MUST NOT accept or generate v6 key material + // using the deprecated OIDs. 
+ return nil, errors.InvalidArgumentError("EdDSALegacy cannot be used for v6 keys") + } curve := ecc.FindEdDSAByGenName(string(config.CurveName())) if curve == nil { return nil, errors.InvalidArgumentError("unsupported curve") @@ -263,6 +307,18 @@ func newSigner(config *packet.Config) (signer interface{}, err error) { return nil, err } return priv, nil + case packet.PubKeyAlgoEd25519: + priv, err := ed25519.GenerateKey(config.Random()) + if err != nil { + return nil, err + } + return priv, nil + case packet.PubKeyAlgoEd448: + priv, err := ed448.GenerateKey(config.Random()) + if err != nil { + return nil, err + } + return priv, nil default: return nil, errors.InvalidArgumentError("unsupported public key algorithm") } @@ -285,6 +341,13 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { case packet.PubKeyAlgoEdDSA, packet.PubKeyAlgoECDSA: fallthrough // When passing EdDSA or ECDSA, we generate an ECDH subkey case packet.PubKeyAlgoECDH: + if config.V6() && + (config.CurveName() == packet.Curve25519 || + config.CurveName() == packet.Curve448) { + // Implementations MUST NOT accept or generate v6 key material + // using the deprecated OIDs. + return nil, errors.InvalidArgumentError("ECDH with Curve25519/448 legacy cannot be used for v6 keys") + } var kdf = ecdh.KDF{ Hash: algorithm.SHA512, Cipher: algorithm.AES256, @@ -294,6 +357,10 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { return nil, errors.InvalidArgumentError("unsupported curve") } return ecdh.GenerateKey(config.Random(), curve, kdf) + case packet.PubKeyAlgoEd25519, packet.PubKeyAlgoX25519: // When passing Ed25519, we generate an x25519 subkey + return x25519.GenerateKey(config.Random()) + case packet.PubKeyAlgoEd448, packet.PubKeyAlgoX448: // When passing Ed448, we generate an x448 subkey + return x448.GenerateKey(config.Random()) default: return nil, errors.InvalidArgumentError("unsupported public key algorithm") } @@ -302,7 +369,7 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { var bigOne = big.NewInt(1) // generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the -// given bit size, using the given random source and prepopulated primes. +// given bit size, using the given random source and pre-populated primes. func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) { priv := new(rsa.PrivateKey) priv.E = 65537 diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go index 2d7b0cf37..a071353e2 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -6,6 +6,7 @@ package openpgp import ( goerrors "errors" + "fmt" "io" "time" @@ -24,11 +25,13 @@ var PrivateKeyType = "PGP PRIVATE KEY BLOCK" // (which must be a signing key), one or more identities claimed by that key, // and zero or more subkeys, which may be encryption keys. 
type Entity struct { - PrimaryKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Identities map[string]*Identity // indexed by Identity.Name - Revocations []*packet.Signature - Subkeys []Subkey + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + Subkeys []Subkey + SelfSignature *packet.Signature // Direct-key self signature of the PrimaryKey (contains primary key properties in v6) + Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures } // An Identity represents an identity claimed by an Entity and zero or more @@ -120,12 +123,12 @@ func shouldPreferIdentity(existingId, potentialNewId *Identity) bool { // given Entity. func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { // Fail to find any encryption key if the... - i := e.PrimaryIdentity() - if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired - i.SelfSignature == nil || // user ID has no self-signature - i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + primarySelfSignature, primaryIdentity := e.PrimarySelfSignature() + if primarySelfSignature == nil || // no self-signature found + e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired e.Revoked(now) || // primary key has been revoked - i.Revoked(now) { // user ID has been revoked + primarySelfSignature.SigExpired(now) || // user ID or or direct self-signature has expired + (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys) return Key{}, false } @@ -152,9 +155,9 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { // If we don't have any subkeys for encryption and the primary key // is marked as OK to encrypt with, then we can use it. - if i.SelfSignature.FlagsValid && i.SelfSignature.FlagEncryptCommunications && + if primarySelfSignature.FlagsValid && primarySelfSignature.FlagEncryptCommunications && e.PrimaryKey.PubKeyAlgo.CanEncrypt() { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true + return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true } return Key{}, false @@ -186,12 +189,12 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, bool) { // Fail to find any signing key if the... - i := e.PrimaryIdentity() - if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired - i.SelfSignature == nil || // user ID has no self-signature - i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + primarySelfSignature, primaryIdentity := e.PrimarySelfSignature() + if primarySelfSignature == nil || // no self-signature found + e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired e.Revoked(now) || // primary key has been revoked - i.Revoked(now) { // user ID has been revoked + primarySelfSignature.SigExpired(now) || // user ID or direct self-signature has expired + (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys) return Key{}, false } @@ -220,12 +223,12 @@ func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, // If we don't have any subkeys for signing and the primary key // is marked as OK to sign with, then we can use it. 
- if i.SelfSignature.FlagsValid && - (flags&packet.KeyFlagCertify == 0 || i.SelfSignature.FlagCertify) && - (flags&packet.KeyFlagSign == 0 || i.SelfSignature.FlagSign) && + if primarySelfSignature.FlagsValid && + (flags&packet.KeyFlagCertify == 0 || primarySelfSignature.FlagCertify) && + (flags&packet.KeyFlagSign == 0 || primarySelfSignature.FlagSign) && e.PrimaryKey.PubKeyAlgo.CanSign() && (id == 0 || e.PrimaryKey.KeyId == id) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true + return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true } // No keys with a valid Signing Flag or no keys matched the id passed in @@ -259,7 +262,7 @@ func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) er var keysToEncrypt []*packet.PrivateKey // Add entity private key to encrypt. if e.PrivateKey != nil && !e.PrivateKey.Dummy() && !e.PrivateKey.Encrypted { - keysToEncrypt = append(keysToEncrypt, e.PrivateKey) + keysToEncrypt = append(keysToEncrypt, e.PrivateKey) } // Add subkeys to encrypt. @@ -271,7 +274,7 @@ func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) er return packet.EncryptPrivateKeys(keysToEncrypt, passphrase, config) } -// DecryptPrivateKeys decrypts all encrypted keys in the entitiy with the given passphrase. +// DecryptPrivateKeys decrypts all encrypted keys in the entity with the given passphrase. // Avoids recomputation of similar s2k key derivations. Public keys and dummy keys are ignored, // and don't cause an error to be returned. func (e *Entity) DecryptPrivateKeys(passphrase []byte) error { @@ -284,7 +287,7 @@ func (e *Entity) DecryptPrivateKeys(passphrase []byte) error { // Add subkeys to decrypt. for _, sub := range e.Subkeys { if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && sub.PrivateKey.Encrypted { - keysToDecrypt = append(keysToDecrypt, sub.PrivateKey) + keysToDecrypt = append(keysToDecrypt, sub.PrivateKey) } } return packet.DecryptPrivateKeys(keysToDecrypt, passphrase) @@ -318,8 +321,7 @@ type EntityList []*Entity func (el EntityList) KeysById(id uint64) (keys []Key) { for _, e := range el { if e.PrimaryKey.KeyId == id { - ident := e.PrimaryIdentity() - selfSig := ident.SelfSignature + selfSig, _ := e.PrimarySelfSignature() keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations}) } @@ -441,7 +443,6 @@ func readToNextPublicKey(packets *packet.Reader) (err error) { return } else if err != nil { if _, ok := err.(errors.UnsupportedError); ok { - err = nil continue } return @@ -479,6 +480,7 @@ func ReadEntity(packets *packet.Reader) (*Entity, error) { } var revocations []*packet.Signature + var directSignatures []*packet.Signature EachPacket: for { p, err := packets.Next() @@ -497,9 +499,7 @@ EachPacket: if pkt.SigType == packet.SigTypeKeyRevocation { revocations = append(revocations, pkt) } else if pkt.SigType == packet.SigTypeDirectSignature { - // TODO: RFC4880 5.2.1 permits signatures - // directly on keys (eg. to bind additional - // revocation keys). + directSignatures = append(directSignatures, pkt) } // Else, ignoring the signature as it does not follow anything // we would know to attach it to. @@ -522,12 +522,39 @@ EachPacket: return nil, err } default: - // we ignore unknown packets + // we ignore unknown packets. 
} } - if len(e.Identities) == 0 { - return nil, errors.StructuralError("entity without any identities") + if len(e.Identities) == 0 && e.PrimaryKey.Version < 6 { + return nil, errors.StructuralError(fmt.Sprintf("v%d entity without any identities", e.PrimaryKey.Version)) + } + + // An implementation MUST ensure that a valid direct-key signature is present before using a v6 key. + if e.PrimaryKey.Version == 6 { + if len(directSignatures) == 0 { + return nil, errors.StructuralError("v6 entity without a valid direct-key signature") + } + // Select main direct key signature. + var mainDirectKeySelfSignature *packet.Signature + for _, directSignature := range directSignatures { + if directSignature.SigType == packet.SigTypeDirectSignature && + directSignature.CheckKeyIdOrFingerprint(e.PrimaryKey) && + (mainDirectKeySelfSignature == nil || + directSignature.CreationTime.After(mainDirectKeySelfSignature.CreationTime)) { + mainDirectKeySelfSignature = directSignature + } + } + if mainDirectKeySelfSignature == nil { + return nil, errors.StructuralError("no valid direct-key self-signature for v6 primary key found") + } + // Check that the main self-signature is valid. + err = e.PrimaryKey.VerifyDirectKeySignature(mainDirectKeySelfSignature) + if err != nil { + return nil, errors.StructuralError("invalid direct-key self-signature for v6 primary key") + } + e.SelfSignature = mainDirectKeySelfSignature + e.Signatures = directSignatures } for _, revocation := range revocations { @@ -672,6 +699,12 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo return err } } + for _, directSignature := range e.Signatures { + err := directSignature.Serialize(w) + if err != nil { + return err + } + } for _, ident := range e.Identities { err = ident.UserId.Serialize(w) if err != nil { @@ -738,6 +771,12 @@ func (e *Entity) Serialize(w io.Writer) error { return err } } + for _, directSignature := range e.Signatures { + err := directSignature.Serialize(w) + if err != nil { + return err + } + } for _, ident := range e.Identities { err = ident.UserId.Serialize(w) if err != nil { @@ -840,3 +879,23 @@ func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, rea sk.Revocations = append(sk.Revocations, revSig) return nil } + +func (e *Entity) primaryDirectSignature() *packet.Signature { + return e.SelfSignature +} + +// PrimarySelfSignature searches the entity for the self-signature that stores key preferences. +// For V4 keys, returns the self-signature of the primary identity, and the identity. +// For V6 keys, returns the latest valid direct-key self-signature, and no identity (nil). +// This self-signature is to be used to check the key expiration, +// algorithm preferences, and so on. 
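For orientation, a minimal usage sketch of the accessor introduced just below (not part of this change set; the identity strings are illustrative and the example assumes this patched module version is on the module path):

package main

import (
	"fmt"
	"log"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// Generate a v6 key so that key properties live on the direct-key self-signature.
	cfg := &packet.Config{V6Keys: true}
	entity, err := openpgp.NewEntity("Alice", "", "alice@example.com", cfg)
	if err != nil {
		log.Fatal(err)
	}
	// For v6 keys ident is nil; for v4 keys it is the primary identity.
	selfSig, ident := entity.PrimarySelfSignature()
	fmt.Println("have self-signature:", selfSig != nil, "bound to identity:", ident != nil)
}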
+func (e *Entity) PrimarySelfSignature() (*packet.Signature, *Identity) { + if e.PrimaryKey.Version == 6 { + return e.primaryDirectSignature(), nil + } + primaryIdentity := e.PrimaryIdentity() + if primaryIdentity == nil { + return nil, nil + } + return primaryIdentity.SelfSignature, primaryIdentity +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go index cee83bdc7..5e4604656 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go @@ -3,7 +3,6 @@ package packet import ( - "bytes" "crypto/cipher" "encoding/binary" "io" @@ -15,12 +14,11 @@ import ( type aeadCrypter struct { aead cipher.AEAD chunkSize int - initialNonce []byte + nonce []byte associatedData []byte // Chunk-independent associated data chunkIndex []byte // Chunk counter packetTag packetType // SEIP packet (v2) or AEAD Encrypted Data packet bytesProcessed int // Amount of plaintext bytes encrypted/decrypted - buffer bytes.Buffer // Buffered bytes across chunks } // computeNonce takes the incremental index and computes an eXclusive OR with @@ -28,12 +26,12 @@ type aeadCrypter struct { // 5.16.1 and 5.16.2). It returns the resulting nonce. func (wo *aeadCrypter) computeNextNonce() (nonce []byte) { if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected { - return append(wo.initialNonce, wo.chunkIndex...) + return wo.nonce } - nonce = make([]byte, len(wo.initialNonce)) - copy(nonce, wo.initialNonce) - offset := len(wo.initialNonce) - 8 + nonce = make([]byte, len(wo.nonce)) + copy(nonce, wo.nonce) + offset := len(wo.nonce) - 8 for i := 0; i < 8; i++ { nonce[i+offset] ^= wo.chunkIndex[i] } @@ -62,8 +60,9 @@ func (wo *aeadCrypter) incrementIndex() error { type aeadDecrypter struct { aeadCrypter // Embedded ciphertext opener reader io.Reader // 'reader' is a partialLengthReader + chunkBytes []byte peekedBytes []byte // Used to detect last chunk - eof bool + buffer []byte // Buffered decrypted bytes } // Read decrypts bytes and reads them into dst. It decrypts when necessary and @@ -71,51 +70,45 @@ type aeadDecrypter struct { // and an error. 
func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) { // Return buffered plaintext bytes from previous calls - if ar.buffer.Len() > 0 { - return ar.buffer.Read(dst) - } - - // Return EOF if we've previously validated the final tag - if ar.eof { - return 0, io.EOF + if len(ar.buffer) > 0 { + n = copy(dst, ar.buffer) + ar.buffer = ar.buffer[n:] + return } // Read a chunk tagLen := ar.aead.Overhead() - cipherChunkBuf := new(bytes.Buffer) - _, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize+tagLen)) - cipherChunk := cipherChunkBuf.Bytes() - if errRead != nil && errRead != io.EOF { + copy(ar.chunkBytes, ar.peekedBytes) // Copy bytes peeked in previous chunk or in initialization + bytesRead, errRead := io.ReadFull(ar.reader, ar.chunkBytes[tagLen:]) + if errRead != nil && errRead != io.EOF && errRead != io.ErrUnexpectedEOF { return 0, errRead } - decrypted, errChunk := ar.openChunk(cipherChunk) - if errChunk != nil { - return 0, errChunk - } - // Return decrypted bytes, buffering if necessary - if len(dst) < len(decrypted) { - n = copy(dst, decrypted[:len(dst)]) - ar.buffer.Write(decrypted[len(dst):]) - } else { - n = copy(dst, decrypted) - } + if bytesRead > 0 { + ar.peekedBytes = ar.chunkBytes[bytesRead:bytesRead+tagLen] - // Check final authentication tag - if errRead == io.EOF { - errChunk := ar.validateFinalTag(ar.peekedBytes) + decrypted, errChunk := ar.openChunk(ar.chunkBytes[:bytesRead]) if errChunk != nil { - return n, errChunk + return 0, errChunk } - ar.eof = true // Mark EOF for when we've returned all buffered data + + // Return decrypted bytes, buffering if necessary + n = copy(dst, decrypted) + ar.buffer = decrypted[n:] + return } - return + + return 0, io.EOF } -// Close is noOp. The final authentication tag of the stream was already -// checked in the last Read call. In the future, this function could be used to -// wipe the reader and peeked, decrypted bytes, if necessary. +// Close checks the final authentication tag of the stream. +// In the future, this function could also be used to wipe the reader +// and peeked & decrypted bytes, if necessary. func (ar *aeadDecrypter) Close() (err error) { + errChunk := ar.validateFinalTag(ar.peekedBytes) + if errChunk != nil { + return errChunk + } return nil } @@ -123,22 +116,15 @@ func (ar *aeadDecrypter) Close() (err error) { // the underlying plaintext and an error. It accesses peeked bytes from next // chunk, to identify the last chunk and decrypt/validate accordingly. func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) { - tagLen := ar.aead.Overhead() - // Restore carried bytes from last call - chunkExtra := append(ar.peekedBytes, data...) - // 'chunk' contains encrypted bytes, followed by an authentication tag. - chunk := chunkExtra[:len(chunkExtra)-tagLen] - ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:] - adata := ar.associatedData if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted { adata = append(ar.associatedData, ar.chunkIndex...) } nonce := ar.computeNextNonce() - plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata) + plainChunk, err := ar.aead.Open(data[:0:len(data)], nonce, data, adata) if err != nil { - return nil, err + return nil, errors.ErrAEADTagVerification } ar.bytesProcessed += len(plainChunk) if err = ar.aeadCrypter.incrementIndex(); err != nil { @@ -163,9 +149,8 @@ func (ar *aeadDecrypter) validateFinalTag(tag []byte) error { // ... and total number of encrypted octets adata = append(adata, amountBytes...) 
nonce := ar.computeNextNonce() - _, err := ar.aead.Open(nil, nonce, tag, adata) - if err != nil { - return err + if _, err := ar.aead.Open(nil, nonce, tag, adata); err != nil { + return errors.ErrAEADTagVerification } return nil } @@ -175,27 +160,29 @@ func (ar *aeadDecrypter) validateFinalTag(tag []byte) error { type aeadEncrypter struct { aeadCrypter // Embedded plaintext sealer writer io.WriteCloser // 'writer' is a partialLengthWriter + chunkBytes []byte + offset int } // Write encrypts and writes bytes. It encrypts when necessary and buffers extra // plaintext bytes for next call. When the stream is finished, Close() MUST be // called to append the final tag. func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { - // Append plaintextBytes to existing buffered bytes - n, err = aw.buffer.Write(plaintextBytes) - if err != nil { - return n, err - } - // Encrypt and write chunks - for aw.buffer.Len() >= aw.chunkSize { - plainChunk := aw.buffer.Next(aw.chunkSize) - encryptedChunk, err := aw.sealChunk(plainChunk) - if err != nil { - return n, err - } - _, err = aw.writer.Write(encryptedChunk) - if err != nil { - return n, err + for n != len(plaintextBytes) { + copied := copy(aw.chunkBytes[aw.offset:aw.chunkSize], plaintextBytes[n:]) + n += copied + aw.offset += copied + + if aw.offset == aw.chunkSize { + encryptedChunk, err := aw.sealChunk(aw.chunkBytes[:aw.offset]) + if err != nil { + return n, err + } + _, err = aw.writer.Write(encryptedChunk) + if err != nil { + return n, err + } + aw.offset = 0 } } return @@ -207,9 +194,8 @@ func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { func (aw *aeadEncrypter) Close() (err error) { // Encrypt and write a chunk if there's buffered data left, or if we haven't // written any chunks yet. 
- if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 { - plainChunk := aw.buffer.Bytes() - lastEncryptedChunk, err := aw.sealChunk(plainChunk) + if aw.offset > 0 || aw.bytesProcessed == 0 { + lastEncryptedChunk, err := aw.sealChunk(aw.chunkBytes[:aw.offset]) if err != nil { return err } @@ -255,7 +241,7 @@ func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) { } nonce := aw.computeNextNonce() - encrypted := aw.aead.Seal(nil, nonce, data, adata) + encrypted := aw.aead.Seal(data[:0], nonce, data, adata) aw.bytesProcessed += len(data) if err := aw.aeadCrypter.incrementIndex(); err != nil { return nil, err diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go index 98bd876bf..583765d87 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go @@ -65,24 +65,28 @@ func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) { blockCipher := ae.cipher.new(key) aead := ae.mode.new(blockCipher) // Carry the first tagLen bytes + chunkSize := decodeAEADChunkSize(ae.chunkSizeByte) tagLen := ae.mode.TagLength() - peekedBytes := make([]byte, tagLen) + chunkBytes := make([]byte, chunkSize+tagLen*2) + peekedBytes := chunkBytes[chunkSize+tagLen:] n, err := io.ReadFull(ae.Contents, peekedBytes) if n < tagLen || (err != nil && err != io.EOF) { return nil, errors.AEADError("Not enough data to decrypt:" + err.Error()) } - chunkSize := decodeAEADChunkSize(ae.chunkSizeByte) + return &aeadDecrypter{ aeadCrypter: aeadCrypter{ aead: aead, chunkSize: chunkSize, - initialNonce: ae.initialNonce, + nonce: ae.initialNonce, associatedData: ae.associatedData(), chunkIndex: make([]byte, 8), packetTag: packetTypeAEADEncrypted, }, reader: ae.Contents, - peekedBytes: peekedBytes}, nil + chunkBytes: chunkBytes, + peekedBytes: peekedBytes, + }, nil } // associatedData for chunks: tag, version, cipher, mode, chunk size byte diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go index 2f5cad71d..0bcb38cac 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go @@ -8,9 +8,10 @@ import ( "compress/bzip2" "compress/flate" "compress/zlib" - "github.com/ProtonMail/go-crypto/openpgp/errors" "io" "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" ) // Compressed represents a compressed OpenPGP packet. The decompressed contents @@ -39,6 +40,37 @@ type CompressionConfig struct { Level int } +// decompressionReader ensures that the whole compression packet is read. +type decompressionReader struct { + compressed io.Reader + decompressed io.ReadCloser + readAll bool +} + +func newDecompressionReader(r io.Reader, decompressor io.ReadCloser) *decompressionReader { + return &decompressionReader{ + compressed: r, + decompressed: decompressor, + } +} + +func (dr *decompressionReader) Read(data []byte) (n int, err error) { + if dr.readAll { + return 0, io.EOF + } + n, err = dr.decompressed.Read(data) + if err == io.EOF { + dr.readAll = true + // Close the decompressor. + if errDec := dr.decompressed.Close(); errDec != nil { + return n, errDec + } + // Consume all remaining data from the compressed packet. 
+ consumeAll(dr.compressed) + } + return n, err +} + func (c *Compressed) parse(r io.Reader) error { var buf [1]byte _, err := readFull(r, buf[:]) @@ -50,11 +82,15 @@ func (c *Compressed) parse(r io.Reader) error { case 0: c.Body = r case 1: - c.Body = flate.NewReader(r) + c.Body = newDecompressionReader(r, flate.NewReader(r)) case 2: - c.Body, err = zlib.NewReader(r) + decompressor, err := zlib.NewReader(r) + if err != nil { + return err + } + c.Body = newDecompressionReader(r, decompressor) case 3: - c.Body = bzip2.NewReader(r) + c.Body = newDecompressionReader(r, io.NopCloser(bzip2.NewReader(r))) default: err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go index 04994bec9..8bf8e6e51 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go @@ -14,6 +14,34 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/s2k" ) +var ( + defaultRejectPublicKeyAlgorithms = map[PublicKeyAlgorithm]bool{ + PubKeyAlgoElGamal: true, + PubKeyAlgoDSA: true, + } + defaultRejectHashAlgorithms = map[crypto.Hash]bool{ + crypto.MD5: true, + crypto.RIPEMD160: true, + } + defaultRejectMessageHashAlgorithms = map[crypto.Hash]bool{ + crypto.SHA1: true, + crypto.MD5: true, + crypto.RIPEMD160: true, + } + defaultRejectCurves = map[Curve]bool{ + CurveSecP256k1: true, + } +) + +// A global feature flag to indicate v5 support. +// Can be set via a build tag, e.g.: `go build -tags v5 ./...` +// If the build tag is missing config_v5.go will set it to true. +// +// Disables parsing of v5 keys and v5 signatures. +// These are non-standard entities, which in the crypto-refresh have been superseded +// by v6 keys, v6 signatures and SEIPDv2 encrypted data, respectively. +var V5Disabled = false + // Config collects a number of parameters along with sensible defaults. // A nil *Config is valid and results in all default values. type Config struct { @@ -73,9 +101,16 @@ type Config struct { // **Note: using this option may break compatibility with other OpenPGP // implementations, as well as future versions of this library.** AEADConfig *AEADConfig - // V5Keys configures version 5 key generation. If false, this package still - // supports version 5 keys, but produces version 4 keys. - V5Keys bool + // V6Keys configures version 6 key generation. If false, this package still + // supports version 6 keys, but produces version 4 keys. + V6Keys bool + // Minimum RSA key size allowed for key generation and message signing, verification and encryption. + MinRSABits uint16 + // Reject insecure algorithms, only works with v2 api + RejectPublicKeyAlgorithms map[PublicKeyAlgorithm]bool + RejectHashAlgorithms map[crypto.Hash]bool + RejectMessageHashAlgorithms map[crypto.Hash]bool + RejectCurves map[Curve]bool // "The validity period of the key. This is the number of seconds after // the key creation time that the key expires. If this is not present // or has a value of zero, the key never expires. This is found only on @@ -104,12 +139,40 @@ type Config struct { // might be no other way than to tolerate the missing MDC. Setting this flag, allows this // mode of operation. It should be considered a measure of last resort. InsecureAllowUnauthenticatedMessages bool + // InsecureAllowDecryptionWithSigningKeys allows decryption with keys marked as signing keys in the v2 API. 
+ // This setting is potentially insecure, but it is needed as some libraries + // ignored key flags when selecting a key for encryption. + // Not relevant for the v1 API, as all keys were allowed in decryption. + InsecureAllowDecryptionWithSigningKeys bool // KnownNotations is a map of Notation Data names to bools, which controls // the notation names that are allowed to be present in critical Notation Data // signature subpackets. KnownNotations map[string]bool // SignatureNotations is a list of Notations to be added to any signatures. SignatureNotations []*Notation + // CheckIntendedRecipients controls, whether the OpenPGP Intended Recipient Fingerprint feature + // should be enabled for encryption and decryption. + // (See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-12.html#name-intended-recipient-fingerpr). + // When the flag is set, encryption produces Intended Recipient Fingerprint signature sub-packets and decryption + // checks whether the key it was encrypted to is one of the included fingerprints in the signature. + // If the flag is disabled, no Intended Recipient Fingerprint sub-packets are created or checked. + // The default behavior, when the config or flag is nil, is to enable the feature. + CheckIntendedRecipients *bool + // CacheSessionKey controls if decryption should return the session key used for decryption. + // If the flag is set, the session key is cached in the message details struct. + CacheSessionKey bool + // CheckPacketSequence is a flag that controls if the pgp message reader should strictly check + // that the packet sequence conforms with the grammar mandated by rfc4880. + // The default behavior, when the config or flag is nil, is to check the packet sequence. + CheckPacketSequence *bool + // NonDeterministicSignaturesViaNotation is a flag to enable randomization of signatures. + // If true, a salt notation is used to randomize signatures generated by v4 and v5 keys + // (v6 signatures are always non-deterministic, by design). + // This protects EdDSA signatures from potentially leaking the secret key in case of faults (i.e. bitflips) which, in principle, could occur + // during the signing computation. It is added to signatures of any algo for simplicity, and as it may also serve as protection in case of + // weaknesses in the hash algo, potentially hindering e.g. some chosen-prefix attacks. + // The default behavior, when the config or flag is nil, is to enable the feature. 
+ NonDeterministicSignaturesViaNotation *bool } func (c *Config) Random() io.Reader { @@ -197,7 +260,7 @@ func (c *Config) S2K() *s2k.Config { return nil } // for backwards compatibility - if c != nil && c.S2KCount > 0 && c.S2KConfig == nil { + if c.S2KCount > 0 && c.S2KConfig == nil { return &s2k.Config{ S2KCount: c.S2KCount, } @@ -233,6 +296,13 @@ func (c *Config) AllowUnauthenticatedMessages() bool { return c.InsecureAllowUnauthenticatedMessages } +func (c *Config) AllowDecryptionWithSigningKeys() bool { + if c == nil { + return false + } + return c.InsecureAllowDecryptionWithSigningKeys +} + func (c *Config) KnownNotation(notationName string) bool { if c == nil { return false @@ -246,3 +316,95 @@ func (c *Config) Notations() []*Notation { } return c.SignatureNotations } + +func (c *Config) V6() bool { + if c == nil { + return false + } + return c.V6Keys +} + +func (c *Config) IntendedRecipients() bool { + if c == nil || c.CheckIntendedRecipients == nil { + return true + } + return *c.CheckIntendedRecipients +} + +func (c *Config) RetrieveSessionKey() bool { + if c == nil { + return false + } + return c.CacheSessionKey +} + +func (c *Config) MinimumRSABits() uint16 { + if c == nil || c.MinRSABits == 0 { + return 2047 + } + return c.MinRSABits +} + +func (c *Config) RejectPublicKeyAlgorithm(alg PublicKeyAlgorithm) bool { + var rejectedAlgorithms map[PublicKeyAlgorithm]bool + if c == nil || c.RejectPublicKeyAlgorithms == nil { + // Default + rejectedAlgorithms = defaultRejectPublicKeyAlgorithms + } else { + rejectedAlgorithms = c.RejectPublicKeyAlgorithms + } + return rejectedAlgorithms[alg] +} + +func (c *Config) RejectHashAlgorithm(hash crypto.Hash) bool { + var rejectedAlgorithms map[crypto.Hash]bool + if c == nil || c.RejectHashAlgorithms == nil { + // Default + rejectedAlgorithms = defaultRejectHashAlgorithms + } else { + rejectedAlgorithms = c.RejectHashAlgorithms + } + return rejectedAlgorithms[hash] +} + +func (c *Config) RejectMessageHashAlgorithm(hash crypto.Hash) bool { + var rejectedAlgorithms map[crypto.Hash]bool + if c == nil || c.RejectMessageHashAlgorithms == nil { + // Default + rejectedAlgorithms = defaultRejectMessageHashAlgorithms + } else { + rejectedAlgorithms = c.RejectMessageHashAlgorithms + } + return rejectedAlgorithms[hash] +} + +func (c *Config) RejectCurve(curve Curve) bool { + var rejectedCurve map[Curve]bool + if c == nil || c.RejectCurves == nil { + // Default + rejectedCurve = defaultRejectCurves + } else { + rejectedCurve = c.RejectCurves + } + return rejectedCurve[curve] +} + +func (c *Config) StrictPacketSequence() bool { + if c == nil || c.CheckPacketSequence == nil { + return true + } + return *c.CheckPacketSequence +} + +func (c *Config) RandomizeSignaturesViaNotation() bool { + if c == nil || c.NonDeterministicSignaturesViaNotation == nil { + return true + } + return *c.NonDeterministicSignaturesViaNotation +} + +// BoolPointer is a helper function to set a boolean pointer in the Config. 
+// e.g., config.CheckPacketSequence = BoolPointer(true) +func BoolPointer(value bool) *bool { + return &value +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config_v5.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config_v5.go new file mode 100644 index 000000000..f2415906b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config_v5.go @@ -0,0 +1,7 @@ +//go:build !v5 + +package packet + +func init() { + V5Disabled = true +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go index eeff2902c..b90bb2891 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go @@ -5,9 +5,11 @@ package packet import ( + "bytes" "crypto" "crypto/rsa" "encoding/binary" + "encoding/hex" "io" "math/big" "strconv" @@ -16,32 +18,85 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/elgamal" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" ) -const encryptedKeyVersion = 3 - // EncryptedKey represents a public-key encrypted session key. See RFC 4880, // section 5.1. type EncryptedKey struct { - KeyId uint64 - Algo PublicKeyAlgorithm - CipherFunc CipherFunction // only valid after a successful Decrypt for a v3 packet - Key []byte // only valid after a successful Decrypt + Version int + KeyId uint64 + KeyVersion int // v6 + KeyFingerprint []byte // v6 + Algo PublicKeyAlgorithm + CipherFunc CipherFunction // only valid after a successful Decrypt for a v3 packet + Key []byte // only valid after a successful Decrypt encryptedMPI1, encryptedMPI2 encoding.Field + ephemeralPublicX25519 *x25519.PublicKey // used for x25519 + ephemeralPublicX448 *x448.PublicKey // used for x448 + encryptedSession []byte // used for x25519 and x448 } func (e *EncryptedKey) parse(r io.Reader) (err error) { - var buf [10]byte - _, err = readFull(r, buf[:]) + var buf [8]byte + _, err = readFull(r, buf[:versionSize]) if err != nil { return } - if buf[0] != encryptedKeyVersion { + e.Version = int(buf[0]) + if e.Version != 3 && e.Version != 6 { return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) } - e.KeyId = binary.BigEndian.Uint64(buf[1:9]) - e.Algo = PublicKeyAlgorithm(buf[9]) + if e.Version == 6 { + //Read a one-octet size of the following two fields. 
+ if _, err = readFull(r, buf[:1]); err != nil { + return + } + // The size may also be zero, and the key version and + // fingerprint omitted for an "anonymous recipient" + if buf[0] != 0 { + // non-anonymous case + _, err = readFull(r, buf[:versionSize]) + if err != nil { + return + } + e.KeyVersion = int(buf[0]) + if e.KeyVersion != 4 && e.KeyVersion != 6 { + return errors.UnsupportedError("unknown public key version " + strconv.Itoa(e.KeyVersion)) + } + var fingerprint []byte + if e.KeyVersion == 6 { + fingerprint = make([]byte, fingerprintSizeV6) + } else if e.KeyVersion == 4 { + fingerprint = make([]byte, fingerprintSize) + } + _, err = readFull(r, fingerprint) + if err != nil { + return + } + e.KeyFingerprint = fingerprint + if e.KeyVersion == 6 { + e.KeyId = binary.BigEndian.Uint64(e.KeyFingerprint[:keyIdSize]) + } else if e.KeyVersion == 4 { + e.KeyId = binary.BigEndian.Uint64(e.KeyFingerprint[fingerprintSize-keyIdSize : fingerprintSize]) + } + } + } else { + _, err = readFull(r, buf[:8]) + if err != nil { + return + } + e.KeyId = binary.BigEndian.Uint64(buf[:keyIdSize]) + } + + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + e.Algo = PublicKeyAlgorithm(buf[0]) + var cipherFunction byte switch e.Algo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: e.encryptedMPI1 = new(encoding.MPI) @@ -68,26 +123,39 @@ func (e *EncryptedKey) parse(r io.Reader) (err error) { if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { return } + case PubKeyAlgoX25519: + e.ephemeralPublicX25519, e.encryptedSession, cipherFunction, err = x25519.DecodeFields(r, e.Version == 6) + if err != nil { + return + } + case PubKeyAlgoX448: + e.ephemeralPublicX448, e.encryptedSession, cipherFunction, err = x448.DecodeFields(r, e.Version == 6) + if err != nil { + return + } } + if e.Version < 6 { + switch e.Algo { + case PubKeyAlgoX25519, PubKeyAlgoX448: + e.CipherFunc = CipherFunction(cipherFunction) + // Check for validiy is in the Decrypt method + } + } + _, err = consumeAll(r) return } -func checksumKeyMaterial(key []byte) uint16 { - var checksum uint16 - for _, v := range key { - checksum += uint16(v) - } - return checksum -} - // Decrypt decrypts an encrypted session key with the given private key. The // private key must have been decrypted first. // If config is nil, sensible defaults will be used. 
func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { - if e.KeyId != 0 && e.KeyId != priv.KeyId { + if e.Version < 6 && e.KeyId != 0 && e.KeyId != priv.KeyId { return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16)) } + if e.Version == 6 && e.KeyVersion != 0 && !bytes.Equal(e.KeyFingerprint, priv.Fingerprint) { + return errors.InvalidArgumentError("cannot decrypt encrypted session key for key fingerprint " + hex.EncodeToString(e.KeyFingerprint) + " with private key fingerprint " + hex.EncodeToString(priv.Fingerprint)) + } if e.Algo != priv.PubKeyAlgo { return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) } @@ -113,52 +181,116 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { vsG := e.encryptedMPI1.Bytes() m := e.encryptedMPI2.Bytes() oid := priv.PublicKey.oid.EncodedBytes() - b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, priv.PublicKey.Fingerprint[:]) + fp := priv.PublicKey.Fingerprint[:] + if priv.PublicKey.Version == 5 { + // For v5 the, the fingerprint must be restricted to 20 bytes + fp = fp[:20] + } + b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, fp) + case PubKeyAlgoX25519: + b, err = x25519.Decrypt(priv.PrivateKey.(*x25519.PrivateKey), e.ephemeralPublicX25519, e.encryptedSession) + case PubKeyAlgoX448: + b, err = x448.Decrypt(priv.PrivateKey.(*x448.PrivateKey), e.ephemeralPublicX448, e.encryptedSession) default: err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) } - if err != nil { return err } - e.CipherFunc = CipherFunction(b[0]) - if !e.CipherFunc.IsSupported() { - return errors.UnsupportedError("unsupported encryption function") + var key []byte + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH: + keyOffset := 0 + if e.Version < 6 { + e.CipherFunc = CipherFunction(b[0]) + keyOffset = 1 + if !e.CipherFunc.IsSupported() { + return errors.UnsupportedError("unsupported encryption function") + } + } + key, err = decodeChecksumKey(b[keyOffset:]) + if err != nil { + return err + } + case PubKeyAlgoX25519, PubKeyAlgoX448: + if e.Version < 6 { + switch e.CipherFunc { + case CipherAES128, CipherAES192, CipherAES256: + break + default: + return errors.StructuralError("v3 PKESK mandates AES as cipher function for x25519 and x448") + } + } + key = b[:] + default: + return errors.UnsupportedError("unsupported algorithm for decryption") } - - e.Key = b[1 : len(b)-2] - expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) - checksum := checksumKeyMaterial(e.Key) - if checksum != expectedChecksum { - return errors.StructuralError("EncryptedKey checksum incorrect") - } - + e.Key = key return nil } // Serialize writes the encrypted key packet, e, to w. 
func (e *EncryptedKey) Serialize(w io.Writer) error { - var mpiLen int + var encodedLength int switch e.Algo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - mpiLen = int(e.encryptedMPI1.EncodedLength()) + encodedLength = int(e.encryptedMPI1.EncodedLength()) case PubKeyAlgoElGamal: - mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + encodedLength = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) case PubKeyAlgoECDH: - mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + encodedLength = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + case PubKeyAlgoX25519: + encodedLength = x25519.EncodedFieldsLength(e.encryptedSession, e.Version == 6) + case PubKeyAlgoX448: + encodedLength = x448.EncodedFieldsLength(e.encryptedSession, e.Version == 6) default: return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) } - err := serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) + packetLen := versionSize /* version */ + keyIdSize /* key id */ + algorithmSize /* algo */ + encodedLength + if e.Version == 6 { + packetLen = versionSize /* version */ + algorithmSize /* algo */ + encodedLength + keyVersionSize /* key version */ + if e.KeyVersion == 6 { + packetLen += fingerprintSizeV6 + } else if e.KeyVersion == 4 { + packetLen += fingerprintSize + } + } + + err := serializeHeader(w, packetTypeEncryptedKey, packetLen) if err != nil { return err } - w.Write([]byte{encryptedKeyVersion}) - binary.Write(w, binary.BigEndian, e.KeyId) - w.Write([]byte{byte(e.Algo)}) + _, err = w.Write([]byte{byte(e.Version)}) + if err != nil { + return err + } + if e.Version == 6 { + _, err = w.Write([]byte{byte(e.KeyVersion)}) + if err != nil { + return err + } + // The key version number may also be zero, + // and the fingerprint omitted + if e.KeyVersion != 0 { + _, err = w.Write(e.KeyFingerprint) + if err != nil { + return err + } + } + } else { + // Write KeyID + err = binary.Write(w, binary.BigEndian, e.KeyId) + if err != nil { + return err + } + } + _, err = w.Write([]byte{byte(e.Algo)}) + if err != nil { + return err + } switch e.Algo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: @@ -176,34 +308,115 @@ func (e *EncryptedKey) Serialize(w io.Writer) error { } _, err := w.Write(e.encryptedMPI2.EncodedBytes()) return err + case PubKeyAlgoX25519: + err := x25519.EncodeFields(w, e.ephemeralPublicX25519, e.encryptedSession, byte(e.CipherFunc), e.Version == 6) + return err + case PubKeyAlgoX448: + err := x448.EncodeFields(w, e.ephemeralPublicX448, e.encryptedSession, byte(e.CipherFunc), e.Version == 6) + return err default: panic("internal error") } } -// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// SerializeEncryptedKeyAEAD serializes an encrypted key packet to w that contains // key, encrypted to pub. +// If aeadSupported is set, PKESK v6 is used, otherwise v3. +// Note: aeadSupported MUST match the value passed to SerializeSymmetricallyEncrypted. // If config is nil, sensible defaults will be used. 
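A hedged sketch of the new aeadSupported parameter on the function introduced just below (not part of this change set; the recipient key is freshly generated only for illustration and error handling is minimal):

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"log"
	"time"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	cfg := &packet.Config{V6Keys: true}
	entity, err := openpgp.NewEntity("Bob", "", "bob@example.com", cfg)
	if err != nil {
		log.Fatal(err)
	}
	encKey, ok := entity.EncryptionKey(time.Now())
	if !ok {
		log.Fatal("no encryption-capable (sub)key found")
	}
	sessionKey := make([]byte, 32) // AES-256 session key
	if _, err := rand.Read(sessionKey); err != nil {
		log.Fatal(err)
	}
	var buf bytes.Buffer
	// aeadSupported=true emits a v6 PKESK; the same value must later be passed
	// to SerializeSymmetricallyEncrypted for the message body.
	err = packet.SerializeEncryptedKeyAEAD(&buf, encKey.PublicKey, packet.CipherAES256, true, sessionKey, cfg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("PKESK packet: %d bytes\n", buf.Len())
}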
-func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { - var buf [10]byte - buf[0] = encryptedKeyVersion - binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) - buf[9] = byte(pub.PubKeyAlgo) +func SerializeEncryptedKeyAEAD(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, aeadSupported bool, key []byte, config *Config) error { + return SerializeEncryptedKeyAEADwithHiddenOption(w, pub, cipherFunc, aeadSupported, key, false, config) +} - keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) - keyBlock[0] = byte(cipherFunc) - copy(keyBlock[1:], key) - checksum := checksumKeyMaterial(key) - keyBlock[1+len(key)] = byte(checksum >> 8) - keyBlock[1+len(key)+1] = byte(checksum) +// SerializeEncryptedKeyAEADwithHiddenOption serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// Offers the hidden flag option to indicated if the PKESK packet should include a wildcard KeyID. +// If aeadSupported is set, PKESK v6 is used, otherwise v3. +// Note: aeadSupported MUST match the value passed to SerializeSymmetricallyEncrypted. +// If config is nil, sensible defaults will be used. +func SerializeEncryptedKeyAEADwithHiddenOption(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, aeadSupported bool, key []byte, hidden bool, config *Config) error { + var buf [36]byte // max possible header size is v6 + lenHeaderWritten := versionSize + version := 3 + + if aeadSupported { + version = 6 + } + // An implementation MUST NOT generate ElGamal v6 PKESKs. + if version == 6 && pub.PubKeyAlgo == PubKeyAlgoElGamal { + return errors.InvalidArgumentError("ElGamal v6 PKESK are not allowed") + } + // In v3 PKESKs, for x25519 and x448, mandate using AES + if version == 3 && (pub.PubKeyAlgo == PubKeyAlgoX25519 || pub.PubKeyAlgo == PubKeyAlgoX448) { + switch cipherFunc { + case CipherAES128, CipherAES192, CipherAES256: + break + default: + return errors.InvalidArgumentError("v3 PKESK mandates AES for x25519 and x448") + } + } + + buf[0] = byte(version) + + // If hidden is set, the key should be hidden + // An implementation MAY accept or use a Key ID of all zeros, + // or a key version of zero and no key fingerprint, to hide the intended decryption key. + // See Section 5.1.8. in the open pgp crypto refresh + if version == 6 { + if !hidden { + // A one-octet size of the following two fields. + buf[1] = byte(keyVersionSize + len(pub.Fingerprint)) + // A one octet key version number. 
+ buf[2] = byte(pub.Version) + lenHeaderWritten += keyVersionSize + 1 + // The fingerprint of the public key + copy(buf[lenHeaderWritten:lenHeaderWritten+len(pub.Fingerprint)], pub.Fingerprint) + lenHeaderWritten += len(pub.Fingerprint) + } else { + // The size may also be zero, and the key version + // and fingerprint omitted for an "anonymous recipient" + buf[1] = 0 + lenHeaderWritten += 1 + } + } else { + if !hidden { + binary.BigEndian.PutUint64(buf[versionSize:(versionSize+keyIdSize)], pub.KeyId) + } + lenHeaderWritten += keyIdSize + } + buf[lenHeaderWritten] = byte(pub.PubKeyAlgo) + lenHeaderWritten += algorithmSize + + var keyBlock []byte + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH: + lenKeyBlock := len(key) + 2 + if version < 6 { + lenKeyBlock += 1 // cipher type included + } + keyBlock = make([]byte, lenKeyBlock) + keyOffset := 0 + if version < 6 { + keyBlock[0] = byte(cipherFunc) + keyOffset = 1 + } + encodeChecksumKey(keyBlock[keyOffset:], key) + case PubKeyAlgoX25519, PubKeyAlgoX448: + // algorithm is added in plaintext below + keyBlock = key + } switch pub.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) + return serializeEncryptedKeyRSA(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*rsa.PublicKey), keyBlock) case PubKeyAlgoElGamal: - return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) + return serializeEncryptedKeyElGamal(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*elgamal.PublicKey), keyBlock) case PubKeyAlgoECDH: - return serializeEncryptedKeyECDH(w, config.Random(), buf, pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint) + return serializeEncryptedKeyECDH(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint) + case PubKeyAlgoX25519: + return serializeEncryptedKeyX25519(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x25519.PublicKey), keyBlock, byte(cipherFunc), version) + case PubKeyAlgoX448: + return serializeEncryptedKeyX448(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x448.PublicKey), keyBlock, byte(cipherFunc), version) case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) } @@ -211,14 +424,32 @@ func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunctio return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) } -func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// PKESKv6 is used if config.AEAD() is not nil. +// If config is nil, sensible defaults will be used. +// Deprecated: Use SerializeEncryptedKeyAEAD instead. +func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + return SerializeEncryptedKeyAEAD(w, pub, cipherFunc, config.AEAD() != nil, key, config) +} + +// SerializeEncryptedKeyWithHiddenOption serializes an encrypted key packet to w that contains +// key, encrypted to pub. PKESKv6 is used if config.AEAD() is not nil. 
+// The hidden option controls if the packet should be anonymous, i.e., omit key metadata. +// If config is nil, sensible defaults will be used. +// Deprecated: Use SerializeEncryptedKeyAEADwithHiddenOption instead. +func SerializeEncryptedKeyWithHiddenOption(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, hidden bool, config *Config) error { + return SerializeEncryptedKeyAEADwithHiddenOption(w, pub, cipherFunc, config.AEAD() != nil, key, hidden, config) +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header []byte, pub *rsa.PublicKey, keyBlock []byte) error { cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) if err != nil { return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) } cipherMPI := encoding.NewMPI(cipherText) - packetLen := 10 /* header length */ + int(cipherMPI.EncodedLength()) + packetLen := len(header) /* header length */ + int(cipherMPI.EncodedLength()) err = serializeHeader(w, packetTypeEncryptedKey, packetLen) if err != nil { @@ -232,13 +463,13 @@ func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub return err } -func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header []byte, pub *elgamal.PublicKey, keyBlock []byte) error { c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) if err != nil { return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) } - packetLen := 10 /* header length */ + packetLen := len(header) /* header length */ packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 @@ -257,7 +488,7 @@ func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, return err } -func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error { +func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header []byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error { vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint) if err != nil { return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error()) @@ -266,7 +497,7 @@ func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub g := encoding.NewMPI(vsG) m := encoding.NewOID(c) - packetLen := 10 /* header length */ + packetLen := len(header) /* header length */ packetLen += int(g.EncodedLength()) + int(m.EncodedLength()) err = serializeHeader(w, packetTypeEncryptedKey, packetLen) @@ -284,3 +515,70 @@ func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub _, err = w.Write(m.EncodedBytes()) return err } + +func serializeEncryptedKeyX25519(w io.Writer, rand io.Reader, header []byte, pub *x25519.PublicKey, keyBlock []byte, cipherFunc byte, version int) error { + ephemeralPublicX25519, ciphertext, err := x25519.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("x25519 encryption failed: " + err.Error()) + } + + packetLen := len(header) /* header length */ + packetLen += x25519.EncodedFieldsLength(ciphertext, version == 6) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + return x25519.EncodeFields(w, ephemeralPublicX25519, ciphertext, 
cipherFunc, version == 6) +} + +func serializeEncryptedKeyX448(w io.Writer, rand io.Reader, header []byte, pub *x448.PublicKey, keyBlock []byte, cipherFunc byte, version int) error { + ephemeralPublicX448, ciphertext, err := x448.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("x448 encryption failed: " + err.Error()) + } + + packetLen := len(header) /* header length */ + packetLen += x448.EncodedFieldsLength(ciphertext, version == 6) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + return x448.EncodeFields(w, ephemeralPublicX448, ciphertext, cipherFunc, version == 6) +} + +func checksumKeyMaterial(key []byte) uint16 { + var checksum uint16 + for _, v := range key { + checksum += uint16(v) + } + return checksum +} + +func decodeChecksumKey(msg []byte) (key []byte, err error) { + key = msg[:len(msg)-2] + expectedChecksum := uint16(msg[len(msg)-2])<<8 | uint16(msg[len(msg)-1]) + checksum := checksumKeyMaterial(key) + if checksum != expectedChecksum { + err = errors.StructuralError("session key checksum is incorrect") + } + return +} + +func encodeChecksumKey(buffer []byte, key []byte) { + copy(buffer, key) + checksum := checksumKeyMaterial(key) + buffer[len(key)] = byte(checksum >> 8) + buffer[len(key)+1] = byte(checksum) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go index 4be987609..8a028c8a1 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go @@ -58,9 +58,9 @@ func (l *LiteralData) parse(r io.Reader) (err error) { // on completion. The fileName is truncated to 255 bytes. func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { var buf [4]byte - buf[0] = 't' - if isBinary { - buf[0] = 'b' + buf[0] = 'b' + if !isBinary { + buf[0] = 'u' } if len(fileName) > 255 { fileName = fileName[:255] diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go new file mode 100644 index 000000000..1ee378ba3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go @@ -0,0 +1,33 @@ +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +type Marker struct{} + +const markerString = "PGP" + +// parse just checks if the packet contains "PGP". +func (m *Marker) parse(reader io.Reader) error { + var buffer [3]byte + if _, err := io.ReadFull(reader, buffer[:]); err != nil { + return err + } + if string(buffer[:]) != markerString { + return errors.StructuralError("invalid marker packet") + } + return nil +} + +// SerializeMarker writes a marker packet to writer. 
+func SerializeMarker(writer io.Writer) error { + err := serializeHeader(writer, packetTypeMarker, len(markerString)) + if err != nil { + return err + } + _, err = writer.Write([]byte(markerString)) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go index 033fb2d7e..f393c4063 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go @@ -7,34 +7,37 @@ package packet import ( "crypto" "encoding/binary" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" "io" "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" ) // OnePassSignature represents a one-pass signature packet. See RFC 4880, // section 5.4. type OnePassSignature struct { - SigType SignatureType - Hash crypto.Hash - PubKeyAlgo PublicKeyAlgorithm - KeyId uint64 - IsLast bool + Version int + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool + Salt []byte // v6 only + KeyFingerprint []byte // v6 only } -const onePassSignatureVersion = 3 - func (ops *OnePassSignature) parse(r io.Reader) (err error) { - var buf [13]byte - - _, err = readFull(r, buf[:]) + var buf [8]byte + // Read: version | signature type | hash algorithm | public-key algorithm + _, err = readFull(r, buf[:4]) if err != nil { return } - if buf[0] != onePassSignatureVersion { - err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + if buf[0] != 3 && buf[0] != 6 { + return errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) } + ops.Version = int(buf[0]) var ok bool ops.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2]) @@ -44,15 +47,69 @@ func (ops *OnePassSignature) parse(r io.Reader) (err error) { ops.SigType = SignatureType(buf[1]) ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) - ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) - ops.IsLast = buf[12] != 0 + + if ops.Version == 6 { + // Only for v6, a variable-length field containing the salt + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + saltLength := int(buf[0]) + var expectedSaltLength int + expectedSaltLength, err = SaltLengthForHash(ops.Hash) + if err != nil { + return + } + if saltLength != expectedSaltLength { + err = errors.StructuralError("unexpected salt size for the given hash algorithm") + return + } + salt := make([]byte, expectedSaltLength) + _, err = readFull(r, salt) + if err != nil { + return + } + ops.Salt = salt + + // Only for v6 packets, 32 octets of the fingerprint of the signing key. + fingerprint := make([]byte, 32) + _, err = readFull(r, fingerprint) + if err != nil { + return + } + ops.KeyFingerprint = fingerprint + ops.KeyId = binary.BigEndian.Uint64(ops.KeyFingerprint[:8]) + } else { + _, err = readFull(r, buf[:8]) + if err != nil { + return + } + ops.KeyId = binary.BigEndian.Uint64(buf[:8]) + } + + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + ops.IsLast = buf[0] != 0 return } // Serialize marshals the given OnePassSignature to w. 
func (ops *OnePassSignature) Serialize(w io.Writer) error { - var buf [13]byte - buf[0] = onePassSignatureVersion + //v3 length 1+1+1+1+8+1 = + packetLength := 13 + if ops.Version == 6 { + // v6 length 1+1+1+1+1+len(salt)+32+1 = + packetLength = 38 + len(ops.Salt) + } + + if err := serializeHeader(w, packetTypeOnePassSignature, packetLength); err != nil { + return err + } + + var buf [8]byte + buf[0] = byte(ops.Version) buf[1] = uint8(ops.SigType) var ok bool buf[2], ok = algorithm.HashToHashIdWithSha1(ops.Hash) @@ -60,14 +117,41 @@ func (ops *OnePassSignature) Serialize(w io.Writer) error { return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) } buf[3] = uint8(ops.PubKeyAlgo) - binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) - if ops.IsLast { - buf[12] = 1 - } - if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { + _, err := w.Write(buf[:4]) + if err != nil { return err } - _, err := w.Write(buf[:]) + + if ops.Version == 6 { + // write salt for v6 signatures + _, err := w.Write([]byte{uint8(len(ops.Salt))}) + if err != nil { + return err + } + _, err = w.Write(ops.Salt) + if err != nil { + return err + } + + // write fingerprint v6 signatures + _, err = w.Write(ops.KeyFingerprint) + if err != nil { + return err + } + } else { + binary.BigEndian.PutUint64(buf[:8], ops.KeyId) + _, err := w.Write(buf[:8]) + if err != nil { + return err + } + } + + isLast := []byte{byte(0)} + if ops.IsLast { + isLast[0] = 1 + } + + _, err = w.Write(isLast) return err } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go index 4f8204079..cef7c661d 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go @@ -7,7 +7,6 @@ package packet import ( "bytes" "io" - "io/ioutil" "github.com/ProtonMail/go-crypto/openpgp/errors" ) @@ -26,7 +25,7 @@ type OpaquePacket struct { } func (op *OpaquePacket) parse(r io.Reader) (err error) { - op.Contents, err = ioutil.ReadAll(r) + op.Contents, err = io.ReadAll(r) return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go index 4d86a7da8..1e92e22c9 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go @@ -311,12 +311,15 @@ const ( packetTypePrivateSubkey packetType = 7 packetTypeCompressed packetType = 8 packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeMarker packetType = 10 packetTypeLiteralData packetType = 11 + packetTypeTrust packetType = 12 packetTypeUserId packetType = 13 packetTypePublicSubkey packetType = 14 packetTypeUserAttribute packetType = 17 packetTypeSymmetricallyEncryptedIntegrityProtected packetType = 18 packetTypeAEADEncrypted packetType = 20 + packetPadding packetType = 21 ) // EncryptedDataPacket holds encrypted data. It is currently implemented by @@ -328,7 +331,7 @@ type EncryptedDataPacket interface { // Read reads a single OpenPGP packet from the given io.Reader. If there is an // error parsing a packet, the whole packet is consumed from the input. 
func Read(r io.Reader) (p Packet, err error) { - tag, _, contents, err := readHeader(r) + tag, len, contents, err := readHeader(r) if err != nil { return } @@ -367,8 +370,93 @@ func Read(r io.Reader) (p Packet, err error) { p = se case packetTypeAEADEncrypted: p = new(AEADEncrypted) - default: + case packetPadding: + p = Padding(len) + case packetTypeMarker: + p = new(Marker) + case packetTypeTrust: + // Not implemented, just consume err = errors.UnknownPacketTypeError(tag) + default: + // Packet Tags from 0 to 39 are critical. + // Packet Tags from 40 to 63 are non-critical. + if tag < 40 { + err = errors.CriticalUnknownPacketTypeError(tag) + } else { + err = errors.UnknownPacketTypeError(tag) + } + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// ReadWithCheck reads a single OpenPGP message packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +// ReadWithCheck additionally checks if the OpenPGP message packet sequence adheres +// to the packet composition rules in rfc4880, if not throws an error. +func ReadWithCheck(r io.Reader, sequence *SequenceVerifier) (p Packet, msgErr error, err error) { + tag, len, contents, err := readHeader(r) + if err != nil { + return + } + switch tag { + case packetTypeEncryptedKey: + msgErr = sequence.Next(ESKSymbol) + p = new(EncryptedKey) + case packetTypeSignature: + msgErr = sequence.Next(SigSymbol) + p = new(Signature) + case packetTypeSymmetricKeyEncrypted: + msgErr = sequence.Next(ESKSymbol) + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + msgErr = sequence.Next(OPSSymbol) + p = new(OnePassSignature) + case packetTypeCompressed: + msgErr = sequence.Next(CompSymbol) + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + msgErr = sequence.Next(EncSymbol) + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + msgErr = sequence.Next(LDSymbol) + p = new(LiteralData) + case packetTypeSymmetricallyEncryptedIntegrityProtected: + msgErr = sequence.Next(EncSymbol) + se := new(SymmetricallyEncrypted) + se.IntegrityProtected = true + p = se + case packetTypeAEADEncrypted: + msgErr = sequence.Next(EncSymbol) + p = new(AEADEncrypted) + case packetPadding: + p = Padding(len) + case packetTypeMarker: + p = new(Marker) + case packetTypeTrust: + // Not implemented, just consume + err = errors.UnknownPacketTypeError(tag) + case packetTypePrivateKey, + packetTypePrivateSubkey, + packetTypePublicKey, + packetTypePublicSubkey, + packetTypeUserId, + packetTypeUserAttribute: + msgErr = sequence.Next(UnknownSymbol) + consumeAll(contents) + default: + // Packet Tags from 0 to 39 are critical. + // Packet Tags from 40 to 63 are non-critical. 
+ if tag < 40 { + err = errors.CriticalUnknownPacketTypeError(tag) + } else { + err = errors.UnknownPacketTypeError(tag) + } } if p != nil { err = p.parse(contents) @@ -385,17 +473,17 @@ type SignatureType uint8 const ( SigTypeBinary SignatureType = 0x00 - SigTypeText = 0x01 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypePrimaryKeyBinding = 0x19 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 - SigTypeCertificationRevocation = 0x30 + SigTypeText SignatureType = 0x01 + SigTypeGenericCert SignatureType = 0x10 + SigTypePersonaCert SignatureType = 0x11 + SigTypeCasualCert SignatureType = 0x12 + SigTypePositiveCert SignatureType = 0x13 + SigTypeSubkeyBinding SignatureType = 0x18 + SigTypePrimaryKeyBinding SignatureType = 0x19 + SigTypeDirectSignature SignatureType = 0x1F + SigTypeKeyRevocation SignatureType = 0x20 + SigTypeSubkeyRevocation SignatureType = 0x28 + SigTypeCertificationRevocation SignatureType = 0x30 ) // PublicKeyAlgorithm represents the different public key system specified for @@ -412,6 +500,11 @@ const ( PubKeyAlgoECDSA PublicKeyAlgorithm = 19 // https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt PubKeyAlgoEdDSA PublicKeyAlgorithm = 22 + // https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh + PubKeyAlgoX25519 PublicKeyAlgorithm = 25 + PubKeyAlgoX448 PublicKeyAlgorithm = 26 + PubKeyAlgoEd25519 PublicKeyAlgorithm = 27 + PubKeyAlgoEd448 PublicKeyAlgorithm = 28 // Deprecated in RFC 4880, Section 13.5. Use key flags instead. PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 @@ -422,7 +515,7 @@ const ( // key of the given type. func (pka PublicKeyAlgorithm) CanEncrypt() bool { switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH: + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH, PubKeyAlgoX25519, PubKeyAlgoX448: return true } return false @@ -432,7 +525,7 @@ func (pka PublicKeyAlgorithm) CanEncrypt() bool { // sign a message. func (pka PublicKeyAlgorithm) CanSign() bool { switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448: return true } return false @@ -512,6 +605,11 @@ func (mode AEADMode) TagLength() int { return algorithm.AEADMode(mode).TagLength() } +// IsSupported returns true if the aead mode is supported from the library +func (mode AEADMode) IsSupported() bool { + return algorithm.AEADMode(mode).TagLength() > 0 +} + // new returns a fresh instance of the given mode. func (mode AEADMode) new(block cipher.Block) cipher.AEAD { return algorithm.AEADMode(mode).New(block) @@ -526,8 +624,17 @@ const ( KeySuperseded ReasonForRevocation = 1 KeyCompromised ReasonForRevocation = 2 KeyRetired ReasonForRevocation = 3 + UserIDNotValid ReasonForRevocation = 32 + Unknown ReasonForRevocation = 200 ) +func NewReasonForRevocation(value byte) ReasonForRevocation { + if value < 4 || value == 32 { + return ReasonForRevocation(value) + } + return Unknown +} + // Curve is a mapping to supported ECC curves for key generation. 
// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-06.html#name-curve-specific-wire-formats type Curve string @@ -549,3 +656,20 @@ type TrustLevel uint8 // TrustAmount represents a trust amount per RFC4880 5.2.3.13 type TrustAmount uint8 + +const ( + // versionSize is the length in bytes of the version value. + versionSize = 1 + // algorithmSize is the length in bytes of the key algorithm value. + algorithmSize = 1 + // keyVersionSize is the length in bytes of the key version value + keyVersionSize = 1 + // keyIdSize is the length in bytes of the key identifier value. + keyIdSize = 8 + // timestampSize is the length in bytes of encoded timestamps. + timestampSize = 4 + // fingerprintSizeV6 is the length in bytes of the key fingerprint in v6. + fingerprintSizeV6 = 32 + // fingerprintSize is the length in bytes of the key fingerprint. + fingerprintSize = 20 +) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go new file mode 100644 index 000000000..55a8a56c2 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go @@ -0,0 +1,222 @@ +package packet + +// This file implements the pushdown automata (PDA) from PGPainless (Paul Schaub) +// to verify pgp packet sequences. See Paul's blogpost for more details: +// https://blog.jabberhead.tk/2022/10/26/implementing-packet-sequence-validation-using-pushdown-automata/ +import ( + "fmt" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +func NewErrMalformedMessage(from State, input InputSymbol, stackSymbol StackSymbol) errors.ErrMalformedMessage { + return errors.ErrMalformedMessage(fmt.Sprintf("state %d, input symbol %d, stack symbol %d ", from, input, stackSymbol)) +} + +// InputSymbol defines the input alphabet of the PDA +type InputSymbol uint8 + +const ( + LDSymbol InputSymbol = iota + SigSymbol + OPSSymbol + CompSymbol + ESKSymbol + EncSymbol + EOSSymbol + UnknownSymbol +) + +// StackSymbol defines the stack alphabet of the PDA +type StackSymbol int8 + +const ( + MsgStackSymbol StackSymbol = iota + OpsStackSymbol + KeyStackSymbol + EndStackSymbol + EmptyStackSymbol +) + +// State defines the states of the PDA +type State int8 + +const ( + OpenPGPMessage State = iota + ESKMessage + LiteralMessage + CompressedMessage + EncryptedMessage + ValidMessage +) + +// transition represents a state transition in the PDA +type transition func(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) + +// SequenceVerifier is a pushdown automata to verify +// PGP messages packet sequences according to rfc4880. +type SequenceVerifier struct { + stack []StackSymbol + state State +} + +// Next performs a state transition with the given input symbol. +// If the transition fails a ErrMalformedMessage is returned. +func (sv *SequenceVerifier) Next(input InputSymbol) error { + for { + stackSymbol := sv.popStack() + transitionFunc := getTransition(sv.state) + nextState, newStackSymbols, redo, err := transitionFunc(input, stackSymbol) + if err != nil { + return err + } + if redo { + sv.pushStack(stackSymbol) + } + for _, newStackSymbol := range newStackSymbols { + sv.pushStack(newStackSymbol) + } + sv.state = nextState + if !redo { + break + } + } + return nil +} + +// Valid returns true if RDA is in a valid state. 
+func (sv *SequenceVerifier) Valid() bool { + return sv.state == ValidMessage && len(sv.stack) == 0 +} + +func (sv *SequenceVerifier) AssertValid() error { + if !sv.Valid() { + return errors.ErrMalformedMessage("invalid message") + } + return nil +} + +func NewSequenceVerifier() *SequenceVerifier { + return &SequenceVerifier{ + stack: []StackSymbol{EndStackSymbol, MsgStackSymbol}, + state: OpenPGPMessage, + } +} + +func (sv *SequenceVerifier) popStack() StackSymbol { + if len(sv.stack) == 0 { + return EmptyStackSymbol + } + elemIndex := len(sv.stack) - 1 + stackSymbol := sv.stack[elemIndex] + sv.stack = sv.stack[:elemIndex] + return stackSymbol +} + +func (sv *SequenceVerifier) pushStack(stackSymbol StackSymbol) { + sv.stack = append(sv.stack, stackSymbol) +} + +func getTransition(from State) transition { + switch from { + case OpenPGPMessage: + return fromOpenPGPMessage + case LiteralMessage: + return fromLiteralMessage + case CompressedMessage: + return fromCompressedMessage + case EncryptedMessage: + return fromEncryptedMessage + case ESKMessage: + return fromESKMessage + case ValidMessage: + return fromValidMessage + } + return nil +} + +// fromOpenPGPMessage is the transition for the state OpenPGPMessage. +func fromOpenPGPMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + if stackSymbol != MsgStackSymbol { + return 0, nil, false, NewErrMalformedMessage(OpenPGPMessage, input, stackSymbol) + } + switch input { + case LDSymbol: + return LiteralMessage, nil, false, nil + case SigSymbol: + return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, false, nil + case OPSSymbol: + return OpenPGPMessage, []StackSymbol{OpsStackSymbol, MsgStackSymbol}, false, nil + case CompSymbol: + return CompressedMessage, nil, false, nil + case ESKSymbol: + return ESKMessage, []StackSymbol{KeyStackSymbol}, false, nil + case EncSymbol: + return EncryptedMessage, nil, false, nil + } + return 0, nil, false, NewErrMalformedMessage(OpenPGPMessage, input, stackSymbol) +} + +// fromESKMessage is the transition for the state ESKMessage. +func fromESKMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + if stackSymbol != KeyStackSymbol { + return 0, nil, false, NewErrMalformedMessage(ESKMessage, input, stackSymbol) + } + switch input { + case ESKSymbol: + return ESKMessage, []StackSymbol{KeyStackSymbol}, false, nil + case EncSymbol: + return EncryptedMessage, nil, false, nil + } + return 0, nil, false, NewErrMalformedMessage(ESKMessage, input, stackSymbol) +} + +// fromLiteralMessage is the transition for the state LiteralMessage. +func fromLiteralMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + switch input { + case SigSymbol: + if stackSymbol == OpsStackSymbol { + return LiteralMessage, nil, false, nil + } + case EOSSymbol: + if stackSymbol == EndStackSymbol { + return ValidMessage, nil, false, nil + } + } + return 0, nil, false, NewErrMalformedMessage(LiteralMessage, input, stackSymbol) +} + +// fromLiteralMessage is the transition for the state CompressedMessage. 
+func fromCompressedMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + switch input { + case SigSymbol: + if stackSymbol == OpsStackSymbol { + return CompressedMessage, nil, false, nil + } + case EOSSymbol: + if stackSymbol == EndStackSymbol { + return ValidMessage, nil, false, nil + } + } + return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, true, nil +} + +// fromEncryptedMessage is the transition for the state EncryptedMessage. +func fromEncryptedMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + switch input { + case SigSymbol: + if stackSymbol == OpsStackSymbol { + return EncryptedMessage, nil, false, nil + } + case EOSSymbol: + if stackSymbol == EndStackSymbol { + return ValidMessage, nil, false, nil + } + } + return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, true, nil +} + +// fromValidMessage is the transition for the state ValidMessage. +func fromValidMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + return 0, nil, false, NewErrMalformedMessage(ValidMessage, input, stackSymbol) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go new file mode 100644 index 000000000..2d714723c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go @@ -0,0 +1,24 @@ +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// UnsupportedPackage represents a OpenPGP packet with a known packet type +// but with unsupported content. +type UnsupportedPacket struct { + IncompletePacket Packet + Error errors.UnsupportedError +} + +// Implements the Packet interface +func (up *UnsupportedPacket) parse(read io.Reader) error { + err := up.IncompletePacket.parse(read) + if castedErr, ok := err.(errors.UnsupportedError); ok { + up.Error = castedErr + return nil + } + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go new file mode 100644 index 000000000..3b6a7045d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go @@ -0,0 +1,26 @@ +package packet + +import ( + "io" +) + +// Padding type represents a Padding Packet (Tag 21). +// The padding type is represented by the length of its padding. +// see https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-padding-packet-tag-21 +type Padding int + +// parse just ignores the padding content. +func (pad Padding) parse(reader io.Reader) error { + _, err := io.CopyN(io.Discard, reader, int64(pad)) + return err +} + +// SerializePadding writes the padding to writer. 
+func (pad Padding) SerializePadding(writer io.Writer, rand io.Reader) error { + err := serializeHeader(writer, packetPadding, int(pad)) + if err != nil { + return err + } + _, err = io.CopyN(writer, rand, int64(pad)) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go index 2fc438643..f04e6c6b8 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go @@ -9,22 +9,28 @@ import ( "crypto" "crypto/cipher" "crypto/dsa" - "crypto/rand" "crypto/rsa" "crypto/sha1" + "crypto/sha256" + "crypto/subtle" + "fmt" "io" - "io/ioutil" "math/big" "strconv" "time" "github.com/ProtonMail/go-crypto/openpgp/ecdh" "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" "github.com/ProtonMail/go-crypto/openpgp/eddsa" "github.com/ProtonMail/go-crypto/openpgp/elgamal" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" "github.com/ProtonMail/go-crypto/openpgp/s2k" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" + "golang.org/x/crypto/hkdf" ) // PrivateKey represents a possibly encrypted private key. See RFC 4880, @@ -35,14 +41,14 @@ type PrivateKey struct { encryptedData []byte cipher CipherFunction s2k func(out, in []byte) - // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519}.PrivateKey or + aead AEADMode // only relevant if S2KAEAD is enabled + // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519|ed448}.PrivateKey or // crypto.Signer/crypto.Decrypter (Decryptor RSA only). - PrivateKey interface{} - sha1Checksum bool - iv []byte + PrivateKey interface{} + iv []byte // Type of encryption of the S2K packet - // Allowed values are 0 (Not encrypted), 254 (SHA1), or + // Allowed values are 0 (Not encrypted), 253 (AEAD), 254 (SHA1), or // 255 (2-byte checksum) s2kType S2KType // Full parameters of the S2K packet @@ -55,6 +61,8 @@ type S2KType uint8 const ( // S2KNON unencrypt S2KNON S2KType = 0 + // S2KAEAD use authenticated encryption + S2KAEAD S2KType = 253 // S2KSHA1 sha1 sum check S2KSHA1 S2KType = 254 // S2KCHECKSUM sum check @@ -103,6 +111,34 @@ func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKe return pk } +func NewX25519PrivateKey(creationTime time.Time, priv *x25519.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewX448PrivateKey(creationTime time.Time, priv *x448.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewX448PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewEd25519PrivateKey(creationTime time.Time, priv *ed25519.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewEd25519PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewEd448PrivateKey(creationTime time.Time, priv *ed448.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewEd448PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + // NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that // implements RSA, ECDSA or EdDSA. 
func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey { @@ -122,6 +158,14 @@ func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey) case eddsa.PrivateKey: pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey) + case *ed25519.PrivateKey: + pk.PublicKey = *NewEd25519PublicKey(creationTime, &pubkey.PublicKey) + case ed25519.PrivateKey: + pk.PublicKey = *NewEd25519PublicKey(creationTime, &pubkey.PublicKey) + case *ed448.PrivateKey: + pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey) + case ed448.PrivateKey: + pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey) default: panic("openpgp: unknown signer type in NewSignerPrivateKey") } @@ -129,7 +173,7 @@ func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey return pk } -// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh}.PrivateKey. +// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh|x25519|x448}.PrivateKey. func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey { pk := new(PrivateKey) switch priv := decrypter.(type) { @@ -139,6 +183,10 @@ func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *Priv pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) case *ecdh.PrivateKey: pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) + case *x25519.PrivateKey: + pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey) + case *x448.PrivateKey: + pk.PublicKey = *NewX448PublicKey(creationTime, &priv.PublicKey) default: panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey") } @@ -152,6 +200,11 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { return } v5 := pk.PublicKey.Version == 5 + v6 := pk.PublicKey.Version == 6 + + if V5Disabled && v5 { + return errors.UnsupportedError("support for parsing v5 entities is disabled; build with `-tags v5` if needed") + } var buf [1]byte _, err = readFull(r, buf[:]) @@ -160,7 +213,7 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { } pk.s2kType = S2KType(buf[0]) var optCount [1]byte - if v5 { + if v5 || (v6 && pk.s2kType != S2KNON) { if _, err = readFull(r, optCount[:]); err != nil { return } @@ -170,9 +223,9 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { case S2KNON: pk.s2k = nil pk.Encrypted = false - case S2KSHA1, S2KCHECKSUM: - if v5 && pk.s2kType == S2KCHECKSUM { - return errors.StructuralError("wrong s2k identifier for version 5") + case S2KSHA1, S2KCHECKSUM, S2KAEAD: + if (v5 || v6) && pk.s2kType == S2KCHECKSUM { + return errors.StructuralError(fmt.Sprintf("wrong s2k identifier for version %d", pk.Version)) } _, err = readFull(r, buf[:]) if err != nil { @@ -182,6 +235,29 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { if pk.cipher != 0 && !pk.cipher.IsSupported() { return errors.UnsupportedError("unsupported cipher function in private key") } + // [Optional] If string-to-key usage octet was 253, + // a one-octet AEAD algorithm. + if pk.s2kType == S2KAEAD { + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.aead = AEADMode(buf[0]) + if !pk.aead.IsSupported() { + return errors.UnsupportedError("unsupported aead mode in private key") + } + } + + // [Optional] Only for a version 6 packet, + // and if string-to-key usage octet was 255, 254, or 253, + // an one-octet count of the following field. 
+ if v6 { + _, err = readFull(r, buf[:]) + if err != nil { + return + } + } + pk.s2kParams, err = s2k.ParseIntoParams(r) if err != nil { return @@ -189,28 +265,43 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { if pk.s2kParams.Dummy() { return } + if pk.s2kParams.Mode() == s2k.Argon2S2K && pk.s2kType != S2KAEAD { + return errors.StructuralError("using Argon2 S2K without AEAD is not allowed") + } + if pk.s2kParams.Mode() == s2k.SimpleS2K && pk.Version == 6 { + return errors.StructuralError("using Simple S2K with version 6 keys is not allowed") + } pk.s2k, err = pk.s2kParams.Function() if err != nil { return } pk.Encrypted = true - if pk.s2kType == S2KSHA1 { - pk.sha1Checksum = true - } default: return errors.UnsupportedError("deprecated s2k function in private key") } if pk.Encrypted { - blockSize := pk.cipher.blockSize() - if blockSize == 0 { + var ivSize int + // If the S2K usage octet was 253, the IV is of the size expected by the AEAD mode, + // unless it's a version 5 key, in which case it's the size of the symmetric cipher's block size. + // For all other S2K modes, it's always the block size. + if !v5 && pk.s2kType == S2KAEAD { + ivSize = pk.aead.IvLength() + } else { + ivSize = pk.cipher.blockSize() + } + + if ivSize == 0 { return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) } - pk.iv = make([]byte, blockSize) + pk.iv = make([]byte, ivSize) _, err = readFull(r, pk.iv) if err != nil { return } + if v5 && pk.s2kType == S2KAEAD { + pk.iv = pk.iv[:pk.aead.IvLength()] + } } var privateKeyData []byte @@ -230,7 +321,7 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { return } } else { - privateKeyData, err = ioutil.ReadAll(r) + privateKeyData, err = io.ReadAll(r) if err != nil { return } @@ -239,16 +330,22 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { if len(privateKeyData) < 2 { return errors.StructuralError("truncated private key data") } - var sum uint16 - for i := 0; i < len(privateKeyData)-2; i++ { - sum += uint16(privateKeyData[i]) + if pk.Version != 6 { + // checksum + var sum uint16 + for i := 0; i < len(privateKeyData)-2; i++ { + sum += uint16(privateKeyData[i]) + } + if privateKeyData[len(privateKeyData)-2] != uint8(sum>>8) || + privateKeyData[len(privateKeyData)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + privateKeyData = privateKeyData[:len(privateKeyData)-2] + return pk.parsePrivateKey(privateKeyData) + } else { + // No checksum + return pk.parsePrivateKey(privateKeyData) } - if privateKeyData[len(privateKeyData)-2] != uint8(sum>>8) || - privateKeyData[len(privateKeyData)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") - } - privateKeyData = privateKeyData[:len(privateKeyData)-2] - return pk.parsePrivateKey(privateKeyData) } pk.encryptedData = privateKeyData @@ -280,18 +377,59 @@ func (pk *PrivateKey) Serialize(w io.Writer) (err error) { optional := bytes.NewBuffer(nil) if pk.Encrypted || pk.Dummy() { - optional.Write([]byte{uint8(pk.cipher)}) - if err := pk.s2kParams.Serialize(optional); err != nil { + // [Optional] If string-to-key usage octet was 255, 254, or 253, + // a one-octet symmetric encryption algorithm. + if _, err = optional.Write([]byte{uint8(pk.cipher)}); err != nil { + return + } + // [Optional] If string-to-key usage octet was 253, + // a one-octet AEAD algorithm. 
+ if pk.s2kType == S2KAEAD { + if _, err = optional.Write([]byte{uint8(pk.aead)}); err != nil { + return + } + } + + s2kBuffer := bytes.NewBuffer(nil) + if err := pk.s2kParams.Serialize(s2kBuffer); err != nil { return err } + // [Optional] Only for a version 6 packet, and if string-to-key + // usage octet was 255, 254, or 253, an one-octet + // count of the following field. + if pk.Version == 6 { + if _, err = optional.Write([]byte{uint8(s2kBuffer.Len())}); err != nil { + return + } + } + // [Optional] If string-to-key usage octet was 255, 254, or 253, + // a string-to-key (S2K) specifier. The length of the string-to-key specifier + // depends on its type + if _, err = io.Copy(optional, s2kBuffer); err != nil { + return + } + + // IV if pk.Encrypted { - optional.Write(pk.iv) + if _, err = optional.Write(pk.iv); err != nil { + return + } + if pk.Version == 5 && pk.s2kType == S2KAEAD { + // Add padding for version 5 + padding := make([]byte, pk.cipher.blockSize()-len(pk.iv)) + if _, err = optional.Write(padding); err != nil { + return + } + } } } - if pk.Version == 5 { + if pk.Version == 5 || (pk.Version == 6 && pk.s2kType != S2KNON) { contents.Write([]byte{uint8(optional.Len())}) } - io.Copy(contents, optional) + + if _, err := io.Copy(contents, optional); err != nil { + return err + } if !pk.Dummy() { l := 0 @@ -303,8 +441,10 @@ func (pk *PrivateKey) Serialize(w io.Writer) (err error) { return err } l = buf.Len() - checksum := mod64kHash(buf.Bytes()) - buf.Write([]byte{byte(checksum >> 8), byte(checksum)}) + if pk.Version != 6 { + checksum := mod64kHash(buf.Bytes()) + buf.Write([]byte{byte(checksum >> 8), byte(checksum)}) + } priv = buf.Bytes() } else { priv, l = pk.encryptedData, len(pk.encryptedData) @@ -370,6 +510,26 @@ func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error { return err } +func serializeX25519PrivateKey(w io.Writer, priv *x25519.PrivateKey) error { + _, err := w.Write(priv.Secret) + return err +} + +func serializeX448PrivateKey(w io.Writer, priv *x448.PrivateKey) error { + _, err := w.Write(priv.Secret) + return err +} + +func serializeEd25519PrivateKey(w io.Writer, priv *ed25519.PrivateKey) error { + _, err := w.Write(priv.MarshalByteSecret()) + return err +} + +func serializeEd448PrivateKey(w io.Writer, priv *ed448.PrivateKey) error { + _, err := w.Write(priv.MarshalByteSecret()) + return err +} + // decrypt decrypts an encrypted private key using a decryption key. 
func (pk *PrivateKey) decrypt(decryptionKey []byte) error { if pk.Dummy() { @@ -378,37 +538,51 @@ func (pk *PrivateKey) decrypt(decryptionKey []byte) error { if !pk.Encrypted { return nil } - block := pk.cipher.new(decryptionKey) - cfb := cipher.NewCFBDecrypter(block, pk.iv) - - data := make([]byte, len(pk.encryptedData)) - cfb.XORKeyStream(data, pk.encryptedData) - - if pk.sha1Checksum { - if len(data) < sha1.Size { - return errors.StructuralError("truncated private key data") + var data []byte + switch pk.s2kType { + case S2KAEAD: + aead := pk.aead.new(block) + additionalData, err := pk.additionalData() + if err != nil { + return err } - h := sha1.New() - h.Write(data[:len(data)-sha1.Size]) - sum := h.Sum(nil) - if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { - return errors.StructuralError("private key checksum failure") + // Decrypt the encrypted key material with aead + data, err = aead.Open(nil, pk.iv, pk.encryptedData, additionalData) + if err != nil { + return err } - data = data[:len(data)-sha1.Size] - } else { - if len(data) < 2 { - return errors.StructuralError("truncated private key data") + case S2KSHA1, S2KCHECKSUM: + cfb := cipher.NewCFBDecrypter(block, pk.iv) + data = make([]byte, len(pk.encryptedData)) + cfb.XORKeyStream(data, pk.encryptedData) + if pk.s2kType == S2KSHA1 { + if len(data) < sha1.Size { + return errors.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum(nil) + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] } - var sum uint16 - for i := 0; i < len(data)-2; i++ { - sum += uint16(data[i]) - } - if data[len(data)-2] != uint8(sum>>8) || - data[len(data)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-2] + default: + return errors.InvalidArgumentError("invalid s2k type") } err := pk.parsePrivateKey(data) @@ -424,7 +598,6 @@ func (pk *PrivateKey) decrypt(decryptionKey []byte) error { pk.s2k = nil pk.Encrypted = false pk.encryptedData = nil - return nil } @@ -440,6 +613,9 @@ func (pk *PrivateKey) decryptWithCache(passphrase []byte, keyCache *s2k.Cache) e if err != nil { return err } + if pk.s2kType == S2KAEAD { + key = pk.applyHKDF(key) + } return pk.decrypt(key) } @@ -454,11 +630,14 @@ func (pk *PrivateKey) Decrypt(passphrase []byte) error { key := make([]byte, pk.cipher.KeySize()) pk.s2k(key, passphrase) + if pk.s2kType == S2KAEAD { + key = pk.applyHKDF(key) + } return pk.decrypt(key) } // DecryptPrivateKeys decrypts all encrypted keys with the given config and passphrase. -// Avoids recomputation of similar s2k key derivations. +// Avoids recomputation of similar s2k key derivations. func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error { // Create a cache to avoid recomputation of key derviations for the same passphrase. s2kCache := &s2k.Cache{} @@ -474,7 +653,7 @@ func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error { } // encrypt encrypts an unencrypted private key. 
-func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction CipherFunction) error { +func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, s2kType S2KType, cipherFunction CipherFunction, rand io.Reader) error { if pk.Dummy() { return errors.ErrDummyPrivateKey("dummy key found") } @@ -485,7 +664,15 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction Cip if len(key) != cipherFunction.KeySize() { return errors.InvalidArgumentError("supplied encryption key has the wrong size") } - + + if params.Mode() == s2k.Argon2S2K && s2kType != S2KAEAD { + return errors.InvalidArgumentError("using Argon2 S2K without AEAD is not allowed") + } + if params.Mode() != s2k.Argon2S2K && params.Mode() != s2k.IteratedSaltedS2K && + params.Mode() != s2k.SaltedS2K { // only allowed for high-entropy passphrases + return errors.InvalidArgumentError("insecure S2K mode") + } + priv := bytes.NewBuffer(nil) err := pk.serializePrivateKey(priv) if err != nil { @@ -497,35 +684,53 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction Cip pk.s2k, err = pk.s2kParams.Function() if err != nil { return err - } + } privateKeyBytes := priv.Bytes() - pk.sha1Checksum = true + pk.s2kType = s2kType block := pk.cipher.new(key) - pk.iv = make([]byte, pk.cipher.blockSize()) - _, err = rand.Read(pk.iv) - if err != nil { - return err - } - cfb := cipher.NewCFBEncrypter(block, pk.iv) - - if pk.sha1Checksum { - pk.s2kType = S2KSHA1 - h := sha1.New() - h.Write(privateKeyBytes) - sum := h.Sum(nil) - privateKeyBytes = append(privateKeyBytes, sum...) - } else { - pk.s2kType = S2KCHECKSUM - var sum uint16 - for _, b := range privateKeyBytes { - sum += uint16(b) + switch s2kType { + case S2KAEAD: + if pk.aead == 0 { + return errors.StructuralError("aead mode is not set on key") } - priv.Write([]byte{uint8(sum >> 8), uint8(sum)}) + aead := pk.aead.new(block) + additionalData, err := pk.additionalData() + if err != nil { + return err + } + pk.iv = make([]byte, aead.NonceSize()) + _, err = io.ReadFull(rand, pk.iv) + if err != nil { + return err + } + // Decrypt the encrypted key material with aead + pk.encryptedData = aead.Seal(nil, pk.iv, privateKeyBytes, additionalData) + case S2KSHA1, S2KCHECKSUM: + pk.iv = make([]byte, pk.cipher.blockSize()) + _, err = io.ReadFull(rand, pk.iv) + if err != nil { + return err + } + cfb := cipher.NewCFBEncrypter(block, pk.iv) + if s2kType == S2KSHA1 { + h := sha1.New() + h.Write(privateKeyBytes) + sum := h.Sum(nil) + privateKeyBytes = append(privateKeyBytes, sum...) + } else { + var sum uint16 + for _, b := range privateKeyBytes { + sum += uint16(b) + } + privateKeyBytes = append(privateKeyBytes, []byte{uint8(sum >> 8), uint8(sum)}...) + } + pk.encryptedData = make([]byte, len(privateKeyBytes)) + cfb.XORKeyStream(pk.encryptedData, privateKeyBytes) + default: + return errors.InvalidArgumentError("invalid s2k type for encryption") } - pk.encryptedData = make([]byte, len(privateKeyBytes)) - cfb.XORKeyStream(pk.encryptedData, privateKeyBytes) pk.Encrypted = true pk.PrivateKey = nil return err @@ -544,8 +749,15 @@ func (pk *PrivateKey) EncryptWithConfig(passphrase []byte, config *Config) error return err } s2k(key, passphrase) + s2kType := S2KSHA1 + if config.AEAD() != nil { + s2kType = S2KAEAD + pk.aead = config.AEAD().Mode() + pk.cipher = config.Cipher() + key = pk.applyHKDF(key) + } // Encrypt the private key with the derived encryption key. 
- return pk.encrypt(key, params, config.Cipher()) + return pk.encrypt(key, params, s2kType, config.Cipher(), config.Random()) } // EncryptPrivateKeys encrypts all unencrypted keys with the given config and passphrase. @@ -564,7 +776,16 @@ func EncryptPrivateKeys(keys []*PrivateKey, passphrase []byte, config *Config) e s2k(encryptionKey, passphrase) for _, key := range keys { if key != nil && !key.Dummy() && !key.Encrypted { - err = key.encrypt(encryptionKey, params, config.Cipher()) + s2kType := S2KSHA1 + if config.AEAD() != nil { + s2kType = S2KAEAD + key.aead = config.AEAD().Mode() + key.cipher = config.Cipher() + derivedKey := key.applyHKDF(encryptionKey) + err = key.encrypt(derivedKey, params, s2kType, config.Cipher(), config.Random()) + } else { + err = key.encrypt(encryptionKey, params, s2kType, config.Cipher(), config.Random()) + } if err != nil { return err } @@ -581,7 +802,7 @@ func (pk *PrivateKey) Encrypt(passphrase []byte) error { S2KMode: s2k.IteratedSaltedS2K, S2KCount: 65536, Hash: crypto.SHA256, - } , + }, DefaultCipher: CipherAES256, } return pk.EncryptWithConfig(passphrase, config) @@ -601,6 +822,14 @@ func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) { err = serializeEdDSAPrivateKey(w, priv) case *ecdh.PrivateKey: err = serializeECDHPrivateKey(w, priv) + case *x25519.PrivateKey: + err = serializeX25519PrivateKey(w, priv) + case *x448.PrivateKey: + err = serializeX448PrivateKey(w, priv) + case *ed25519.PrivateKey: + err = serializeEd25519PrivateKey(w, priv) + case *ed448.PrivateKey: + err = serializeEd448PrivateKey(w, priv) default: err = errors.InvalidArgumentError("unknown private key type") } @@ -621,8 +850,18 @@ func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { return pk.parseECDHPrivateKey(data) case PubKeyAlgoEdDSA: return pk.parseEdDSAPrivateKey(data) + case PubKeyAlgoX25519: + return pk.parseX25519PrivateKey(data) + case PubKeyAlgoX448: + return pk.parseX448PrivateKey(data) + case PubKeyAlgoEd25519: + return pk.parseEd25519PrivateKey(data) + case PubKeyAlgoEd448: + return pk.parseEd448PrivateKey(data) + default: + err = errors.StructuralError("unknown private key type") + return } - panic("impossible") } func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { @@ -743,6 +982,86 @@ func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { return nil } +func (pk *PrivateKey) parseX25519PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*x25519.PublicKey) + privateKey := x25519.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + privateKey.Secret = make([]byte, x25519.KeySize) + + if len(data) != x25519.KeySize { + err = errors.StructuralError("wrong x25519 key size") + return err + } + subtle.ConstantTimeCopy(1, privateKey.Secret, data) + if err = x25519.Validate(privateKey); err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + +func (pk *PrivateKey) parseX448PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*x448.PublicKey) + privateKey := x448.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + privateKey.Secret = make([]byte, x448.KeySize) + + if len(data) != x448.KeySize { + err = errors.StructuralError("wrong x448 key size") + return err + } + subtle.ConstantTimeCopy(1, privateKey.Secret, data) + if err = x448.Validate(privateKey); err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + +func (pk *PrivateKey) parseEd25519PrivateKey(data []byte) (err error) { + publicKey := 
pk.PublicKey.PublicKey.(*ed25519.PublicKey) + privateKey := ed25519.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + if len(data) != ed25519.SeedSize { + err = errors.StructuralError("wrong ed25519 key size") + return err + } + err = privateKey.UnmarshalByteSecret(data) + if err != nil { + return err + } + err = ed25519.Validate(privateKey) + if err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + +func (pk *PrivateKey) parseEd448PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*ed448.PublicKey) + privateKey := ed448.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + if len(data) != ed448.SeedSize { + err = errors.StructuralError("wrong ed448 key size") + return err + } + err = privateKey.UnmarshalByteSecret(data) + if err != nil { + return err + } + err = ed448.Validate(privateKey) + if err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { eddsaPub := pk.PublicKey.PublicKey.(*eddsa.PublicKey) eddsaPriv := eddsa.NewPrivateKey(*eddsaPub) @@ -767,6 +1086,41 @@ func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { return nil } +func (pk *PrivateKey) additionalData() ([]byte, error) { + additionalData := bytes.NewBuffer(nil) + // Write additional data prefix based on packet type + var packetByte byte + if pk.PublicKey.IsSubkey { + packetByte = 0xc7 + } else { + packetByte = 0xc5 + } + // Write public key to additional data + _, err := additionalData.Write([]byte{packetByte}) + if err != nil { + return nil, err + } + err = pk.PublicKey.serializeWithoutHeaders(additionalData) + if err != nil { + return nil, err + } + return additionalData.Bytes(), nil +} + +func (pk *PrivateKey) applyHKDF(inputKey []byte) []byte { + var packetByte byte + if pk.PublicKey.IsSubkey { + packetByte = 0xc7 + } else { + packetByte = 0xc5 + } + associatedData := []byte{packetByte, byte(pk.Version), byte(pk.cipher), byte(pk.aead)} + hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData) + encryptionKey := make([]byte, pk.cipher.KeySize()) + _, _ = readFull(hkdfReader, encryptionKey) + return encryptionKey +} + func validateDSAParameters(priv *dsa.PrivateKey) error { p := priv.P // group prime q := priv.Q // subgroup order diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go index 3402b8c14..f8da781bb 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -5,7 +5,6 @@ package packet import ( - "crypto" "crypto/dsa" "crypto/rsa" "crypto/sha1" @@ -21,23 +20,24 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/ecdh" "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" "github.com/ProtonMail/go-crypto/openpgp/eddsa" "github.com/ProtonMail/go-crypto/openpgp/elgamal" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" ) -type kdfHashFunction byte -type kdfAlgorithm byte - // PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. 
type PublicKey struct { Version int CreationTime time.Time PubKeyAlgo PublicKeyAlgorithm - PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey, *x25519.PublicKey, *x448.PublicKey, *ed25519.PublicKey, *ed448.PublicKey Fingerprint []byte KeyId uint64 IsSubkey bool @@ -61,11 +61,19 @@ func (pk *PublicKey) UpgradeToV5() { pk.setFingerprintAndKeyId() } +// UpgradeToV6 updates the version of the key to v6, and updates all necessary +// fields. +func (pk *PublicKey) UpgradeToV6() error { + pk.Version = 6 + pk.setFingerprintAndKeyId() + return pk.checkV6Compatibility() +} + // signingKey provides a convenient abstraction over signature verification // for v3 and v4 public keys. type signingKey interface { SerializeForHash(io.Writer) error - SerializeSignaturePrefix(io.Writer) + SerializeSignaturePrefix(io.Writer) error serializeWithoutHeaders(io.Writer) error } @@ -174,6 +182,54 @@ func NewEdDSAPublicKey(creationTime time.Time, pub *eddsa.PublicKey) *PublicKey return pk } +func NewX25519PublicKey(creationTime time.Time, pub *x25519.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoX25519, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewX448PublicKey(creationTime time.Time, pub *x448.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoX448, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewEd25519PublicKey(creationTime time.Time, pub *ed25519.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoEd25519, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewEd448PublicKey(creationTime time.Time, pub *ed448.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoEd448, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + func (pk *PublicKey) parse(r io.Reader) (err error) { // RFC 4880, section 5.5.2 var buf [6]byte @@ -181,12 +237,19 @@ func (pk *PublicKey) parse(r io.Reader) (err error) { if err != nil { return } - if buf[0] != 4 && buf[0] != 5 { + + pk.Version = int(buf[0]) + if pk.Version != 4 && pk.Version != 5 && pk.Version != 6 { return errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0]))) } - pk.Version = int(buf[0]) - if pk.Version == 5 { + if V5Disabled && pk.Version == 5 { + return errors.UnsupportedError("support for parsing v5 entities is disabled; build with `-tags v5` if needed") + } + + if pk.Version >= 5 { + // Read the four-octet scalar octet count + // The count is not used in this implementation var n [4]byte _, err = readFull(r, n[:]) if err != nil { @@ -195,6 +258,7 @@ func (pk *PublicKey) parse(r io.Reader) (err error) { } pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + // Ignore four-ocet length switch pk.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: err = pk.parseRSA(r) @@ -208,6 +272,14 @@ func (pk *PublicKey) parse(r io.Reader) (err error) { err = pk.parseECDH(r) case PubKeyAlgoEdDSA: err = pk.parseEdDSA(r) + case PubKeyAlgoX25519: + err = pk.parseX25519(r) + case PubKeyAlgoX448: + err = pk.parseX448(r) + case PubKeyAlgoEd25519: + err = 
pk.parseEd25519(r) + case PubKeyAlgoEd448: + err = pk.parseEd448(r) default: err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) } @@ -221,21 +293,44 @@ func (pk *PublicKey) parse(r io.Reader) (err error) { func (pk *PublicKey) setFingerprintAndKeyId() { // RFC 4880, section 12.2 - if pk.Version == 5 { + if pk.Version >= 5 { fingerprint := sha256.New() - pk.SerializeForHash(fingerprint) + if err := pk.SerializeForHash(fingerprint); err != nil { + // Should not happen for a hash. + panic(err) + } pk.Fingerprint = make([]byte, 32) copy(pk.Fingerprint, fingerprint.Sum(nil)) pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8]) } else { fingerprint := sha1.New() - pk.SerializeForHash(fingerprint) + if err := pk.SerializeForHash(fingerprint); err != nil { + // Should not happen for a hash. + panic(err) + } pk.Fingerprint = make([]byte, 20) copy(pk.Fingerprint, fingerprint.Sum(nil)) pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) } } +func (pk *PublicKey) checkV6Compatibility() error { + // Implementations MUST NOT accept or generate version 6 key material using the deprecated OIDs. + switch pk.PubKeyAlgo { + case PubKeyAlgoECDH: + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + if curveInfo.GenName == ecc.Curve25519GenName { + return errors.StructuralError("cannot generate v6 key with deprecated OID: Curve25519Legacy") + } + case PubKeyAlgoEdDSA: + return errors.StructuralError("cannot generate v6 key with deprecated algorithm: EdDSALegacy") + } + return nil +} + // parseRSA parses RSA public key material from the given Reader. See RFC 4880, // section 5.5.2. func (pk *PublicKey) parseRSA(r io.Reader) (err error) { @@ -324,16 +419,17 @@ func (pk *PublicKey) parseECDSA(r io.Reader) (err error) { if _, err = pk.oid.ReadFrom(r); err != nil { return } - pk.p = new(encoding.MPI) - if _, err = pk.p.ReadFrom(r); err != nil { - return - } curveInfo := ecc.FindByOid(pk.oid) if curveInfo == nil { return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) } + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + c, ok := curveInfo.Curve.(ecc.ECDSACurve) if !ok { return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) @@ -353,6 +449,17 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) { if _, err = pk.oid.ReadFrom(r); err != nil { return } + + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + + if pk.Version == 6 && curveInfo.GenName == ecc.Curve25519GenName { + // Implementations MUST NOT accept or generate version 6 key material using the deprecated OIDs. 
+ return errors.StructuralError("cannot read v6 key with deprecated OID: Curve25519Legacy") + } + pk.p = new(encoding.MPI) if _, err = pk.p.ReadFrom(r); err != nil { return @@ -362,12 +469,6 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) { return } - curveInfo := ecc.FindByOid(pk.oid) - - if curveInfo == nil { - return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) - } - c, ok := curveInfo.Curve.(ecc.ECDHCurve) if !ok { return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) @@ -396,10 +497,16 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) { } func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { + if pk.Version == 6 { + // Implementations MUST NOT accept or generate version 6 key material using the deprecated OIDs. + return errors.StructuralError("cannot generate v6 key with deprecated algorithm: EdDSALegacy") + } + pk.oid = new(encoding.OID) if _, err = pk.oid.ReadFrom(r); err != nil { return } + curveInfo := ecc.FindByOid(pk.oid) if curveInfo == nil { return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) @@ -435,75 +542,145 @@ func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { return } +func (pk *PublicKey) parseX25519(r io.Reader) (err error) { + point := make([]byte, x25519.KeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &x25519.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseX448(r io.Reader) (err error) { + point := make([]byte, x448.KeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &x448.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseEd25519(r io.Reader) (err error) { + point := make([]byte, ed25519.PublicKeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &ed25519.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseEd448(r io.Reader) (err error) { + point := make([]byte, ed448.PublicKeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &ed448.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + // SerializeForHash serializes the PublicKey to w with the special packet // header format needed for hashing. func (pk *PublicKey) SerializeForHash(w io.Writer) error { - pk.SerializeSignaturePrefix(w) + if err := pk.SerializeSignaturePrefix(w); err != nil { + return err + } return pk.serializeWithoutHeaders(w) } // SerializeSignaturePrefix writes the prefix for this public key to the given Writer. // The prefix is used when calculating a signature over this public key. See // RFC 4880, section 5.2.4. -func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) { +func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) error { var pLength = pk.algorithmSpecificByteCount() - if pk.Version == 5 { - pLength += 10 // version, timestamp (4), algorithm, key octet count (4). - w.Write([]byte{ - 0x9A, + // version, timestamp, algorithm + pLength += versionSize + timestampSize + algorithmSize + if pk.Version >= 5 { + // key octet count (4). + pLength += 4 + _, err := w.Write([]byte{ + // When a v4 signature is made over a key, the hash data starts with the octet 0x99, followed by a two-octet length + // of the key, and then the body of the key packet. When a v6 signature is made over a key, the hash data starts + // with the salt, then octet 0x9B, followed by a four-octet length of the key, and then the body of the key packet. 
+ 0x95 + byte(pk.Version), byte(pLength >> 24), byte(pLength >> 16), byte(pLength >> 8), byte(pLength), }) - return + return err } - pLength += 6 - w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + if _, err := w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}); err != nil { + return err + } + return nil } func (pk *PublicKey) Serialize(w io.Writer) (err error) { - length := 6 // 6 byte header + length := uint32(versionSize + timestampSize + algorithmSize) // 6 byte header length += pk.algorithmSpecificByteCount() - if pk.Version == 5 { + if pk.Version >= 5 { length += 4 // octet key count } packetType := packetTypePublicKey if pk.IsSubkey { packetType = packetTypePublicSubkey } - err = serializeHeader(w, packetType, length) + err = serializeHeader(w, packetType, int(length)) if err != nil { return } return pk.serializeWithoutHeaders(w) } -func (pk *PublicKey) algorithmSpecificByteCount() int { - length := 0 +func (pk *PublicKey) algorithmSpecificByteCount() uint32 { + length := uint32(0) switch pk.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += int(pk.n.EncodedLength()) - length += int(pk.e.EncodedLength()) + length += uint32(pk.n.EncodedLength()) + length += uint32(pk.e.EncodedLength()) case PubKeyAlgoDSA: - length += int(pk.p.EncodedLength()) - length += int(pk.q.EncodedLength()) - length += int(pk.g.EncodedLength()) - length += int(pk.y.EncodedLength()) + length += uint32(pk.p.EncodedLength()) + length += uint32(pk.q.EncodedLength()) + length += uint32(pk.g.EncodedLength()) + length += uint32(pk.y.EncodedLength()) case PubKeyAlgoElGamal: - length += int(pk.p.EncodedLength()) - length += int(pk.g.EncodedLength()) - length += int(pk.y.EncodedLength()) + length += uint32(pk.p.EncodedLength()) + length += uint32(pk.g.EncodedLength()) + length += uint32(pk.y.EncodedLength()) case PubKeyAlgoECDSA: - length += int(pk.oid.EncodedLength()) - length += int(pk.p.EncodedLength()) + length += uint32(pk.oid.EncodedLength()) + length += uint32(pk.p.EncodedLength()) case PubKeyAlgoECDH: - length += int(pk.oid.EncodedLength()) - length += int(pk.p.EncodedLength()) - length += int(pk.kdf.EncodedLength()) + length += uint32(pk.oid.EncodedLength()) + length += uint32(pk.p.EncodedLength()) + length += uint32(pk.kdf.EncodedLength()) case PubKeyAlgoEdDSA: - length += int(pk.oid.EncodedLength()) - length += int(pk.p.EncodedLength()) + length += uint32(pk.oid.EncodedLength()) + length += uint32(pk.p.EncodedLength()) + case PubKeyAlgoX25519: + length += x25519.KeySize + case PubKeyAlgoX448: + length += x448.KeySize + case PubKeyAlgoEd25519: + length += ed25519.PublicKeySize + case PubKeyAlgoEd448: + length += ed448.PublicKeySize default: panic("unknown public key algorithm") } @@ -522,7 +699,7 @@ func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { return } - if pk.Version == 5 { + if pk.Version >= 5 { n := pk.algorithmSpecificByteCount() if _, err = w.Write([]byte{ byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n), @@ -580,6 +757,22 @@ func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { } _, err = w.Write(pk.p.EncodedBytes()) return + case PubKeyAlgoX25519: + publicKey := pk.PublicKey.(*x25519.PublicKey) + _, err = w.Write(publicKey.Point) + return + case PubKeyAlgoX448: + publicKey := pk.PublicKey.(*x448.PublicKey) + _, err = w.Write(publicKey.Point) + return + case PubKeyAlgoEd25519: + publicKey := pk.PublicKey.(*ed25519.PublicKey) + _, err = w.Write(publicKey.Point) + return + case PubKeyAlgoEd448: + 
publicKey := pk.PublicKey.(*ed448.PublicKey) + _, err = w.Write(publicKey.Point) + return } return errors.InvalidArgumentError("bad public-key algorithm") } @@ -589,6 +782,20 @@ func (pk *PublicKey) CanSign() bool { return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal && pk.PubKeyAlgo != PubKeyAlgoECDH } +// VerifyHashTag returns nil iff sig appears to be a plausible signature of the data +// hashed into signed, based solely on its HashTag. signed is mutated by this call. +func VerifyHashTag(signed hash.Hash, sig *Signature) (err error) { + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.AddMetadataToHashSuffix() + } + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + return nil +} + // VerifySignature returns nil iff sig is a valid signature, made by this // public key, of the data hashed into signed. signed is mutated by this call. func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { @@ -600,7 +807,8 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro } signed.Write(sig.HashSuffix) hashBytes := signed.Sum(nil) - if sig.Version == 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) { + // see discussion https://github.com/ProtonMail/go-crypto/issues/107 + if sig.Version >= 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) { return errors.SignatureError("hash tag doesn't match") } @@ -639,6 +847,18 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro return errors.SignatureError("EdDSA verification failure") } return nil + case PubKeyAlgoEd25519: + ed25519PublicKey := pk.PublicKey.(*ed25519.PublicKey) + if !ed25519.Verify(ed25519PublicKey, hashBytes, sig.EdSig) { + return errors.SignatureError("Ed25519 verification failure") + } + return nil + case PubKeyAlgoEd448: + ed448PublicKey := pk.PublicKey.(*ed448.PublicKey) + if !ed448.Verify(ed448PublicKey, hashBytes, sig.EdSig) { + return errors.SignatureError("ed448 verification failure") + } + return nil default: return errors.SignatureError("Unsupported public key algorithm used in signature") } @@ -646,11 +866,8 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro // keySignatureHash returns a Hash of the message that needs to be signed for // pk to assert a subkey relationship to signed. -func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() +func keySignatureHash(pk, signed signingKey, hashFunc hash.Hash) (h hash.Hash, err error) { + h = hashFunc // RFC 4880, section 5.2.4 err = pk.SerializeForHash(h) @@ -662,10 +879,28 @@ func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, return } +// VerifyKeyHashTag returns nil iff sig appears to be a plausible signature over this +// primary key and subkey, based solely on its HashTag. 
+func (pk *PublicKey) VerifyKeyHashTag(signed *PublicKey, sig *Signature) error { + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + h, err := keySignatureHash(pk, signed, preparedHash) + if err != nil { + return err + } + return VerifyHashTag(h, sig) +} + // VerifyKeySignature returns nil iff sig is a valid signature, made by this // public key, of signed. func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { - h, err := keySignatureHash(pk, signed, sig.Hash) + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + h, err := keySignatureHash(pk, signed, preparedHash) if err != nil { return err } @@ -679,10 +914,14 @@ func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error if sig.EmbeddedSignature == nil { return errors.StructuralError("signing subkey is missing cross-signature") } + preparedHashEmbedded, err := sig.EmbeddedSignature.PrepareVerify() + if err != nil { + return err + } // Verify the cross-signature. This is calculated over the same // data as the main signature, so we cannot just recursively // call signed.VerifyKeySignature(...) - if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + if h, err = keySignatureHash(pk, signed, preparedHashEmbedded); err != nil { return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) } if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { @@ -693,32 +932,44 @@ func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error return nil } -func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") +func keyRevocationHash(pk signingKey, hashFunc hash.Hash) (err error) { + return pk.SerializeForHash(hashFunc) +} + +// VerifyRevocationHashTag returns nil iff sig appears to be a plausible signature +// over this public key, based solely on its HashTag. +func (pk *PublicKey) VerifyRevocationHashTag(sig *Signature) (err error) { + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - err = pk.SerializeForHash(h) - - return + if err = keyRevocationHash(pk, preparedHash); err != nil { + return err + } + return VerifyHashTag(preparedHash, sig) } // VerifyRevocationSignature returns nil iff sig is a valid signature, made by this // public key. func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) + preparedHash, err := sig.PrepareVerify() if err != nil { return err } - return pk.VerifySignature(h, sig) + if err = keyRevocationHash(pk, preparedHash); err != nil { + return err + } + return pk.VerifySignature(preparedHash, sig) } // VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature, // made by this public key, of signed. func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) { - h, err := keySignatureHash(pk, signed, sig.Hash) + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + h, err := keySignatureHash(pk, signed, preparedHash) if err != nil { return err } @@ -727,15 +978,15 @@ func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *Pub // userIdSignatureHash returns a Hash of the message that needs to be signed // to assert that pk is a valid key for id. 
-func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() +func userIdSignatureHash(id string, pk *PublicKey, h hash.Hash) (err error) { // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) + if err := pk.SerializeSignaturePrefix(h); err != nil { + return err + } + if err := pk.serializeWithoutHeaders(h); err != nil { + return err + } var buf [5]byte buf[0] = 0xb4 @@ -746,16 +997,51 @@ func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash h.Write(buf[:]) h.Write([]byte(id)) - return + return nil +} + +// directKeySignatureHash returns a Hash of the message that needs to be signed. +func directKeySignatureHash(pk *PublicKey, h hash.Hash) (err error) { + return pk.SerializeForHash(h) +} + +// VerifyUserIdHashTag returns nil iff sig appears to be a plausible signature over this +// public key and UserId, based solely on its HashTag +func (pk *PublicKey) VerifyUserIdHashTag(id string, sig *Signature) (err error) { + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + err = userIdSignatureHash(id, pk, preparedHash) + if err != nil { + return err + } + return VerifyHashTag(preparedHash, sig) } // VerifyUserIdSignature returns nil iff sig is a valid signature, made by this // public key, that id is the identity of pub. func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { - h, err := userIdSignatureHash(id, pub, sig.Hash) + h, err := sig.PrepareVerify() if err != nil { return err } + if err := userIdSignatureHash(id, pub, h); err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifyDirectKeySignature returns nil iff sig is a valid signature, made by this +// public key. +func (pk *PublicKey) VerifyDirectKeySignature(sig *Signature) (err error) { + h, err := sig.PrepareVerify() + if err != nil { + return err + } + if err := directKeySignatureHash(pk, h); err != nil { + return err + } return pk.VerifySignature(h, sig) } @@ -786,21 +1072,49 @@ func (pk *PublicKey) BitLength() (bitLength uint16, err error) { bitLength = pk.p.BitLength() case PubKeyAlgoEdDSA: bitLength = pk.p.BitLength() + case PubKeyAlgoX25519: + bitLength = x25519.KeySize * 8 + case PubKeyAlgoX448: + bitLength = x448.KeySize * 8 + case PubKeyAlgoEd25519: + bitLength = ed25519.PublicKeySize * 8 + case PubKeyAlgoEd448: + bitLength = ed448.PublicKeySize * 8 default: err = errors.InvalidArgumentError("bad public-key algorithm") } return } +// Curve returns the used elliptic curve of this public key. +// Returns an error if no elliptic curve is used. +func (pk *PublicKey) Curve() (curve Curve, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoECDSA, PubKeyAlgoECDH, PubKeyAlgoEdDSA: + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return "", errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + curve = Curve(curveInfo.GenName) + case PubKeyAlgoEd25519, PubKeyAlgoX25519: + curve = Curve25519 + case PubKeyAlgoEd448, PubKeyAlgoX448: + curve = Curve448 + default: + err = errors.InvalidArgumentError("public key does not operate with an elliptic curve") + } + return +} + // KeyExpired returns whether sig is a self-signature of a key that has // expired or is created in the future. 
func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool { - if pk.CreationTime.After(currentTime) { + if pk.CreationTime.Unix() > currentTime.Unix() { return true } if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 { return false } expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) - return currentTime.After(expiry) + return currentTime.Unix() > expiry.Unix() } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go index 10215fe5f..dd8409239 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go @@ -10,6 +10,12 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/errors" ) +type PacketReader interface { + Next() (p Packet, err error) + Push(reader io.Reader) (err error) + Unread(p Packet) +} + // Reader reads packets from an io.Reader and allows packets to be 'unread' so // that they result from the next call to Next. type Reader struct { @@ -26,37 +32,81 @@ type Reader struct { const maxReaders = 32 // Next returns the most recently unread Packet, or reads another packet from -// the top-most io.Reader. Unknown packet types are skipped. +// the top-most io.Reader. Unknown/unsupported/Marker packet types are skipped. func (r *Reader) Next() (p Packet, err error) { + for { + p, err := r.read() + if err == io.EOF { + break + } else if err != nil { + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue + } + if _, ok := err.(errors.UnsupportedError); ok { + switch p.(type) { + case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData: + return nil, err + } + continue + } + return nil, err + } else { + //A marker packet MUST be ignored when received + switch p.(type) { + case *Marker: + continue + } + return p, nil + } + } + return nil, io.EOF +} + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown/Marker packet types are skipped while unsupported +// packets are returned as UnsupportedPacket type. +func (r *Reader) NextWithUnsupported() (p Packet, err error) { + for { + p, err = r.read() + if err == io.EOF { + break + } else if err != nil { + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue + } + if casteErr, ok := err.(errors.UnsupportedError); ok { + return &UnsupportedPacket{ + IncompletePacket: p, + Error: casteErr, + }, nil + } + return + } else { + //A marker packet MUST be ignored when received + switch p.(type) { + case *Marker: + continue + } + return + } + } + return nil, io.EOF +} + +func (r *Reader) read() (p Packet, err error) { if len(r.q) > 0 { p = r.q[len(r.q)-1] r.q = r.q[:len(r.q)-1] return } - for len(r.readers) > 0 { p, err = Read(r.readers[len(r.readers)-1]) - if err == nil { - return - } if err == io.EOF { r.readers = r.readers[:len(r.readers)-1] continue } - // TODO: Add strict mode that rejects unknown packets, instead of ignoring them. 
- if _, ok := err.(errors.UnknownPacketTypeError); ok { - continue - } - if _, ok := err.(errors.UnsupportedError); ok { - switch p.(type) { - case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData: - return nil, err - } - continue - } - return nil, err + return p, err } - return nil, io.EOF } @@ -84,3 +134,76 @@ func NewReader(r io.Reader) *Reader { readers: []io.Reader{r}, } } + +// CheckReader is similar to Reader but additionally +// uses the pushdown automata to verify the read packet sequence. +type CheckReader struct { + Reader + verifier *SequenceVerifier + fullyRead bool +} + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +// If the read packet sequence does not conform to the packet composition +// rules in rfc4880, it returns an error. +func (r *CheckReader) Next() (p Packet, err error) { + if r.fullyRead { + return nil, io.EOF + } + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + var errMsg error + for len(r.readers) > 0 { + p, errMsg, err = ReadWithCheck(r.readers[len(r.readers)-1], r.verifier) + if errMsg != nil { + err = errMsg + return + } + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + //A marker packet MUST be ignored when received + switch p.(type) { + case *Marker: + continue + } + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue + } + if _, ok := err.(errors.UnsupportedError); ok { + switch p.(type) { + case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData: + return nil, err + } + continue + } + return nil, err + } + if errMsg = r.verifier.Next(EOSSymbol); errMsg != nil { + return nil, errMsg + } + if errMsg = r.verifier.AssertValid(); errMsg != nil { + return nil, errMsg + } + r.fullyRead = true + return nil, io.EOF +} + +func NewCheckReader(r io.Reader) *CheckReader { + return &CheckReader{ + Reader: Reader{ + q: nil, + readers: []io.Reader{r}, + }, + verifier: NewSequenceVerifier(), + fullyRead: false, + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go new file mode 100644 index 000000000..fb2e362e4 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go @@ -0,0 +1,15 @@ +package packet + +// Recipient type represents a Intended Recipient Fingerprint subpacket +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-intended-recipient-fingerpr +type Recipient struct { + KeyVersion int + Fingerprint []byte +} + +func (r *Recipient) Serialize() []byte { + packet := make([]byte, len(r.Fingerprint)+1) + packet[0] = byte(r.KeyVersion) + copy(packet[1:], r.Fingerprint) + return packet +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go index 80d0bb98e..3a4b366d8 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -8,13 +8,17 @@ import ( "bytes" "crypto" "crypto/dsa" + "encoding/asn1" "encoding/binary" "hash" "io" + "math/big" "strconv" "time" "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" "github.com/ProtonMail/go-crypto/openpgp/eddsa" 
"github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" @@ -22,7 +26,8 @@ import ( ) const ( - // See RFC 4880, section 5.2.3.21 for details. + // First octet of key flags. + // See RFC 9580, section 5.2.3.29 for details. KeyFlagCertify = 1 << iota KeyFlagSign KeyFlagEncryptCommunications @@ -33,12 +38,30 @@ const ( KeyFlagGroupKey ) -// Signature represents a signature. See RFC 4880, section 5.2. +const ( + // First octet of keyserver preference flags. + // See RFC 9580, section 5.2.3.25 for details. + _ = 1 << iota + _ + _ + _ + _ + _ + _ + KeyserverPrefNoModify +) + +const SaltNotationName = "salt@notations.openpgpjs.org" + +// Signature represents a signature. See RFC 9580, section 5.2. type Signature struct { Version int SigType SignatureType PubKeyAlgo PublicKeyAlgorithm Hash crypto.Hash + // salt contains a random salt value for v6 signatures + // See RFC 9580 Section 5.2.4. + salt []byte // HashSuffix is extra data that is hashed in after the signed data. HashSuffix []byte @@ -57,6 +80,7 @@ type Signature struct { DSASigR, DSASigS encoding.Field ECDSASigR, ECDSASigS encoding.Field EdDSASigR, EdDSASigS encoding.Field + EdSig []byte // rawSubpackets contains the unparsed subpackets, in order. rawSubpackets []outputSubpacket @@ -72,31 +96,42 @@ type Signature struct { SignerUserId *string IsPrimaryId *bool Notations []*Notation + IntendedRecipients []*Recipient // TrustLevel and TrustAmount can be set by the signer to assert that // the key is not only valid but also trustworthy at the specified // level. - // See RFC 4880, section 5.2.3.13 for details. + // See RFC 9580, section 5.2.3.21 for details. TrustLevel TrustLevel TrustAmount TrustAmount // TrustRegularExpression can be used in conjunction with trust Signature // packets to limit the scope of the trust that is extended. - // See RFC 4880, section 5.2.3.14 for details. + // See RFC 9580, section 5.2.3.22 for details. TrustRegularExpression *string + // KeyserverPrefsValid is set if any keyserver preferences were given. See RFC 9580, section + // 5.2.3.25 for details. + KeyserverPrefsValid bool + KeyserverPrefNoModify bool + + // PreferredKeyserver can be set to a URI where the latest version of the + // key that this signature is made over can be found. See RFC 9580, section + // 5.2.3.26 for details. + PreferredKeyserver string + // PolicyURI can be set to the URI of a document that describes the - // policy under which the signature was issued. See RFC 4880, section - // 5.2.3.20 for details. + // policy under which the signature was issued. See RFC 9580, section + // 5.2.3.28 for details. PolicyURI string - // FlagsValid is set if any flags were given. See RFC 4880, section - // 5.2.3.21 for details. + // FlagsValid is set if any flags were given. See RFC 9580, section + // 5.2.3.29 for details. FlagsValid bool FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage, FlagSplitKey, FlagAuthenticate, FlagGroupKey bool // RevocationReason is set if this signature has been revoked. - // See RFC 4880, section 5.2.3.23 for details. + // See RFC 9580, section 5.2.3.31 for details. RevocationReason *ReasonForRevocation RevocationReasonText string @@ -113,26 +148,57 @@ type Signature struct { outSubpackets []outputSubpacket } +// VerifiableSignature internally keeps state if the +// the signature has been verified before. 
+type VerifiableSignature struct { + Valid *bool // nil if it has not been verified yet + Packet *Signature +} + +// NewVerifiableSig returns a struct of type VerifiableSignature referencing the input signature. +func NewVerifiableSig(signature *Signature) *VerifiableSignature { + return &VerifiableSignature{ + Packet: signature, + } +} + +// Salt returns the signature salt for v6 signatures. +func (sig *Signature) Salt() []byte { + if sig == nil { + return nil + } + return sig.salt +} + func (sig *Signature) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.3 - var buf [5]byte + // RFC 9580, section 5.2.3 + var buf [7]byte _, err = readFull(r, buf[:1]) if err != nil { return } - if buf[0] != 4 && buf[0] != 5 { + sig.Version = int(buf[0]) + if sig.Version != 4 && sig.Version != 5 && sig.Version != 6 { err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) return } - sig.Version = int(buf[0]) - _, err = readFull(r, buf[:5]) + + if V5Disabled && sig.Version == 5 { + return errors.UnsupportedError("support for parsing v5 entities is disabled; build with `-tags v5` if needed") + } + + if sig.Version == 6 { + _, err = readFull(r, buf[:7]) + } else { + _, err = readFull(r, buf[:5]) + } if err != nil { return } sig.SigType = SignatureType(buf[0]) sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448: default: err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) return @@ -150,7 +216,17 @@ func (sig *Signature) parse(r io.Reader) (err error) { return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) } - hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) + var hashedSubpacketsLength int + if sig.Version == 6 { + // For a v6 signature, a four-octet length is used. 
+ hashedSubpacketsLength = + int(buf[3])<<24 | + int(buf[4])<<16 | + int(buf[5])<<8 | + int(buf[6]) + } else { + hashedSubpacketsLength = int(buf[3])<<8 | int(buf[4]) + } hashedSubpackets := make([]byte, hashedSubpacketsLength) _, err = readFull(r, hashedSubpackets) if err != nil { @@ -166,11 +242,21 @@ func (sig *Signature) parse(r io.Reader) (err error) { return } - _, err = readFull(r, buf[:2]) + if sig.Version == 6 { + _, err = readFull(r, buf[:4]) + } else { + _, err = readFull(r, buf[:2]) + } + if err != nil { return } - unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) + var unhashedSubpacketsLength uint32 + if sig.Version == 6 { + unhashedSubpacketsLength = uint32(buf[0])<<24 | uint32(buf[1])<<16 | uint32(buf[2])<<8 | uint32(buf[3]) + } else { + unhashedSubpacketsLength = uint32(buf[0])<<8 | uint32(buf[1]) + } unhashedSubpackets := make([]byte, unhashedSubpacketsLength) _, err = readFull(r, unhashedSubpackets) if err != nil { @@ -186,6 +272,30 @@ func (sig *Signature) parse(r io.Reader) (err error) { return } + if sig.Version == 6 { + // Only for v6 signatures, a variable-length field containing the salt + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + saltLength := int(buf[0]) + var expectedSaltLength int + expectedSaltLength, err = SaltLengthForHash(sig.Hash) + if err != nil { + return + } + if saltLength != expectedSaltLength { + err = errors.StructuralError("unexpected salt size for the given hash algorithm") + return + } + salt := make([]byte, expectedSaltLength) + _, err = readFull(r, salt) + if err != nil { + return + } + sig.salt = salt + } + switch sig.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: sig.RSASignature = new(encoding.MPI) @@ -216,6 +326,16 @@ func (sig *Signature) parse(r io.Reader) (err error) { if _, err = sig.EdDSASigS.ReadFrom(r); err != nil { return } + case PubKeyAlgoEd25519: + sig.EdSig, err = ed25519.ReadSignature(r) + if err != nil { + return + } + case PubKeyAlgoEd448: + sig.EdSig, err = ed448.ReadSignature(r) + if err != nil { + return + } default: panic("unreachable") } @@ -223,7 +343,7 @@ func (sig *Signature) parse(r io.Reader) (err error) { } // parseSignatureSubpackets parses subpackets of the main signature packet. See -// RFC 4880, section 5.2.3.1. +// RFC 9580, section 5.2.3.1. 
func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { for len(subpackets) > 0 { subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) @@ -244,6 +364,7 @@ type signatureSubpacketType uint8 const ( creationTimeSubpacket signatureSubpacketType = 2 signatureExpirationSubpacket signatureSubpacketType = 3 + exportableCertSubpacket signatureSubpacketType = 4 trustSubpacket signatureSubpacketType = 5 regularExpressionSubpacket signatureSubpacketType = 6 keyExpirationSubpacket signatureSubpacketType = 9 @@ -252,6 +373,8 @@ const ( notationDataSubpacket signatureSubpacketType = 20 prefHashAlgosSubpacket signatureSubpacketType = 21 prefCompressionSubpacket signatureSubpacketType = 22 + keyserverPrefsSubpacket signatureSubpacketType = 23 + prefKeyserverSubpacket signatureSubpacketType = 24 primaryUserIdSubpacket signatureSubpacketType = 25 policyUriSubpacket signatureSubpacketType = 26 keyFlagsSubpacket signatureSubpacketType = 27 @@ -260,12 +383,13 @@ const ( featuresSubpacket signatureSubpacketType = 30 embeddedSignatureSubpacket signatureSubpacketType = 32 issuerFingerprintSubpacket signatureSubpacketType = 33 + intendedRecipientSubpacket signatureSubpacketType = 35 prefCipherSuitesSubpacket signatureSubpacketType = 39 ) // parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { - // RFC 4880, section 5.2.3.1 + // RFC 9580, section 5.2.3.7 var ( length uint32 packetType signatureSubpacketType @@ -323,19 +447,24 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r t := binary.BigEndian.Uint32(subpacket) sig.CreationTime = time.Unix(int64(t), 0) case signatureExpirationSubpacket: - // Signature expiration time, section 5.2.3.10 + // Signature expiration time, section 5.2.3.18 if len(subpacket) != 4 { err = errors.StructuralError("expiration subpacket with bad length") return } sig.SigLifetimeSecs = new(uint32) *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case exportableCertSubpacket: + if subpacket[0] == 0 { + err = errors.UnsupportedError("signature with non-exportable certification") + return + } case trustSubpacket: if len(subpacket) != 2 { err = errors.StructuralError("trust subpacket with bad length") return } - // Trust level and amount, section 5.2.3.13 + // Trust level and amount, section 5.2.3.21 sig.TrustLevel = TrustLevel(subpacket[0]) sig.TrustAmount = TrustAmount(subpacket[1]) case regularExpressionSubpacket: @@ -343,7 +472,7 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r err = errors.StructuralError("regexp subpacket with bad length") return } - // Trust regular expression, section 5.2.3.14 + // Trust regular expression, section 5.2.3.22 // RFC specifies the string should be null-terminated; remove a null byte from the end if subpacket[len(subpacket)-1] != 0x00 { err = errors.StructuralError("expected regular expression to be null-terminated") @@ -352,7 +481,7 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r trustRegularExpression := string(subpacket[:len(subpacket)-1]) sig.TrustRegularExpression = &trustRegularExpression case keyExpirationSubpacket: - // Key expiration time, section 5.2.3.6 + // Key expiration time, section 5.2.3.13 if len(subpacket) != 4 { err = errors.StructuralError("key expiration subpacket with bad length") return @@ -360,23 +489,25 @@ func parseSignatureSubpacket(sig 
*Signature, subpacket []byte, isHashed bool) (r sig.KeyLifetimeSecs = new(uint32) *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) case prefSymmetricAlgosSubpacket: - // Preferred symmetric algorithms, section 5.2.3.7 + // Preferred symmetric algorithms, section 5.2.3.14 sig.PreferredSymmetric = make([]byte, len(subpacket)) copy(sig.PreferredSymmetric, subpacket) case issuerSubpacket: - // Issuer, section 5.2.3.5 - if sig.Version > 4 { - err = errors.StructuralError("issuer subpacket found in v5 key") + // Issuer, section 5.2.3.12 + if sig.Version > 4 && isHashed { + err = errors.StructuralError("issuer subpacket found in v6 key") return } if len(subpacket) != 8 { err = errors.StructuralError("issuer subpacket with bad length") return } - sig.IssuerKeyId = new(uint64) - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + if sig.Version <= 4 { + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + } case notationDataSubpacket: - // Notation data, section 5.2.3.16 + // Notation data, section 5.2.3.24 if len(subpacket) < 8 { err = errors.StructuralError("notation data subpacket with bad length") return @@ -398,15 +529,27 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r sig.Notations = append(sig.Notations, ¬ation) case prefHashAlgosSubpacket: - // Preferred hash algorithms, section 5.2.3.8 + // Preferred hash algorithms, section 5.2.3.16 sig.PreferredHash = make([]byte, len(subpacket)) copy(sig.PreferredHash, subpacket) case prefCompressionSubpacket: - // Preferred compression algorithms, section 5.2.3.9 + // Preferred compression algorithms, section 5.2.3.17 sig.PreferredCompression = make([]byte, len(subpacket)) copy(sig.PreferredCompression, subpacket) + case keyserverPrefsSubpacket: + // Keyserver preferences, section 5.2.3.25 + sig.KeyserverPrefsValid = true + if len(subpacket) == 0 { + return + } + if subpacket[0]&KeyserverPrefNoModify != 0 { + sig.KeyserverPrefNoModify = true + } + case prefKeyserverSubpacket: + // Preferred keyserver, section 5.2.3.26 + sig.PreferredKeyserver = string(subpacket) case primaryUserIdSubpacket: - // Primary User ID, section 5.2.3.19 + // Primary User ID, section 5.2.3.27 if len(subpacket) != 1 { err = errors.StructuralError("primary user id subpacket with bad length") return @@ -416,12 +559,11 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r *sig.IsPrimaryId = true } case keyFlagsSubpacket: - // Key flags, section 5.2.3.21 + // Key flags, section 5.2.3.29 + sig.FlagsValid = true if len(subpacket) == 0 { - err = errors.StructuralError("empty key flags subpacket") return } - sig.FlagsValid = true if subpacket[0]&KeyFlagCertify != 0 { sig.FlagCertify = true } @@ -447,16 +589,16 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r userId := string(subpacket) sig.SignerUserId = &userId case reasonForRevocationSubpacket: - // Reason For Revocation, section 5.2.3.23 + // Reason For Revocation, section 5.2.3.31 if len(subpacket) == 0 { err = errors.StructuralError("empty revocation reason subpacket") return } sig.RevocationReason = new(ReasonForRevocation) - *sig.RevocationReason = ReasonForRevocation(subpacket[0]) + *sig.RevocationReason = NewReasonForRevocation(subpacket[0]) sig.RevocationReasonText = string(subpacket[1:]) case featuresSubpacket: - // Features subpacket, section 5.2.3.24 specifies a very general + // Features subpacket, section 5.2.3.32 specifies a very general // mechanism for OpenPGP 
implementations to signal support for new // features. if len(subpacket) > 0 { @@ -470,16 +612,13 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r } case embeddedSignatureSubpacket: // Only usage is in signatures that cross-certify - // signing subkeys. section 5.2.3.26 describes the + // signing subkeys. section 5.2.3.34 describes the // format, with its usage described in section 11.1 if sig.EmbeddedSignature != nil { err = errors.StructuralError("Cannot have multiple embedded signatures") return } sig.EmbeddedSignature = new(Signature) - // Embedded signatures are required to be v4 signatures see - // section 12.1. However, we only parse v4 signatures in this - // file anyway. if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { return nil, err } @@ -487,7 +626,7 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) } case policyUriSubpacket: - // Policy URI, section 5.2.3.20 + // Policy URI, section 5.2.3.28 sig.PolicyURI = string(subpacket) case issuerFingerprintSubpacket: if len(subpacket) == 0 { @@ -495,20 +634,31 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r return } v, l := subpacket[0], len(subpacket[1:]) - if v == 5 && l != 32 || v != 5 && l != 20 { + if v >= 5 && l != 32 || v < 5 && l != 20 { return nil, errors.StructuralError("bad fingerprint length") } sig.IssuerFingerprint = make([]byte, l) copy(sig.IssuerFingerprint, subpacket[1:]) sig.IssuerKeyId = new(uint64) - if v == 5 { + if v >= 5 { *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9]) } else { *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21]) } + case intendedRecipientSubpacket: + // Intended Recipient Fingerprint, section 5.2.3.36 + if len(subpacket) < 1 { + return nil, errors.StructuralError("invalid intended recipient fingerpring length") + } + version, length := subpacket[0], len(subpacket[1:]) + if version >= 5 && length != 32 || version < 5 && length != 20 { + return nil, errors.StructuralError("invalid fingerprint length") + } + fingerprint := make([]byte, length) + copy(fingerprint, subpacket[1:]) + sig.IntendedRecipients = append(sig.IntendedRecipients, &Recipient{int(version), fingerprint}) case prefCipherSuitesSubpacket: - // Preferred AEAD cipher suites - // See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-preferred-aead-ciphersuites + // Preferred AEAD cipher suites, section 5.2.3.15 if len(subpacket)%2 != 0 { err = errors.StructuralError("invalid aead cipher suite length") return @@ -550,9 +700,16 @@ func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool { return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId } +func (sig *Signature) CheckKeyIdOrFingerprintExplicit(fingerprint []byte, keyId uint64) bool { + if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 && fingerprint != nil { + return bytes.Equal(sig.IssuerFingerprint, fingerprint) + } + return sig.IssuerKeyId != nil && *sig.IssuerKeyId == keyId +} + // serializeSubpacketLength marshals the given length into to. func serializeSubpacketLength(to []byte, length int) int { - // RFC 4880, Section 4.2.2. + // RFC 9580, Section 4.2.1. 
if length < 192 { to[0] = byte(length) return 1 @@ -598,20 +755,19 @@ func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { to = to[n:] } } - return } // SigExpired returns whether sig is a signature that has expired or is created // in the future. func (sig *Signature) SigExpired(currentTime time.Time) bool { - if sig.CreationTime.After(currentTime) { + if sig.CreationTime.Unix() > currentTime.Unix() { return true } if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 { return false } expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second) - return currentTime.After(expiry) + return currentTime.Unix() > expiry.Unix() } // buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. @@ -635,20 +791,36 @@ func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) { uint8(sig.SigType), uint8(sig.PubKeyAlgo), uint8(hashId), - uint8(len(hashedSubpackets) >> 8), - uint8(len(hashedSubpackets)), }) + hashedSubpacketsLength := len(hashedSubpackets) + if sig.Version == 6 { + // v6 signatures store the length in 4 octets + hashedFields.Write([]byte{ + uint8(hashedSubpacketsLength >> 24), + uint8(hashedSubpacketsLength >> 16), + uint8(hashedSubpacketsLength >> 8), + uint8(hashedSubpacketsLength), + }) + } else { + hashedFields.Write([]byte{ + uint8(hashedSubpacketsLength >> 8), + uint8(hashedSubpacketsLength), + }) + } + lenPrefix := hashedFields.Len() hashedFields.Write(hashedSubpackets) - var l uint64 = uint64(6 + len(hashedSubpackets)) + var l uint64 = uint64(lenPrefix + len(hashedSubpackets)) if sig.Version == 5 { + // v5 case hashedFields.Write([]byte{0x05, 0xff}) hashedFields.Write([]byte{ uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), }) } else { - hashedFields.Write([]byte{0x04, 0xff}) + // v4 and v6 case + hashedFields.Write([]byte{byte(sig.Version), 0xff}) hashedFields.Write([]byte{ uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), }) @@ -676,6 +848,67 @@ func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { return } +// PrepareSign must be called to create a hash object before Sign for v6 signatures. +// The created hash object initially hashes a randomly generated salt +// as required by v6 signatures. The generated salt is stored in sig. If the signature is not v6, +// the method returns an empty hash object. +// See RFC 9580 Section 5.2.4. +func (sig *Signature) PrepareSign(config *Config) (hash.Hash, error) { + if !sig.Hash.Available() { + return nil, errors.UnsupportedError("hash function") + } + hasher := sig.Hash.New() + if sig.Version == 6 { + if sig.salt == nil { + var err error + sig.salt, err = SignatureSaltForHash(sig.Hash, config.Random()) + if err != nil { + return nil, err + } + } + hasher.Write(sig.salt) + } + return hasher, nil +} + +// SetSalt sets the signature salt for v6 signatures. +// Assumes salt is generated correctly and checks if length matches. +// If the signature is not v6, the method ignores the salt. +// Use PrepareSign whenever possible instead of generating and +// hashing the salt externally. +// See RFC 9580 Section 5.2.4. 
+func (sig *Signature) SetSalt(salt []byte) error { + if sig.Version == 6 { + expectedSaltLength, err := SaltLengthForHash(sig.Hash) + if err != nil { + return err + } + if salt == nil || len(salt) != expectedSaltLength { + return errors.InvalidArgumentError("unexpected salt size for the given hash algorithm") + } + sig.salt = salt + } + return nil +} + +// PrepareVerify must be called to create a hash object before verifying v6 signatures. +// The created hash object initially hashes the internally stored salt. +// If the signature is not v6, the method returns an empty hash object. +// See RFC 9580 Section 5.2.4. +func (sig *Signature) PrepareVerify() (hash.Hash, error) { + if !sig.Hash.Available() { + return nil, errors.UnsupportedError("hash function") + } + hasher := sig.Hash.New() + if sig.Version == 6 { + if sig.salt == nil { + return nil, errors.StructuralError("v6 requires a salt for the hash to be signed") + } + hasher.Write(sig.salt) + } + return hasher, nil +} + // Sign signs a message with a private key. The hash, h, must contain // the hash of the message to be signed and will be mutated by this function. // On success, the signature is stored in sig. Call Serialize to write it out. @@ -686,6 +919,20 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e } sig.Version = priv.PublicKey.Version sig.IssuerFingerprint = priv.PublicKey.Fingerprint + if sig.Version < 6 && config.RandomizeSignaturesViaNotation() { + sig.removeNotationsWithName(SaltNotationName) + salt, err := SignatureSaltForHash(sig.Hash, config.Random()) + if err != nil { + return err + } + notation := Notation{ + Name: SaltNotationName, + Value: salt, + IsCritical: false, + IsHumanReadable: false, + } + sig.Notations = append(sig.Notations, ¬ation) + } sig.outSubpackets, err = sig.buildSubpackets(priv.PublicKey) if err != nil { return err @@ -715,8 +962,16 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e sig.DSASigS = new(encoding.MPI).SetBig(s) } case PubKeyAlgoECDSA: - sk := priv.PrivateKey.(*ecdsa.PrivateKey) - r, s, err := ecdsa.Sign(config.Random(), sk, digest) + var r, s *big.Int + if sk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { + r, s, err = ecdsa.Sign(config.Random(), sk, digest) + } else { + var b []byte + b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + if err == nil { + r, s, err = unwrapECDSASig(b) + } + } if err == nil { sig.ECDSASigR = new(encoding.MPI).SetBig(r) @@ -729,6 +984,18 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e sig.EdDSASigR = encoding.NewMPI(r) sig.EdDSASigS = encoding.NewMPI(s) } + case PubKeyAlgoEd25519: + sk := priv.PrivateKey.(*ed25519.PrivateKey) + signature, err := ed25519.Sign(sk, digest) + if err == nil { + sig.EdSig = signature + } + case PubKeyAlgoEd448: + sk := priv.PrivateKey.(*ed448.PrivateKey) + signature, err := ed448.Sign(sk, digest) + if err == nil { + sig.EdSig = signature + } default: err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) } @@ -736,6 +1003,18 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e return } +// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA signature. 
+func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { + var ecsdaSig struct { + R, S *big.Int + } + _, err = asn1.Unmarshal(b, &ecsdaSig) + if err != nil { + return + } + return ecsdaSig.R, ecsdaSig.S, nil +} + // SignUserId computes a signature from priv, asserting that pub is a valid // key for the identity id. On success, the signature is stored in sig. Call // Serialize to write it out. @@ -744,11 +1023,32 @@ func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, co if priv.Dummy() { return errors.ErrDummyPrivateKey("dummy key found") } - h, err := userIdSignatureHash(id, pub, sig.Hash) + prepareHash, err := sig.PrepareSign(config) if err != nil { return err } - return sig.Sign(h, priv, config) + if err := userIdSignatureHash(id, pub, prepareHash); err != nil { + return err + } + return sig.Sign(prepareHash, priv, config) +} + +// SignDirectKeyBinding computes a signature from priv +// On success, the signature is stored in sig. +// Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignDirectKeyBinding(pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + if err := directKeySignatureHash(pub, prepareHash); err != nil { + return err + } + return sig.Sign(prepareHash, priv, config) } // CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success, @@ -756,7 +1056,11 @@ func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, co // If config is nil, sensible defaults will be used. func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey, config *Config) error { - h, err := keySignatureHash(hashKey, pub, sig.Hash) + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + h, err := keySignatureHash(hashKey, pub, prepareHash) if err != nil { return err } @@ -770,7 +1074,11 @@ func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) if priv.Dummy() { return errors.ErrDummyPrivateKey("dummy key found") } - h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + h, err := keySignatureHash(&priv.PublicKey, pub, prepareHash) if err != nil { return err } @@ -781,11 +1089,14 @@ func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) // stored in sig. Call Serialize to write it out. // If config is nil, sensible defaults will be used. func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := keyRevocationHash(pub, sig.Hash) + prepareHash, err := sig.PrepareSign(config) if err != nil { return err } - return sig.Sign(h, priv, config) + if err := keyRevocationHash(pub, prepareHash); err != nil { + return err + } + return sig.Sign(prepareHash, priv, config) } // RevokeSubkey computes a subkey revocation signature of pub using priv. 
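The signing helpers in this hunk pair up around the new v6 salt: PrepareSign draws a random salt for the configured hash, stores it on the Signature, and returns a hasher that has already absorbed it, while PrepareVerify re-seeds a fresh hasher with the salt carried by a parsed signature. Below is a minimal sketch of that round trip, assuming the caller already holds a *packet.PrivateKey; the package name, the helper signAndVerifyDetached, and msg are illustrative names, not identifiers from this diff.

// Illustrative sketch only: pairing PrepareSign and PrepareVerify for a
// salted v6 signature over a detached message.
package example

import (
	"crypto"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func signAndVerifyDetached(priv *packet.PrivateKey, msg []byte, config *packet.Config) error {
	sig := &packet.Signature{
		Version:      priv.PublicKey.Version, // 6 for v6 keys, so a salt is used
		SigType:      packet.SigTypeBinary,
		PubKeyAlgo:   priv.PublicKey.PubKeyAlgo,
		Hash:         crypto.SHA256,
		CreationTime: config.Now(),
	}

	// PrepareSign generates the salt (stored on sig) and returns a hasher
	// that has already absorbed it.
	h, err := sig.PrepareSign(config)
	if err != nil {
		return err
	}
	h.Write(msg)
	if err := sig.Sign(h, priv, config); err != nil {
		return err
	}

	// Verification replays the salt carried by the signature packet.
	hv, err := sig.PrepareVerify()
	if err != nil {
		return err
	}
	hv.Write(msg)
	return priv.PublicKey.VerifySignature(hv, sig)
}

SignUserId, SignDirectKeyBinding, CrossSignKey, SignKey and RevokeKey in this hunk follow the same PrepareSign-then-Sign shape internally, so callers of those entry points get the salted hashing without managing it themselves.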
@@ -802,7 +1113,7 @@ func (sig *Signature) Serialize(w io.Writer) (err error) { if len(sig.outSubpackets) == 0 { sig.outSubpackets = sig.rawSubpackets } - if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil { + if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil && sig.EdSig == nil { return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") } @@ -819,16 +1130,24 @@ func (sig *Signature) Serialize(w io.Writer) (err error) { case PubKeyAlgoEdDSA: sigLength = int(sig.EdDSASigR.EncodedLength()) sigLength += int(sig.EdDSASigS.EncodedLength()) + case PubKeyAlgoEd25519: + sigLength = ed25519.SignatureSize + case PubKeyAlgoEd448: + sigLength = ed448.SignatureSize default: panic("impossible") } + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - length := len(sig.HashSuffix) - 6 /* trailer not included */ + + length := 4 + /* length of version|signature type|public-key algorithm|hash algorithm */ + 2 /* length of hashed subpackets */ + hashedSubpacketsLen + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + 2 /* hash tag */ + sigLength - if sig.Version == 5 { - length -= 4 // eight-octet instead of four-octet big endian + if sig.Version == 6 { + length += 4 + /* the two length fields are four-octet instead of two */ + 1 + /* salt length */ + len(sig.salt) /* length salt */ } err = serializeHeader(w, packetTypeSignature, length) if err != nil { @@ -842,18 +1161,41 @@ func (sig *Signature) Serialize(w io.Writer) (err error) { } func (sig *Signature) serializeBody(w io.Writer) (err error) { - hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | uint16(sig.HashSuffix[5]) - fields := sig.HashSuffix[:6+hashedSubpacketsLen] + var fields []byte + if sig.Version == 6 { + // v6 signatures use 4 octets for length + hashedSubpacketsLen := + uint32(uint32(sig.HashSuffix[4])<<24) | + uint32(uint32(sig.HashSuffix[5])<<16) | + uint32(uint32(sig.HashSuffix[6])<<8) | + uint32(sig.HashSuffix[7]) + fields = sig.HashSuffix[:8+hashedSubpacketsLen] + } else { + hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | + uint16(sig.HashSuffix[5]) + fields = sig.HashSuffix[:6+hashedSubpacketsLen] + + } _, err = w.Write(fields) if err != nil { return } unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) - unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) - unhashedSubpackets[1] = byte(unhashedSubpacketsLen) - serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + var unhashedSubpackets []byte + if sig.Version == 6 { + unhashedSubpackets = make([]byte, 4+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 24) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen >> 16) + unhashedSubpackets[2] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[3] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[4:], sig.outSubpackets, false) + } else { + unhashedSubpackets = make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + } _, err = w.Write(unhashedSubpackets) if err != nil { @@ -864,6 +1206,18 @@ func (sig *Signature) serializeBody(w io.Writer) (err error) { return } + 
if sig.Version == 6 { + // write salt for v6 signatures + _, err = w.Write([]byte{uint8(len(sig.salt))}) + if err != nil { + return + } + _, err = w.Write(sig.salt) + if err != nil { + return + } + } + switch sig.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: _, err = w.Write(sig.RSASignature.EncodedBytes()) @@ -882,6 +1236,10 @@ func (sig *Signature) serializeBody(w io.Writer) (err error) { return } _, err = w.Write(sig.EdDSASigS.EncodedBytes()) + case PubKeyAlgoEd25519: + err = ed25519.WriteSignature(w, sig.EdSig) + case PubKeyAlgoEd448: + err = ed448.WriteSignature(w, sig.EdSig) default: panic("impossible") } @@ -899,28 +1257,81 @@ type outputSubpacket struct { func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubpacket, err error) { creationTime := make([]byte, 4) binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) - subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) - - if sig.IssuerKeyId != nil && sig.Version == 4 { - keyId := make([]byte, 8) - binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) - subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) - } - if sig.IssuerFingerprint != nil { - contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) - subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version == 5, contents}) - } - if sig.SignerUserId != nil { - subpackets = append(subpackets, outputSubpacket{true, signerUserIdSubpacket, false, []byte(*sig.SignerUserId)}) - } + // Signature Creation Time + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, true, creationTime}) + // Signature Expiration Time if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { sigLifetime := make([]byte, 4) binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) } - + // Trust Signature + if sig.TrustLevel != 0 { + subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}}) + } + // Regular Expression + if sig.TrustRegularExpression != nil { + // RFC specifies the string should be null-terminated; add a null byte to the end + subpackets = append(subpackets, outputSubpacket{true, regularExpressionSubpacket, true, []byte(*sig.TrustRegularExpression + "\000")}) + } + // Key Expiration Time + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + // Preferred Symmetric Ciphers for v1 SEIPD + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + // Issuer Key ID + if sig.IssuerKeyId != nil && sig.Version == 4 { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId}) + } + // Notation Data + for _, notation := range sig.Notations { + subpackets = append( + subpackets, + outputSubpacket{ + true, + notationDataSubpacket, + notation.IsCritical, + notation.getData(), + }) + } + // Preferred Hash Algorithms + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, 
prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + // Preferred Compression Algorithms + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + // Keyserver Preferences + // Keyserver preferences may only appear in self-signatures or certification signatures. + if sig.KeyserverPrefsValid { + var prefs byte + if sig.KeyserverPrefNoModify { + prefs |= KeyserverPrefNoModify + } + subpackets = append(subpackets, outputSubpacket{true, keyserverPrefsSubpacket, false, []byte{prefs}}) + } + // Preferred Keyserver + if len(sig.PreferredKeyserver) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefKeyserverSubpacket, false, []uint8(sig.PreferredKeyserver)}) + } + // Primary User ID + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + // Policy URI + if len(sig.PolicyURI) > 0 { + subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)}) + } + // Key Flags // Key flags may only appear in self-signatures or certification signatures. - if sig.FlagsValid { var flags byte if sig.FlagCertify { @@ -944,22 +1355,19 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp if sig.FlagGroupKey { flags |= KeyFlagGroupKey } - subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, true, []byte{flags}}) } - - for _, notation := range sig.Notations { - subpackets = append( - subpackets, - outputSubpacket{ - true, - notationDataSubpacket, - notation.IsCritical, - notation.getData(), - }) + // Signer's User ID + if sig.SignerUserId != nil { + subpackets = append(subpackets, outputSubpacket{true, signerUserIdSubpacket, false, []byte(*sig.SignerUserId)}) } - - // The following subpackets may only appear in self-signatures. - + // Reason for Revocation + // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.31. 
+ if sig.RevocationReason != nil { + subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true, + append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)}) + } + // Features var features = byte(0x00) if sig.SEIPDv1 { features |= 0x01 @@ -967,62 +1375,11 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp if sig.SEIPDv2 { features |= 0x08 } - if features != 0x00 { subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}}) } - - if sig.TrustLevel != 0 { - subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}}) - } - - if sig.TrustRegularExpression != nil { - // RFC specifies the string should be null-terminated; add a null byte to the end - subpackets = append(subpackets, outputSubpacket{true, regularExpressionSubpacket, true, []byte(*sig.TrustRegularExpression + "\000")}) - } - - if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { - keyLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) - } - - if sig.IsPrimaryId != nil && *sig.IsPrimaryId { - subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) - } - - if len(sig.PreferredSymmetric) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) - } - - if len(sig.PreferredHash) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) - } - - if len(sig.PreferredCompression) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) - } - - if len(sig.PolicyURI) > 0 { - subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)}) - } - - if len(sig.PreferredCipherSuites) > 0 { - serialized := make([]byte, len(sig.PreferredCipherSuites)*2) - for i, cipherSuite := range sig.PreferredCipherSuites { - serialized[2*i] = cipherSuite[0] - serialized[2*i+1] = cipherSuite[1] - } - subpackets = append(subpackets, outputSubpacket{true, prefCipherSuitesSubpacket, false, serialized}) - } - - // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23. - if sig.RevocationReason != nil { - subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true, - append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)}) - } - - // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26. + // Embedded Signature + // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.34. if sig.EmbeddedSignature != nil { var buf bytes.Buffer err = sig.EmbeddedSignature.serializeBody(&buf) @@ -1031,7 +1388,31 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp } subpackets = append(subpackets, outputSubpacket{true, embeddedSignatureSubpacket, true, buf.Bytes()}) } - + // Issuer Fingerprint + if sig.IssuerFingerprint != nil { + contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) 
+ subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version >= 5, contents}) + } + // Intended Recipient Fingerprint + for _, recipient := range sig.IntendedRecipients { + subpackets = append( + subpackets, + outputSubpacket{ + true, + intendedRecipientSubpacket, + false, + recipient.Serialize(), + }) + } + // Preferred AEAD Ciphersuites + if len(sig.PreferredCipherSuites) > 0 { + serialized := make([]byte, len(sig.PreferredCipherSuites)*2) + for i, cipherSuite := range sig.PreferredCipherSuites { + serialized[2*i] = cipherSuite[0] + serialized[2*i+1] = cipherSuite[1] + } + subpackets = append(subpackets, outputSubpacket{true, prefCipherSuitesSubpacket, false, serialized}) + } return } @@ -1073,8 +1454,6 @@ func (sig *Signature) AddMetadataToHashSuffix() { binary.BigEndian.PutUint32(buf[:], lit.Time) suffix.Write(buf[:]) - // Update the counter and restore trailing bytes - l = uint64(suffix.Len()) suffix.Write([]byte{0x05, 0xff}) suffix.Write([]byte{ uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), @@ -1082,3 +1461,49 @@ func (sig *Signature) AddMetadataToHashSuffix() { }) sig.HashSuffix = suffix.Bytes() } + +// SaltLengthForHash selects the required salt length for the given hash algorithm, +// as per Table 23 (Hash algorithm registry) of the crypto refresh. +// See RFC 9580 Section 9.5. +func SaltLengthForHash(hash crypto.Hash) (int, error) { + switch hash { + case crypto.SHA256, crypto.SHA224, crypto.SHA3_256: + return 16, nil + case crypto.SHA384: + return 24, nil + case crypto.SHA512, crypto.SHA3_512: + return 32, nil + default: + return 0, errors.UnsupportedError("hash function not supported for V6 signatures") + } +} + +// SignatureSaltForHash generates a random signature salt +// with the length for the given hash algorithm. +// See RFC 9580 Section 9.5. +func SignatureSaltForHash(hash crypto.Hash, randReader io.Reader) ([]byte, error) { + saltLength, err := SaltLengthForHash(hash) + if err != nil { + return nil, err + } + salt := make([]byte, saltLength) + _, err = io.ReadFull(randReader, salt) + if err != nil { + return nil, err + } + return salt, nil +} + +// removeNotationsWithName removes all notations in this signature with the given name. +func (sig *Signature) removeNotationsWithName(name string) { + if sig == nil || sig.Notations == nil { + return + } + updatedNotations := make([]*Notation, 0, len(sig.Notations)) + for _, notation := range sig.Notations { + if notation.Name != name { + updatedNotations = append(updatedNotations, notation) + } + } + sig.Notations = updatedNotations +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go index bac2b132e..2812a1db8 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go @@ -7,11 +7,13 @@ package packet import ( "bytes" "crypto/cipher" + "crypto/sha256" "io" "strconv" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/s2k" + "golang.org/x/crypto/hkdf" ) // This is the largest session key that we'll support. 
Since at most 256-bit cipher @@ -39,10 +41,21 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { return err } ske.Version = int(buf[0]) - if ske.Version != 4 && ske.Version != 5 { + if ske.Version != 4 && ske.Version != 5 && ske.Version != 6 { return errors.UnsupportedError("unknown SymmetricKeyEncrypted version") } + if V5Disabled && ske.Version == 5 { + return errors.UnsupportedError("support for parsing v5 entities is disabled; build with `-tags v5` if needed") + } + + if ske.Version > 5 { + // Scalar octet count + if _, err := readFull(r, buf[:]); err != nil { + return err + } + } + // Cipher function if _, err := readFull(r, buf[:]); err != nil { return err @@ -52,7 +65,7 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[0]))) } - if ske.Version == 5 { + if ske.Version >= 5 { // AEAD mode if _, err := readFull(r, buf[:]); err != nil { return errors.StructuralError("cannot read AEAD octet from packet") @@ -60,6 +73,13 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { ske.Mode = AEADMode(buf[0]) } + if ske.Version > 5 { + // Scalar octet count + if _, err := readFull(r, buf[:]); err != nil { + return err + } + } + var err error if ske.s2k, err = s2k.Parse(r); err != nil { if _, ok := err.(errors.ErrDummyPrivateKey); ok { @@ -68,7 +88,7 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { return err } - if ske.Version == 5 { + if ske.Version >= 5 { // AEAD IV iv := make([]byte, ske.Mode.IvLength()) _, err := readFull(r, iv) @@ -109,8 +129,8 @@ func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunc case 4: plaintextKey, cipherFunc, err := ske.decryptV4(key) return plaintextKey, cipherFunc, err - case 5: - plaintextKey, err := ske.decryptV5(key) + case 5, 6: + plaintextKey, err := ske.aeadDecrypt(ske.Version, key) return plaintextKey, CipherFunction(0), err } err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version") @@ -136,9 +156,9 @@ func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction, return plaintextKey, cipherFunc, nil } -func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) { - adata := []byte{0xc3, byte(5), byte(ske.CipherFunc), byte(ske.Mode)} - aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata) +func (ske *SymmetricKeyEncrypted) aeadDecrypt(version int, key []byte) ([]byte, error) { + adata := []byte{0xc3, byte(version), byte(ske.CipherFunc), byte(ske.Mode)} + aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata, version) plaintextKey, err := aead.Open(nil, ske.iv, ske.encryptedKey, adata) if err != nil { @@ -175,10 +195,22 @@ func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Conf // the given passphrase. The returned session key must be passed to // SerializeSymmetricallyEncrypted. // If config is nil, sensible defaults will be used. +// Deprecated: Use SerializeSymmetricKeyEncryptedAEADReuseKey instead. func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) { + return SerializeSymmetricKeyEncryptedAEADReuseKey(w, sessionKey, passphrase, config.AEAD() != nil, config) +} + +// SerializeSymmetricKeyEncryptedAEADReuseKey serializes a symmetric key packet to w. +// The packet contains the given session key, encrypted by a key derived from +// the given passphrase. The returned session key must be passed to +// SerializeSymmetricallyEncrypted. 
+// If aeadSupported is set, SKESK v6 is used, otherwise v4. +// Note: aeadSupported MUST match the value passed to SerializeSymmetricallyEncrypted. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncryptedAEADReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, aeadSupported bool, config *Config) (err error) { var version int - if config.AEAD() != nil { - version = 5 + if aeadSupported { + version = 6 } else { version = 4 } @@ -203,11 +235,15 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass switch version { case 4: packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize - case 5: + case 5, 6: ivLen := config.AEAD().Mode().IvLength() tagLen := config.AEAD().Mode().TagLength() packetLength = 3 + len(s2kBytes) + ivLen + keySize + tagLen } + if version > 5 { + packetLength += 2 // additional octet count fields + } + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) if err != nil { return @@ -216,13 +252,22 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass // Symmetric Key Encrypted Version buf := []byte{byte(version)} + if version > 5 { + // Scalar octet count + buf = append(buf, byte(3+len(s2kBytes)+config.AEAD().Mode().IvLength())) + } + // Cipher function buf = append(buf, byte(cipherFunc)) - if version == 5 { + if version >= 5 { // AEAD mode buf = append(buf, byte(config.AEAD().Mode())) } + if version > 5 { + // Scalar octet count + buf = append(buf, byte(len(s2kBytes))) + } _, err = w.Write(buf) if err != nil { return @@ -243,10 +288,10 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass if err != nil { return } - case 5: + case 5, 6: mode := config.AEAD().Mode() - adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)} - aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata) + adata := []byte{0xc3, byte(version), byte(cipherFunc), byte(mode)} + aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata, version) // Sample iv using random reader iv := make([]byte, config.AEAD().Mode().IvLength()) @@ -270,7 +315,17 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass return } -func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte) (aead cipher.AEAD) { - blockCipher := c.new(inputKey) +func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte, version int) (aead cipher.AEAD) { + var blockCipher cipher.Block + if version > 5 { + hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData) + + encryptionKey := make([]byte, c.KeySize()) + _, _ = readFull(hkdfReader, encryptionKey) + + blockCipher = c.new(encryptionKey) + } else { + blockCipher = c.new(inputKey) + } return mode.new(blockCipher) } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go index e9bbf0327..0e898742c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go @@ -74,6 +74,10 @@ func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.Read // SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet // to w and returns a WriteCloser to which the to-be-encrypted packets can be // written. 
+// If aeadSupported is set to true, SEIPDv2 is used with the indicated CipherSuite. +// Otherwise, SEIPDv1 is used with the indicated CipherFunction. +// Note: aeadSupported MUST match the value passed to SerializeEncryptedKeyAEAD +// and/or SerializeSymmetricKeyEncryptedAEADReuseKey. // If config is nil, sensible defaults will be used. func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, aeadSupported bool, cipherSuite CipherSuite, key []byte, config *Config) (Contents io.WriteCloser, err error) { writeCloser := noOpCloser{w} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go index e96252c19..3ddc4fe4a 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go @@ -7,7 +7,9 @@ package packet import ( "crypto/cipher" "crypto/sha256" + "fmt" "io" + "strconv" "github.com/ProtonMail/go-crypto/openpgp/errors" "golang.org/x/crypto/hkdf" @@ -25,19 +27,19 @@ func (se *SymmetricallyEncrypted) parseAead(r io.Reader) error { se.Cipher = CipherFunction(headerData[0]) // cipherFunc must have block size 16 to use AEAD if se.Cipher.blockSize() != 16 { - return errors.UnsupportedError("invalid aead cipher: " + string(se.Cipher)) + return errors.UnsupportedError("invalid aead cipher: " + strconv.Itoa(int(se.Cipher))) } // Mode se.Mode = AEADMode(headerData[1]) if se.Mode.TagLength() == 0 { - return errors.UnsupportedError("unknown aead mode: " + string(se.Mode)) + return errors.UnsupportedError("unknown aead mode: " + strconv.Itoa(int(se.Mode))) } // Chunk size se.ChunkSizeByte = headerData[2] if se.ChunkSizeByte > 16 { - return errors.UnsupportedError("invalid aead chunk size byte: " + string(se.ChunkSizeByte)) + return errors.UnsupportedError("invalid aead chunk size byte: " + strconv.Itoa(int(se.ChunkSizeByte))) } // Salt @@ -62,11 +64,16 @@ func (se *SymmetricallyEncrypted) associatedData() []byte { // decryptAead decrypts a V2 SEIPD packet (AEAD) as specified in // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, error) { - aead, nonce := getSymmetricallyEncryptedAeadInstance(se.Cipher, se.Mode, inputKey, se.Salt[:], se.associatedData()) + if se.Cipher.KeySize() != len(inputKey) { + return nil, errors.StructuralError(fmt.Sprintf("invalid session key length for cipher: got %d bytes, but expected %d bytes", len(inputKey), se.Cipher.KeySize())) + } + aead, nonce := getSymmetricallyEncryptedAeadInstance(se.Cipher, se.Mode, inputKey, se.Salt[:], se.associatedData()) // Carry the first tagLen bytes + chunkSize := decodeAEADChunkSize(se.ChunkSizeByte) tagLen := se.Mode.TagLength() - peekedBytes := make([]byte, tagLen) + chunkBytes := make([]byte, chunkSize+tagLen*2) + peekedBytes := chunkBytes[chunkSize+tagLen:] n, err := io.ReadFull(se.Contents, peekedBytes) if n < tagLen || (err != nil && err != io.EOF) { return nil, errors.StructuralError("not enough data to decrypt:" + err.Error()) @@ -76,12 +83,13 @@ func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, e aeadCrypter: aeadCrypter{ aead: aead, chunkSize: decodeAEADChunkSize(se.ChunkSizeByte), - initialNonce: nonce, + nonce: nonce, associatedData: se.associatedData(), - chunkIndex: make([]byte, 8), + chunkIndex: nonce[len(nonce)-8:], 
packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, }, reader: se.Contents, + chunkBytes: chunkBytes, peekedBytes: peekedBytes, }, nil } @@ -115,7 +123,7 @@ func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite // Random salt salt := make([]byte, aeadSaltSize) - if _, err := rand.Read(salt); err != nil { + if _, err := io.ReadFull(rand, salt); err != nil { return nil, err } @@ -125,16 +133,20 @@ func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix) + chunkSize := decodeAEADChunkSize(chunkSizeByte) + tagLen := aead.Overhead() + chunkBytes := make([]byte, chunkSize+tagLen) return &aeadEncrypter{ aeadCrypter: aeadCrypter{ aead: aead, - chunkSize: decodeAEADChunkSize(chunkSizeByte), + chunkSize: chunkSize, associatedData: prefix, - chunkIndex: make([]byte, 8), - initialNonce: nonce, + nonce: nonce, + chunkIndex: nonce[len(nonce)-8:], packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, }, - writer: ciphertext, + writer: ciphertext, + chunkBytes: chunkBytes, }, nil } @@ -144,10 +156,10 @@ func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inpu encryptionKey := make([]byte, c.KeySize()) _, _ = readFull(hkdfReader, encryptionKey) - // Last 64 bits of nonce are the counter - nonce = make([]byte, mode.IvLength()-8) + nonce = make([]byte, mode.IvLength()) - _, _ = readFull(hkdfReader, nonce) + // Last 64 bits of nonce are the counter + _, _ = readFull(hkdfReader, nonce[:len(nonce)-8]) blockCipher := c.new(encryptionKey) aead = mode.new(blockCipher) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go index fa26bebe3..8b1862368 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go @@ -148,7 +148,7 @@ const mdcPacketTagByte = byte(0x80) | 0x40 | 19 func (ser *seMDCReader) Close() error { if ser.error { - return errors.ErrMDCMissing + return errors.ErrMDCHashMismatch } for !ser.eof { @@ -159,7 +159,7 @@ func (ser *seMDCReader) Close() error { break } if err != nil { - return errors.ErrMDCMissing + return errors.ErrMDCHashMismatch } } @@ -172,7 +172,7 @@ func (ser *seMDCReader) Close() error { // The hash already includes the MDC header, but we still check its value // to confirm encryption correctness if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { - return errors.ErrMDCMissing + return errors.ErrMDCHashMismatch } return nil } @@ -237,9 +237,9 @@ func serializeSymmetricallyEncryptedMdc(ciphertext io.WriteCloser, c CipherFunct block := c.new(key) blockSize := block.BlockSize() iv := make([]byte, blockSize) - _, err = config.Random().Read(iv) + _, err = io.ReadFull(config.Random(), iv) if err != nil { - return + return nil, err } s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) _, err = ciphertext.Write(prefix) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go index 88ec72c6c..63814ed13 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go @@ -9,7 +9,6 @@ import ( "image" "image/jpeg" "io" - 
"io/ioutil" ) const UserAttrImageSubpacket = 1 @@ -63,7 +62,7 @@ func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { func (uat *UserAttribute) parse(r io.Reader) (err error) { // RFC 4880, section 5.13 - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go index 614fbafd5..3c7451a3c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go @@ -6,7 +6,6 @@ package packet import ( "io" - "io/ioutil" "strings" ) @@ -66,7 +65,7 @@ func NewUserId(name, comment, email string) *UserId { func (uid *UserId) parse(r io.Reader) (err error) { // RFC 4880, section 5.11 - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go index 8499c7379..e6dd9b5fd 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -46,6 +46,7 @@ type MessageDetails struct { DecryptedWith Key // the private key used to decrypt the message, if any. IsSigned bool // true if the message is signed. SignedByKeyId uint64 // the key id of the signer, if any. + SignedByFingerprint []byte // the key fingerprint of the signer, if any. SignedBy *Key // the key of the signer, if available. LiteralData *packet.LiteralData // the metadata of the contents UnverifiedBody io.Reader // the contents of the message. @@ -117,7 +118,7 @@ ParsePackets: // This packet contains the decryption key encrypted to a public key. 
md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) switch p.Algo { - case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH: + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH, packet.PubKeyAlgoX25519, packet.PubKeyAlgoX448: break default: continue @@ -232,7 +233,7 @@ FindKey: } mdFinal, sensitiveParsingErr := readSignedMessage(packets, md, keyring, config) if sensitiveParsingErr != nil { - return nil, errors.StructuralError("parsing error") + return nil, errors.HandleSensitiveParsingError(sensitiveParsingErr, md.decrypted != nil) } return mdFinal, nil } @@ -270,13 +271,17 @@ FindLiteralData: prevLast = true } - h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType, p.Salt) if err != nil { md.SignatureError = err } md.IsSigned = true + if p.Version == 6 { + md.SignedByFingerprint = p.KeyFingerprint + } md.SignedByKeyId = p.KeyId + if keyring != nil { keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) if len(keys) > 0 { @@ -292,7 +297,7 @@ FindLiteralData: if md.IsSigned && md.SignatureError == nil { md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config} } else if md.decrypted != nil { - md.UnverifiedBody = checkReader{md} + md.UnverifiedBody = &checkReader{md, false} } else { md.UnverifiedBody = md.LiteralData.Body } @@ -300,12 +305,22 @@ FindLiteralData: return md, nil } +func wrapHashForSignature(hashFunc hash.Hash, sigType packet.SignatureType) (hash.Hash, error) { + switch sigType { + case packet.SigTypeBinary: + return hashFunc, nil + case packet.SigTypeText: + return NewCanonicalTextHash(hashFunc), nil + } + return nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + // hashForSignature returns a pair of hashes that can be used to verify a // signature. The signature may specify that the contents of the signed message // should be preprocessed (i.e. to normalize line endings). Thus this function // returns two hashes. The second should be used to hash the message itself and // performs any needed preprocessing. -func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { +func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType, sigSalt []byte) (hash.Hash, hash.Hash, error) { if _, ok := algorithm.HashToHashIdWithSha1(hashFunc); !ok { return nil, nil, errors.UnsupportedError("unsupported hash function") } @@ -313,14 +328,19 @@ func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash. return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashFunc))) } h := hashFunc.New() - + if sigSalt != nil { + h.Write(sigSalt) + } + wrappedHash, err := wrapHashForSignature(h, sigType) + if err != nil { + return nil, nil, err + } switch sigType { case packet.SigTypeBinary: - return h, h, nil + return h, wrappedHash, nil case packet.SigTypeText: - return h, NewCanonicalTextHash(h), nil + return h, wrappedHash, nil } - return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) } @@ -328,21 +348,27 @@ func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash. // it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger // MDC checks. 
type checkReader struct { - md *MessageDetails + md *MessageDetails + checked bool } -func (cr checkReader) Read(buf []byte) (int, error) { +func (cr *checkReader) Read(buf []byte) (int, error) { n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf) if sensitiveParsingError == io.EOF { + if cr.checked { + // Only check once + return n, io.EOF + } mdcErr := cr.md.decrypted.Close() if mdcErr != nil { return n, mdcErr } + cr.checked = true return n, io.EOF } if sensitiveParsingError != nil { - return n, errors.StructuralError("parsing error") + return n, errors.HandleSensitiveParsingError(sensitiveParsingError, true) } return n, nil @@ -366,6 +392,7 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { scr.wrappedHash.Write(buf[:n]) } + readsDecryptedData := scr.md.decrypted != nil if sensitiveParsingError == io.EOF { var p packet.Packet var readError error @@ -384,7 +411,7 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { key := scr.md.SignedBy signatureError := key.PublicKey.VerifySignature(scr.h, sig) if signatureError == nil { - signatureError = checkSignatureDetails(key, sig, scr.config) + signatureError = checkMessageSignatureDetails(key, sig, scr.config) } scr.md.Signature = sig scr.md.SignatureError = signatureError @@ -408,16 +435,15 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { // unsigned hash of its own. In order to check this we need to // close that Reader. if scr.md.decrypted != nil { - mdcErr := scr.md.decrypted.Close() - if mdcErr != nil { - return n, mdcErr + if sensitiveParsingError := scr.md.decrypted.Close(); sensitiveParsingError != nil { + return n, errors.HandleSensitiveParsingError(sensitiveParsingError, true) } } return n, io.EOF } if sensitiveParsingError != nil { - return n, errors.StructuralError("parsing error") + return n, errors.HandleSensitiveParsingError(sensitiveParsingError, readsDecryptedData) } return n, nil @@ -428,14 +454,13 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { // if any, and a possible signature verification error. // If the signer isn't known, ErrUnknownIssuer is returned. func VerifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { - var expectedHashes []crypto.Hash - return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) + return verifyDetachedSignature(keyring, signed, signature, nil, false, config) } // VerifyDetachedSignatureAndHash performs the same actions as // VerifyDetachedSignature and checks that the expected hash functions were used. func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { - return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) + return verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config) } // CheckDetachedSignature takes a signed file and a detached signature and @@ -443,25 +468,24 @@ func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader // signature verification error. If the signer isn't known, // ErrUnknownIssuer is returned. 
func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { - var expectedHashes []crypto.Hash - return CheckDetachedSignatureAndHash(keyring, signed, signature, expectedHashes, config) + _, signer, err = verifyDetachedSignature(keyring, signed, signature, nil, false, config) + return } // CheckDetachedSignatureAndHash performs the same actions as // CheckDetachedSignature and checks that the expected hash functions were used. func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) { - _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) + _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config) return } -func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { +func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, checkHashes bool, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { var issuerKeyId uint64 var hashFunc crypto.Hash var sigType packet.SignatureType var keys []Key var p packet.Packet - expectedHashesLen := len(expectedHashes) packets := packet.NewReader(signature) for { p, err = packets.Next() @@ -483,16 +507,19 @@ func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expec issuerKeyId = *sig.IssuerKeyId hashFunc = sig.Hash sigType = sig.SigType - - for i, expectedHash := range expectedHashes { - if hashFunc == expectedHash { - break + if checkHashes { + matchFound := false + // check for hashes + for _, expectedHash := range expectedHashes { + if hashFunc == expectedHash { + matchFound = true + break + } } - if i+1 == expectedHashesLen { - return nil, nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers") + if !matchFound { + return nil, nil, errors.StructuralError("hash algorithm or salt mismatch with cleartext message headers") } } - keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) if len(keys) > 0 { break @@ -503,7 +530,11 @@ func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expec panic("unreachable") } - h, wrappedHash, err := hashForSignature(hashFunc, sigType) + h, err := sig.PrepareVerify() + if err != nil { + return nil, nil, err + } + wrappedHash, err := wrapHashForSignature(h, sigType) if err != nil { return nil, nil, err } @@ -515,7 +546,7 @@ func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expec for _, key := range keys { err = key.PublicKey.VerifySignature(h, sig) if err == nil { - return sig, key.Entity, checkSignatureDetails(&key, sig, config) + return sig, key.Entity, checkMessageSignatureDetails(&key, sig, config) } } @@ -533,7 +564,7 @@ func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, return CheckDetachedSignature(keyring, signed, body, config) } -// checkSignatureDetails returns an error if: +// checkMessageSignatureDetails returns an error if: // - The signature (or one of the binding signatures mentioned below) // has a unknown critical notation data subpacket // - The primary key of the signing entity is revoked @@ -551,15 +582,11 @@ func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, // NOTE: The order of these checks is important, 
as the caller may choose to // ignore ErrSignatureExpired or ErrKeyExpired errors, but should never // ignore any other errors. -// -// TODO: Also return an error if: -// - The primary key is expired according to a direct-key signature -// - (For V5 keys only:) The direct-key signature (exists and) is expired -func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error { +func checkMessageSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error { now := config.Now() - primaryIdentity := key.Entity.PrimaryIdentity() + primarySelfSignature, primaryIdentity := key.Entity.PrimarySelfSignature() signedBySubKey := key.PublicKey != key.Entity.PrimaryKey - sigsToCheck := []*packet.Signature{signature, primaryIdentity.SelfSignature} + sigsToCheck := []*packet.Signature{signature, primarySelfSignature} if signedBySubKey { sigsToCheck = append(sigsToCheck, key.SelfSignature, key.SelfSignature.EmbeddedSignature) } @@ -572,10 +599,10 @@ func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet } if key.Entity.Revoked(now) || // primary key is revoked (signedBySubKey && key.Revoked(now)) || // subkey is revoked - primaryIdentity.Revoked(now) { // primary identity is revoked + (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // primary identity is revoked for v4 return errors.ErrKeyRevoked } - if key.Entity.PrimaryKey.KeyExpired(primaryIdentity.SelfSignature, now) { // primary key is expired + if key.Entity.PrimaryKey.KeyExpired(primarySelfSignature, now) { // primary key is expired return errors.ErrKeyExpired } if signedBySubKey { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go index db6dad5c0..670d60226 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go @@ -26,6 +26,8 @@ const testKeys1And2PrivateHex = "9501d8044d3c5c10010400b1d13382944bd5aba23a43129 const dsaElGamalTestKeysHex = 
"9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4b
fb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000" +const ed25519wX25519Key = "c54b0663877fe31b00000020f94da7bb48d60a61e567706a6587d0331999bb9d891a08242ead84543df895a3001972817b12be707e8d5f586ce61361201d344eb266a2c82fde6835762b65b0b7c2b1061f1b0a00000042058263877fe3030b090705150a0e080c021600029b03021e09222106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc905270902070200000000ad2820103e2d7d227ec0e6d7ce4471db36bfc97083253690271498a7ef0576c07faae14585b3b903b0127ec4fda2f023045a2ec76bcb4f9571a9651e14aee1137a1d668442c88f951e33c4ffd33fb9a17d511eed758fc6d9cc50cb5fd793b2039d5804c74b0663877fe319000000208693248367f9e5015db922f8f48095dda784987f2d5985b12fbad16caf5e4435004d600a4f794d44775c57a26e0feefed558e9afffd6ad0d582d57fb2ba2dcedb8c29b06181b0a0000002c050263877fe322a106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc9021b0c00000000defa20a6e9186d9d5935fc8fe56314cdb527486a5a5120f9b762a235a729f039010a56b89c658568341fbef3b894e9834ad9bc72afae2f4c9c47a43855e65f1cb0a3f77bbc5f61085c1f8249fe4e7ca59af5f0bcee9398e0fa8d76e522e1d8ab42bb0d" + const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300" const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200" @@ -160,18 +162,78 @@ TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw== =IiS2 -----END PGP PRIVATE KEY BLOCK-----` -// Generated with the above private key -const v5PrivKeyMsg = `-----BEGIN PGP MESSAGE----- -Version: OpenPGP.js v4.10.7 -Comment: https://openpgpjs.org +// See OpenPGP crypto refresh Section A.3. 
+const v6PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- -xA0DAQoWGTR7yYckZAIByxF1B21zZy50eHRfbIGSdGVzdMJ3BQEWCgAGBQJf -bIGSACMiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVDQvAP9G -y29VPonFXqi2zKkpZrvyvZxg+n5e8Nt9wNbuxeCd3QD/TtO2s+JvjrE4Siwv -UQdl5MlBka1QSNbMq2Bz7XwNPg4= -=6lbM +xUsGY4d/4xsAAAAg+U2nu0jWCmHlZ3BqZYfQMxmZu52JGggkLq2EVD34laMAGXKB +exK+cH6NX1hs5hNhIB00TrJmosgv3mg1ditlsLfCsQYfGwoAAABCBYJjh3/jAwsJ +BwUVCg4IDAIWAAKbAwIeCSIhBssYbE8GCaaX5NUt+mxyKwwfHifBilZwj2Ul7Ce6 +2azJBScJAgcCAAAAAK0oIBA+LX0ifsDm185Ecds2v8lwgyU2kCcUmKfvBXbAf6rh +RYWzuQOwEn7E/aLwIwRaLsdry0+VcallHhSu4RN6HWaEQsiPlR4zxP/TP7mhfVEe +7XWPxtnMUMtf15OyA51YBMdLBmOHf+MZAAAAIIaTJINn+eUBXbki+PSAld2nhJh/ +LVmFsS+60WyvXkQ1AE1gCk95TUR3XFeibg/u/tVY6a//1q0NWC1X+yui3O24wpsG +GBsKAAAALAWCY4d/4wKbDCIhBssYbE8GCaaX5NUt+mxyKwwfHifBilZwj2Ul7Ce6 +2azJAAAAAAQBIKbpGG2dWTX8j+VjFM21J0hqWlEg+bdiojWnKfA5AQpWUWtnNwDE +M0g12vYxoWM8Y81W+bHBw805I8kWVkXU6vFOi+HWvv/ira7ofJu16NnoUkhclkUr +k0mXubZvyl4GBg== +-----END PGP PRIVATE KEY BLOCK-----` + +// See OpenPGP crypto refresh merge request: +// https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/304 +const v6PrivKeyMsg = `-----BEGIN PGP MESSAGE----- + +wV0GIQYSyD8ecG9jCP4VGkF3Q6HwM3kOk+mXhIjR2zeNqZMIhRmHzxjV8bU/gXzO +WgBM85PMiVi93AZfJfhK9QmxfdNnZBjeo1VDeVZheQHgaVf7yopqR6W1FT6NOrfS +aQIHAgZhZBZTW+CwcW1g4FKlbExAf56zaw76/prQoN+bAzxpohup69LA7JW/Vp0l +yZnuSj3hcFj0DfqLTGgr4/u717J+sPWbtQBfgMfG9AOIwwrUBqsFE9zW+f1zdlYo +bhF30A+IitsxxA== -----END PGP MESSAGE-----` +// See OpenPGP crypto refresh merge request: +// https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/305 +const v6PrivKeyInlineSignMsg = `-----BEGIN PGP MESSAGE----- + +wV0GIQYSyD8ecG9jCP4VGkF3Q6HwM3kOk+mXhIjR2zeNqZMIhRmHzxjV8bU/gXzO +WgBM85PMiVi93AZfJfhK9QmxfdNnZBjeo1VDeVZheQHgaVf7yopqR6W1FT6NOrfS +aQIHAgZhZBZTW+CwcW1g4FKlbExAf56zaw76/prQoN+bAzxpohup69LA7JW/Vp0l +yZnuSj3hcFj0DfqLTGgr4/u717J+sPWbtQBfgMfG9AOIwwrUBqsFE9zW+f1zdlYo +bhF30A+IitsxxA== +-----END PGP MESSAGE-----` + +// See https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/274 +// decryption password: "correct horse battery staple" +const v6ArgonSealedPrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xYIGY4d/4xsAAAAg+U2nu0jWCmHlZ3BqZYfQMxmZu52JGggkLq2EVD34laP9JgkC +FARdb9ccngltHraRe25uHuyuAQQVtKipJ0+r5jL4dacGWSAheCWPpITYiyfyIOPS +3gIDyg8f7strd1OB4+LZsUhcIjOMpVHgmiY/IutJkulneoBYwrEGHxsKAAAAQgWC +Y4d/4wMLCQcFFQoOCAwCFgACmwMCHgkiIQbLGGxPBgmml+TVLfpscisMHx4nwYpW +cI9lJewnutmsyQUnCQIHAgAAAACtKCAQPi19In7A5tfORHHbNr/JcIMlNpAnFJin +7wV2wH+q4UWFs7kDsBJ+xP2i8CMEWi7Ha8tPlXGpZR4UruETeh1mhELIj5UeM8T/ +0z+5oX1RHu11j8bZzFDLX9eTsgOdWATHggZjh3/jGQAAACCGkySDZ/nlAV25Ivj0 +gJXdp4SYfy1ZhbEvutFsr15ENf0mCQIUBA5hhGgp2oaavg6mFUXcFMwBBBUuE8qf +9Ock+xwusd+GAglBr5LVyr/lup3xxQvHXFSjjA2haXfoN6xUGRdDEHI6+uevKjVR +v5oAxgu7eJpaXNjCmwYYGwoAAAAsBYJjh3/jApsMIiEGyxhsTwYJppfk1S36bHIr +DB8eJ8GKVnCPZSXsJ7rZrMkAAAAABAEgpukYbZ1ZNfyP5WMUzbUnSGpaUSD5t2Ki +Nacp8DkBClZRa2c3AMQzSDXa9jGhYzxjzVb5scHDzTkjyRZWRdTq8U6L4da+/+Kt +ruh8m7Xo2ehSSFyWRSuTSZe5tm/KXgYG +-----END PGP PRIVATE KEY BLOCK-----` + +const v4Key25519 = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xUkEZB3qzRto01j2k2pwN5ux9w70stPinAdXULLr20CRW7U7h2GSeACch0M+ +qzQg8yjFQ8VBvu3uwgKH9senoHmj72lLSCLTmhFKzQR0ZXN0wogEEBsIAD4F +gmQd6s0ECwkHCAmQIf45+TuC+xMDFQgKBBYAAgECGQECmwMCHgEWIQSWEzMi +jJUHvyIbVKIh/jn5O4L7EwAAUhaHNlgudvxARdPPETUzVgjuWi+YIz8w1xIb +lHQMvIrbe2sGCQIethpWofd0x7DHuv/ciHg+EoxJ/Td6h4pWtIoKx0kEZB3q +zRm4CyA7quliq7yx08AoOqHTuuCgvpkSdEhpp3pEyejQOgBo0p6ywIiLPllY +0t+jpNspHpAGfXID6oqjpYuJw3AfVRBlwnQEGBsIACoFgmQd6s0JkCH+Ofk7 
+gvsTApsMFiEElhMzIoyVB78iG1SiIf45+TuC+xMAAGgQuN9G73446ykvJ/mL +sCZ7zGFId2gBd1EnG0FTC4npfOKpck0X8dngByrCxU8LDSfvjsEp/xDAiKsQ +aU71tdtNBQ== +=e7jT +-----END PGP PRIVATE KEY BLOCK-----` + const keyWithExpiredCrossSig = `-----BEGIN PGP PUBLIC KEY BLOCK----- xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv @@ -272,3 +334,124 @@ AtNTq6ihLMD5v1d82ZC7tNatdlDMGWnIdvEMCv2GZcuIqDQ9rXWs49e7tq1NncLY hz3tYjKhoFTKEIq3y3Pp =h/aX -----END PGP PUBLIC KEY BLOCK-----` + +const keyv5Test = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: Bob's OpenPGP Transferable Secret Key + +lQVYBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv +/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz +/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ +5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 +X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv +9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 +qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb +SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb +vLIwa3T4CyshfT0AEQEAAQAL/RZqbJW2IqQDCnJi4Ozm++gPqBPiX1RhTWSjwxfM +cJKUZfzLj414rMKm6Jh1cwwGY9jekROhB9WmwaaKT8HtcIgrZNAlYzANGRCM4TLK +3VskxfSwKKna8l+s+mZglqbAjUg3wmFuf9Tj2xcUZYmyRm1DEmcN2ZzpvRtHgX7z +Wn1mAKUlSDJZSQks0zjuMNbupcpyJokdlkUg2+wBznBOTKzgMxVNC9b2g5/tMPUs +hGGWmF1UH+7AHMTaS6dlmr2ZBIyogdnfUqdNg5sZwsxSNrbglKP4sqe7X61uEAIQ +bD7rT3LonLbhkrj3I8wilUD8usIwt5IecoHhd9HziqZjRCc1BUBkboUEoyedbDV4 +i4qfsFZ6CEWoLuD5pW7dEp0M+WeuHXO164Rc+LnH6i1VQrpb1Okl4qO6ejIpIjBI +1t3GshtUu/mwGBBxs60KBX5g77mFQ9lLCRj8lSYqOsHRKBhUp4qM869VA+fD0BRP +fqPT0I9IH4Oa/A3jYJcg622GwQYA1LhnP208Waf6PkQSJ6kyr8ymY1yVh9VBE/g6 +fRDYA+pkqKnw9wfH2Qho3ysAA+OmVOX8Hldg+Pc0Zs0e5pCavb0En8iFLvTA0Q2E +LR5rLue9uD7aFuKFU/VdcddY9Ww/vo4k5p/tVGp7F8RYCFn9rSjIWbfvvZi1q5Tx ++akoZbga+4qQ4WYzB/obdX6SCmi6BndcQ1QdjCCQU6gpYx0MddVERbIp9+2SXDyL +hpxjSyz+RGsZi/9UAshT4txP4+MZBgDfK3ZqtW+h2/eMRxkANqOJpxSjMyLO/FXN +WxzTDYeWtHNYiAlOwlQZEPOydZFty9IVzzNFQCIUCGjQ/nNyhw7adSgUk3+BXEx/ +MyJPYY0BYuhLxLYcrfQ9nrhaVKxRJj25SVHj2ASsiwGJRZW4CC3uw40OYxfKEvNC +mer/VxM3kg8qqGf9KUzJ1dVdAvjyx2Hz6jY2qWCyRQ6IMjWHyd43C4r3jxooYKUC +YnstRQyb/gCSKahveSEjo07CiXMr88UGALwzEr3npFAsPW3osGaFLj49y1oRe11E +he9gCHFm+fuzbXrWmdPjYU5/ZdqdojzDqfu4ThfnipknpVUM1o6MQqkjM896FHm8 +zbKVFSMhEP6DPHSCexMFrrSgN03PdwHTO6iBaIBBFqmGY01tmJ03SxvSpiBPON9P +NVvy/6UZFedTq8A07OUAxO62YUSNtT5pmK2vzs3SAZJmbFbMh+NN204TRI72GlqT +t5hcfkuv8hrmwPS/ZR6q312mKQ6w/1pqO9qitCFCb2IgQmFiYmFnZSA8Ym9iQG9w +ZW5wZ3AuZXhhbXBsZT6JAc4EEwEKADgCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgEC +F4AWIQTRpm4aI7GCyZgPeIz7/MgqAV5zMAUCXaWe+gAKCRD7/MgqAV5zMG9sC/9U +2T3RrqEbw533FPNfEflhEVRIZ8gDXKM8hU6cqqEzCmzZT6xYTe6sv4y+PJBGXJFX +yhj0g6FDkSyboM5litOcTupURObVqMgA/Y4UKERznm4fzzH9qek85c4ljtLyNufe +doL2pp3vkGtn7eD0QFRaLLmnxPKQ/TlZKdLE1G3u8Uot8QHicaR6GnAdc5UXQJE3 +BiV7jZuDyWmZ1cUNwJkKL6oRtp+ZNDOQCrLNLecKHcgCqrpjSQG5oouba1I1Q6Vl +sP44dhA1nkmLHtxlTOzpeHj4jnk1FaXmyasurrrI5CgU/L2Oi39DGKTH/A/cywDN +4ZplIQ9zR8enkbXquUZvFDe+Xz+6xRXtb5MwQyWODB3nHw85HocLwRoIN9WdQEI+ +L8a/56AuOwhs8llkSuiITjR7r9SgKJC2WlAHl7E8lhJ3VDW3ELC56KH308d6mwOG +ZRAqIAKzM1T5FGjMBhq7ZV0eqdEntBh3EcOIfj2M8rg1MzJv+0mHZOIjByawikad +BVgEXaWc8gEMANYwv1xsYyunXYK0X1vY/rP1NNPvhLyLIE7NpK90YNBj+xS1ldGD +bUdZqZeef2xJe8gMQg05DoD1DF3GipZ0Ies65beh+d5hegb7N4pzh0LzrBrVNHar +29b5ExdI7i4iYD5TO6Vr/qTUOiAN/byqELEzAb+L+b2DVz/RoCm4PIp1DU9ewcc2 +WB38Ofqut3nLYA5tqJ9XvAiEQme+qAVcM3ZFcaMt4I4dXhDZZNg+D9LiTWcxdUPB +leu8iwDRjAgyAhPzpFp+nWoqWA81uIiULWD1Fj+IVoY3ZvgivoYOiEFBJ9lbb4te +g9m5UT/AaVDTWuHzbspVlbiVe+qyB77C2daWzNyx6UYBPLOo4r0t0c91kbNE5lgj 
+Z7xz6los0N1U8vq91EFSeQJoSQ62XWavYmlCLmdNT6BNfgh4icLsT7Vr1QMX9jzn +JtTPxdXytSdHvpSpULsqJ016l0dtmONcK3z9mj5N5z0k1tg1AH970TGYOe2aUcSx +IRDMXDOPyzEfjwARAQABAAv9F2CwsjS+Sjh1M1vegJbZjei4gF1HHpEM0K0PSXsp +SfVvpR4AoSJ4He6CXSMWg0ot8XKtDuZoV9jnJaES5UL9pMAD7JwIOqZm/DYVJM5h +OASCh1c356/wSbFbzRHPtUdZO9Q30WFNJM5pHbCJPjtNoRmRGkf71RxtvHBzy7np +Ga+W6U/NVKHw0i0CYwMI0YlKDakYW3Pm+QL+gHZFvngGweTod0f9l2VLLAmeQR/c ++EZs7lNumhuZ8mXcwhUc9JQIhOkpO+wreDysEFkAcsKbkQP3UDUsA1gFx9pbMzT0 +tr1oZq2a4QBtxShHzP/ph7KLpN+6qtjks3xB/yjTgaGmtrwM8tSe0wD1RwXS+/1o +BHpXTnQ7TfeOGUAu4KCoOQLv6ELpKWbRBLWuiPwMdbGpvVFALO8+kvKAg9/r+/ny +zM2GQHY+J3Jh5JxPiJnHfXNZjIKLbFbIPdSKNyJBuazXW8xIa//mEHMI5OcvsZBK +clAIp7LXzjEjKXIwHwDcTn9pBgDpdOKTHOtJ3JUKx0rWVsDH6wq6iKV/FTVSY5jl +zN+puOEsskF1Lfxn9JsJihAVO3yNsp6RvkKtyNlFazaCVKtDAmkjoh60XNxcNRqr +gCnwdpbgdHP6v/hvZY54ZaJjz6L2e8unNEkYLxDt8cmAyGPgH2XgL7giHIp9jrsQ +aS381gnYwNX6wE1aEikgtY91nqJjwPlibF9avSyYQoMtEqM/1UjTjB2KdD/MitK5 +fP0VpvuXpNYZedmyq4UOMwdkiNMGAOrfmOeT0olgLrTMT5H97Cn3Yxbk13uXHNu/ +ZUZZNe8s+QtuLfUlKAJtLEUutN33TlWQY522FV0m17S+b80xJib3yZVJteVurrh5 +HSWHAM+zghQAvCesg5CLXa2dNMkTCmZKgCBvfDLZuZbjFwnwCI6u/NhOY9egKuUf +SA/je/RXaT8m5VxLYMxwqQXKApzD87fv0tLPlVIEvjEsaf992tFEFSNPcG1l/jpd +5AVXw6kKuf85UkJtYR1x2MkQDrqY1QX/XMw00kt8y9kMZUre19aCArcmor+hDhRJ +E3Gt4QJrD9z/bICESw4b4z2DbgD/Xz9IXsA/r9cKiM1h5QMtXvuhyfVeM01enhxM +GbOH3gjqqGNKysx0UODGEwr6AV9hAd8RWXMchJLaExK9J5SRawSg671ObAU24SdY +vMQ9Z4kAQ2+1ReUZzf3ogSMRZtMT+d18gT6L90/y+APZIaoArLPhebIAGq39HLmJ +26x3z0WAgrpA1kNsjXEXkoiZGPLKIGoe3hqJAbYEGAEKACAWIQTRpm4aI7GCyZgP +eIz7/MgqAV5zMAUCXaWc8gIbDAAKCRD7/MgqAV5zMOn/C/9ugt+HZIwX308zI+QX +c5vDLReuzmJ3ieE0DMO/uNSC+K1XEioSIZP91HeZJ2kbT9nn9fuReuoff0T0Dief +rbwcIQQHFFkrqSp1K3VWmUGp2JrUsXFVdjy/fkBIjTd7c5boWljv/6wAsSfiv2V0 +JSM8EFU6TYXxswGjFVfc6X97tJNeIrXL+mpSmPPqy2bztcCCHkWS5lNLWQw+R7Vg +71Fe6yBSNVrqC2/imYG2J9zlowjx1XU63Wdgqp2Wxt0l8OmsB/W80S1fRF5G4SDH +s9HXglXXqPsBRZJYfP+VStm9L5P/sKjCcX6WtZR7yS6G8zj/X767MLK/djANvpPd +NVniEke6hM3CNBXYPAMhQBMWhCulcoz+0lxi8L34rMN+Dsbma96psdUrn7uLaB91 +6we0CTfF8qqm7BsVAgalon/UUiuMY80U3ueoj3okiSTiHIjD/YtpXSPioC8nMng7 +xqAY9Bwizt4FWgXuLm1a4+So4V9j1TRCXd12Uc2l2RNmgDE= +=miES +-----END PGP PRIVATE KEY BLOCK----- +` + +const certv5Test = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd +fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA +Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC +X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI +CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9 +M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA +MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD +AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF +GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb +DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7 +TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw== +=IiS2 +-----END PGP PRIVATE KEY BLOCK----- +` + +const msgv5Test = `-----BEGIN PGP MESSAGE----- + +wcDMA3wvqk35PDeyAQv+PcQiLsoYTH30nJYQh3j3cJaO2+jErtVCrIQRIU0+ +rmgMddERYST4A9mA0DQIiTI4FQ0Lp440D3BWCgpq3LlNWewGzduaWwym5rN6 +cwHz5ccDqOcqbd9X0GXXGy/ZH/ljSgzuVMIytMAXKdF/vrRrVgH/+I7cxvm9 +HwnhjMN5dF0j4aEt996H2T7cbtzSr2GN9SWGW8Gyu7I8Zx73hgrGUI7gDiJB +Afaff+P6hfkkHSGOItr94dde8J/7AUF4VEwwxdVVPvsNEFyvv6gRIbYtOCa2 +6RE6h1V/QTxW2O7zZgzWALrE2ui0oaYr9QuqQSssd9CdgExLfdPbI+3/ZAnE +v31Idzpk3/6ILiakYHtXkElPXvf46mCNpobty8ysT34irF+fy3C1p3oGwAsx +5VDV9OSFU6z5U+UPbSPYAy9rkc5ZssuIKxCER2oTvZ2L8Q5cfUvEUiJtRGGn 
+CJlHrVDdp3FssKv2tlKgLkvxJLyoOjuEkj44H1qRk+D02FzmmUT/0sAHAYYx +lTir6mjHeLpcGjn4waUuWIAJyph8SxUexP60bic0L0NBa6Qp5SxxijKsPIDb +FPHxWwfJSDZRrgUyYT7089YFB/ZM4FHyH9TZcnxn0f0xIB7NS6YNDsxzN2zT +EVEYf+De4qT/dQTsdww78Chtcv9JY9r2kDm77dk2MUGHL2j7n8jasbLtgA7h +pn2DMIWLrGamMLWRmlwslolKr1sMV5x8w+5Ias6C33iBMl9phkg42an0gYmc +byVJHvLO/XErtC+GNIJeMg== +=liRq +-----END PGP MESSAGE----- +` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go index a43695964..6871b84fc 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go @@ -87,10 +87,10 @@ func decodeCount(c uint8) int { // encodeMemory converts the Argon2 "memory" in the range parallelism*8 to // 2**31, inclusive, to an encoded memory. The return value is the // octet that is actually stored in the GPG file. encodeMemory panics -// if is not in the above range +// if is not in the above range // See OpenPGP crypto refresh Section 3.7.1.4. func encodeMemory(memory uint32, parallelism uint8) uint8 { - if memory < (8 * uint32(parallelism)) || memory > uint32(2147483648) { + if memory < (8*uint32(parallelism)) || memory > uint32(2147483648) { panic("Memory argument memory is outside the required range") } @@ -199,8 +199,8 @@ func Generate(rand io.Reader, c *Config) (*Params, error) { } params = &Params{ - mode: SaltedS2K, - hashId: hashId, + mode: SaltedS2K, + hashId: hashId, } } else { // Enforce IteratedSaltedS2K method otherwise hashId, ok := algorithm.HashToHashId(c.hash()) @@ -211,7 +211,7 @@ func Generate(rand io.Reader, c *Config) (*Params, error) { c.S2KMode = IteratedSaltedS2K } params = &Params{ - mode: IteratedSaltedS2K, + mode: IteratedSaltedS2K, hashId: hashId, countByte: c.EncodedCount(), } @@ -283,6 +283,9 @@ func ParseIntoParams(r io.Reader) (params *Params, err error) { params.passes = buf[Argon2SaltSize] params.parallelism = buf[Argon2SaltSize+1] params.memoryExp = buf[Argon2SaltSize+2] + if err := validateArgon2Params(params); err != nil { + return nil, err + } return params, nil case GnuS2K: // This is a GNU extension. See @@ -300,15 +303,22 @@ func ParseIntoParams(r io.Reader) (params *Params, err error) { return nil, errors.UnsupportedError("S2K function") } +func (params *Params) Mode() Mode { + return params.mode +} + func (params *Params) Dummy() bool { return params != nil && params.mode == GnuS2K } func (params *Params) salt() []byte { switch params.mode { - case SaltedS2K, IteratedSaltedS2K: return params.saltBytes[:8] - case Argon2S2K: return params.saltBytes[:Argon2SaltSize] - default: return nil + case SaltedS2K, IteratedSaltedS2K: + return params.saltBytes[:8] + case Argon2S2K: + return params.saltBytes[:Argon2SaltSize] + default: + return nil } } @@ -405,3 +415,22 @@ func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Co f(key, passphrase) return nil } + +// validateArgon2Params checks that the argon2 parameters are valid according to RFC9580. +func validateArgon2Params(params *Params) error { + // The number of passes t and the degree of parallelism p MUST be non-zero. + if params.parallelism == 0 { + return errors.StructuralError("invalid argon2 params: parallelism is 0") + } + if params.passes == 0 { + return errors.StructuralError("invalid argon2 params: iterations is 0") + } + + // The encoded memory size MUST be a value from 3+ceil(log2(p)) to 31, + // such that the decoded memory size m is a value from 8*p to 2^31. 
+ if params.memoryExp > 31 || decodeMemory(params.memoryExp) < 8*uint32(params.parallelism) { + return errors.StructuralError("invalid argon2 params: memory is out of bounds") + } + + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go index 25a4442df..616e0d12c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go @@ -5,7 +5,7 @@ package s2k // the same parameters. type Cache map[Params][]byte -// GetOrComputeDerivedKey tries to retrieve the key +// GetOrComputeDerivedKey tries to retrieve the key // for the given s2k parameters from the cache. // If there is no hit, it derives the key with the s2k function from the passphrase, // updates the cache, and returns the key. diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go index b40be5228..b93db1ab8 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go @@ -50,9 +50,9 @@ type Config struct { type Argon2Config struct { NumberOfPasses uint8 DegreeOfParallelism uint8 - // The memory parameter for Argon2 specifies desired memory usage in kibibytes. + // Memory specifies the desired Argon2 memory usage in kibibytes. // For example memory=64*1024 sets the memory cost to ~64 MB. - Memory uint32 + Memory uint32 } func (c *Config) Mode() Mode { @@ -115,7 +115,7 @@ func (c *Argon2Config) EncodedMemory() uint8 { } memory := c.Memory - lowerBound := uint32(c.Parallelism())*8 + lowerBound := uint32(c.Parallelism()) * 8 upperBound := uint32(2147483648) switch { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go index 7fdd13a3d..b0f6ef7b0 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go @@ -76,7 +76,11 @@ func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.S sig := createSignaturePacket(signingKey.PublicKey, sigType, config) - h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) + h, err := sig.PrepareSign(config) + if err != nil { + return + } + wrappedHash, err := wrapHashForSignature(h, sig.SigType) if err != nil { return } @@ -275,14 +279,28 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. 
(Wanted " + name + " in this case.)") } + var salt []byte if signer != nil { + var opsVersion = 3 + if signer.Version == 6 { + opsVersion = signer.Version + } ops := &packet.OnePassSignature{ + Version: opsVersion, SigType: sigType, Hash: hash, PubKeyAlgo: signer.PubKeyAlgo, KeyId: signer.KeyId, IsLast: true, } + if opsVersion == 6 { + ops.KeyFingerprint = signer.Fingerprint + salt, err = packet.SignatureSaltForHash(hash, config.Random()) + if err != nil { + return nil, err + } + ops.Salt = salt + } if err := ops.Serialize(payload); err != nil { return nil, err } @@ -310,19 +328,19 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit } if signer != nil { - h, wrappedHash, err := hashForSignature(hash, sigType) + h, wrappedHash, err := hashForSignature(hash, sigType, salt) if err != nil { return nil, err } metadata := &packet.LiteralData{ - Format: 't', + Format: 'u', FileName: hints.FileName, Time: epochSeconds, } if hints.IsBinary { metadata.Format = 'b' } - return signatureWriter{payload, literalData, hash, wrappedHash, h, signer, sigType, config, metadata}, nil + return signatureWriter{payload, literalData, hash, wrappedHash, h, salt, signer, sigType, config, metadata}, nil } return literalData, nil } @@ -380,15 +398,19 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys") } - sig := to[i].PrimaryIdentity().SelfSignature - if !sig.SEIPDv2 { + primarySelfSignature, _ := to[i].PrimarySelfSignature() + if primarySelfSignature == nil { + return nil, errors.InvalidArgumentError("entity without a self-signature") + } + + if !primarySelfSignature.SEIPDv2 { aeadSupported = false } - candidateCiphers = intersectPreferences(candidateCiphers, sig.PreferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, sig.PreferredHash) - candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, sig.PreferredCipherSuites) - candidateCompression = intersectPreferences(candidateCompression, sig.PreferredCompression) + candidateCiphers = intersectPreferences(candidateCiphers, primarySelfSignature.PreferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, primarySelfSignature.PreferredHash) + candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, primarySelfSignature.PreferredCipherSuites) + candidateCompression = intersectPreferences(candidateCompression, primarySelfSignature.PreferredCompression) } // In the event that the intersection of supported algorithms is empty we use the ones @@ -422,13 +444,19 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En } } - symKey := make([]byte, cipher.KeySize()) + var symKey []byte + if aeadSupported { + symKey = make([]byte, aeadCipherSuite.Cipher.KeySize()) + } else { + symKey = make([]byte, cipher.KeySize()) + } + if _, err := io.ReadFull(config.Random(), symKey); err != nil { return nil, err } for _, key := range encryptKeys { - if err := packet.SerializeEncryptedKey(keyWriter, key.PublicKey, cipher, symKey, config); err != nil { + if err := packet.SerializeEncryptedKeyAEAD(keyWriter, key.PublicKey, cipher, aeadSupported, symKey, config); err != nil { return nil, err } } @@ -465,13 +493,17 @@ func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Con hashToHashId(crypto.SHA3_512), } defaultHashes := candidateHashes[0:1] - 
preferredHashes := signed.PrimaryIdentity().SelfSignature.PreferredHash + primarySelfSignature, _ := signed.PrimarySelfSignature() + if primarySelfSignature == nil { + return nil, errors.StructuralError("signed entity has no self-signature") + } + preferredHashes := primarySelfSignature.PreferredHash if len(preferredHashes) == 0 { preferredHashes = defaultHashes } candidateHashes = intersectPreferences(candidateHashes, preferredHashes) if len(candidateHashes) == 0 { - return nil, errors.InvalidArgumentError("cannot sign because signing key shares no common algorithms with candidate hashes") + return nil, errors.StructuralError("cannot sign because signing key shares no common algorithms with candidate hashes") } return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config) @@ -486,6 +518,7 @@ type signatureWriter struct { hashType crypto.Hash wrappedHash hash.Hash h hash.Hash + salt []byte // v6 only signer *packet.PrivateKey sigType packet.SignatureType config *packet.Config @@ -509,6 +542,10 @@ func (s signatureWriter) Close() error { sig.Hash = s.hashType sig.Metadata = s.metadata + if err := sig.SetSalt(s.salt); err != nil { + return err + } + if err := sig.Sign(s.h, s.signer, s.config); err != nil { return err } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go new file mode 100644 index 000000000..38afcc74f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go @@ -0,0 +1,221 @@ +package x25519 + +import ( + "crypto/sha256" + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" + "github.com/ProtonMail/go-crypto/openpgp/errors" + x25519lib "github.com/cloudflare/circl/dh/x25519" + "golang.org/x/crypto/hkdf" +) + +const ( + hkdfInfo = "OpenPGP X25519" + aes128KeySize = 16 + // The size of a public or private key in bytes. + KeySize = x25519lib.Size +) + +type PublicKey struct { + // Point represents the encoded elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Secret represents the secret of the private key. + Secret []byte +} + +// NewPrivateKey creates a new empty private key including the public key. +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Validate validates that the provided public key matches the private key. +func Validate(pk *PrivateKey) (err error) { + var expectedPublicKey, privateKey x25519lib.Key + subtle.ConstantTimeCopy(1, privateKey[:], pk.Secret) + x25519lib.KeyGen(&expectedPublicKey, &privateKey) + if subtle.ConstantTimeCompare(expectedPublicKey[:], pk.PublicKey.Point) == 0 { + return errors.KeyInvalidError("x25519: invalid key") + } + return nil +} + +// GenerateKey generates a new x25519 key pair. 
+func GenerateKey(rand io.Reader) (*PrivateKey, error) { + var privateKey, publicKey x25519lib.Key + privateKeyOut := new(PrivateKey) + err := generateKey(rand, &privateKey, &publicKey) + if err != nil { + return nil, err + } + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Secret = privateKey[:] + return privateKeyOut, nil +} + +func generateKey(rand io.Reader, privateKey *x25519lib.Key, publicKey *x25519lib.Key) error { + maxRounds := 10 + isZero := true + for round := 0; isZero; round++ { + if round == maxRounds { + return errors.InvalidArgumentError("x25519: zero keys only, randomness source might be corrupt") + } + _, err := io.ReadFull(rand, privateKey[:]) + if err != nil { + return err + } + isZero = constantTimeIsZero(privateKey[:]) + } + x25519lib.KeyGen(publicKey, privateKey) + return nil +} + +// Encrypt encrypts a sessionKey with x25519 according to +// the OpenPGP crypto refresh specification section 5.1.6. The function assumes that the +// sessionKey has the correct format and padding according to the specification. +func Encrypt(rand io.Reader, publicKey *PublicKey, sessionKey []byte) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, err error) { + var ephemeralPrivate, ephemeralPublic, staticPublic, shared x25519lib.Key + // Check that the input static public key has 32 bytes + if len(publicKey.Point) != KeySize { + err = errors.KeyInvalidError("x25519: the public key has the wrong size") + return + } + copy(staticPublic[:], publicKey.Point) + // Generate ephemeral keyPair + err = generateKey(rand, &ephemeralPrivate, &ephemeralPublic) + if err != nil { + return + } + // Compute shared key + ok := x25519lib.Shared(&shared, &ephemeralPrivate, &staticPublic) + if !ok { + err = errors.KeyInvalidError("x25519: the public key is a low order point") + return + } + // Derive the encryption key from the shared secret + encryptionKey := applyHKDF(ephemeralPublic[:], publicKey.Point[:], shared[:]) + ephemeralPublicKey = &PublicKey{ + Point: ephemeralPublic[:], + } + // Encrypt the sessionKey with aes key wrapping + encryptedSessionKey, err = keywrap.Wrap(encryptionKey, sessionKey) + return +} + +// Decrypt decrypts a session key stored in ciphertext with the provided x25519 +// private key and ephemeral public key. 
+func Decrypt(privateKey *PrivateKey, ephemeralPublicKey *PublicKey, ciphertext []byte) (encodedSessionKey []byte, err error) { + var ephemeralPublic, staticPrivate, shared x25519lib.Key + // Check that the input ephemeral public key has 32 bytes + if len(ephemeralPublicKey.Point) != KeySize { + err = errors.KeyInvalidError("x25519: the public key has the wrong size") + return + } + copy(ephemeralPublic[:], ephemeralPublicKey.Point) + subtle.ConstantTimeCopy(1, staticPrivate[:], privateKey.Secret) + // Compute shared key + ok := x25519lib.Shared(&shared, &staticPrivate, &ephemeralPublic) + if !ok { + err = errors.KeyInvalidError("x25519: the ephemeral public key is a low order point") + return + } + // Derive the encryption key from the shared secret + encryptionKey := applyHKDF(ephemeralPublicKey.Point[:], privateKey.PublicKey.Point[:], shared[:]) + // Decrypt the session key with aes key wrapping + encodedSessionKey, err = keywrap.Unwrap(encryptionKey, ciphertext) + return +} + +func applyHKDF(ephemeralPublicKey []byte, publicKey []byte, sharedSecret []byte) []byte { + inputKey := make([]byte, 3*KeySize) + // ephemeral public key | recipient public key | shared secret + subtle.ConstantTimeCopy(1, inputKey[:KeySize], ephemeralPublicKey) + subtle.ConstantTimeCopy(1, inputKey[KeySize:2*KeySize], publicKey) + subtle.ConstantTimeCopy(1, inputKey[2*KeySize:], sharedSecret) + hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, []byte(hkdfInfo)) + encryptionKey := make([]byte, aes128KeySize) + _, _ = io.ReadFull(hkdfReader, encryptionKey) + return encryptionKey +} + +func constantTimeIsZero(bytes []byte) bool { + isZero := byte(0) + for _, b := range bytes { + isZero |= b + } + return isZero == 0 +} + +// ENCODING/DECODING ciphertexts: + +// EncodeFieldsLength returns the length of the ciphertext encoding +// given the encrypted session key. +func EncodedFieldsLength(encryptedSessionKey []byte, v6 bool) int { + lenCipherFunction := 0 + if !v6 { + lenCipherFunction = 1 + } + return KeySize + 1 + len(encryptedSessionKey) + lenCipherFunction +} + +// EncodeField encodes x25519 session key encryption fields as +// ephemeral x25519 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey +// and writes it to writer. +func EncodeFields(writer io.Writer, ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, v6 bool) (err error) { + lenAlgorithm := 0 + if !v6 { + lenAlgorithm = 1 + } + if _, err = writer.Write(ephemeralPublicKey.Point); err != nil { + return err + } + if _, err = writer.Write([]byte{byte(len(encryptedSessionKey) + lenAlgorithm)}); err != nil { + return err + } + if !v6 { + if _, err = writer.Write([]byte{cipherFunction}); err != nil { + return err + } + } + _, err = writer.Write(encryptedSessionKey) + return err +} + +// DecodeField decodes a x25519 session key encryption as +// ephemeral x25519 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey. +func DecodeFields(reader io.Reader, v6 bool) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, err error) { + var buf [1]byte + ephemeralPublicKey = &PublicKey{ + Point: make([]byte, KeySize), + } + // 32 octets representing an ephemeral x25519 public key. + if _, err = io.ReadFull(reader, ephemeralPublicKey.Point); err != nil { + return nil, nil, 0, err + } + // A one-octet size of the following fields. 
+ if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + followingLen := buf[0] + // The one-octet algorithm identifier, if it was passed (in the case of a v3 PKESK packet). + if !v6 { + if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + cipherFunction = buf[0] + followingLen -= 1 + } + // The encrypted session key. + encryptedSessionKey = make([]byte, followingLen) + if _, err = io.ReadFull(reader, encryptedSessionKey); err != nil { + return nil, nil, 0, err + } + return ephemeralPublicKey, encryptedSessionKey, cipherFunction, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go new file mode 100644 index 000000000..65a082dab --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go @@ -0,0 +1,229 @@ +package x448 + +import ( + "crypto/sha512" + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" + "github.com/ProtonMail/go-crypto/openpgp/errors" + x448lib "github.com/cloudflare/circl/dh/x448" + "golang.org/x/crypto/hkdf" +) + +const ( + hkdfInfo = "OpenPGP X448" + aes256KeySize = 32 + // The size of a public or private key in bytes. + KeySize = x448lib.Size +) + +type PublicKey struct { + // Point represents the encoded elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Secret represents the secret of the private key. + Secret []byte +} + +// NewPrivateKey creates a new empty private key including the public key. +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Validate validates that the provided public key matches +// the private key. +func Validate(pk *PrivateKey) (err error) { + var expectedPublicKey, privateKey x448lib.Key + subtle.ConstantTimeCopy(1, privateKey[:], pk.Secret) + x448lib.KeyGen(&expectedPublicKey, &privateKey) + if subtle.ConstantTimeCompare(expectedPublicKey[:], pk.PublicKey.Point) == 0 { + return errors.KeyInvalidError("x448: invalid key") + } + return nil +} + +// GenerateKey generates a new x448 key pair. +func GenerateKey(rand io.Reader) (*PrivateKey, error) { + var privateKey, publicKey x448lib.Key + privateKeyOut := new(PrivateKey) + err := generateKey(rand, &privateKey, &publicKey) + if err != nil { + return nil, err + } + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Secret = privateKey[:] + return privateKeyOut, nil +} + +func generateKey(rand io.Reader, privateKey *x448lib.Key, publicKey *x448lib.Key) error { + maxRounds := 10 + isZero := true + for round := 0; isZero; round++ { + if round == maxRounds { + return errors.InvalidArgumentError("x448: zero keys only, randomness source might be corrupt") + } + _, err := io.ReadFull(rand, privateKey[:]) + if err != nil { + return err + } + isZero = constantTimeIsZero(privateKey[:]) + } + x448lib.KeyGen(publicKey, privateKey) + return nil +} + +// Encrypt encrypts a sessionKey with x448 according to +// the OpenPGP crypto refresh specification section 5.1.7. The function assumes that the +// sessionKey has the correct format and padding according to the specification. +func Encrypt(rand io.Reader, publicKey *PublicKey, sessionKey []byte) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, err error) { + var ephemeralPrivate, ephemeralPublic, staticPublic, shared x448lib.Key + // Check that the input static public key has 56 bytes. 
+ if len(publicKey.Point) != KeySize { + err = errors.KeyInvalidError("x448: the public key has the wrong size") + return nil, nil, err + } + copy(staticPublic[:], publicKey.Point) + // Generate ephemeral keyPair. + if err = generateKey(rand, &ephemeralPrivate, &ephemeralPublic); err != nil { + return nil, nil, err + } + // Compute shared key. + ok := x448lib.Shared(&shared, &ephemeralPrivate, &staticPublic) + if !ok { + err = errors.KeyInvalidError("x448: the public key is a low order point") + return nil, nil, err + } + // Derive the encryption key from the shared secret. + encryptionKey := applyHKDF(ephemeralPublic[:], publicKey.Point[:], shared[:]) + ephemeralPublicKey = &PublicKey{ + Point: ephemeralPublic[:], + } + // Encrypt the sessionKey with aes key wrapping. + encryptedSessionKey, err = keywrap.Wrap(encryptionKey, sessionKey) + if err != nil { + return nil, nil, err + } + return ephemeralPublicKey, encryptedSessionKey, nil +} + +// Decrypt decrypts a session key stored in ciphertext with the provided x448 +// private key and ephemeral public key. +func Decrypt(privateKey *PrivateKey, ephemeralPublicKey *PublicKey, ciphertext []byte) (encodedSessionKey []byte, err error) { + var ephemeralPublic, staticPrivate, shared x448lib.Key + // Check that the input ephemeral public key has 56 bytes. + if len(ephemeralPublicKey.Point) != KeySize { + err = errors.KeyInvalidError("x448: the public key has the wrong size") + return nil, err + } + copy(ephemeralPublic[:], ephemeralPublicKey.Point) + subtle.ConstantTimeCopy(1, staticPrivate[:], privateKey.Secret) + // Compute shared key. + ok := x448lib.Shared(&shared, &staticPrivate, &ephemeralPublic) + if !ok { + err = errors.KeyInvalidError("x448: the ephemeral public key is a low order point") + return nil, err + } + // Derive the encryption key from the shared secret. + encryptionKey := applyHKDF(ephemeralPublicKey.Point[:], privateKey.PublicKey.Point[:], shared[:]) + // Decrypt the session key with aes key wrapping. + encodedSessionKey, err = keywrap.Unwrap(encryptionKey, ciphertext) + if err != nil { + return nil, err + } + return encodedSessionKey, nil +} + +func applyHKDF(ephemeralPublicKey []byte, publicKey []byte, sharedSecret []byte) []byte { + inputKey := make([]byte, 3*KeySize) + // ephemeral public key | recipient public key | shared secret. + subtle.ConstantTimeCopy(1, inputKey[:KeySize], ephemeralPublicKey) + subtle.ConstantTimeCopy(1, inputKey[KeySize:2*KeySize], publicKey) + subtle.ConstantTimeCopy(1, inputKey[2*KeySize:], sharedSecret) + hkdfReader := hkdf.New(sha512.New, inputKey, []byte{}, []byte(hkdfInfo)) + encryptionKey := make([]byte, aes256KeySize) + _, _ = io.ReadFull(hkdfReader, encryptionKey) + return encryptionKey +} + +func constantTimeIsZero(bytes []byte) bool { + isZero := byte(0) + for _, b := range bytes { + isZero |= b + } + return isZero == 0 +} + +// ENCODING/DECODING ciphertexts: + +// EncodeFieldsLength returns the length of the ciphertext encoding +// given the encrypted session key. +func EncodedFieldsLength(encryptedSessionKey []byte, v6 bool) int { + lenCipherFunction := 0 + if !v6 { + lenCipherFunction = 1 + } + return KeySize + 1 + len(encryptedSessionKey) + lenCipherFunction +} + +// EncodeField encodes x448 session key encryption fields as +// ephemeral x448 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey +// and writes it to writer. 
+func EncodeFields(writer io.Writer, ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, v6 bool) (err error) { + lenAlgorithm := 0 + if !v6 { + lenAlgorithm = 1 + } + if _, err = writer.Write(ephemeralPublicKey.Point); err != nil { + return err + } + if _, err = writer.Write([]byte{byte(len(encryptedSessionKey) + lenAlgorithm)}); err != nil { + return err + } + if !v6 { + if _, err = writer.Write([]byte{cipherFunction}); err != nil { + return err + } + } + if _, err = writer.Write(encryptedSessionKey); err != nil { + return err + } + return nil +} + +// DecodeField decodes a x448 session key encryption as +// ephemeral x448 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey. +func DecodeFields(reader io.Reader, v6 bool) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, err error) { + var buf [1]byte + ephemeralPublicKey = &PublicKey{ + Point: make([]byte, KeySize), + } + // 56 octets representing an ephemeral x448 public key. + if _, err = io.ReadFull(reader, ephemeralPublicKey.Point); err != nil { + return nil, nil, 0, err + } + // A one-octet size of the following fields. + if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + followingLen := buf[0] + // The one-octet algorithm identifier, if it was passed (in the case of a v3 PKESK packet). + if !v6 { + if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + cipherFunction = buf[0] + followingLen -= 1 + } + // The encrypted session key. + encryptedSessionKey = make([]byte, followingLen) + if _, err = io.ReadFull(reader, encryptedSessionKey); err != nil { + return nil, nil, 0, err + } + return ephemeralPublicKey, encryptedSessionKey, cipherFunction, nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md new file mode 100644 index 000000000..cb1252b53 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md @@ -0,0 +1,209 @@ +# Changelog # +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/) +and this project adheres to [Semantic Versioning](http://semver.org/). + +## [Unreleased] ## + +## [0.3.6] - 2024-12-17 ## + +### Compatibility ### +- The minimum Go version requirement for `filepath-securejoin` is now Go 1.18 + (we use generics internally). + + For reference, `filepath-securejoin@v0.3.0` somewhat-arbitrarily bumped the + Go version requirement to 1.21. + + While we did make some use of Go 1.21 stdlib features (and in principle Go + versions <= 1.21 are no longer even supported by upstream anymore), some + downstreams have complained that the version bump has meant that they have to + do workarounds when backporting fixes that use the new `filepath-securejoin` + API onto old branches. This is not an ideal situation, but since using this + library is probably better for most downstreams than a hand-rolled + workaround, we now have compatibility shims that allow us to build on older + Go versions. +- Lower minimum version requirement for `golang.org/x/sys` to `v0.18.0` (we + need the wrappers for `fsconfig(2)`), which should also make backporting + patches to older branches easier. + +## [0.3.5] - 2024-12-06 ## + +### Fixed ### +- `MkdirAll` will now no longer return an `EEXIST` error if two racing + processes are creating the same directory. 
We will still verify that the path + is a directory, but this will avoid spurious errors when multiple threads or + programs are trying to `MkdirAll` the same path. opencontainers/runc#4543 + +## [0.3.4] - 2024-10-09 ## + +### Fixed ### +- Previously, some testing mocks we had resulted in us doing `import "testing"` + in non-`_test.go` code, which made some downstreams like Kubernetes unhappy. + This has been fixed. (#32) + +## [0.3.3] - 2024-09-30 ## + +### Fixed ### +- The mode and owner verification logic in `MkdirAll` has been removed. This + was originally intended to protect against some theoretical attacks but upon + further consideration these protections don't actually buy us anything and + they were causing spurious errors with more complicated filesystem setups. +- The "is the created directory empty" logic in `MkdirAll` has also been + removed. This was not causing us issues yet, but some pseudofilesystems (such + as `cgroup`) create non-empty directories and so this logic would've been + wrong for such cases. + +## [0.3.2] - 2024-09-13 ## + +### Changed ### +- Passing the `S_ISUID` or `S_ISGID` modes to `MkdirAllInRoot` will now return + an explicit error saying that those bits are ignored by `mkdirat(2)`. In the + past a different error was returned, but since the silent ignoring behaviour + is codified in the man pages a more explicit error seems apt. While silently + ignoring these bits would be the most compatible option, it could lead to + users thinking their code sets these bits when it doesn't. Programs that need + to deal with compatibility can mask the bits themselves. (#23, #25) + +### Fixed ### +- If a directory has `S_ISGID` set, then all child directories will have + `S_ISGID` set when created and a different gid will be used for any inode + created under the directory. Previously, the "expected owner and mode" + validation in `securejoin.MkdirAll` did not correctly handle this. We now + correctly handle this case. (#24, #25) + +## [0.3.1] - 2024-07-23 ## + +### Changed ### +- By allowing `Open(at)InRoot` to opt-out of the extra work done by `MkdirAll` + to do the necessary "partial lookups", `Open(at)InRoot` now does less work + for both implementations (resulting in a many-fold decrease in the number of + operations for `openat2`, and a modest improvement for non-`openat2`) and is + far more guaranteed to match the correct `openat2(RESOLVE_IN_ROOT)` + behaviour. +- We now use `readlinkat(fd, "")` where possible. For `Open(at)InRoot` this + effectively just means that we no longer risk getting spurious errors during + rename races. However, for our hardened procfs handler, this in theory should + prevent mount attacks from tricking us when doing magic-link readlinks (even + when using the unsafe host `/proc` handle). Unfortunately `Reopen` is still + potentially vulnerable to those kinds of somewhat-esoteric attacks. + + Technically this [will only work on post-2.6.39 kernels][linux-readlinkat-emptypath] + but it seems incredibly unlikely anyone is using `filepath-securejoin` on a + pre-2011 kernel. + +### Fixed ### +- Several improvements were made to the errors returned by `Open(at)InRoot` and + `MkdirAll` when dealing with invalid paths under the emulated (ie. + non-`openat2`) implementation. Previously, some paths would return the wrong + error (`ENOENT` when the last component was a non-directory), and other paths + would be returned as though they were acceptable (trailing-slash components + after a non-directory would be ignored by `Open(at)InRoot`). 
+ + These changes were done to match `openat2`'s behaviour and purely is a + consistency fix (most users are going to be using `openat2` anyway). + +[linux-readlinkat-emptypath]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=65cfc6722361570bfe255698d9cd4dccaf47570d + +## [0.3.0] - 2024-07-11 ## + +### Added ### +- A new set of `*os.File`-based APIs have been added. These are adapted from + [libpathrs][] and we strongly suggest using them if possible (as they provide + far more protection against attacks than `SecureJoin`): + + - `Open(at)InRoot` resolves a path inside a rootfs and returns an `*os.File` + handle to the path. Note that the handle returned is an `O_PATH` handle, + which cannot be used for reading or writing (as well as some other + operations -- [see open(2) for more details][open.2]) + + - `Reopen` takes an `O_PATH` file handle and safely re-opens it to upgrade + it to a regular handle. This can also be used with non-`O_PATH` handles, + but `O_PATH` is the most obvious application. + + - `MkdirAll` is an implementation of `os.MkdirAll` that is safe to use to + create a directory tree within a rootfs. + + As these are new APIs, they may change in the future. However, they should be + safe to start migrating to as we have extensive tests ensuring they behave + correctly and are safe against various races and other attacks. + +[libpathrs]: https://github.com/openSUSE/libpathrs +[open.2]: https://www.man7.org/linux/man-pages/man2/open.2.html + +## [0.2.5] - 2024-05-03 ## + +### Changed ### +- Some minor changes were made to how lexical components (like `..` and `.`) + are handled during path generation in `SecureJoin`. There is no behaviour + change as a result of this fix (the resulting paths are the same). + +### Fixed ### +- The error returned when we hit a symlink loop now references the correct + path. (#10) + +## [0.2.4] - 2023-09-06 ## + +### Security ### +- This release fixes a potential security issue in filepath-securejoin when + used on Windows ([GHSA-6xv5-86q9-7xr8][], which could be used to generate + paths outside of the provided rootfs in certain cases), as well as improving + the overall behaviour of filepath-securejoin when dealing with Windows paths + that contain volume names. Thanks to Paulo Gomes for discovering and fixing + these issues. + +### Fixed ### +- Switch to GitHub Actions for CI so we can test on Windows as well as Linux + and MacOS. + +[GHSA-6xv5-86q9-7xr8]: https://github.com/advisories/GHSA-6xv5-86q9-7xr8 + +## [0.2.3] - 2021-06-04 ## + +### Changed ### +- Switch to Go 1.13-style `%w` error wrapping, letting us drop the dependency + on `github.com/pkg/errors`. + +## [0.2.2] - 2018-09-05 ## + +### Changed ### +- Use `syscall.ELOOP` as the base error for symlink loops, rather than our own + (internal) error. This allows callers to more easily use `errors.Is` to check + for this case. + +## [0.2.1] - 2018-09-05 ## + +### Fixed ### +- Use our own `IsNotExist` implementation, which lets us handle `ENOTDIR` + properly within `SecureJoin`. + +## [0.2.0] - 2017-07-19 ## + +We now have 100% test coverage! + +### Added ### +- Add a `SecureJoinVFS` API that can be used for mocking (as we do in our new + tests) or for implementing custom handling of lookup operations (such as for + rootless containers, where work is necessary to access directories with weird + modes because we don't have `CAP_DAC_READ_SEARCH` or `CAP_DAC_OVERRIDE`). 
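
As a rough illustration of the mocking use-case this entry mentions, the sketch below assumes the `VFS` interface exposes only the two methods `join.go` calls (`Lstat` and `Readlink`); the fake filesystem, its paths, and the printed result are purely hypothetical and not part of the vendored library:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"

	securejoin "github.com/cyphar/filepath-securejoin"
)

// fakeFileInfo is a minimal os.FileInfo for the fake VFS below.
type fakeFileInfo struct {
	name string
	mode os.FileMode
}

func (fi fakeFileInfo) Name() string       { return fi.name }
func (fi fakeFileInfo) Size() int64        { return 0 }
func (fi fakeFileInfo) Mode() os.FileMode  { return fi.mode }
func (fi fakeFileInfo) ModTime() time.Time { return time.Time{} }
func (fi fakeFileInfo) IsDir() bool        { return fi.mode.IsDir() }
func (fi fakeFileInfo) Sys() any           { return nil }

// fakeVFS pretends that /root/link is an absolute symlink to /etc and that
// every other path is a plain directory. No real filesystem is touched.
type fakeVFS struct{}

func (fakeVFS) Lstat(path string) (os.FileInfo, error) {
	if filepath.Clean(path) == "/root/link" {
		return fakeFileInfo{name: "link", mode: os.ModeSymlink}, nil
	}
	return fakeFileInfo{name: filepath.Base(path), mode: os.ModeDir | 0o755}, nil
}

func (fakeVFS) Readlink(path string) (string, error) {
	if filepath.Clean(path) == "/root/link" {
		return "/etc", nil
	}
	return "", fmt.Errorf("not a symlink: %s", path)
}

func main() {
	// The absolute symlink target is re-resolved under the root, so the
	// joined path is expected to stay inside /root (here: /root/etc/passwd).
	path, err := securejoin.SecureJoinVFS("/root", "link/passwd", fakeVFS{})
	fmt.Println(path, err)
}
```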
+ +## 0.1.0 - 2017-07-19 + +This is our first release of `github.com/cyphar/filepath-securejoin`, +containing a full implementation with a coverage of 93.5% (the only missing +cases are the error cases, which are hard to mocktest at the moment). + +[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...HEAD +[0.3.6]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.5...v0.3.6 +[0.3.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.4...v0.3.5 +[0.3.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.3...v0.3.4 +[0.3.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.2...v0.3.3 +[0.3.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.1...v0.3.2 +[0.3.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.0...v0.3.1 +[0.3.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.5...v0.3.0 +[0.2.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.4...v0.2.5 +[0.2.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.3...v0.2.4 +[0.2.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.2...v0.2.3 +[0.2.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.1...v0.2.2 +[0.2.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.0...v0.2.1 +[0.2.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.1.0...v0.2.0 diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE index bec842f29..cb1ab88da 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/LICENSE +++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE @@ -1,5 +1,5 @@ Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. -Copyright (C) 2017 SUSE LLC. All rights reserved. +Copyright (C) 2017-2024 SUSE LLC. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md index 4eca0f235..eaeb53fcd 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/README.md +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -1,32 +1,26 @@ ## `filepath-securejoin` ## +[![Go Documentation](https://pkg.go.dev/badge/github.com/cyphar/filepath-securejoin.svg)](https://pkg.go.dev/github.com/cyphar/filepath-securejoin) [![Build Status](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml/badge.svg)](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml) -An implementation of `SecureJoin`, a [candidate for inclusion in the Go -standard library][go#20126]. The purpose of this function is to be a "secure" -alternative to `filepath.Join`, and in particular it provides certain -guarantees that are not provided by `filepath.Join`. +### Old API ### -> **NOTE**: This code is *only* safe if you are not at risk of other processes -> modifying path components after you've used `SecureJoin`. If it is possible -> for a malicious process to modify path components of the resolved path, then -> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There -> are some Linux kernel patches I'm working on which might allow for a better -> solution.][lwn-obeneath] -> -> In addition, with a slightly modified API it might be possible to use -> `O_PATH` and verify that the opened path is actually the resolved one -- but -> I have not done that yet. 
I might add it in the future as a helper function -> to help users verify the path (we can't just return `/proc/self/fd/` -> because that doesn't always work transparently for all users). +This library was originally just an implementation of `SecureJoin` which was +[intended to be included in the Go standard library][go#20126] as a safer +`filepath.Join` that would restrict the path lookup to be inside a root +directory. -This is the function prototype: +The implementation was based on code that existed in several container +runtimes. Unfortunately, this API is **fundamentally unsafe** against attackers +that can modify path components after `SecureJoin` returns and before the +caller uses the path, allowing for some fairly trivial TOCTOU attacks. -```go -func SecureJoin(root, unsafePath string) (string, error) -``` +`SecureJoin` (and `SecureJoinVFS`) are still provided by this library to +support legacy users, but new users are strongly suggested to avoid using +`SecureJoin` and instead use the [new api](#new-api) or switch to +[libpathrs][libpathrs]. -This library **guarantees** the following: +With the above limitations in mind, this library guarantees the following: * If no error is set, the resulting string **must** be a child path of `root` and will not contain any symlink path components (they will all be @@ -47,7 +41,7 @@ This library **guarantees** the following: A (trivial) implementation of this function on GNU/Linux systems could be done with the following (note that this requires root privileges and is far more opaque than the implementation in this library, and also requires that -`readlink` is inside the `root` path): +`readlink` is inside the `root` path and is trustworthy): ```go package securejoin @@ -70,9 +64,105 @@ func SecureJoin(root, unsafePath string) (string, error) { } ``` -[lwn-obeneath]: https://lwn.net/Articles/767547/ +[libpathrs]: https://github.com/openSUSE/libpathrs [go#20126]: https://github.com/golang/go/issues/20126 +### New API ### + +While we recommend users switch to [libpathrs][libpathrs] as soon as it has a +stable release, some methods implemented by libpathrs have been ported to this +library to ease the transition. These APIs are only supported on Linux. + +These APIs are implemented such that `filepath-securejoin` will +opportunistically use certain newer kernel APIs that make these operations far +more secure. In particular: + +* All of the lookup operations will use [`openat2`][openat2.2] on new enough + kernels (Linux 5.6 or later) to restrict lookups through magic-links and + bind-mounts (for certain operations) and to make use of `RESOLVE_IN_ROOT` to + efficiently resolve symlinks within a rootfs. + +* The APIs provide hardening against a malicious `/proc` mount to either detect + or avoid being tricked by a `/proc` that is not legitimate. This is done + using [`openat2`][openat2.2] for all users, and privileged users will also be + further protected by using [`fsopen`][fsopen.2] and [`open_tree`][open_tree.2] + (Linux 5.2 or later). 
+ +[openat2.2]: https://www.man7.org/linux/man-pages/man2/openat2.2.html +[fsopen.2]: https://github.com/brauner/man-pages-md/blob/main/fsopen.md +[open_tree.2]: https://github.com/brauner/man-pages-md/blob/main/open_tree.md + +#### `OpenInRoot` #### + +```go +func OpenInRoot(root, unsafePath string) (*os.File, error) +func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) +func Reopen(handle *os.File, flags int) (*os.File, error) +``` + +`OpenInRoot` is a much safer version of + +```go +path, err := securejoin.SecureJoin(root, unsafePath) +file, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC) +``` + +that protects against various race attacks that could lead to serious security +issues, depending on the application. Note that the returned `*os.File` is an +`O_PATH` file descriptor, which is quite restricted. Callers will probably need +to use `Reopen` to get a more usable handle (this split is done to provide +useful features like PTY spawning and to avoid users accidentally opening bad +inodes that could cause a DoS). + +Callers need to be careful in how they use the returned `*os.File`. Usually it +is only safe to operate on the handle directly, and it is very easy to create a +security issue. [libpathrs][libpathrs] provides far more helpers to make using +these handles safer -- there is currently no plan to port them to +`filepath-securejoin`. + +`OpenatInRoot` is like `OpenInRoot` except that the root is provided using an +`*os.File`. This allows you to ensure that multiple `OpenatInRoot` (or +`MkdirAllHandle`) calls are operating on the same rootfs. + +> **NOTE**: Unlike `SecureJoin`, `OpenInRoot` will error out as soon as it hits +> a dangling symlink or non-existent path. This is in contrast to `SecureJoin` +> which treated non-existent components as though they were real directories, +> and would allow for partial resolution of dangling symlinks. These behaviours +> are at odds with how Linux treats non-existent paths and dangling symlinks, +> and so these are no longer allowed. + +#### `MkdirAll` #### + +```go +func MkdirAll(root, unsafePath string, mode int) error +func MkdirAllHandle(root *os.File, unsafePath string, mode int) (*os.File, error) +``` + +`MkdirAll` is a much safer version of + +```go +path, err := securejoin.SecureJoin(root, unsafePath) +err = os.MkdirAll(path, mode) +``` + +that protects against the same kinds of races that `OpenInRoot` protects +against. + +`MkdirAllHandle` is like `MkdirAll` except that the root is provided using an +`*os.File` (the reason for this is the same as with `OpenatInRoot`) and an +`*os.File` of the final created directory is returned (this directory is +guaranteed to be effectively identical to the directory created by +`MkdirAllHandle`, which is not possible to ensure by just using `OpenatInRoot` +after `MkdirAll`). + +> **NOTE**: Unlike `SecureJoin`, `MkdirAll` will error out as soon as it hits +> a dangling symlink or non-existent path. This is in contrast to `SecureJoin` +> which treated non-existent components as though they were real directories, +> and would allow for partial resolution of dangling symlinks. These behaviours +> are at odds with how Linux treats non-existent paths and dangling symlinks, +> and so these are no longer allowed. This means that `MkdirAll` will not +> create non-existent directories referenced by a dangling symlink. 
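
For orientation, here is a minimal, illustrative sketch of how the new API described above fits together, using only the signatures quoted in this README. The rootfs path `/srv/rootfs` is a placeholder and error handling is reduced to panics for brevity:

```go
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
	"golang.org/x/sys/unix"
)

func main() {
	root := "/srv/rootfs" // placeholder: an untrusted container rootfs

	// Safely create a directory tree inside the rootfs (analogous to
	// os.MkdirAll, but protected against symlink swaps under the root).
	if err := securejoin.MkdirAll(root, "etc/conf.d", 0o755); err != nil {
		panic(err)
	}

	// Resolve the path inside the rootfs. The returned handle is O_PATH,
	// so it cannot be read from or written to directly.
	handle, err := securejoin.OpenInRoot(root, "etc/conf.d")
	if err != nil {
		panic(err)
	}
	defer handle.Close()

	// Upgrade the O_PATH handle into a regular directory handle.
	dir, err := securejoin.Reopen(handle, unix.O_RDONLY|unix.O_DIRECTORY|unix.O_CLOEXEC)
	if err != nil {
		panic(err)
	}
	defer dir.Close()

	names, err := dir.Readdirnames(-1)
	if err != nil {
		panic(err)
	}
	fmt.Println(names)
}
```

The split between the `O_PATH` lookup and `Reopen` mirrors the design note above: the resolution and the actual open are separate steps, so the caller decides which open flags are safe to apply to the resolved inode.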
+ ### License ### The license of this project is the same as Go, which is a BSD 3-clause license diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index abd410582..449d7e73a 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.2.4 +0.3.6 diff --git a/vendor/github.com/cyphar/filepath-securejoin/doc.go b/vendor/github.com/cyphar/filepath-securejoin/doc.go new file mode 100644 index 000000000..1ec7d065e --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/doc.go @@ -0,0 +1,39 @@ +// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +// Copyright (C) 2017-2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package securejoin implements a set of helpers to make it easier to write Go +// code that is safe against symlink-related escape attacks. The primary idea +// is to let you resolve a path within a rootfs directory as if the rootfs was +// a chroot. +// +// securejoin has two APIs, a "legacy" API and a "modern" API. +// +// The legacy API is [SecureJoin] and [SecureJoinVFS]. These methods are +// **not** safe against race conditions where an attacker changes the +// filesystem after (or during) the [SecureJoin] operation. +// +// The new API is made up of [OpenInRoot] and [MkdirAll] (and derived +// functions). These are safe against racing attackers and have several other +// protections that are not provided by the legacy API. There are many more +// operations that most programs expect to be able to do safely, but we do not +// provide explicit support for them because we want to encourage users to +// switch to [libpathrs](https://github.com/openSUSE/libpathrs) which is a +// cross-language next-generation library that is entirely designed around +// operating on paths safely. +// +// securejoin has been used by several container runtimes (Docker, runc, +// Kubernetes, etc) for quite a few years as a de-facto standard for operating +// on container filesystem paths "safely". However, most users still use the +// legacy API which is unsafe against various attacks (there is a fairly long +// history of CVEs in dependent as a result). Users should switch to the modern +// API as soon as possible (or even better, switch to libpathrs). +// +// This project was initially intended to be included in the Go standard +// library, but [it was rejected](https://go.dev/issue/20126). There is now a +// [new Go proposal](https://go.dev/issue/67002) for a safe path resolution API +// that shares some of the goals of filepath-securejoin. However, that design +// is intended to work like `openat2(RESOLVE_BENEATH)` which does not fit the +// usecase of container runtimes and most system tools. +package securejoin diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go new file mode 100644 index 000000000..42452bbf9 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go @@ -0,0 +1,18 @@ +//go:build linux && go1.20 + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package securejoin + +import ( + "fmt" +) + +// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except +// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap) +// is only guaranteed to give you baseErr. +func wrapBaseError(baseErr, extraErr error) error { + return fmt.Errorf("%w: %w", extraErr, baseErr) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go new file mode 100644 index 000000000..e7adca3fd --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go @@ -0,0 +1,38 @@ +//go:build linux && !go1.20 + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "fmt" +) + +type wrappedError struct { + inner error + isError error +} + +func (err wrappedError) Is(target error) bool { + return err.isError == target +} + +func (err wrappedError) Unwrap() error { + return err.inner +} + +func (err wrappedError) Error() string { + return fmt.Sprintf("%v: %v", err.isError, err.inner) +} + +// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except +// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap) +// is only guaranteed to give you baseErr. +func wrapBaseError(baseErr, extraErr error) error { + return wrappedError{ + inner: baseErr, + isError: extraErr, + } +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go new file mode 100644 index 000000000..ddd6fa9a4 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go @@ -0,0 +1,32 @@ +//go:build linux && go1.21 + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "slices" + "sync" +) + +func slices_DeleteFunc[S ~[]E, E any](slice S, delFn func(E) bool) S { + return slices.DeleteFunc(slice, delFn) +} + +func slices_Contains[S ~[]E, E comparable](slice S, val E) bool { + return slices.Contains(slice, val) +} + +func slices_Clone[S ~[]E, E any](slice S) S { + return slices.Clone(slice) +} + +func sync_OnceValue[T any](f func() T) func() T { + return sync.OnceValue(f) +} + +func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { + return sync.OnceValues(f) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go new file mode 100644 index 000000000..f1e6fe7e7 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go @@ -0,0 +1,124 @@ +//go:build linux && !go1.21 + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "sync" +) + +// These are very minimal implementations of functions that appear in Go 1.21's +// stdlib, included so that we can build on older Go versions. Most are +// borrowed directly from the stdlib, and a few are modified to be "obviously +// correct" without needing to copy too many other helpers. 
+ +// clearSlice is equivalent to the builtin clear from Go 1.21. +// Copied from the Go 1.24 stdlib implementation. +func clearSlice[S ~[]E, E any](slice S) { + var zero E + for i := range slice { + slice[i] = zero + } +} + +// Copied from the Go 1.24 stdlib implementation. +func slices_IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { + return i + } + } + return -1 +} + +// Copied from the Go 1.24 stdlib implementation. +func slices_DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := slices_IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// Similar to the stdlib slices.Contains, except that we don't have +// slices.Index so we need to use slices.IndexFunc for this non-Func helper. +func slices_Contains[S ~[]E, E comparable](s S, v E) bool { + return slices_IndexFunc(s, func(e E) bool { return e == v }) >= 0 +} + +// Copied from the Go 1.24 stdlib implementation. +func slices_Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Copied from the Go 1.24 stdlib implementation. +func sync_OnceValue[T any](f func() T) func() T { + var ( + once sync.Once + valid bool + p any + result T + ) + g := func() { + defer func() { + p = recover() + if !valid { + panic(p) + } + }() + result = f() + f = nil + valid = true + } + return func() T { + once.Do(g) + if !valid { + panic(p) + } + return result + } +} + +// Copied from the Go 1.24 stdlib implementation. +func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { + var ( + once sync.Once + valid bool + p any + r1 T1 + r2 T2 + ) + g := func() { + defer func() { + p = recover() + if !valid { + panic(p) + } + }() + r1, r2 = f() + f = nil + valid = true + } + return func() (T1, T2) { + once.Do(g) + if !valid { + panic(p) + } + return r1, r2 + } +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go index aa32b85fb..e0ee3f2b5 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/join.go +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -1,17 +1,11 @@ // Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. -// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Copyright (C) 2017-2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package securejoin is an implementation of the hopefully-soon-to-be-included -// SecureJoin helper that is meant to be part of the "path/filepath" package. -// The purpose of this project is to provide a PoC implementation to make the -// SecureJoin proposal (https://github.com/golang/go/issues/20126) more -// tangible. package securejoin import ( - "bytes" "errors" "os" "path/filepath" @@ -19,26 +13,34 @@ import ( "syscall" ) +const maxSymlinkLimit = 255 + // IsNotExist tells you if err is an error that implies that either the path // accessed does not exist (or path components don't exist). This is -// effectively a more broad version of os.IsNotExist. +// effectively a more broad version of [os.IsNotExist]. 
func IsNotExist(err error) bool { // Check that it's not actually an ENOTDIR, which in some cases is a more // convoluted case of ENOENT (usually involving weird paths). return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT) } -// SecureJoinVFS joins the two given path components (similar to Join) except +// SecureJoinVFS joins the two given path components (similar to [filepath.Join]) except // that the returned path is guaranteed to be scoped inside the provided root // path (when evaluated). Any symbolic links in the path are evaluated with the // given root treated as the root of the filesystem, similar to a chroot. The -// filesystem state is evaluated through the given VFS interface (if nil, the -// standard os.* family of functions are used). +// filesystem state is evaluated through the given [VFS] interface (if nil, the +// standard [os].* family of functions are used). // // Note that the guarantees provided by this function only apply if the path // components in the returned string are not modified (in other words are not // replaced with symlinks on the filesystem) after this function has returned. -// Such a symlink race is necessarily out-of-scope of SecureJoin. +// Such a symlink race is necessarily out-of-scope of SecureJoinVFS. +// +// NOTE: Due to the above limitation, Linux users are strongly encouraged to +// use [OpenInRoot] instead, which does safely protect against these kinds of +// attacks. There is no way to solve this problem with SecureJoinVFS because +// the API is fundamentally wrong (you cannot return a "safe" path string and +// guarantee it won't be modified afterwards). // // Volume names in unsafePath are always discarded, regardless if they are // provided via direct input or when evaluating symlinks. Therefore: @@ -51,75 +53,73 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { } unsafePath = filepath.FromSlash(unsafePath) - var path bytes.Buffer - n := 0 - for unsafePath != "" { - if n > 255 { - return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP} + var ( + currentPath string + remainingPath = unsafePath + linksWalked int + ) + for remainingPath != "" { + if v := filepath.VolumeName(remainingPath); v != "" { + remainingPath = remainingPath[len(v):] } - if v := filepath.VolumeName(unsafePath); v != "" { - unsafePath = unsafePath[len(v):] - } - - // Next path component, p. - i := strings.IndexRune(unsafePath, filepath.Separator) - var p string - if i == -1 { - p, unsafePath = unsafePath, "" + // Get the next path component. + var part string + if i := strings.IndexRune(remainingPath, filepath.Separator); i == -1 { + part, remainingPath = remainingPath, "" } else { - p, unsafePath = unsafePath[:i], unsafePath[i+1:] + part, remainingPath = remainingPath[:i], remainingPath[i+1:] } - // Create a cleaned path, using the lexical semantics of /../a, to - // create a "scoped" path component which can safely be joined to fullP - // for evaluation. At this point, path.String() doesn't contain any - // symlink components. - cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p) - if cleanP == string(filepath.Separator) { - path.Reset() + // Apply the component lexically to the path we are building. + // currentPath does not contain any symlinks, and we are lexically + // dealing with a single component, so it's okay to do a filepath.Clean + // here. 
+ nextPath := filepath.Join(string(filepath.Separator), currentPath, part) + if nextPath == string(filepath.Separator) { + currentPath = "" continue } - fullP := filepath.Clean(root + cleanP) + fullPath := root + string(filepath.Separator) + nextPath // Figure out whether the path is a symlink. - fi, err := vfs.Lstat(fullP) + fi, err := vfs.Lstat(fullPath) if err != nil && !IsNotExist(err) { return "", err } // Treat non-existent path components the same as non-symlinks (we // can't do any better here). if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 { - path.WriteString(p) - path.WriteRune(filepath.Separator) + currentPath = nextPath continue } - // Only increment when we actually dereference a link. - n++ + // It's a symlink, so get its contents and expand it by prepending it + // to the yet-unparsed path. + linksWalked++ + if linksWalked > maxSymlinkLimit { + return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP} + } - // It's a symlink, expand it by prepending it to the yet-unparsed path. - dest, err := vfs.Readlink(fullP) + dest, err := vfs.Readlink(fullPath) if err != nil { return "", err } + remainingPath = dest + string(filepath.Separator) + remainingPath // Absolute symlinks reset any work we've already done. if filepath.IsAbs(dest) { - path.Reset() + currentPath = "" } - unsafePath = dest + string(filepath.Separator) + unsafePath } - // We have to clean path.String() here because it may contain '..' - // components that are entirely lexical, but would be misleading otherwise. - // And finally do a final clean to ensure that root is also lexically - // clean. - fullP := filepath.Clean(string(filepath.Separator) + path.String()) - return filepath.Clean(root + fullP), nil + // There should be no lexical components like ".." left in the path here, + // but for safety clean up the path before joining it to the root. + finalPath := filepath.Join(string(filepath.Separator), currentPath) + return filepath.Join(root, finalPath), nil } -// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library -// of functions as the VFS. If in doubt, use this function over SecureJoinVFS. +// SecureJoin is a wrapper around [SecureJoinVFS] that just uses the [os].* library +// of functions as the [VFS]. If in doubt, use this function over [SecureJoinVFS]. func SecureJoin(root, unsafePath string) (string, error) { return SecureJoinVFS(root, unsafePath, nil) } diff --git a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go new file mode 100644 index 000000000..be81e498d --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go @@ -0,0 +1,388 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "errors" + "fmt" + "os" + "path" + "path/filepath" + "strings" + + "golang.org/x/sys/unix" +) + +type symlinkStackEntry struct { + // (dir, remainingPath) is what we would've returned if the link didn't + // exist. This matches what openat2(RESOLVE_IN_ROOT) would return in + // this case. + dir *os.File + remainingPath string + // linkUnwalked is the remaining path components from the original + // Readlink which we have yet to walk. When this slice is empty, we + // drop the link from the stack. 
+ linkUnwalked []string +} + +func (se symlinkStackEntry) String() string { + return fmt.Sprintf("<%s>/%s [->%s]", se.dir.Name(), se.remainingPath, strings.Join(se.linkUnwalked, "/")) +} + +func (se symlinkStackEntry) Close() { + _ = se.dir.Close() +} + +type symlinkStack []*symlinkStackEntry + +func (s *symlinkStack) IsEmpty() bool { + return s == nil || len(*s) == 0 +} + +func (s *symlinkStack) Close() { + if s != nil { + for _, link := range *s { + link.Close() + } + // TODO: Switch to clear once we switch to Go 1.21. + *s = nil + } +} + +var ( + errEmptyStack = errors.New("[internal] stack is empty") + errBrokenSymlinkStack = errors.New("[internal error] broken symlink stack") +) + +func (s *symlinkStack) popPart(part string) error { + if s == nil || s.IsEmpty() { + // If there is nothing in the symlink stack, then the part was from the + // real path provided by the user, and this is a no-op. + return errEmptyStack + } + if part == "." { + // "." components are no-ops -- we drop them when doing SwapLink. + return nil + } + + tailEntry := (*s)[len(*s)-1] + + // Double-check that we are popping the component we expect. + if len(tailEntry.linkUnwalked) == 0 { + return fmt.Errorf("%w: trying to pop component %q of empty stack entry %s", errBrokenSymlinkStack, part, tailEntry) + } + headPart := tailEntry.linkUnwalked[0] + if headPart != part { + return fmt.Errorf("%w: trying to pop component %q but the last stack entry is %s (%q)", errBrokenSymlinkStack, part, tailEntry, headPart) + } + + // Drop the component, but keep the entry around in case we are dealing + // with a "tail-chained" symlink. + tailEntry.linkUnwalked = tailEntry.linkUnwalked[1:] + return nil +} + +func (s *symlinkStack) PopPart(part string) error { + if err := s.popPart(part); err != nil { + if errors.Is(err, errEmptyStack) { + // Skip empty stacks. + err = nil + } + return err + } + + // Clean up any of the trailing stack entries that are empty. + for lastGood := len(*s) - 1; lastGood >= 0; lastGood-- { + entry := (*s)[lastGood] + if len(entry.linkUnwalked) > 0 { + break + } + entry.Close() + (*s) = (*s)[:lastGood] + } + return nil +} + +func (s *symlinkStack) push(dir *os.File, remainingPath, linkTarget string) error { + if s == nil { + return nil + } + // Split the link target and clean up any "" parts. + linkTargetParts := slices_DeleteFunc( + strings.Split(linkTarget, "/"), + func(part string) bool { return part == "" || part == "." }) + + // Copy the directory so the caller doesn't close our copy. + dirCopy, err := dupFile(dir) + if err != nil { + return err + } + + // Add to the stack. + *s = append(*s, &symlinkStackEntry{ + dir: dirCopy, + remainingPath: remainingPath, + linkUnwalked: linkTargetParts, + }) + return nil +} + +func (s *symlinkStack) SwapLink(linkPart string, dir *os.File, remainingPath, linkTarget string) error { + // If we are currently inside a symlink resolution, remove the symlink + // component from the last symlink entry, but don't remove the entry even + // if it's empty. If we are a "tail-chained" symlink (a trailing symlink we + // hit during a symlink resolution) we need to keep the old symlink until + // we finish the resolution. + if err := s.popPart(linkPart); err != nil { + if !errors.Is(err, errEmptyStack) { + return err + } + // Push the component regardless of whether the stack was empty. 
+ } + return s.push(dir, remainingPath, linkTarget) +} + +func (s *symlinkStack) PopTopSymlink() (*os.File, string, bool) { + if s == nil || s.IsEmpty() { + return nil, "", false + } + tailEntry := (*s)[0] + *s = (*s)[1:] + return tailEntry.dir, tailEntry.remainingPath, true +} + +// partialLookupInRoot tries to lookup as much of the request path as possible +// within the provided root (a-la RESOLVE_IN_ROOT) and opens the final existing +// component of the requested path, returning a file handle to the final +// existing component and a string containing the remaining path components. +func partialLookupInRoot(root *os.File, unsafePath string) (*os.File, string, error) { + return lookupInRoot(root, unsafePath, true) +} + +func completeLookupInRoot(root *os.File, unsafePath string) (*os.File, error) { + handle, remainingPath, err := lookupInRoot(root, unsafePath, false) + if remainingPath != "" && err == nil { + // should never happen + err = fmt.Errorf("[bug] non-empty remaining path when doing a non-partial lookup: %q", remainingPath) + } + // lookupInRoot(partial=false) will always close the handle if an error is + // returned, so no need to double-check here. + return handle, err +} + +func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.File, _ string, _ error) { + unsafePath = filepath.ToSlash(unsafePath) // noop + + // This is very similar to SecureJoin, except that we operate on the + // components using file descriptors. We then return the last component we + // managed open, along with the remaining path components not opened. + + // Try to use openat2 if possible. + if hasOpenat2() { + return lookupOpenat2(root, unsafePath, partial) + } + + // Get the "actual" root path from /proc/self/fd. This is necessary if the + // root is some magic-link like /proc/$pid/root, in which case we want to + // make sure when we do checkProcSelfFdPath that we are using the correct + // root path. + logicalRootPath, err := procSelfFdReadlink(root) + if err != nil { + return nil, "", fmt.Errorf("get real root path: %w", err) + } + + currentDir, err := dupFile(root) + if err != nil { + return nil, "", fmt.Errorf("clone root fd: %w", err) + } + defer func() { + // If a handle is not returned, close the internal handle. + if Handle == nil { + _ = currentDir.Close() + } + }() + + // symlinkStack is used to emulate how openat2(RESOLVE_IN_ROOT) treats + // dangling symlinks. If we hit a non-existent path while resolving a + // symlink, we need to return the (dir, remainingPath) that we had when we + // hit the symlink (treating the symlink as though it were a regular file). + // The set of (dir, remainingPath) sets is stored within the symlinkStack + // and we add and remove parts when we hit symlink and non-symlink + // components respectively. We need a stack because of recursive symlinks + // (symlinks that contain symlink components in their target). + // + // Note that the stack is ONLY used for book-keeping. All of the actual + // path walking logic is still based on currentPath/remainingPath and + // currentDir (as in SecureJoin). + var symStack *symlinkStack + if partial { + symStack = new(symlinkStack) + defer symStack.Close() + } + + var ( + linksWalked int + currentPath string + remainingPath = unsafePath + ) + for remainingPath != "" { + // Save the current remaining path so if the part is not real we can + // return the path including the component. + oldRemainingPath := remainingPath + + // Get the next path component. 
+ var part string + if i := strings.IndexByte(remainingPath, '/'); i == -1 { + part, remainingPath = remainingPath, "" + } else { + part, remainingPath = remainingPath[:i], remainingPath[i+1:] + } + // If we hit an empty component, we need to treat it as though it is + // "." so that trailing "/" and "//" components on a non-directory + // correctly return the right error code. + if part == "" { + part = "." + } + + // Apply the component lexically to the path we are building. + // currentPath does not contain any symlinks, and we are lexically + // dealing with a single component, so it's okay to do a filepath.Clean + // here. + nextPath := path.Join("/", currentPath, part) + // If we logically hit the root, just clone the root rather than + // opening the part and doing all of the other checks. + if nextPath == "/" { + if err := symStack.PopPart(part); err != nil { + return nil, "", fmt.Errorf("walking into root with part %q failed: %w", part, err) + } + // Jump to root. + rootClone, err := dupFile(root) + if err != nil { + return nil, "", fmt.Errorf("clone root fd: %w", err) + } + _ = currentDir.Close() + currentDir = rootClone + currentPath = nextPath + continue + } + + // Try to open the next component. + nextDir, err := openatFile(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) + switch { + case err == nil: + st, err := nextDir.Stat() + if err != nil { + _ = nextDir.Close() + return nil, "", fmt.Errorf("stat component %q: %w", part, err) + } + + switch st.Mode() & os.ModeType { + case os.ModeSymlink: + // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See + // Linux commit 65cfc6722361 ("readlinkat(), fchownat() and + // fstatat() with empty relative pathnames"). + linkDest, err := readlinkatFile(nextDir, "") + // We don't need the handle anymore. + _ = nextDir.Close() + if err != nil { + return nil, "", err + } + + linksWalked++ + if linksWalked > maxSymlinkLimit { + return nil, "", &os.PathError{Op: "securejoin.lookupInRoot", Path: logicalRootPath + "/" + unsafePath, Err: unix.ELOOP} + } + + // Swap out the symlink's component for the link entry itself. + if err := symStack.SwapLink(part, currentDir, oldRemainingPath, linkDest); err != nil { + return nil, "", fmt.Errorf("walking into symlink %q failed: push symlink: %w", part, err) + } + + // Update our logical remaining path. + remainingPath = linkDest + "/" + remainingPath + // Absolute symlinks reset any work we've already done. + if path.IsAbs(linkDest) { + // Jump to root. + rootClone, err := dupFile(root) + if err != nil { + return nil, "", fmt.Errorf("clone root fd: %w", err) + } + _ = currentDir.Close() + currentDir = rootClone + currentPath = "/" + } + + default: + // If we are dealing with a directory, simply walk into it. + _ = currentDir.Close() + currentDir = nextDir + currentPath = nextPath + + // The part was real, so drop it from the symlink stack. + if err := symStack.PopPart(part); err != nil { + return nil, "", fmt.Errorf("walking into directory %q failed: %w", part, err) + } + + // If we are operating on a .., make sure we haven't escaped. + // We only have to check for ".." here because walking down + // into a regular component component cannot cause you to + // escape. This mirrors the logic in RESOLVE_IN_ROOT, except we + // have to check every ".." rather than only checking after a + // rename or mount on the system. + if part == ".." { + // Make sure the root hasn't moved. 
+ if err := checkProcSelfFdPath(logicalRootPath, root); err != nil { + return nil, "", fmt.Errorf("root path moved during lookup: %w", err) + } + // Make sure the path is what we expect. + fullPath := logicalRootPath + nextPath + if err := checkProcSelfFdPath(fullPath, currentDir); err != nil { + return nil, "", fmt.Errorf("walking into %q had unexpected result: %w", part, err) + } + } + } + + default: + if !partial { + return nil, "", err + } + // If there are any remaining components in the symlink stack, we + // are still within a symlink resolution and thus we hit a dangling + // symlink. So pretend that the first symlink in the stack we hit + // was an ENOENT (to match openat2). + if oldDir, remainingPath, ok := symStack.PopTopSymlink(); ok { + _ = currentDir.Close() + return oldDir, remainingPath, err + } + // We have hit a final component that doesn't exist, so we have our + // partial open result. Note that we have to use the OLD remaining + // path, since the lookup failed. + return currentDir, oldRemainingPath, err + } + } + + // If the unsafePath had a trailing slash, we need to make sure we try to + // do a relative "." open so that we will correctly return an error when + // the final component is a non-directory (to match openat2). In the + // context of openat2, a trailing slash and a trailing "/." are completely + // equivalent. + if strings.HasSuffix(unsafePath, "/") { + nextDir, err := openatFile(currentDir, ".", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) + if err != nil { + if !partial { + _ = currentDir.Close() + currentDir = nil + } + return currentDir, "", err + } + _ = currentDir.Close() + currentDir = nextDir + } + + // All of the components existed! + return currentDir, "", nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go new file mode 100644 index 000000000..5e559bb7a --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go @@ -0,0 +1,215 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "golang.org/x/sys/unix" +) + +var ( + errInvalidMode = errors.New("invalid permission mode") + errPossibleAttack = errors.New("possible attack detected") +) + +// MkdirAllHandle is equivalent to [MkdirAll], except that it is safer to use +// in two respects: +// +// - The caller provides the root directory as an *[os.File] (preferably O_PATH) +// handle. This means that the caller can be sure which root directory is +// being used. Note that this can be emulated by using /proc/self/fd/... as +// the root path with [os.MkdirAll]. +// +// - Once all of the directories have been created, an *[os.File] O_PATH handle +// to the directory at unsafePath is returned to the caller. This is done in +// an effectively-race-free way (an attacker would only be able to swap the +// final directory component), which is not possible to emulate with +// [MkdirAll]. +// +// In addition, the returned handle is obtained far more efficiently than doing +// a brand new lookup of unsafePath (such as with [SecureJoin] or openat2) after +// doing [MkdirAll]. If you intend to open the directory after creating it, you +// should use MkdirAllHandle. 
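A minimal usage sketch of the MkdirAllHandle API documented above (not part of the vendored change; the /srv/data root and the "a/b/c" subpath are assumed example values):

```go
//go:build linux

package main

import (
	"fmt"
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
	"golang.org/x/sys/unix"
)

func main() {
	// Open the root as an O_PATH directory handle so there is no ambiguity
	// about which tree the directories are created in.
	root, err := os.OpenFile("/srv/data", unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
	if err != nil {
		panic(err)
	}
	defer root.Close()

	// Create "a/b/c" beneath the root; mode takes plain Unix permission bits.
	dir, err := securejoin.MkdirAllHandle(root, "a/b/c", 0o755)
	if err != nil {
		panic(err)
	}
	defer dir.Close()

	fmt.Println("created:", dir.Name())
}
```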
+func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err error) { + // Make sure there are no os.FileMode bits set. + if mode&^0o7777 != 0 { + return nil, fmt.Errorf("%w for mkdir 0o%.3o", errInvalidMode, mode) + } + // On Linux, mkdirat(2) (and os.Mkdir) silently ignore the suid and sgid + // bits. We could also silently ignore them but since we have very few + // users it seems more prudent to return an error so users notice that + // these bits will not be set. + if mode&^0o1777 != 0 { + return nil, fmt.Errorf("%w for mkdir 0o%.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode) + } + + // Try to open as much of the path as possible. + currentDir, remainingPath, err := partialLookupInRoot(root, unsafePath) + defer func() { + if Err != nil { + _ = currentDir.Close() + } + }() + if err != nil && !errors.Is(err, unix.ENOENT) { + return nil, fmt.Errorf("find existing subpath of %q: %w", unsafePath, err) + } + + // If there is an attacker deleting directories as we walk into them, + // detect this proactively. Note this is guaranteed to detect if the + // attacker deleted any part of the tree up to currentDir. + // + // Once we walk into a dead directory, partialLookupInRoot would not be + // able to walk further down the tree (directories must be empty before + // they are deleted), and if the attacker has removed the entire tree we + // can be sure that anything that was originally inside a dead directory + // must also be deleted and thus is a dead directory in its own right. + // + // This is mostly a quality-of-life check, because mkdir will simply fail + // later if the attacker deletes the tree after this check. + if err := isDeadInode(currentDir); err != nil { + return nil, fmt.Errorf("finding existing subpath of %q: %w", unsafePath, err) + } + + // Re-open the path to match the O_DIRECTORY reopen loop later (so that we + // always return a non-O_PATH handle). We also check that we actually got a + // directory. + if reopenDir, err := Reopen(currentDir, unix.O_DIRECTORY|unix.O_CLOEXEC); errors.Is(err, unix.ENOTDIR) { + return nil, fmt.Errorf("cannot create subdirectories in %q: %w", currentDir.Name(), unix.ENOTDIR) + } else if err != nil { + return nil, fmt.Errorf("re-opening handle to %q: %w", currentDir.Name(), err) + } else { + _ = currentDir.Close() + currentDir = reopenDir + } + + remainingParts := strings.Split(remainingPath, string(filepath.Separator)) + if slices_Contains(remainingParts, "..") { + // The path contained ".." components after the end of the "real" + // components. We could try to safely resolve ".." here but that would + // add a bunch of extra logic for something that it's not clear even + // needs to be supported. So just return an error. + // + // If we do filepath.Clean(remainingPath) then we end up with the + // problem that ".." can erase a trailing dangling symlink and produce + // a path that doesn't quite match what the user asked for. + return nil, fmt.Errorf("%w: yet-to-be-created path %q contains '..' components", unix.ENOENT, remainingPath) + } + + // Make sure the mode doesn't have any type bits. + mode &^= unix.S_IFMT + + // Create the remaining components. + for _, part := range remainingParts { + switch part { + case "", ".": + // Skip over no-op paths. + continue + } + + // NOTE: mkdir(2) will not follow trailing symlinks, so we can safely + // create the final component without worrying about symlink-exchange + // attacks. 
+ // + // If we get -EEXIST, it's possible that another program created the + // directory at the same time as us. In that case, just continue on as + // if we created it (if the created inode is not a directory, the + // following open call will fail). + if err := unix.Mkdirat(int(currentDir.Fd()), part, uint32(mode)); err != nil && !errors.Is(err, unix.EEXIST) { + err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err} + // Make the error a bit nicer if the directory is dead. + if deadErr := isDeadInode(currentDir); deadErr != nil { + // TODO: Once we bump the minimum Go version to 1.20, we can use + // multiple %w verbs for this wrapping. For now we need to use a + // compatibility shim for older Go versions. + //err = fmt.Errorf("%w (%w)", err, deadErr) + err = wrapBaseError(err, deadErr) + } + return nil, err + } + + // Get a handle to the next component. O_DIRECTORY means we don't need + // to use O_PATH. + var nextDir *os.File + if hasOpenat2() { + nextDir, err = openat2File(currentDir, part, &unix.OpenHow{ + Flags: unix.O_NOFOLLOW | unix.O_DIRECTORY | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_NO_XDEV, + }) + } else { + nextDir, err = openatFile(currentDir, part, unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + } + if err != nil { + return nil, err + } + _ = currentDir.Close() + currentDir = nextDir + + // It's possible that the directory we just opened was swapped by an + // attacker. Unfortunately there isn't much we can do to protect + // against this, and MkdirAll's behaviour is that we will reuse + // existing directories anyway so the need to protect against this is + // incredibly limited (and arguably doesn't even deserve mention here). + // + // Ideally we might want to check that the owner and mode match what we + // would've created -- unfortunately, it is non-trivial to verify that + // the owner and mode of the created directory match. While plain Unix + // DAC rules seem simple enough to emulate, there are a bunch of other + // factors that can change the mode or owner of created directories + // (default POSIX ACLs, mount options like uid=1,gid=2,umask=0 on + // filesystems like vfat, etc etc). We used to try to verify this but + // it just lead to a series of spurious errors. + // + // We could also check that the directory is non-empty, but + // unfortunately some pseduofilesystems (like cgroupfs) create + // non-empty directories, which would result in different spurious + // errors. + } + return currentDir, nil +} + +// MkdirAll is a race-safe alternative to the [os.MkdirAll] function, +// where the new directory is guaranteed to be within the root directory (if an +// attacker can move directories from inside the root to outside the root, the +// created directory tree might be outside of the root but the key constraint +// is that at no point will we walk outside of the directory tree we are +// creating). +// +// Effectively, MkdirAll(root, unsafePath, mode) is equivalent to +// +// path, _ := securejoin.SecureJoin(root, unsafePath) +// err := os.MkdirAll(path, mode) +// +// But is much safer. The above implementation is unsafe because if an attacker +// can modify the filesystem tree between [SecureJoin] and [os.MkdirAll], it is +// possible for MkdirAll to resolve unsafe symlink components and create +// directories outside of the root. 
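To make the comparison above concrete, here is a small hedged sketch of the one-call replacement for the racy SecureJoin + os.MkdirAll pattern; the helper name ensureSubdir is invented for illustration:

```go
//go:build linux

package example

import securejoin "github.com/cyphar/filepath-securejoin"

// ensureSubdir creates userPath beneath root in a single call. The commented
// two-step version leaves a window in which a hostile filesystem layout can
// redirect the create outside of root.
func ensureSubdir(root, userPath string) error {
	// path, _ := securejoin.SecureJoin(root, userPath)
	// return os.MkdirAll(path, 0o755) // racy between the two calls
	return securejoin.MkdirAll(root, userPath, 0o755) // mode uses Unix permission bits
}
```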
+// +// If you plan to open the directory after you have created it or want to use +// an open directory handle as the root, you should use [MkdirAllHandle] instead. +// This function is a wrapper around [MkdirAllHandle]. +// +// NOTE: The mode argument must be set the unix mode bits (unix.S_I...), not +// the Go generic mode bits ([os.FileMode]...). +func MkdirAll(root, unsafePath string, mode int) error { + rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + if err != nil { + return err + } + defer rootDir.Close() + + f, err := MkdirAllHandle(rootDir, unsafePath, mode) + if err != nil { + return err + } + _ = f.Close() + return nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go new file mode 100644 index 000000000..230be73f0 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go @@ -0,0 +1,103 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "fmt" + "os" + "strconv" + + "golang.org/x/sys/unix" +) + +// OpenatInRoot is equivalent to [OpenInRoot], except that the root is provided +// using an *[os.File] handle, to ensure that the correct root directory is used. +func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) { + handle, err := completeLookupInRoot(root, unsafePath) + if err != nil { + return nil, &os.PathError{Op: "securejoin.OpenInRoot", Path: unsafePath, Err: err} + } + return handle, nil +} + +// OpenInRoot safely opens the provided unsafePath within the root. +// Effectively, OpenInRoot(root, unsafePath) is equivalent to +// +// path, _ := securejoin.SecureJoin(root, unsafePath) +// handle, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC) +// +// But is much safer. The above implementation is unsafe because if an attacker +// can modify the filesystem tree between [SecureJoin] and [os.OpenFile], it is +// possible for the returned file to be outside of the root. +// +// Note that the returned handle is an O_PATH handle, meaning that only a very +// limited set of operations will work on the handle. This is done to avoid +// accidentally opening an untrusted file that could cause issues (such as a +// disconnected TTY that could cause a DoS, or some other issue). In order to +// use the returned handle, you can "upgrade" it to a proper handle using +// [Reopen]. +func OpenInRoot(root, unsafePath string) (*os.File, error) { + rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, err + } + defer rootDir.Close() + return OpenatInRoot(rootDir, unsafePath) +} + +// Reopen takes an *[os.File] handle and re-opens it through /proc/self/fd. +// Reopen(file, flags) is effectively equivalent to +// +// fdPath := fmt.Sprintf("/proc/self/fd/%d", file.Fd()) +// os.OpenFile(fdPath, flags|unix.O_CLOEXEC) +// +// But with some extra hardenings to ensure that we are not tricked by a +// maliciously-configured /proc mount. While this attack scenario is not +// common, in container runtimes it is possible for higher-level runtimes to be +// tricked into configuring an unsafe /proc that can be used to attack file +// operations. See [CVE-2019-19921] for more details. 
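A short usage sketch tying OpenInRoot and Reopen together (not from the vendored code; the container rootfs path and file name are assumptions): the first call returns an O_PATH handle, which the second upgrades into a readable file.

```go
//go:build linux

package main

import (
	"fmt"
	"io"

	securejoin "github.com/cyphar/filepath-securejoin"
	"golang.org/x/sys/unix"
)

func main() {
	// Resolve "etc/hostname" strictly inside the example rootfs; symlinks in
	// the untrusted path cannot escape it.
	handle, err := securejoin.OpenInRoot("/srv/container-rootfs", "etc/hostname")
	if err != nil {
		panic(err)
	}
	defer handle.Close()

	// The handle is O_PATH-only, so upgrade it before reading from it.
	f, err := securejoin.Reopen(handle, unix.O_RDONLY)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	data, err := io.ReadAll(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes read\n", len(data))
}
```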
+// +// [CVE-2019-19921]: https://github.com/advisories/GHSA-fh74-hm69-rqjw +func Reopen(handle *os.File, flags int) (*os.File, error) { + procRoot, err := getProcRoot() + if err != nil { + return nil, err + } + + // We can't operate on /proc/thread-self/fd/$n directly when doing a + // re-open, so we need to open /proc/thread-self/fd and then open a single + // final component. + procFdDir, closer, err := procThreadSelf(procRoot, "fd/") + if err != nil { + return nil, fmt.Errorf("get safe /proc/thread-self/fd handle: %w", err) + } + defer procFdDir.Close() + defer closer() + + // Try to detect if there is a mount on top of the magic-link we are about + // to open. If we are using unsafeHostProcRoot(), this could change after + // we check it (and there's nothing we can do about that) but for + // privateProcRoot() this should be guaranteed to be safe (at least since + // Linux 5.12[1], when anonymous mount namespaces were completely isolated + // from external mounts including mount propagation events). + // + // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts + // onto targets that reside on shared mounts"). + fdStr := strconv.Itoa(int(handle.Fd())) + if err := checkSymlinkOvermount(procRoot, procFdDir, fdStr); err != nil { + return nil, fmt.Errorf("check safety of /proc/thread-self/fd/%s magiclink: %w", fdStr, err) + } + + flags |= unix.O_CLOEXEC + // Rather than just wrapping openatFile, open-code it so we can copy + // handle.Name(). + reopenFd, err := unix.Openat(int(procFdDir.Fd()), fdStr, flags, 0) + if err != nil { + return nil, fmt.Errorf("reopen fd %d: %w", handle.Fd(), err) + } + return os.NewFile(uintptr(reopenFd), handle.Name()), nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go new file mode 100644 index 000000000..f7a13e69c --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go @@ -0,0 +1,127 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "golang.org/x/sys/unix" +) + +var hasOpenat2 = sync_OnceValue(func() bool { + fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT, + }) + if err != nil { + return false + } + _ = unix.Close(fd) + return true +}) + +func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool { + // RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve + // ".." while a mount or rename occurs anywhere on the system. This could + // happen spuriously, or as the result of an attacker trying to mess with + // us during lookup. + // + // In addition, scoped lookups have a "safety check" at the end of + // complete_walk which will return -EXDEV if the final path is not in the + // root. + return how.Resolve&(unix.RESOLVE_IN_ROOT|unix.RESOLVE_BENEATH) != 0 && + (errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV)) +} + +const scopedLookupMaxRetries = 10 + +func openat2File(dir *os.File, path string, how *unix.OpenHow) (*os.File, error) { + fullPath := dir.Name() + "/" + path + // Make sure we always set O_CLOEXEC. 
+ how.Flags |= unix.O_CLOEXEC + var tries int + for tries < scopedLookupMaxRetries { + fd, err := unix.Openat2(int(dir.Fd()), path, how) + if err != nil { + if scopedLookupShouldRetry(how, err) { + // We retry a couple of times to avoid the spurious errors, and + // if we are being attacked then returning -EAGAIN is the best + // we can do. + tries++ + continue + } + return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: err} + } + // If we are using RESOLVE_IN_ROOT, the name we generated may be wrong. + // NOTE: The procRoot code MUST NOT use RESOLVE_IN_ROOT, otherwise + // you'll get infinite recursion here. + if how.Resolve&unix.RESOLVE_IN_ROOT == unix.RESOLVE_IN_ROOT { + if actualPath, err := rawProcSelfFdReadlink(fd); err == nil { + fullPath = actualPath + } + } + return os.NewFile(uintptr(fd), fullPath), nil + } + return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: errPossibleAttack} +} + +func lookupOpenat2(root *os.File, unsafePath string, partial bool) (*os.File, string, error) { + if !partial { + file, err := openat2File(root, unsafePath, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, + }) + return file, "", err + } + return partialLookupOpenat2(root, unsafePath) +} + +// partialLookupOpenat2 is an alternative implementation of +// partialLookupInRoot, using openat2(RESOLVE_IN_ROOT) to more safely get a +// handle to the deepest existing child of the requested path within the root. +func partialLookupOpenat2(root *os.File, unsafePath string) (*os.File, string, error) { + // TODO: Implement this as a git-bisect-like binary search. + + unsafePath = filepath.ToSlash(unsafePath) // noop + endIdx := len(unsafePath) + var lastError error + for endIdx > 0 { + subpath := unsafePath[:endIdx] + + handle, err := openat2File(root, subpath, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, + }) + if err == nil { + // Jump over the slash if we have a non-"" remainingPath. + if endIdx < len(unsafePath) { + endIdx += 1 + } + // We found a subpath! + return handle, unsafePath[endIdx:], lastError + } + if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR) { + // That path doesn't exist, let's try the next directory up. + endIdx = strings.LastIndexByte(subpath, '/') + lastError = err + continue + } + return nil, "", fmt.Errorf("open subpath: %w", err) + } + // If we couldn't open anything, the whole subpath is missing. Return a + // copy of the root fd so that the caller doesn't close this one by + // accident. + rootClone, err := dupFile(root) + if err != nil { + return nil, "", err + } + return rootClone, unsafePath, lastError +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go new file mode 100644 index 000000000..949fb5f2d --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go @@ -0,0 +1,59 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
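For readers unfamiliar with openat2(2), this is roughly the kernel-side behaviour the RESOLVE_IN_ROOT fast path above relies on, sketched as a direct syscall; it needs Linux 5.6 or newer, and the /srv/root path and lookup path are assumed example values:

```go
//go:build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	rootFd, err := unix.Open("/srv/root", unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(rootFd)

	// The kernel resolves the whole path in one step, treating rootFd as "/"
	// for absolute symlinks and "..", so the result cannot escape the root.
	fd, err := unix.Openat2(rootFd, "a/b/../c", &unix.OpenHow{
		Flags:   unix.O_PATH | unix.O_CLOEXEC,
		Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS,
	})
	if err != nil {
		panic(err)
	}
	f := os.NewFile(uintptr(fd), "openat2-handle")
	defer f.Close()
	fmt.Println("opened", f.Name())
}
```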
+ +package securejoin + +import ( + "os" + "path/filepath" + + "golang.org/x/sys/unix" +) + +func dupFile(f *os.File) (*os.File, error) { + fd, err := unix.FcntlInt(f.Fd(), unix.F_DUPFD_CLOEXEC, 0) + if err != nil { + return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err) + } + return os.NewFile(uintptr(fd), f.Name()), nil +} + +func openatFile(dir *os.File, path string, flags int, mode int) (*os.File, error) { + // Make sure we always set O_CLOEXEC. + flags |= unix.O_CLOEXEC + fd, err := unix.Openat(int(dir.Fd()), path, flags, uint32(mode)) + if err != nil { + return nil, &os.PathError{Op: "openat", Path: dir.Name() + "/" + path, Err: err} + } + // All of the paths we use with openatFile(2) are guaranteed to be + // lexically safe, so we can use path.Join here. + fullPath := filepath.Join(dir.Name(), path) + return os.NewFile(uintptr(fd), fullPath), nil +} + +func fstatatFile(dir *os.File, path string, flags int) (unix.Stat_t, error) { + var stat unix.Stat_t + if err := unix.Fstatat(int(dir.Fd()), path, &stat, flags); err != nil { + return stat, &os.PathError{Op: "fstatat", Path: dir.Name() + "/" + path, Err: err} + } + return stat, nil +} + +func readlinkatFile(dir *os.File, path string) (string, error) { + size := 4096 + for { + linkBuf := make([]byte, size) + n, err := unix.Readlinkat(int(dir.Fd()), path, linkBuf) + if err != nil { + return "", &os.PathError{Op: "readlinkat", Path: dir.Name() + "/" + path, Err: err} + } + if n != size { + return string(linkBuf[:n]), nil + } + // Possible truncation, resize the buffer. + size *= 2 + } +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go new file mode 100644 index 000000000..809a579cb --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go @@ -0,0 +1,452 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "errors" + "fmt" + "os" + "runtime" + "strconv" + + "golang.org/x/sys/unix" +) + +func fstat(f *os.File) (unix.Stat_t, error) { + var stat unix.Stat_t + if err := unix.Fstat(int(f.Fd()), &stat); err != nil { + return stat, &os.PathError{Op: "fstat", Path: f.Name(), Err: err} + } + return stat, nil +} + +func fstatfs(f *os.File) (unix.Statfs_t, error) { + var statfs unix.Statfs_t + if err := unix.Fstatfs(int(f.Fd()), &statfs); err != nil { + return statfs, &os.PathError{Op: "fstatfs", Path: f.Name(), Err: err} + } + return statfs, nil +} + +// The kernel guarantees that the root inode of a procfs mount has an +// f_type of PROC_SUPER_MAGIC and st_ino of PROC_ROOT_INO. 
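The kernel guarantee mentioned above can be checked from user space with a couple of syscalls; a stand-alone sketch (PROC_SUPER_MAGIC is exported by x/sys/unix, while PROC_ROOT_INO is not, so the value 1 is written out literally):

```go
//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var fsInfo unix.Statfs_t
	if err := unix.Statfs("/proc", &fsInfo); err != nil {
		panic(err)
	}
	var st unix.Stat_t
	if err := unix.Stat("/proc", &st); err != nil {
		panic(err)
	}

	// A real procfs root reports PROC_SUPER_MAGIC and inode 1.
	fmt.Println("filesystem type is procfs:", fsInfo.Type == unix.PROC_SUPER_MAGIC)
	fmt.Println("root inode is PROC_ROOT_INO:", st.Ino == 1)
}
```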
+const ( + procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC + procRootIno = 1 // PROC_ROOT_INO +) + +func verifyProcRoot(procRoot *os.File) error { + if statfs, err := fstatfs(procRoot); err != nil { + return err + } else if statfs.Type != procSuperMagic { + return fmt.Errorf("%w: incorrect procfs root filesystem type 0x%x", errUnsafeProcfs, statfs.Type) + } + if stat, err := fstat(procRoot); err != nil { + return err + } else if stat.Ino != procRootIno { + return fmt.Errorf("%w: incorrect procfs root inode number %d", errUnsafeProcfs, stat.Ino) + } + return nil +} + +var hasNewMountApi = sync_OnceValue(func() bool { + // All of the pieces of the new mount API we use (fsopen, fsconfig, + // fsmount, open_tree) were added together in Linux 5.1[1,2], so we can + // just check for one of the syscalls and the others should also be + // available. + // + // Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE. + // This is equivalent to openat(2), but tells us if open_tree is + // available (and thus all of the other basic new mount API syscalls). + // open_tree(2) is most light-weight syscall to test here. + // + // [1]: merge commit 400913252d09 + // [2]: + fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC) + if err != nil { + return false + } + _ = unix.Close(fd) + return true +}) + +func fsopen(fsName string, flags int) (*os.File, error) { + // Make sure we always set O_CLOEXEC. + flags |= unix.FSOPEN_CLOEXEC + fd, err := unix.Fsopen(fsName, flags) + if err != nil { + return nil, os.NewSyscallError("fsopen "+fsName, err) + } + return os.NewFile(uintptr(fd), "fscontext:"+fsName), nil +} + +func fsmount(ctx *os.File, flags, mountAttrs int) (*os.File, error) { + // Make sure we always set O_CLOEXEC. + flags |= unix.FSMOUNT_CLOEXEC + fd, err := unix.Fsmount(int(ctx.Fd()), flags, mountAttrs) + if err != nil { + return nil, os.NewSyscallError("fsmount "+ctx.Name(), err) + } + return os.NewFile(uintptr(fd), "fsmount:"+ctx.Name()), nil +} + +func newPrivateProcMount() (*os.File, error) { + procfsCtx, err := fsopen("proc", unix.FSOPEN_CLOEXEC) + if err != nil { + return nil, err + } + defer procfsCtx.Close() + + // Try to configure hidepid=ptraceable,subset=pid if possible, but ignore errors. + _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "hidepid", "ptraceable") + _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid") + + // Get an actual handle. + if err := unix.FsconfigCreate(int(procfsCtx.Fd())); err != nil { + return nil, os.NewSyscallError("fsconfig create procfs", err) + } + return fsmount(procfsCtx, unix.FSMOUNT_CLOEXEC, unix.MS_RDONLY|unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID) +} + +func openTree(dir *os.File, path string, flags uint) (*os.File, error) { + dirFd := -int(unix.EBADF) + dirName := "." + if dir != nil { + dirFd = int(dir.Fd()) + dirName = dir.Name() + } + // Make sure we always set O_CLOEXEC. + flags |= unix.OPEN_TREE_CLOEXEC + fd, err := unix.OpenTree(dirFd, path, flags) + if err != nil { + return nil, &os.PathError{Op: "open_tree", Path: path, Err: err} + } + return os.NewFile(uintptr(fd), dirName+"/"+path), nil +} + +func clonePrivateProcMount() (_ *os.File, Err error) { + // Try to make a clone without using AT_RECURSIVE if we can. If this works, + // we can be sure there are no over-mounts and so if the root is valid then + // we're golden. Otherwise, we have to deal with over-mounts. 
+ procfsHandle, err := openTree(nil, "/proc", unix.OPEN_TREE_CLONE) + if err != nil || hookForcePrivateProcRootOpenTreeAtRecursive(procfsHandle) { + procfsHandle, err = openTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE) + } + if err != nil { + return nil, fmt.Errorf("creating a detached procfs clone: %w", err) + } + defer func() { + if Err != nil { + _ = procfsHandle.Close() + } + }() + if err := verifyProcRoot(procfsHandle); err != nil { + return nil, err + } + return procfsHandle, nil +} + +func privateProcRoot() (*os.File, error) { + if !hasNewMountApi() || hookForceGetProcRootUnsafe() { + return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP) + } + // Try to create a new procfs mount from scratch if we can. This ensures we + // can get a procfs mount even if /proc is fake (for whatever reason). + procRoot, err := newPrivateProcMount() + if err != nil || hookForcePrivateProcRootOpenTree(procRoot) { + // Try to clone /proc then... + procRoot, err = clonePrivateProcMount() + } + return procRoot, err +} + +func unsafeHostProcRoot() (_ *os.File, Err error) { + procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, err + } + defer func() { + if Err != nil { + _ = procRoot.Close() + } + }() + if err := verifyProcRoot(procRoot); err != nil { + return nil, err + } + return procRoot, nil +} + +func doGetProcRoot() (*os.File, error) { + procRoot, err := privateProcRoot() + if err != nil { + // Fall back to using a /proc handle if making a private mount failed. + // If we have openat2, at least we can avoid some kinds of over-mount + // attacks, but without openat2 there's not much we can do. + procRoot, err = unsafeHostProcRoot() + } + return procRoot, err +} + +var getProcRoot = sync_OnceValues(func() (*os.File, error) { + return doGetProcRoot() +}) + +var hasProcThreadSelf = sync_OnceValue(func() bool { + return unix.Access("/proc/thread-self/", unix.F_OK) == nil +}) + +var errUnsafeProcfs = errors.New("unsafe procfs detected") + +type procThreadSelfCloser func() + +// procThreadSelf returns a handle to /proc/thread-self/ (or an +// equivalent handle on older kernels where /proc/thread-self doesn't exist). +// Once finished with the handle, you must call the returned closer function +// (runtime.UnlockOSThread). You must not pass the returned *os.File to other +// Go threads or use the handle after calling the closer. +// +// This is similar to ProcThreadSelf from runc, but with extra hardening +// applied and using *os.File. +func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThreadSelfCloser, Err error) { + // We need to lock our thread until the caller is done with the handle + // because between getting the handle and using it we could get interrupted + // by the Go runtime and hit the case where the underlying thread is + // swapped out and the original thread is killed, resulting in + // pull-your-hair-out-hard-to-debug issues in the caller. + runtime.LockOSThread() + defer func() { + if Err != nil { + runtime.UnlockOSThread() + } + }() + + // Figure out what prefix we want to use. + threadSelf := "thread-self/" + if !hasProcThreadSelf() || hookForceProcSelfTask() { + /// Pre-3.17 kernels don't have /proc/thread-self, so do it manually. 
+ threadSelf = "self/task/" + strconv.Itoa(unix.Gettid()) + "/" + if _, err := fstatatFile(procRoot, threadSelf, unix.AT_SYMLINK_NOFOLLOW); err != nil || hookForceProcSelf() { + // In this case, we running in a pid namespace that doesn't match + // the /proc mount we have. This can happen inside runc. + // + // Unfortunately, there is no nice way to get the correct TID to + // use here because of the age of the kernel, so we have to just + // use /proc/self and hope that it works. + threadSelf = "self/" + } + } + + // Grab the handle. + var ( + handle *os.File + err error + ) + if hasOpenat2() { + // We prefer being able to use RESOLVE_NO_XDEV if we can, to be + // absolutely sure we are operating on a clean /proc handle that + // doesn't have any cheeky overmounts that could trick us (including + // symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't + // strictly needed, but just use it since we have it. + // + // NOTE: /proc/self is technically a magic-link (the contents of the + // symlink are generated dynamically), but it doesn't use + // nd_jump_link() so RESOLVE_NO_MAGICLINKS allows it. + // + // NOTE: We MUST NOT use RESOLVE_IN_ROOT here, as openat2File uses + // procSelfFdReadlink to clean up the returned f.Name() if we use + // RESOLVE_IN_ROOT (which would lead to an infinite recursion). + handle, err = openat2File(procRoot, threadSelf+subpath, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_NOFOLLOW | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS, + }) + if err != nil { + // TODO: Once we bump the minimum Go version to 1.20, we can use + // multiple %w verbs for this wrapping. For now we need to use a + // compatibility shim for older Go versions. + //err = fmt.Errorf("%w: %w", errUnsafeProcfs, err) + return nil, nil, wrapBaseError(err, errUnsafeProcfs) + } + } else { + handle, err = openatFile(procRoot, threadSelf+subpath, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) + if err != nil { + // TODO: Once we bump the minimum Go version to 1.20, we can use + // multiple %w verbs for this wrapping. For now we need to use a + // compatibility shim for older Go versions. + //err = fmt.Errorf("%w: %w", errUnsafeProcfs, err) + return nil, nil, wrapBaseError(err, errUnsafeProcfs) + } + defer func() { + if Err != nil { + _ = handle.Close() + } + }() + // We can't detect bind-mounts of different parts of procfs on top of + // /proc (a-la RESOLVE_NO_XDEV), but we can at least be sure that we + // aren't on the wrong filesystem here. + if statfs, err := fstatfs(handle); err != nil { + return nil, nil, err + } else if statfs.Type != procSuperMagic { + return nil, nil, fmt.Errorf("%w: incorrect /proc/self/fd filesystem type 0x%x", errUnsafeProcfs, statfs.Type) + } + } + return handle, runtime.UnlockOSThread, nil +} + +// STATX_MNT_ID_UNIQUE is provided in golang.org/x/sys@v0.20.0, but in order to +// avoid bumping the requirement for a single constant we can just define it +// ourselves. +const STATX_MNT_ID_UNIQUE = 0x4000 + +var hasStatxMountId = sync_OnceValue(func() bool { + var ( + stx unix.Statx_t + // We don't care which mount ID we get. The kernel will give us the + // unique one if it is supported. + wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID + ) + err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx) + return err == nil && stx.Mask&wantStxMask != 0 +}) + +func getMountId(dir *os.File, path string) (uint64, error) { + // If we don't have statx(STATX_MNT_ID*) support, we can't do anything. 
+ if !hasStatxMountId() { + return 0, nil + } + + var ( + stx unix.Statx_t + // We don't care which mount ID we get. The kernel will give us the + // unique one if it is supported. + wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID + ) + + err := unix.Statx(int(dir.Fd()), path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, int(wantStxMask), &stx) + if stx.Mask&wantStxMask == 0 { + // It's not a kernel limitation, for some reason we couldn't get a + // mount ID. Assume it's some kind of attack. + err = fmt.Errorf("%w: could not get mount id", errUnsafeProcfs) + } + if err != nil { + return 0, &os.PathError{Op: "statx(STATX_MNT_ID_...)", Path: dir.Name() + "/" + path, Err: err} + } + return stx.Mnt_id, nil +} + +func checkSymlinkOvermount(procRoot *os.File, dir *os.File, path string) error { + // Get the mntId of our procfs handle. + expectedMountId, err := getMountId(procRoot, "") + if err != nil { + return err + } + // Get the mntId of the target magic-link. + gotMountId, err := getMountId(dir, path) + if err != nil { + return err + } + // As long as the directory mount is alive, even with wrapping mount IDs, + // we would expect to see a different mount ID here. (Of course, if we're + // using unsafeHostProcRoot() then an attaker could change this after we + // did this check.) + if expectedMountId != gotMountId { + return fmt.Errorf("%w: symlink %s/%s has an overmount obscuring the real link (mount ids do not match %d != %d)", errUnsafeProcfs, dir.Name(), path, expectedMountId, gotMountId) + } + return nil +} + +func doRawProcSelfFdReadlink(procRoot *os.File, fd int) (string, error) { + fdPath := fmt.Sprintf("fd/%d", fd) + procFdLink, closer, err := procThreadSelf(procRoot, fdPath) + if err != nil { + return "", fmt.Errorf("get safe /proc/thread-self/%s handle: %w", fdPath, err) + } + defer procFdLink.Close() + defer closer() + + // Try to detect if there is a mount on top of the magic-link. Since we use the handle directly + // provide to the closure. If the closure uses the handle directly, this + // should be safe in general (a mount on top of the path afterwards would + // not affect the handle itself) and will definitely be safe if we are + // using privateProcRoot() (at least since Linux 5.12[1], when anonymous + // mount namespaces were completely isolated from external mounts including + // mount propagation events). + // + // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts + // onto targets that reside on shared mounts"). + if err := checkSymlinkOvermount(procRoot, procFdLink, ""); err != nil { + return "", fmt.Errorf("check safety of /proc/thread-self/fd/%d magiclink: %w", fd, err) + } + + // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See Linux commit + // 65cfc6722361 ("readlinkat(), fchownat() and fstatat() with empty + // relative pathnames"). 
+ return readlinkatFile(procFdLink, "") +} + +func rawProcSelfFdReadlink(fd int) (string, error) { + procRoot, err := getProcRoot() + if err != nil { + return "", err + } + return doRawProcSelfFdReadlink(procRoot, fd) +} + +func procSelfFdReadlink(f *os.File) (string, error) { + return rawProcSelfFdReadlink(int(f.Fd())) +} + +var ( + errPossibleBreakout = errors.New("possible breakout detected") + errInvalidDirectory = errors.New("wandered into deleted directory") + errDeletedInode = errors.New("cannot verify path of deleted inode") +) + +func isDeadInode(file *os.File) error { + // If the nlink of a file drops to 0, there is an attacker deleting + // directories during our walk, which could result in weird /proc values. + // It's better to error out in this case. + stat, err := fstat(file) + if err != nil { + return fmt.Errorf("check for dead inode: %w", err) + } + if stat.Nlink == 0 { + err := errDeletedInode + if stat.Mode&unix.S_IFMT == unix.S_IFDIR { + err = errInvalidDirectory + } + return fmt.Errorf("%w %q", err, file.Name()) + } + return nil +} + +func checkProcSelfFdPath(path string, file *os.File) error { + if err := isDeadInode(file); err != nil { + return err + } + actualPath, err := procSelfFdReadlink(file) + if err != nil { + return fmt.Errorf("get path of handle: %w", err) + } + if actualPath != path { + return fmt.Errorf("%w: handle path %q doesn't match expected path %q", errPossibleBreakout, actualPath, path) + } + return nil +} + +// Test hooks used in the procfs tests to verify that the fallback logic works. +// See testing_mocks_linux_test.go and procfs_linux_test.go for more details. +var ( + hookForcePrivateProcRootOpenTree = hookDummyFile + hookForcePrivateProcRootOpenTreeAtRecursive = hookDummyFile + hookForceGetProcRootUnsafe = hookDummy + + hookForceProcSelfTask = hookDummy + hookForceProcSelf = hookDummy +) + +func hookDummy() bool { return false } +func hookDummyFile(_ *os.File) bool { return false } diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go index a82a5eae1..36373f8c5 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/vfs.go +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -1,4 +1,4 @@ -// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Copyright (C) 2017-2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -10,19 +10,19 @@ import "os" // are several projects (umoci and go-mtree) that are using this sort of // interface. -// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is -// equivalent to using the standard os.* family of functions. This is mainly +// VFS is the minimal interface necessary to use [SecureJoinVFS]. A nil VFS is +// equivalent to using the standard [os].* family of functions. This is mainly // used for the purposes of mock testing, but also can be used to otherwise use -// SecureJoin with VFS-like system. +// [SecureJoinVFS] with VFS-like system. type VFS interface { - // Lstat returns a FileInfo describing the named file. If the file is a - // symbolic link, the returned FileInfo describes the symbolic link. Lstat - // makes no attempt to follow the link. These semantics are identical to - // os.Lstat. + // Lstat returns an [os.FileInfo] describing the named file. If the + // file is a symbolic link, the returned [os.FileInfo] describes the + // symbolic link. Lstat makes no attempt to follow the link. 
+ // The semantics are identical to [os.Lstat]. Lstat(name string) (os.FileInfo, error) - // Readlink returns the destination of the named symbolic link. These - // semantics are identical to os.Readlink. + // Readlink returns the destination of the named symbolic link. + // The semantics are identical to [os.Readlink]. Readlink(name string) (string, error) } @@ -30,12 +30,6 @@ type VFS interface { // module. type osVFS struct{} -// Lstat returns a FileInfo describing the named file. If the file is a -// symbolic link, the returned FileInfo describes the symbolic link. Lstat -// makes no attempt to follow the link. These semantics are identical to -// os.Lstat. func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } -// Readlink returns the destination of the named symbolic link. These -// semantics are identical to os.Readlink. func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) } diff --git a/vendor/github.com/go-git/go-billy/v5/Makefile b/vendor/github.com/go-git/go-billy/v5/Makefile index 74dad8b49..3c95ddeaa 100644 --- a/vendor/github.com/go-git/go-billy/v5/Makefile +++ b/vendor/github.com/go-git/go-billy/v5/Makefile @@ -1,6 +1,7 @@ # Go parameters GOCMD = go GOTEST = $(GOCMD) test +WASIRUN_WRAPPER := $(CURDIR)/scripts/wasirun-wrapper .PHONY: test test: @@ -9,3 +10,9 @@ test: test-coverage: echo "" > $(COVERAGE_REPORT); \ $(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./... + +.PHONY: wasitest +wasitest: export GOARCH=wasm +wasitest: export GOOS=wasip1 +wasitest: + $(GOTEST) -exec $(WASIRUN_WRAPPER) ./... diff --git a/vendor/github.com/go-git/go-billy/v5/fs.go b/vendor/github.com/go-git/go-billy/v5/fs.go index a9efccdeb..d86f9d823 100644 --- a/vendor/github.com/go-git/go-billy/v5/fs.go +++ b/vendor/github.com/go-git/go-billy/v5/fs.go @@ -164,6 +164,8 @@ type File interface { // Name returns the name of the file as presented to Open. Name() string io.Writer + // TODO: Add io.WriterAt for v6 + // io.WriterAt io.Reader io.ReaderAt io.Seeker diff --git a/vendor/github.com/go-git/go-billy/v5/memfs/memory.go b/vendor/github.com/go-git/go-billy/v5/memfs/memory.go index dab73968b..6cbd7d08c 100644 --- a/vendor/github.com/go-git/go-billy/v5/memfs/memory.go +++ b/vendor/github.com/go-git/go-billy/v5/memfs/memory.go @@ -9,6 +9,7 @@ import ( "path/filepath" "sort" "strings" + "syscall" "time" "github.com/go-git/go-billy/v5" @@ -18,16 +19,19 @@ import ( const separator = filepath.Separator -// Memory a very convenient filesystem based on memory files +var errNotLink = errors.New("not a link") + +// Memory a very convenient filesystem based on memory files. type Memory struct { s *storage tempCount int } -//New returns a new Memory filesystem. +// New returns a new Memory filesystem. 
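Because the VFS interface in vfs.go above only requires Lstat and Readlink, callers can supply their own implementation for testing; a rough sketch of such a stub (the type and test names are invented for illustration):

```go
package securejoin_test

import (
	"os"
	"testing"

	securejoin "github.com/cyphar/filepath-securejoin"
)

// passthroughVFS satisfies the two-method VFS interface by forwarding to the
// real os package; a test double could instead serve entries from a map.
type passthroughVFS struct{}

func (passthroughVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
func (passthroughVFS) Readlink(name string) (string, error)   { return os.Readlink(name) }

func TestSecureJoinVFS(t *testing.T) {
	joined, err := securejoin.SecureJoinVFS("/tmp", "../../etc/passwd", passthroughVFS{})
	if err != nil {
		t.Fatal(err)
	}
	// ".." components are clamped at the root, so the result stays under /tmp.
	t.Logf("joined path: %s", joined)
}
```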
func New() billy.Filesystem { fs := &Memory{s: newStorage()} + fs.s.New("/", 0755|os.ModeDir, 0) return chroot.New(fs, string(separator)) } @@ -57,7 +61,9 @@ func (fs *Memory) OpenFile(filename string, flag int, perm os.FileMode) (billy.F } if target, isLink := fs.resolveLink(filename, f); isLink { - return fs.OpenFile(target, flag, perm) + if target != filename { + return fs.OpenFile(target, flag, perm) + } } } @@ -68,8 +74,6 @@ func (fs *Memory) OpenFile(filename string, flag int, perm os.FileMode) (billy.F return f.Duplicate(filename, perm, flag), nil } -var errNotLink = errors.New("not a link") - func (fs *Memory) resolveLink(fullpath string, f *file) (target string, isLink bool) { if !isSymlink(f.mode) { return fullpath, false @@ -131,8 +135,12 @@ func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (fs *Memory) ReadDir(path string) ([]os.FileInfo, error) { if f, has := fs.s.Get(path); has { if target, isLink := fs.resolveLink(path, f); isLink { - return fs.ReadDir(target) + if target != path { + return fs.ReadDir(target) + } } + } else { + return nil, &os.PathError{Op: "open", Path: path, Err: syscall.ENOENT} } var entries []os.FileInfo @@ -169,17 +177,19 @@ func (fs *Memory) Remove(filename string) error { return fs.s.Remove(filename) } +// Falls back to Go's filepath.Join, which works differently depending on the +// OS where the code is being executed. func (fs *Memory) Join(elem ...string) string { return filepath.Join(elem...) } func (fs *Memory) Symlink(target, link string) error { - _, err := fs.Stat(link) + _, err := fs.Lstat(link) if err == nil { return os.ErrExist } - if !os.IsNotExist(err) { + if !errors.Is(err, os.ErrNotExist) { return err } @@ -230,7 +240,7 @@ func (f *file) Read(b []byte) (int, error) { n, err := f.ReadAt(b, f.position) f.position += int64(n) - if err == io.EOF && n != 0 { + if errors.Is(err, io.EOF) && n != 0 { err = nil } @@ -269,6 +279,10 @@ func (f *file) Seek(offset int64, whence int) (int64, error) { } func (f *file) Write(p []byte) (int, error) { + return f.WriteAt(p, f.position) +} + +func (f *file) WriteAt(p []byte, off int64) (int, error) { if f.isClosed { return 0, os.ErrClosed } @@ -277,8 +291,8 @@ func (f *file) Write(p []byte) (int, error) { return 0, errors.New("write not supported") } - n, err := f.content.WriteAt(p, f.position) - f.position += int64(n) + n, err := f.content.WriteAt(p, off) + f.position = off + int64(n) return n, err } diff --git a/vendor/github.com/go-git/go-billy/v5/memfs/storage.go b/vendor/github.com/go-git/go-billy/v5/memfs/storage.go index e3c4e38bf..16b48ce00 100644 --- a/vendor/github.com/go-git/go-billy/v5/memfs/storage.go +++ b/vendor/github.com/go-git/go-billy/v5/memfs/storage.go @@ -6,6 +6,7 @@ import ( "io" "os" "path/filepath" + "strings" "sync" ) @@ -112,7 +113,7 @@ func (s *storage) Rename(from, to string) error { move := [][2]string{{from, to}} for pathFrom := range s.files { - if pathFrom == from || !filepath.HasPrefix(pathFrom, from) { + if pathFrom == from || !strings.HasPrefix(pathFrom, from) { continue } diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go index b4b6dbc07..c0a610990 100644 --- a/vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go @@ -246,6 +246,10 @@ func (fs *BoundOS) insideBaseDir(filename string) (bool, error) { // a dir that is within the fs.baseDir, by first evaluating any symlinks // that either filename or fs.baseDir may contain. 
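A small sketch exercising the memfs behaviour touched above: the in-memory file now supports WriteAt (reachable via a type assertion, since the v5 billy.File interface does not embed io.WriterAt yet) and ReadDir on a missing path reports ENOENT. File names here are arbitrary examples:

```go
package main

import (
	"fmt"
	"io"

	"github.com/go-git/go-billy/v5/memfs"
)

func main() {
	fs := memfs.New()

	f, err := fs.Create("notes/a.txt")
	if err != nil {
		panic(err)
	}
	if _, err := f.Write([]byte("hello world")); err != nil {
		panic(err)
	}

	// billy.File (v5) does not embed io.WriterAt, so random-access writes on
	// the in-memory file go through a type assertion.
	if wa, ok := f.(io.WriterAt); ok {
		if _, err := wa.WriteAt([]byte("HELLO"), 0); err != nil {
			panic(err)
		}
	}
	_ = f.Close()

	// Listing a directory that was never created now fails with ENOENT
	// instead of returning an empty slice.
	if _, err := fs.ReadDir("/does-not-exist"); err != nil {
		fmt.Println("ReadDir error:", err)
	}
}
```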
func (fs *BoundOS) insideBaseDirEval(filename string) (bool, error) { + // "/" contains all others. + if fs.baseDir == "/" { + return true, nil + } dir, err := filepath.EvalSymlinks(filepath.Dir(filename)) if dir == "" || os.IsNotExist(err) { dir = filepath.Dir(filename) @@ -255,7 +259,7 @@ func (fs *BoundOS) insideBaseDirEval(filename string) (bool, error) { wd = fs.baseDir } if filename != wd && dir != wd && !strings.HasPrefix(dir, wd+string(filepath.Separator)) { - return false, fmt.Errorf("path outside base dir") + return false, fmt.Errorf("%q: path outside base dir %q: %w", filename, fs.baseDir, os.ErrNotExist) } return true, nil } diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go index d834a1145..6fb8273f1 100644 --- a/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go @@ -1,5 +1,5 @@ -//go:build !plan9 && !windows && !js -// +build !plan9,!windows,!js +//go:build !plan9 && !windows && !wasm +// +build !plan9,!windows,!wasm package osfs diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_wasip1.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_wasip1.go new file mode 100644 index 000000000..79e6e3319 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_wasip1.go @@ -0,0 +1,34 @@ +//go:build wasip1 +// +build wasip1 + +package osfs + +import ( + "os" + "syscall" +) + +func (f *file) Lock() error { + f.m.Lock() + defer f.m.Unlock() + return nil +} + +func (f *file) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + return nil +} + +func rename(from, to string) error { + return os.Rename(from, to) +} + +// umask sets umask to a new value, and returns a func which allows the +// caller to reset it back to what it was originally. +func umask(new int) func() { + old := syscall.Umask(new) + return func() { + syscall.Umask(old) + } +} diff --git a/vendor/github.com/go-git/go-billy/v5/util/util.go b/vendor/github.com/go-git/go-billy/v5/util/util.go index 5c77128c3..2cdd832c7 100644 --- a/vendor/github.com/go-git/go-billy/v5/util/util.go +++ b/vendor/github.com/go-git/go-billy/v5/util/util.go @@ -1,6 +1,7 @@ package util import ( + "errors" "io" "os" "path/filepath" @@ -33,14 +34,14 @@ func removeAll(fs billy.Basic, path string) error { // Simple case: if Remove works, we're done. err := fs.Remove(path) - if err == nil || os.IsNotExist(err) { + if err == nil || errors.Is(err, os.ErrNotExist) { return nil } // Otherwise, is this a directory we need to recurse into? dir, serr := fs.Stat(path) if serr != nil { - if os.IsNotExist(serr) { + if errors.Is(serr, os.ErrNotExist) { return nil } @@ -60,7 +61,7 @@ func removeAll(fs billy.Basic, path string) error { // Directory. fis, err := dirfs.ReadDir(path) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { // Race. It was deleted between the Lstat and Open. // Return nil per RemoveAll's docs. return nil @@ -81,7 +82,7 @@ func removeAll(fs billy.Basic, path string) error { // Remove directory. err1 := fs.Remove(path) - if err1 == nil || os.IsNotExist(err1) { + if err1 == nil || errors.Is(err1, os.ErrNotExist) { return nil } @@ -96,22 +97,26 @@ func removeAll(fs billy.Basic, path string) error { // WriteFile writes data to a file named by filename in the given filesystem. // If the file does not exist, WriteFile creates it with permissions perm; // otherwise WriteFile truncates it before writing. 
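A brief round-trip with the util helpers changed above, showing the errors.Is style of error checking the package now uses internally; the file names are arbitrary examples:

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()

	// WriteFile creates (or truncates) the file and closes it on return.
	if err := util.WriteFile(fs, "config/app.yaml", []byte("debug: true\n"), 0o644); err != nil {
		panic(err)
	}

	data, err := util.ReadFile(fs, "config/app.yaml")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", data)

	// The updated helpers check errors with errors.Is, which also matches
	// wrapped errors; callers can do the same.
	_, err = util.ReadFile(fs, "missing.yaml")
	fmt.Println("missing file:", errors.Is(err, os.ErrNotExist))
}
```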
-func WriteFile(fs billy.Basic, filename string, data []byte, perm os.FileMode) error { +func WriteFile(fs billy.Basic, filename string, data []byte, perm os.FileMode) (err error) { f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) if err != nil { return err } + defer func() { + if f != nil { + err1 := f.Close() + if err == nil { + err = err1 + } + } + }() n, err := f.Write(data) if err == nil && n < len(data) { err = io.ErrShortWrite } - if err1 := f.Close(); err == nil { - err = err1 - } - - return err + return nil } // Random number state. @@ -154,7 +159,7 @@ func TempFile(fs billy.Basic, dir, prefix string) (f billy.File, err error) { for i := 0; i < 10000; i++ { name := filepath.Join(dir, prefix+nextSuffix()) f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { + if errors.Is(err, os.ErrExist) { if nconflict++; nconflict > 10 { randmu.Lock() rand = reseed() @@ -185,7 +190,7 @@ func TempDir(fs billy.Dir, dir, prefix string) (name string, err error) { for i := 0; i < 10000; i++ { try := filepath.Join(dir, prefix+nextSuffix()) err = fs.MkdirAll(try, 0700) - if os.IsExist(err) { + if errors.Is(err, os.ErrExist) { if nconflict++; nconflict > 10 { randmu.Lock() rand = reseed() @@ -193,8 +198,8 @@ func TempDir(fs billy.Dir, dir, prefix string) (name string, err error) { } continue } - if os.IsNotExist(err) { - if _, err := os.Stat(dir); os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { + if _, err := os.Stat(dir); errors.Is(err, os.ErrNotExist) { return "", err } } @@ -272,7 +277,7 @@ func ReadFile(fs billy.Basic, name string) ([]byte, error) { data = data[:len(data)+n] if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { err = nil } diff --git a/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md index c1f280d4d..ba1fb90ac 100644 --- a/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md +++ b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md @@ -11,7 +11,7 @@ compatibility status with go-git. | `init` | `--bare` | ✅ | | | | `init` | `--template`
<br/> `--separate-git-dir` <br/> `--shared` | ❌ | | | | `clone` | | ✅ | | - [PlainClone](_examples/clone/main.go) | -| `clone` | Authentication: <br/> - none <br/> - access token <br/> - username + password <br/> - ssh | ✅ | | - [clone ssh](_examples/clone/auth/ssh/main.go) <br/> - [clone access token](_examples/clone/auth/basic/access_token/main.go) <br/> - [clone user + password](_examples/clone/auth/basic/username_password/main.go) | +| `clone` | Authentication: <br/> - none <br/> - access token <br/> - username + password <br/> - ssh | ✅ | | - [clone ssh (private_key)](_examples/clone/auth/ssh/private_key/main.go) <br/> - [clone ssh (ssh_agent)](_examples/clone/auth/ssh/ssh_agent/main.go) <br/> - [clone access token](_examples/clone/auth/basic/access_token/main.go) <br/> - [clone user + password](_examples/clone/auth/basic/username_password/main.go) | | `clone` | `--progress` <br/> `--single-branch` <br/> `--depth` <br/> `--origin` <br/> `--recurse-submodules` <br/> `--shared` | ✅ | | - [recurse submodules](_examples/clone/main.go) <br/> - [progress](_examples/progress/main.go) | ## Basic snapshotting @@ -27,14 +27,15 @@ compatibility status with go-git. ## Branching and merging -| Feature | Sub-feature | Status | Notes | Examples | -| ----------- | ----------- | ------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- | -| `branch` | | ✅ | | - [branch](_examples/branch/main.go) | -| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) | -| `merge` | | ❌ | | | -| `mergetool` | | ❌ | | | -| `stash` | | ❌ | | | -| `tag` | | ✅ | | - [tag](_examples/tag/main.go) <br/> - [tag create and push](_examples/tag-create-push/main.go) | +| Feature | Sub-feature | Status | Notes | Examples | +| ----------- | ----------- | ------------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- | +| `branch` | | ✅ | | - [branch](_examples/branch/main.go) | +| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) | +| `merge` | | ⚠️ (partial) | Fast-forward only | | +| `mergetool` | | ❌ | | | +| `stash` | | ❌ | | | +| `sparse-checkout` | | ✅ | | - [sparse-checkout](_examples/sparse-checkout/main.go) | +| `tag` | | ✅ | | - [tag](_examples/tag/main.go) <br/>
- [tag create and push](_examples/tag-create-push/main.go) | ## Sharing and updating projects diff --git a/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md b/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md index fce25328a..a5b01823b 100644 --- a/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md +++ b/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md @@ -31,6 +31,13 @@ In order for a PR to be accepted it needs to pass a list of requirements: - If the PR is a new feature, it has to come with a suite of unit tests, that tests the new functionality. - In any case, all the PRs have to pass the personal evaluation of at least one of the maintainers of go-git. +### Branches + +The `master` branch is currently used for maintaining the `v5` major release only. The accepted changes would +be dependency bumps, bug fixes and small changes that aren't needed for `v6`. New development should target the +`v6-exp` branch, and if agreed with at least one go-git maintainer, it can be back ported to `v5` by creating +a new PR that targets `master`. + ### Format of the commit message Every commit message should describe what was changed, under which context and, if applicable, the GitHub issue it relates to: diff --git a/vendor/github.com/go-git/go-git/v5/Makefile b/vendor/github.com/go-git/go-git/v5/Makefile index 1e1039674..3d5b54f7e 100644 --- a/vendor/github.com/go-git/go-git/v5/Makefile +++ b/vendor/github.com/go-git/go-git/v5/Makefile @@ -28,6 +28,7 @@ build-git: test: @echo "running against `git version`"; \ $(GOTEST) -race ./... + $(GOTEST) -v _examples/common_test.go _examples/common.go --examples TEMP_REPO := $(shell mktemp) test-sha256: diff --git a/vendor/github.com/go-git/go-git/v5/blame.go b/vendor/github.com/go-git/go-git/v5/blame.go index 2a877dcdf..e3cb39aec 100644 --- a/vendor/github.com/go-git/go-git/v5/blame.go +++ b/vendor/github.com/go-git/go-git/v5/blame.go @@ -97,13 +97,10 @@ func Blame(c *object.Commit, path string) (*BlameResult, error) { if err != nil { return nil, err } - if finished == true { + if finished { break } } - if err != nil { - return nil, err - } b.lineToCommit = make([]*object.Commit, finalLength) for i := range needsMap { @@ -309,8 +306,8 @@ func (b *blame) addBlames(curItems []*queueItem) (bool, error) { for h := range hunks { hLines := countLines(hunks[h].Text) for hl := 0; hl < hLines; hl++ { - switch { - case hunks[h].Type == diffmatchpatch.DiffEqual: + switch hunks[h].Type { + case diffmatchpatch.DiffEqual: prevl++ curl++ if curl == curItem.NeedsMap[need].Cur { @@ -322,7 +319,7 @@ func (b *blame) addBlames(curItems []*queueItem) (bool, error) { break out } } - case hunks[h].Type == diffmatchpatch.DiffInsert: + case diffmatchpatch.DiffInsert: curl++ if curl == curItem.NeedsMap[need].Cur { // the line we want is added, it may have been added here (or by another parent), skip it for now @@ -331,7 +328,7 @@ func (b *blame) addBlames(curItems []*queueItem) (bool, error) { break out } } - case hunks[h].Type == diffmatchpatch.DiffDelete: + case diffmatchpatch.DiffDelete: prevl += hLines continue out default: diff --git a/vendor/github.com/go-git/go-git/v5/config/config.go b/vendor/github.com/go-git/go-git/v5/config/config.go index 6d41c15dc..33f6e37d2 100644 --- a/vendor/github.com/go-git/go-git/v5/config/config.go +++ b/vendor/github.com/go-git/go-git/v5/config/config.go @@ -252,6 +252,7 @@ const ( extensionsSection = "extensions" fetchKey = "fetch" urlKey = "url" + pushurlKey = "pushurl" bareKey = "bare" worktreeKey = "worktree" commentCharKey = "commentChar" 
@@ -633,6 +634,7 @@ func (c *RemoteConfig) unmarshal(s *format.Subsection) error { c.Name = c.raw.Name c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...) + c.URLs = append(c.URLs, c.raw.Options.GetAll(pushurlKey)...) c.Fetch = fetch c.Mirror = c.raw.Options.Get(mirrorKey) == "true" diff --git a/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go b/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go index c46c21b79..2444f33ec 100644 --- a/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go +++ b/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go @@ -43,6 +43,11 @@ func tokenizeExpression(ch rune, tokenType token, check runeCategoryValidator, r return tokenType, string(data), nil } +// maxRevisionLength holds the maximum length that will be parsed for a +// revision. Git itself doesn't enforce a max length, but rather leans on +// the OS to enforce it via its ARG_MAX. +const maxRevisionLength = 128 * 1024 // 128kb + var zeroRune = rune(0) // scanner represents a lexical scanner. @@ -52,7 +57,7 @@ type scanner struct { // newScanner returns a new instance of scanner. func newScanner(r io.Reader) *scanner { - return &scanner{r: bufio.NewReader(r)} + return &scanner{r: bufio.NewReader(io.LimitReader(r, maxRevisionLength))} } // Scan extracts tokens and their strings counterpart diff --git a/vendor/github.com/go-git/go-git/v5/options.go b/vendor/github.com/go-git/go-git/v5/options.go index 8902b7e3e..3cd0f952c 100644 --- a/vendor/github.com/go-git/go-git/v5/options.go +++ b/vendor/github.com/go-git/go-git/v5/options.go @@ -89,6 +89,25 @@ type CloneOptions struct { Shared bool } +// MergeOptions describes how a merge should be performed. +type MergeOptions struct { + // Strategy defines the merge strategy to be used. + Strategy MergeStrategy +} + +// MergeStrategy represents the different types of merge strategies. +type MergeStrategy int8 + +const ( + // FastForwardMerge represents a Git merge strategy where the current + // branch can be simply updated to point to the HEAD of the branch being + // merged. This is only possible if the history of the branch being merged + // is a linear descendant of the current branch, with no conflicting commits. + // + // This is the default option. + FastForwardMerge MergeStrategy = iota +) + // Validate validates the fields and sets the default values. func (o *CloneOptions) Validate() error { if o.URL == "" { @@ -166,7 +185,7 @@ const ( // AllTags fetch all tags from the remote (i.e., fetch remote tags // refs/tags/* into local tags with the same name) AllTags - //NoTags fetch no tags from the remote at all + // NoTags fetch no tags from the remote at all NoTags ) @@ -198,6 +217,9 @@ type FetchOptions struct { CABundle []byte // ProxyOptions provides info required for connecting to a proxy. ProxyOptions transport.ProxyOptions + // Prune specify that local refs that match given RefSpecs and that do + // not exist remotely will be removed. + Prune bool } // Validate validates the fields and sets the default values. @@ -324,9 +346,9 @@ var ( // CheckoutOptions describes how a checkout operation should be performed. type CheckoutOptions struct { - // Hash is the hash of the commit to be checked out. If used, HEAD will be - // in detached mode. If Create is not used, Branch and Hash are mutually - // exclusive. + // Hash is the hash of a commit or tag to be checked out. If used, HEAD + // will be in detached mode. If Create is not used, Branch and Hash are + // mutually exclusive. 
Hash plumbing.Hash // Branch to be checked out, if Branch and Hash are empty is set to `master`. Branch plumbing.ReferenceName @@ -394,6 +416,9 @@ type ResetOptions struct { // the index (resetting it to the tree of Commit) and the working tree // depending on Mode. If empty MixedReset is used. Mode ResetMode + // Files, if not empty will constrain the reseting the index to only files + // specified in this list. + Files []string } // Validate validates the fields and sets the default values. @@ -405,6 +430,11 @@ func (o *ResetOptions) Validate(r *Repository) error { } o.Commit = ref.Hash() + } else { + _, err := r.CommitObject(o.Commit) + if err != nil { + return fmt.Errorf("invalid reset option: %w", err) + } } return nil @@ -474,6 +504,11 @@ type AddOptions struct { // Glob adds all paths, matching pattern, to the index. If pattern matches a // directory path, all directory contents are added to the index recursively. Glob string + // SkipStatus adds the path with no status check. This option is relevant only + // when the `Path` option is specified and does not apply when the `All` option is used. + // Notice that when passing an ignored path it will be added anyway. + // When true it can speed up adding files to the worktree in very large repositories. + SkipStatus bool } // Validate validates the fields and sets the default values. @@ -507,6 +542,10 @@ type CommitOptions struct { // commit will not be signed. The private key must be present and already // decrypted. SignKey *openpgp.Entity + // Signer denotes a cryptographic signer to sign the commit with. + // A nil value here means the commit will not be signed. + // Takes precedence over SignKey. + Signer Signer // Amend will create a new commit object and replace the commit that HEAD currently // points to. Cannot be used with All nor Parents. Amend bool @@ -754,3 +793,26 @@ type PlainInitOptions struct { // Validate validates the fields and sets the default values. func (o *PlainInitOptions) Validate() error { return nil } + +var ( + ErrNoRestorePaths = errors.New("you must specify path(s) to restore") +) + +// RestoreOptions describes how a restore should be performed. +type RestoreOptions struct { + // Marks to restore the content in the index + Staged bool + // Marks to restore the content of the working tree + Worktree bool + // List of file paths that will be restored + Files []string +} + +// Validate validates the fields and sets the default values. +func (o *RestoreOptions) Validate() error { + if len(o.Files) == 0 { + return ErrNoRestorePaths + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go index d8fb30c16..92df5a3de 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go @@ -64,6 +64,10 @@ func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) for _, fi := range fis { if fi.IsDir() && fi.Name() != gitDir { + if NewMatcher(ps).Match(append(path, fi.Name()), true) { + continue + } + var subps []Pattern subps, err = ReadPatterns(fs, append(path, fi.Name())) if err != nil { @@ -116,7 +120,7 @@ func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) { return } -// LoadGlobalPatterns loads gitignore patterns from from the gitignore file +// LoadGlobalPatterns loads gitignore patterns from the gitignore file // declared in a user's ~/.gitconfig file. 
If the ~/.gitconfig file does not // exist the function will return nil. If the core.excludesfile property // is not declared, the function will return nil. If the file pointed to by @@ -132,7 +136,7 @@ func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) { return loadPatterns(fs, fs.Join(home, gitconfigFile)) } -// LoadSystemPatterns loads gitignore patterns from from the gitignore file +// LoadSystemPatterns loads gitignore patterns from the gitignore file // declared in a system's /etc/gitconfig file. If the /etc/gitconfig file does // not exist the function will return nil. If the core.excludesfile property // is not declared, the function will return nil. If the file pointed to by diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go index 6778cf74e..fc25d3702 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go @@ -24,8 +24,8 @@ var ( // ErrInvalidChecksum is returned by Decode if the SHA1 hash mismatch with // the read content ErrInvalidChecksum = errors.New("invalid checksum") - - errUnknownExtension = errors.New("unknown extension") + // ErrUnknownExtension is returned when an index extension is encountered that is considered mandatory + ErrUnknownExtension = errors.New("unknown extension") ) const ( @@ -39,6 +39,7 @@ const ( // A Decoder reads and decodes index files from an input stream. type Decoder struct { + buf *bufio.Reader r io.Reader hash hash.Hash lastEntry *Entry @@ -49,8 +50,10 @@ type Decoder struct { // NewDecoder returns a new decoder that reads from r. func NewDecoder(r io.Reader) *Decoder { h := hash.New(hash.CryptoType) + buf := bufio.NewReader(r) return &Decoder{ - r: io.TeeReader(r, h), + buf: buf, + r: io.TeeReader(buf, h), hash: h, extReader: bufio.NewReader(nil), } @@ -210,71 +213,75 @@ func (d *Decoder) readExtensions(idx *Index) error { // count that they are not supported by jgit or libgit var expected []byte + var peeked []byte var err error - var header [4]byte + // we should always be able to peek for 4 bytes (header) + 4 bytes (extlen) + final hash + // if this fails, we know that we're at the end of the index + peekLen := 4 + 4 + d.hash.Size() + for { expected = d.hash.Sum(nil) - - var n int - if n, err = io.ReadFull(d.r, header[:]); err != nil { - if n == 0 { - err = io.EOF - } - + peeked, err = d.buf.Peek(peekLen) + if len(peeked) < peekLen { + // there can't be an extension at this point, so let's bail out break } - - err = d.readExtension(idx, header[:]) - if err != nil { - break - } - } - - if err != errUnknownExtension { - return err - } - - return d.readChecksum(expected, header) -} - -func (d *Decoder) readExtension(idx *Index, header []byte) error { - switch { - case bytes.Equal(header, treeExtSignature): - r, err := d.getExtensionReader() if err != nil { return err } + err = d.readExtension(idx) + if err != nil { + return err + } + } + + return d.readChecksum(expected) +} + +func (d *Decoder) readExtension(idx *Index) error { + var header [4]byte + + if _, err := io.ReadFull(d.r, header[:]); err != nil { + return err + } + + r, err := d.getExtensionReader() + if err != nil { + return err + } + + switch { + case bytes.Equal(header[:], treeExtSignature): idx.Cache = &Tree{} d := &treeExtensionDecoder{r} if err := d.Decode(idx.Cache); err != nil { return err } - case bytes.Equal(header, resolveUndoExtSignature): - r, err := 
d.getExtensionReader() - if err != nil { - return err - } - + case bytes.Equal(header[:], resolveUndoExtSignature): idx.ResolveUndo = &ResolveUndo{} d := &resolveUndoDecoder{r} if err := d.Decode(idx.ResolveUndo); err != nil { return err } - case bytes.Equal(header, endOfIndexEntryExtSignature): - r, err := d.getExtensionReader() - if err != nil { - return err - } - + case bytes.Equal(header[:], endOfIndexEntryExtSignature): idx.EndOfIndexEntry = &EndOfIndexEntry{} d := &endOfIndexEntryDecoder{r} if err := d.Decode(idx.EndOfIndexEntry); err != nil { return err } default: - return errUnknownExtension + // See https://git-scm.com/docs/index-format, which says: + // If the first byte is 'A'..'Z' the extension is optional and can be ignored. + if header[0] < 'A' || header[0] > 'Z' { + return ErrUnknownExtension + } + + d := &unknownExtensionDecoder{r} + if err := d.Decode(); err != nil { + return err + } } return nil @@ -290,11 +297,10 @@ func (d *Decoder) getExtensionReader() (*bufio.Reader, error) { return d.extReader, nil } -func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error { +func (d *Decoder) readChecksum(expected []byte) error { var h plumbing.Hash - copy(h[:4], alreadyRead[:]) - if _, err := io.ReadFull(d.r, h[4:]); err != nil { + if _, err := io.ReadFull(d.r, h[:]); err != nil { return err } @@ -476,3 +482,22 @@ func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error { _, err = io.ReadFull(d.r, e.Hash[:]) return err } + +type unknownExtensionDecoder struct { + r *bufio.Reader +} + +func (d *unknownExtensionDecoder) Decode() error { + var buf [1024]byte + + for { + _, err := d.r.Read(buf[:]) + if err == io.EOF { + break + } + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go index fa2d81445..c232e0323 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go @@ -3,8 +3,11 @@ package index import ( "bytes" "errors" + "fmt" "io" + "path" "sort" + "strings" "time" "github.com/go-git/go-git/v5/plumbing/hash" @@ -13,7 +16,7 @@ import ( var ( // EncodeVersionSupported is the range of supported index versions - EncodeVersionSupported uint32 = 3 + EncodeVersionSupported uint32 = 4 // ErrInvalidTimestamp is returned by Encode if a Index with a Entry with // negative timestamp values @@ -22,20 +25,25 @@ var ( // An Encoder writes an Index to an output stream. type Encoder struct { - w io.Writer - hash hash.Hash + w io.Writer + hash hash.Hash + lastEntry *Entry } // NewEncoder returns a new encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { h := hash.New(hash.CryptoType) mw := io.MultiWriter(w, h) - return &Encoder{mw, h} + return &Encoder{mw, h, nil} } // Encode writes the Index to the stream of the encoder. 
func (e *Encoder) Encode(idx *Index) error { - // TODO: support v4 + return e.encode(idx, true) +} + +func (e *Encoder) encode(idx *Index, footer bool) error { + // TODO: support extensions if idx.Version > EncodeVersionSupported { return ErrUnsupportedVersion @@ -49,7 +57,10 @@ func (e *Encoder) Encode(idx *Index) error { return err } - return e.encodeFooter() + if footer { + return e.encodeFooter() + } + return nil } func (e *Encoder) encodeHeader(idx *Index) error { @@ -64,7 +75,7 @@ func (e *Encoder) encodeEntries(idx *Index) error { sort.Sort(byName(idx.Entries)) for _, entry := range idx.Entries { - if err := e.encodeEntry(entry); err != nil { + if err := e.encodeEntry(idx, entry); err != nil { return err } entryLength := entryHeaderLength @@ -73,7 +84,7 @@ func (e *Encoder) encodeEntries(idx *Index) error { } wrote := entryLength + len(entry.Name) - if err := e.padEntry(wrote); err != nil { + if err := e.padEntry(idx, wrote); err != nil { return err } } @@ -81,7 +92,7 @@ func (e *Encoder) encodeEntries(idx *Index) error { return nil } -func (e *Encoder) encodeEntry(entry *Entry) error { +func (e *Encoder) encodeEntry(idx *Index, entry *Entry) error { sec, nsec, err := e.timeToUint32(&entry.CreatedAt) if err != nil { return err @@ -132,9 +143,68 @@ func (e *Encoder) encodeEntry(entry *Entry) error { return err } + switch idx.Version { + case 2, 3: + err = e.encodeEntryName(entry) + case 4: + err = e.encodeEntryNameV4(entry) + default: + err = ErrUnsupportedVersion + } + + return err +} + +func (e *Encoder) encodeEntryName(entry *Entry) error { return binary.Write(e.w, []byte(entry.Name)) } +func (e *Encoder) encodeEntryNameV4(entry *Entry) error { + name := entry.Name + l := 0 + if e.lastEntry != nil { + dir := path.Dir(e.lastEntry.Name) + "/" + if strings.HasPrefix(entry.Name, dir) { + l = len(e.lastEntry.Name) - len(dir) + name = strings.TrimPrefix(entry.Name, dir) + } else { + l = len(e.lastEntry.Name) + } + } + + e.lastEntry = entry + + err := binary.WriteVariableWidthInt(e.w, int64(l)) + if err != nil { + return err + } + + return binary.Write(e.w, []byte(name+string('\x00'))) +} + +func (e *Encoder) encodeRawExtension(signature string, data []byte) error { + if len(signature) != 4 { + return fmt.Errorf("invalid signature length") + } + + _, err := e.w.Write([]byte(signature)) + if err != nil { + return err + } + + err = binary.WriteUint32(e.w, uint32(len(data))) + if err != nil { + return err + } + + _, err = e.w.Write(data) + if err != nil { + return err + } + + return nil +} + func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) { if t.IsZero() { return 0, 0, nil @@ -147,7 +217,11 @@ func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) { return uint32(t.Unix()), uint32(t.Nanosecond()), nil } -func (e *Encoder) padEntry(wrote int) error { +func (e *Encoder) padEntry(idx *Index, wrote int) error { + if idx.Version == 4 { + return nil + } + padLen := 8 - wrote%8 _, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen)) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go index 07a61120e..a60ec0b24 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go @@ -32,19 +32,17 @@ func (idx *deltaIndex) findMatch(src, tgt []byte, tgtOffset int) (srcOffset, l i return 0, -1 } - if len(tgt) >= tgtOffset+s && len(src) >= blksz { - h := 
hashBlock(tgt, tgtOffset) - tIdx := h & idx.mask - eIdx := idx.table[tIdx] - if eIdx != 0 { - srcOffset = idx.entries[eIdx] - } else { - return - } - - l = matchLength(src, tgt, tgtOffset, srcOffset) + h := hashBlock(tgt, tgtOffset) + tIdx := h & idx.mask + eIdx := idx.table[tIdx] + if eIdx == 0 { + return } + srcOffset = idx.entries[eIdx] + + l = matchLength(src, tgt, tgtOffset, srcOffset) + return } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go index 960769c7c..a9c6b9b56 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go @@ -26,6 +26,13 @@ var ( const ( payload = 0x7f // 0111 1111 continuation = 0x80 // 1000 0000 + + // maxPatchPreemptionSize defines what is the max size of bytes to be + // premptively made available for a patch operation. + maxPatchPreemptionSize uint = 65536 + + // minDeltaSize defines the smallest size for a delta. + minDeltaSize = 4 ) type offset struct { @@ -86,9 +93,13 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) { } // PatchDelta returns the result of applying the modification deltas in delta to src. -// An error will be returned if delta is corrupted (ErrDeltaLen) or an action command +// An error will be returned if delta is corrupted (ErrInvalidDelta) or an action command // is not copy from source or copy from delta (ErrDeltaCmd). func PatchDelta(src, delta []byte) ([]byte, error) { + if len(src) == 0 || len(delta) < minDeltaSize { + return nil, ErrInvalidDelta + } + b := &bytes.Buffer{} if err := patchDelta(b, src, delta); err != nil { return nil, err @@ -239,7 +250,9 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error { remainingTargetSz := targetSz var cmd byte - dst.Grow(int(targetSz)) + + growSz := min(targetSz, maxPatchPreemptionSize) + dst.Grow(int(growSz)) for { if len(delta) == 0 { return ErrInvalidDelta @@ -403,6 +416,10 @@ func patchDeltaWriter(dst io.Writer, base io.ReaderAt, delta io.Reader, // This must be called twice on the delta data buffer, first to get the // expected source buffer size, and again to get the target buffer size. func decodeLEB128(input []byte) (uint, []byte) { + if len(input) == 0 { + return 0, input + } + var num, sz uint var b byte for { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go index fbb137de0..706d984ee 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go @@ -140,6 +140,8 @@ func asciiHexToByte(b byte) (byte, error) { return b - '0', nil case b >= 'a' && b <= 'f': return b - 'a' + 10, nil + case b >= 'A' && b <= 'F': + return b - 'A' + 10, nil default: return 0, ErrInvalidPktLen } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go index ceed5d01e..3d096e18b 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go @@ -27,7 +27,7 @@ const ( // the commit with the "mergetag" header. 
headermergetag string = "mergetag" - defaultUtf8CommitMesageEncoding MessageEncoding = "UTF-8" + defaultUtf8CommitMessageEncoding MessageEncoding = "UTF-8" ) // Hash represents the hash of an object @@ -189,7 +189,7 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { } c.Hash = o.Hash() - c.Encoding = defaultUtf8CommitMesageEncoding + c.Encoding = defaultUtf8CommitMessageEncoding reader, err := o.Reader() if err != nil { @@ -335,7 +335,7 @@ func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) { } } - if string(c.Encoding) != "" && c.Encoding != defaultUtf8CommitMesageEncoding { + if string(c.Encoding) != "" && c.Encoding != defaultUtf8CommitMessageEncoding { if _, err = fmt.Fprintf(w, "\n%s %s", headerencoding, c.Encoding); err != nil { return err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go index aa0ca15fd..c1ec8ba7a 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go @@ -57,6 +57,8 @@ func (c *commitPathIter) Next() (*Commit, error) { } func (c *commitPathIter) getNextFileCommit() (*Commit, error) { + var parentTree, currentTree *Tree + for { // Parent-commit can be nil if the current-commit is the initial commit parentCommit, parentCommitErr := c.sourceIter.Next() @@ -68,13 +70,17 @@ func (c *commitPathIter) getNextFileCommit() (*Commit, error) { parentCommit = nil } - // Fetch the trees of the current and parent commits - currentTree, currTreeErr := c.currentCommit.Tree() - if currTreeErr != nil { - return nil, currTreeErr + if parentTree == nil { + var currTreeErr error + currentTree, currTreeErr = c.currentCommit.Tree() + if currTreeErr != nil { + return nil, currTreeErr + } + } else { + currentTree = parentTree + parentTree = nil } - var parentTree *Tree if parentCommit != nil { var parentTreeErr error parentTree, parentTreeErr = parentCommit.Tree() @@ -115,7 +121,8 @@ func (c *commitPathIter) hasFileChange(changes Changes, parent *Commit) bool { // filename matches, now check if source iterator contains all commits (from all refs) if c.checkParent { - if parent != nil && isParentHash(parent.Hash, c.currentCommit) { + // Check if parent is beyond the initial commit + if parent == nil || isParentHash(parent.Hash, c.currentCommit) { return true } continue diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go index dd8fef447..3c61f626a 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go @@ -6,7 +6,7 @@ import ( "errors" "fmt" "io" - "math" + "strconv" "strings" "github.com/go-git/go-git/v5/plumbing" @@ -234,69 +234,56 @@ func (fileStats FileStats) String() string { return printStat(fileStats) } +// printStat prints the stats of changes in content of files. +// Original implementation: https://github.com/git/git/blob/1a87c842ece327d03d08096395969aca5e0a6996/diff.c#L2615 +// Parts of the output: +// |<+++/---> +// example: " main.go | 10 +++++++--- " func printStat(fileStats []FileStat) string { - padLength := float64(len(" ")) - newlineLength := float64(len("\n")) - separatorLength := float64(len("|")) - // Soft line length limit. The text length calculation below excludes - // length of the change number. 
Adding that would take it closer to 80, - // but probably not more than 80, until it's a huge number. - lineLength := 72.0 + maxGraphWidth := uint(53) + maxNameLen := 0 + maxChangeLen := 0 + + scaleLinear := func(it, width, max uint) uint { + if it == 0 || max == 0 { + return 0 + } + + return 1 + (it * (width - 1) / max) + } - // Get the longest filename and longest total change. - var longestLength float64 - var longestTotalChange float64 for _, fs := range fileStats { - if int(longestLength) < len(fs.Name) { - longestLength = float64(len(fs.Name)) + if len(fs.Name) > maxNameLen { + maxNameLen = len(fs.Name) } - totalChange := fs.Addition + fs.Deletion - if int(longestTotalChange) < totalChange { - longestTotalChange = float64(totalChange) + + changes := strconv.Itoa(fs.Addition + fs.Deletion) + if len(changes) > maxChangeLen { + maxChangeLen = len(changes) } } - // Parts of the output: - // |<+++/---> - // example: " main.go | 10 +++++++--- " - - // - leftTextLength := padLength + longestLength + padLength - - // <+++++/-----> - // Excluding number length here. - rightTextLength := padLength + padLength + newlineLength - - totalTextArea := leftTextLength + separatorLength + rightTextLength - heightOfHistogram := lineLength - totalTextArea - - // Scale the histogram. - var scaleFactor float64 - if longestTotalChange > heightOfHistogram { - // Scale down to heightOfHistogram. - scaleFactor = longestTotalChange / heightOfHistogram - } else { - scaleFactor = 1.0 - } - - finalOutput := "" + result := "" for _, fs := range fileStats { - addn := float64(fs.Addition) - deln := float64(fs.Deletion) - addc := int(math.Floor(addn/scaleFactor)) - delc := int(math.Floor(deln/scaleFactor)) - if addc < 0 { - addc = 0 - } - if delc < 0 { - delc = 0 - } - adds := strings.Repeat("+", addc) - dels := strings.Repeat("-", delc) - finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels) - } + add := uint(fs.Addition) + del := uint(fs.Deletion) + np := maxNameLen - len(fs.Name) + cp := maxChangeLen - len(strconv.Itoa(fs.Addition+fs.Deletion)) - return finalOutput + total := add + del + if total > maxGraphWidth { + add = scaleLinear(add, maxGraphWidth, total) + del = scaleLinear(del, maxGraphWidth, total) + } + + adds := strings.Repeat("+", int(add)) + dels := strings.Repeat("-", int(del)) + namePad := strings.Repeat(" ", np) + changePad := strings.Repeat(" ", cp) + + result += fmt.Sprintf(" %s%s | %s%d %s%s\n", fs.Name, namePad, changePad, total, adds, dels) + } + return result } func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/signature.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/signature.go index 91cf371f0..f9c3d306b 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/signature.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/signature.go @@ -19,6 +19,7 @@ var ( // a PKCS#7 (S/MIME) signature. x509SignatureFormat = signatureFormat{ []byte("-----BEGIN CERTIFICATE-----"), + []byte("-----BEGIN SIGNED MESSAGE-----"), } // sshSignatureFormat is the format of an SSH signature. 
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go index e9f7666b8..2e1b78915 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go @@ -7,6 +7,7 @@ import ( "io" "path" "path/filepath" + "sort" "strings" "github.com/go-git/go-git/v5/plumbing" @@ -27,6 +28,7 @@ var ( ErrFileNotFound = errors.New("file not found") ErrDirectoryNotFound = errors.New("directory not found") ErrEntryNotFound = errors.New("entry not found") + ErrEntriesNotSorted = errors.New("entries in tree are not sorted") ) // Tree is basically like a directory - it references a bunch of other trees @@ -270,7 +272,30 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) { return nil } +type TreeEntrySorter []TreeEntry + +func (s TreeEntrySorter) Len() int { + return len(s) +} + +func (s TreeEntrySorter) Less(i, j int) bool { + name1 := s[i].Name + name2 := s[j].Name + if s[i].Mode == filemode.Dir { + name1 += "/" + } + if s[j].Mode == filemode.Dir { + name2 += "/" + } + return name1 < name2 +} + +func (s TreeEntrySorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + // Encode transforms a Tree into a plumbing.EncodedObject. +// The tree entries must be sorted by name. func (t *Tree) Encode(o plumbing.EncodedObject) (err error) { o.SetType(plumbing.TreeObject) w, err := o.Writer() @@ -279,7 +304,15 @@ func (t *Tree) Encode(o plumbing.EncodedObject) (err error) { } defer ioutil.CheckClose(w, &err) + + if !sort.IsSorted(TreeEntrySorter(t.Entries)) { + return ErrEntriesNotSorted + } + for _, entry := range t.Entries { + if strings.IndexByte(entry.Name, 0) != -1 { + return fmt.Errorf("malformed filename %q", entry.Name) + } if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil { return err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go index 6e7b334cb..2adb64528 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go @@ -88,7 +88,9 @@ func (t *treeNoder) Children() ([]noder.Noder, error) { } } - return transformChildren(parent) + var err error + t.children, err = transformChildren(parent) + return t.children, err } // Returns the children of a tree as treenoders. diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/filter.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/filter.go new file mode 100644 index 000000000..145fc711c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/filter.go @@ -0,0 +1,76 @@ +package packp + +import ( + "errors" + "fmt" + "github.com/go-git/go-git/v5/plumbing" + "net/url" + "strings" +) + +var ErrUnsupportedObjectFilterType = errors.New("unsupported object filter type") + +// Filter values enable the partial clone capability which causes +// the server to omit objects that match the filter. +// +// See [Git's documentation] for more details. +// +// [Git's documentation]: https://github.com/git/git/blob/e02ecfcc534e2021aae29077a958dd11c3897e4c/Documentation/rev-list-options.txt#L948 +type Filter string + +type BlobLimitPrefix string + +const ( + BlobLimitPrefixNone BlobLimitPrefix = "" + BlobLimitPrefixKibi BlobLimitPrefix = "k" + BlobLimitPrefixMebi BlobLimitPrefix = "m" + BlobLimitPrefixGibi BlobLimitPrefix = "g" +) + +// FilterBlobNone omits all blobs. 
+func FilterBlobNone() Filter { + return "blob:none" +} + +// FilterBlobLimit omits blobs of size at least n bytes (when prefix is +// BlobLimitPrefixNone), n kibibytes (when prefix is BlobLimitPrefixKibi), +// n mebibytes (when prefix is BlobLimitPrefixMebi) or n gibibytes (when +// prefix is BlobLimitPrefixGibi). n can be zero, in which case all blobs +// will be omitted. +func FilterBlobLimit(n uint64, prefix BlobLimitPrefix) Filter { + return Filter(fmt.Sprintf("blob:limit=%d%s", n, prefix)) +} + +// FilterTreeDepth omits all blobs and trees whose depth from the root tree +// is larger or equal to depth. +func FilterTreeDepth(depth uint64) Filter { + return Filter(fmt.Sprintf("tree:%d", depth)) +} + +// FilterObjectType omits all objects which are not of the requested type t. +// Supported types are TagObject, CommitObject, TreeObject and BlobObject. +func FilterObjectType(t plumbing.ObjectType) (Filter, error) { + switch t { + case plumbing.TagObject: + fallthrough + case plumbing.CommitObject: + fallthrough + case plumbing.TreeObject: + fallthrough + case plumbing.BlobObject: + return Filter(fmt.Sprintf("object:type=%s", t.String())), nil + default: + return "", fmt.Errorf("%w: %s", ErrUnsupportedObjectFilterType, t.String()) + } +} + +// FilterCombine combines multiple Filter values together. +func FilterCombine(filters ...Filter) Filter { + var escapedFilters []string + + for _, filter := range filters { + escapedFilters = append(escapedFilters, url.QueryEscape(string(filter))) + } + + return Filter(fmt.Sprintf("combine:%s", strings.Join(escapedFilters, "+"))) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go index 0116f962e..01d95a3ab 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go @@ -114,7 +114,7 @@ func (d *Demuxer) nextPackData() ([]byte, error) { size := len(content) if size == 0 { - return nil, nil + return nil, io.EOF } else if size > d.max { return nil, ErrMaxPackedExceeded } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go index a9ddb538b..d760ad660 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go @@ -120,6 +120,9 @@ func (r *ServerResponse) decodeACKLine(line []byte) error { } sp := bytes.Index(line, []byte(" ")) + if sp+41 > len(line) { + return fmt.Errorf("malformed ACK %q", line) + } h := plumbing.NewHash(string(line[sp+1 : sp+41])) r.ACKs = append(r.ACKs, h) return nil diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go index 344f8c7e3..ef4e08a10 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go @@ -17,6 +17,7 @@ type UploadRequest struct { Wants []plumbing.Hash Shallows []plumbing.Hash Depth Depth + Filter Filter } // Depth values stores the desired depth of the requested packfile: see diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go index c451e2316..8b19c0f67 100644 --- 
a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go @@ -132,6 +132,17 @@ func (e *ulReqEncoder) encodeDepth() stateFn { return nil } + return e.encodeFilter +} + +func (e *ulReqEncoder) encodeFilter() stateFn { + if filter := e.data.Filter; filter != "" { + if err := e.pe.Encodef("filter %s\n", filter); err != nil { + e.err = fmt.Errorf("encoding filter %s: %s", filter, err) + return nil + } + } + return e.encodeFlush } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/reference.go b/vendor/github.com/go-git/go-git/v5/plumbing/reference.go index ddba93029..4daa34164 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/reference.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/reference.go @@ -188,7 +188,7 @@ func (r ReferenceName) Validate() error { isBranch := r.IsBranch() isTag := r.IsTag() - for _, part := range parts { + for i, part := range parts { // rule 6 if len(part) == 0 { return ErrInvalidReferenceName @@ -205,7 +205,7 @@ func (r ReferenceName) Validate() error { return ErrInvalidReferenceName } - if (isBranch || isTag) && strings.HasPrefix(part, "-") { // branches & tags can't start with - + if (isBranch || isTag) && strings.HasPrefix(part, "-") && (i == 2) { // branches & tags can't start with - return ErrInvalidReferenceName } } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go index b05437fbf..fae1aa98c 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go @@ -19,6 +19,7 @@ import ( "fmt" "io" "net/url" + "path/filepath" "strconv" "strings" @@ -295,7 +296,11 @@ func parseFile(endpoint string) (*Endpoint, bool) { return nil, false } - path := endpoint + path, err := filepath.Abs(endpoint) + if err != nil { + return nil, false + } + return &Endpoint{ Protocol: "file", Path: path, diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go index 38714e2ad..d921d0a5a 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go @@ -7,6 +7,7 @@ import ( "io" "os" "path/filepath" + "runtime" "strings" "github.com/go-git/go-git/v5/plumbing/transport" @@ -95,7 +96,23 @@ func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.Auth } } - return &command{cmd: execabs.Command(cmd, ep.Path)}, nil + return &command{cmd: execabs.Command(cmd, adjustPathForWindows(ep.Path))}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// On Windows, the path that results from a file: URL has a leading slash. 
This +// has to be removed if there's a drive letter +func adjustPathForWindows(p string) string { + if runtime.GOOS != "windows" { + return p + } + if len(p) >= 3 && p[0] == '/' && isDriveLetter(p[1]) && p[2] == ':' { + return p[1:] + } + return p } type command struct { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go index 54126febf..df01890b2 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go @@ -17,6 +17,7 @@ import ( "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/protocol/packp" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/utils/ioutil" "github.com/golang/groupcache/lru" @@ -24,7 +25,7 @@ import ( // it requires a bytes.Buffer, because we need to know the length func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string, requestType string) { - req.Header.Add("User-Agent", "git/1.0") + req.Header.Add("User-Agent", capability.DefaultAgent()) req.Header.Add("Host", host) // host:port if content == nil { @@ -91,9 +92,9 @@ func advertisedReferences(ctx context.Context, s *session, serviceName string) ( } type client struct { - c *http.Client + client *http.Client transports *lru.Cache - m sync.RWMutex + mutex sync.RWMutex } // ClientOptions holds user configurable options for the client. @@ -147,7 +148,7 @@ func NewClientWithOptions(c *http.Client, opts *ClientOptions) transport.Transpo } } cl := &client{ - c: c, + client: c, } if opts != nil { @@ -234,10 +235,10 @@ func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (* // if the client wasn't configured to have a cache for transports then just configure // the transport and use it directly, otherwise try to use the cache. 
if c.transports == nil { - tr, ok := c.c.Transport.(*http.Transport) + tr, ok := c.client.Transport.(*http.Transport) if !ok { return nil, fmt.Errorf("expected underlying client transport to be of type: %s; got: %s", - reflect.TypeOf(transport), reflect.TypeOf(c.c.Transport)) + reflect.TypeOf(transport), reflect.TypeOf(c.client.Transport)) } transport = tr.Clone() @@ -258,7 +259,7 @@ func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (* transport, found = c.fetchTransport(transportOpts) if !found { - transport = c.c.Transport.(*http.Transport).Clone() + transport = c.client.Transport.(*http.Transport).Clone() configureTransport(transport, ep) c.addTransport(transportOpts, transport) } @@ -266,12 +267,12 @@ func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (* httpClient = &http.Client{ Transport: transport, - CheckRedirect: c.c.CheckRedirect, - Jar: c.c.Jar, - Timeout: c.c.Timeout, + CheckRedirect: c.client.CheckRedirect, + Jar: c.client.Jar, + Timeout: c.client.Timeout, } } else { - httpClient = c.c + httpClient = c.client } s := &session{ @@ -430,11 +431,11 @@ func NewErr(r *http.Response) error { switch r.StatusCode { case http.StatusUnauthorized: - return transport.ErrAuthenticationRequired + return fmt.Errorf("%w: %s", transport.ErrAuthenticationRequired, reason) case http.StatusForbidden: - return transport.ErrAuthorizationFailed + return fmt.Errorf("%w: %s", transport.ErrAuthorizationFailed, reason) case http.StatusNotFound: - return transport.ErrRepositoryNotFound + return fmt.Errorf("%w: %s", transport.ErrRepositoryNotFound, reason) } return plumbing.NewUnexpectedError(&Err{r, reason}) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go index 052f3c8e2..c8db38920 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go @@ -14,21 +14,21 @@ type transportOptions struct { } func (c *client) addTransport(opts transportOptions, transport *http.Transport) { - c.m.Lock() + c.mutex.Lock() c.transports.Add(opts, transport) - c.m.Unlock() + c.mutex.Unlock() } func (c *client) removeTransport(opts transportOptions) { - c.m.Lock() + c.mutex.Lock() c.transports.Remove(opts) - c.m.Unlock() + c.mutex.Unlock() } func (c *client) fetchTransport(opts transportOptions) (*http.Transport, bool) { - c.m.RLock() + c.mutex.RLock() t, ok := c.transports.Get(opts) - c.m.RUnlock() + c.mutex.RUnlock() if !ok { return nil, false } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go index e7e2b075e..f03a91c6d 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go @@ -40,8 +40,16 @@ func (l *fsLoader) Load(ep *transport.Endpoint) (storer.Storer, error) { return nil, err } - if _, err := fs.Stat("config"); err != nil { - return nil, transport.ErrRepositoryNotFound + var bare bool + if _, err := fs.Stat("config"); err == nil { + bare = true + } + + if !bare { + // do not use git.GitDirName due to import cycle + if _, err := fs.Stat(".git"); err != nil { + return nil, transport.ErrRepositoryNotFound + } } return filesystem.NewStorage(fs, cache.NewObjectLRUDefault()), nil diff --git 
a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go index 46fda73fa..05dea448f 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go @@ -49,7 +49,9 @@ type runner struct { func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { c := &command{command: cmd, endpoint: ep, config: r.config} if auth != nil { - c.setAuth(auth) + if err := c.setAuth(auth); err != nil { + return nil, err + } } if err := c.connect(); err != nil { diff --git a/vendor/github.com/go-git/go-git/v5/remote.go b/vendor/github.com/go-git/go-git/v5/remote.go index 0cb70bc00..e2c734e75 100644 --- a/vendor/github.com/go-git/go-git/v5/remote.go +++ b/vendor/github.com/go-git/go-git/v5/remote.go @@ -9,6 +9,7 @@ import ( "time" "github.com/go-git/go-billy/v5/osfs" + "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/internal/url" "github.com/go-git/go-git/v5/plumbing" @@ -82,7 +83,7 @@ func (r *Remote) String() string { var fetch, push string if len(r.c.URLs) > 0 { fetch = r.c.URLs[0] - push = r.c.URLs[0] + push = r.c.URLs[len(r.c.URLs)-1] } return fmt.Sprintf("%s\t%s (fetch)\n%[1]s\t%[3]s (push)", r.c.Name, fetch, push) @@ -109,8 +110,8 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name) } - if o.RemoteURL == "" { - o.RemoteURL = r.c.URLs[0] + if o.RemoteURL == "" && len(r.c.URLs) > 0 { + o.RemoteURL = r.c.URLs[len(r.c.URLs)-1] } s, err := newSendPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions) @@ -470,6 +471,14 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen } } + var updatedPrune bool + if o.Prune { + updatedPrune, err = r.pruneRemotes(o.RefSpecs, localRefs, remoteRefs) + if err != nil { + return nil, err + } + } + updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, specToRefs, o.Tags, o.Force) if err != nil { return nil, err @@ -482,8 +491,19 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen } } - if !updated { - return remoteRefs, NoErrAlreadyUpToDate + if !updated && !updatedPrune { + // No references updated, but may have fetched new objects, check if we now have any of our wants + for _, hash := range req.Wants { + exists, _ := objectExists(r.s, hash) + if exists { + updated = true + break + } + } + + if !updated { + return remoteRefs, NoErrAlreadyUpToDate + } } return remoteRefs, nil @@ -574,6 +594,27 @@ func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.Upl return err } +func (r *Remote) pruneRemotes(specs []config.RefSpec, localRefs []*plumbing.Reference, remoteRefs memory.ReferenceStorage) (bool, error) { + var updatedPrune bool + for _, spec := range specs { + rev := spec.Reverse() + for _, ref := range localRefs { + if !rev.Match(ref.Name()) { + continue + } + _, err := remoteRefs.Reference(rev.Dst(ref.Name())) + if errors.Is(err, plumbing.ErrReferenceNotFound) { + updatedPrune = true + err := r.s.RemoveReference(ref.Name()) + if err != nil { + return false, err + } + } + } + } + return updatedPrune, nil +} + func (r *Remote) addReferencesToUpdate( refspecs []config.RefSpec, localRefs []*plumbing.Reference, @@ -849,17 +890,12 @@ func getHavesFromRef( return nil } - // No need to load the commit if we 
know the remote already - // has this hash. - if remoteRefs[h] { - haves[h] = true - return nil - } - commit, err := object.GetCommit(s, h) if err != nil { - // Ignore the error if this isn't a commit. - haves[ref.Hash()] = true + if !errors.Is(err, plumbing.ErrObjectNotFound) { + // Ignore the error if this isn't a commit. + haves[ref.Hash()] = true + } return nil } @@ -1099,7 +1135,7 @@ func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash, earlies } found := false - // stop iterating at the earlist shallow commit, ignoring its parents + // stop iterating at the earliest shallow commit, ignoring its parents // note: when pull depth is smaller than the number of new changes on the remote, this fails due to missing parents. // as far as i can tell, without the commits in-between the shallow pull and the earliest shallow, there's no // real way of telling whether it will be a fast-forward merge. diff --git a/vendor/github.com/go-git/go-git/v5/repository.go b/vendor/github.com/go-git/go-git/v5/repository.go index 1524a6913..200098e7a 100644 --- a/vendor/github.com/go-git/go-git/v5/repository.go +++ b/vendor/github.com/go-git/go-git/v5/repository.go @@ -51,19 +51,21 @@ var ( // ErrFetching is returned when the packfile could not be downloaded ErrFetching = errors.New("unable to fetch packfile") - ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") - ErrRepositoryNotExists = errors.New("repository does not exist") - ErrRepositoryIncomplete = errors.New("repository's commondir path does not exist") - ErrRepositoryAlreadyExists = errors.New("repository already exists") - ErrRemoteNotFound = errors.New("remote not found") - ErrRemoteExists = errors.New("remote already exists") - ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'") - ErrWorktreeNotProvided = errors.New("worktree should be provided") - ErrIsBareRepository = errors.New("worktree not available in a bare repository") - ErrUnableToResolveCommit = errors.New("unable to resolve commit") - ErrPackedObjectsNotSupported = errors.New("packed objects not supported") - ErrSHA256NotSupported = errors.New("go-git was not compiled with SHA256 support") - ErrAlternatePathNotSupported = errors.New("alternate path must use the file scheme") + ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") + ErrRepositoryNotExists = errors.New("repository does not exist") + ErrRepositoryIncomplete = errors.New("repository's commondir path does not exist") + ErrRepositoryAlreadyExists = errors.New("repository already exists") + ErrRemoteNotFound = errors.New("remote not found") + ErrRemoteExists = errors.New("remote already exists") + ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'") + ErrWorktreeNotProvided = errors.New("worktree should be provided") + ErrIsBareRepository = errors.New("worktree not available in a bare repository") + ErrUnableToResolveCommit = errors.New("unable to resolve commit") + ErrPackedObjectsNotSupported = errors.New("packed objects not supported") + ErrSHA256NotSupported = errors.New("go-git was not compiled with SHA256 support") + ErrAlternatePathNotSupported = errors.New("alternate path must use the file scheme") + ErrUnsupportedMergeStrategy = errors.New("unsupported merge strategy") + ErrFastForwardMergeNotPossible = errors.New("not possible to fast-forward merge changes") ) // Repository represents a git repository @@ -954,7 +956,7 @@ func (r *Repository) clone(ctx context.Context, o 
*CloneOptions) error { } if o.RecurseSubmodules != NoRecurseSubmodules { - if err := w.updateSubmodules(&SubmoduleUpdateOptions{ + if err := w.updateSubmodules(ctx, &SubmoduleUpdateOptions{ RecurseSubmodules: o.RecurseSubmodules, Depth: func() int { if o.ShallowSubmodules { @@ -1035,7 +1037,7 @@ func (r *Repository) setIsBare(isBare bool) error { return r.Storer.SetConfig(cfg) } -func (r *Repository) updateRemoteConfigIfNeeded(o *CloneOptions, c *config.RemoteConfig, head *plumbing.Reference) error { +func (r *Repository) updateRemoteConfigIfNeeded(o *CloneOptions, c *config.RemoteConfig, _ *plumbing.Reference) error { if !o.SingleBranch { return nil } @@ -1769,8 +1771,43 @@ func (r *Repository) RepackObjects(cfg *RepackConfig) (err error) { return nil } +// Merge merges the reference branch into the current branch. +// +// If the merge is not possible (or supported) returns an error without changing +// the HEAD for the current branch. Possible errors include: +// - The merge strategy is not supported. +// - The specific strategy cannot be used (e.g. using FastForwardMerge when one is not possible). +func (r *Repository) Merge(ref plumbing.Reference, opts MergeOptions) error { + if opts.Strategy != FastForwardMerge { + return ErrUnsupportedMergeStrategy + } + + // Ignore error as not having a shallow list is optional here. + shallowList, _ := r.Storer.Shallow() + var earliestShallow *plumbing.Hash + if len(shallowList) > 0 { + earliestShallow = &shallowList[0] + } + + head, err := r.Head() + if err != nil { + return err + } + + ff, err := isFastForward(r.Storer, head.Hash(), ref.Hash(), earliestShallow) + if err != nil { + return err + } + + if !ff { + return ErrFastForwardMergeNotPossible + } + + return r.Storer.SetReference(plumbing.NewHashReference(head.Name(), ref.Hash())) +} + // createNewObjectPack is a helper for RepackObjects taking care -// of creating a new pack. It is used so the the PackfileWriter +// of creating a new pack. It is used so the PackfileWriter // deferred close has the right scope. func (r *Repository) createNewObjectPack(cfg *RepackConfig) (h plumbing.Hash, err error) { ow := newObjectWalker(r.Storer) diff --git a/vendor/github.com/go-git/go-git/v5/signer.go b/vendor/github.com/go-git/go-git/v5/signer.go new file mode 100644 index 000000000..e3ef7ebd3 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/signer.go @@ -0,0 +1,33 @@ +package git + +import ( + "io" + + "github.com/go-git/go-git/v5/plumbing" +) + +// signableObject is an object which can be signed. +type signableObject interface { + EncodeWithoutSignature(o plumbing.EncodedObject) error +} + +// Signer is an interface for signing git objects. +// message is a reader containing the encoded object to be signed. +// Implementors should return the encoded signature and an error if any. +// See https://git-scm.com/docs/gitformat-signature for more information. 
+type Signer interface { + Sign(message io.Reader) ([]byte, error) +} + +func signObject(signer Signer, obj signableObject) ([]byte, error) { + encoded := &plumbing.MemoryObject{} + if err := obj.EncodeWithoutSignature(encoded); err != nil { + return nil, err + } + r, err := encoded.Reader() + if err != nil { + return nil, err + } + + return signer.Sign(r) +} diff --git a/vendor/github.com/go-git/go-git/v5/status.go b/vendor/github.com/go-git/go-git/v5/status.go index 7f18e0227..d14f7e657 100644 --- a/vendor/github.com/go-git/go-git/v5/status.go +++ b/vendor/github.com/go-git/go-git/v5/status.go @@ -4,6 +4,9 @@ import ( "bytes" "fmt" "path/filepath" + + mindex "github.com/go-git/go-git/v5/utils/merkletrie/index" + "github.com/go-git/go-git/v5/utils/merkletrie/noder" ) // Status represents the current status of a Worktree. @@ -77,3 +80,69 @@ const ( Copied StatusCode = 'C' UpdatedButUnmerged StatusCode = 'U' ) + +// StatusStrategy defines the different types of strategies when processing +// the worktree status. +type StatusStrategy int + +const ( + // TODO: (V6) Review the default status strategy. + // TODO: (V6) Review the type used to represent Status, to enable lazy + // processing of statuses going direct to the backing filesystem. + defaultStatusStrategy = Empty + + // Empty starts its status map from empty. Missing entries for a given + // path means that the file is untracked. This causes a known issue (#119) + // whereby unmodified files can be incorrectly reported as untracked. + // + // This can be used when returning the changed state within a modified Worktree. + // For example, to check whether the current worktree is clean. + Empty StatusStrategy = 0 + // Preload goes through all existing nodes from the index and add them to the + // status map as unmodified. This is currently the most reliable strategy + // although it comes at a performance cost in large repositories. + // + // This method is recommended when fetching the status of unmodified files. + // For example, to confirm the status of a specific file that is either + // untracked or unmodified. + Preload StatusStrategy = 1 +) + +func (s StatusStrategy) new(w *Worktree) (Status, error) { + switch s { + case Preload: + return preloadStatus(w) + case Empty: + return make(Status), nil + } + return nil, fmt.Errorf("%w: %+v", ErrUnsupportedStatusStrategy, s) +} + +func preloadStatus(w *Worktree) (Status, error) { + idx, err := w.r.Storer.Index() + if err != nil { + return nil, err + } + + idxRoot := mindex.NewRootNode(idx) + nodes := []noder.Noder{idxRoot} + + status := make(Status) + for len(nodes) > 0 { + var node noder.Noder + node, nodes = nodes[0], nodes[1:] + if node.IsDir() { + children, err := node.Children() + if err != nil { + return nil, err + } + nodes = append(nodes, children...) + continue + } + fs := status.File(node.Name()) + fs.Worktree = Unmodified + fs.Staging = Unmodified + } + + return status, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go index 31c469481..72c9ccfc1 100644 --- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go @@ -72,6 +72,9 @@ var ( // ErrIsDir is returned when a reference file is attempting to be read, // but the path specified is a directory. 
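A hedged sketch of implementing the Signer interface introduced in signer.go above. The names armoredSigner and signedCommit are hypothetical, and the OpenPGP call mirrors the gpgSigner that worktree_commit.go wires up further down in this diff:

```go
package example

import (
	"bytes"
	"io"

	"github.com/ProtonMail/go-crypto/openpgp"
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
)

// armoredSigner implements git.Signer by writing an armored OpenPGP
// detached signature over the encoded commit.
type armoredSigner struct {
	entity *openpgp.Entity // signing key, loaded elsewhere
}

func (s *armoredSigner) Sign(message io.Reader) ([]byte, error) {
	var buf bytes.Buffer
	if err := openpgp.ArmoredDetachSign(&buf, s.entity, message, nil); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// signedCommit shows where the signer plugs in: CommitOptions.Signer takes
// precedence over SignKey when both are set.
func signedCommit(w *git.Worktree, entity *openpgp.Entity, author object.Signature) (plumbing.Hash, error) {
	return w.Commit("signed commit", &git.CommitOptions{
		Author: &author,
		Signer: &armoredSigner{entity: entity},
	})
}
```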
ErrIsDir = errors.New("reference path is a directory") + // ErrEmptyRefFile is returned when a reference file is attempted to be read, + // but the file is empty + ErrEmptyRefFile = errors.New("ref file is empty") ) // Options holds configuration for the storage. @@ -249,7 +252,7 @@ func (d *DotGit) objectPacks() ([]plumbing.Hash, error) { continue } - h := plumbing.NewHash(n[5 : len(n)-5]) //pack-(hash).pack + h := plumbing.NewHash(n[5 : len(n)-5]) // pack-(hash).pack if h.IsZero() { // Ignore files with badly-formatted names. continue @@ -661,18 +664,33 @@ func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Ref return nil, err } + if len(b) == 0 { + return nil, ErrEmptyRefFile + } + line := strings.TrimSpace(string(b)) return plumbing.NewReferenceFromStrings(name, line), nil } +// checkReferenceAndTruncate reads the reference from the given file, or the `pack-refs` file if +// the file was empty. Then it checks that the old reference matches the stored reference and +// truncates the file. func (d *DotGit) checkReferenceAndTruncate(f billy.File, old *plumbing.Reference) error { if old == nil { return nil } + ref, err := d.readReferenceFrom(f, old.Name().String()) + if errors.Is(err, ErrEmptyRefFile) { + // This may happen if the reference is being read from a newly created file. + // In that case, try getting the reference from the packed refs file. + ref, err = d.packedRef(old.Name()) + } + if err != nil { return err } + if ref.Hash() != old.Hash() { return storage.ErrReferenceHasChanged } @@ -701,7 +719,11 @@ func (d *DotGit) SetRef(r, old *plumbing.Reference) error { // Symbolic references are resolved and included in the output. func (d *DotGit) Refs() ([]*plumbing.Reference, error) { var refs []*plumbing.Reference - var seen = make(map[plumbing.ReferenceName]bool) + seen := make(map[plumbing.ReferenceName]bool) + if err := d.addRefFromHEAD(&refs); err != nil { + return nil, err + } + if err := d.addRefsFromRefDir(&refs, seen); err != nil { return nil, err } @@ -710,10 +732,6 @@ func (d *DotGit) Refs() ([]*plumbing.Reference, error) { return nil, err } - if err := d.addRefFromHEAD(&refs); err != nil { - return nil, err - } - return refs, nil } @@ -815,7 +833,8 @@ func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy. 
} func (d *DotGit) openAndLockPackedRefs(doCreate bool) ( - pr billy.File, err error) { + pr billy.File, err error, +) { var f billy.File defer func() { if err != nil && f != nil { @@ -1020,7 +1039,7 @@ func (d *DotGit) readReferenceFile(path, name string) (ref *plumbing.Reference, func (d *DotGit) CountLooseRefs() (int, error) { var refs []*plumbing.Reference - var seen = make(map[plumbing.ReferenceName]bool) + seen := make(map[plumbing.ReferenceName]bool) if err := d.addRefsFromRefDir(&refs, seen); err != nil { return 0, err } diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go index a19176f83..a86ef3e2e 100644 --- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go @@ -48,7 +48,7 @@ func (s *IndexStorage) Index() (i *index.Index, err error) { defer ioutil.CheckClose(f, &err) - d := index.NewDecoder(bufio.NewReader(f)) + d := index.NewDecoder(f) err = d.Decode(idx) return idx, err } diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go index e812fe934..91b4aceae 100644 --- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go @@ -431,13 +431,13 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb defer ioutil.CheckClose(w, &err) - s.objectCache.Put(obj) - bufp := copyBufferPool.Get().(*[]byte) buf := *bufp _, err = io.CopyBuffer(w, r, buf) copyBufferPool.Put(bufp) + s.objectCache.Put(obj) + return obj, err } diff --git a/vendor/github.com/go-git/go-git/v5/submodule.go b/vendor/github.com/go-git/go-git/v5/submodule.go index 84f020dc7..afabb6aca 100644 --- a/vendor/github.com/go-git/go-git/v5/submodule.go +++ b/vendor/github.com/go-git/go-git/v5/submodule.go @@ -214,10 +214,10 @@ func (s *Submodule) update(ctx context.Context, o *SubmoduleUpdateOptions, force return err } - return s.doRecursiveUpdate(r, o) + return s.doRecursiveUpdate(ctx, r, o) } -func (s *Submodule) doRecursiveUpdate(r *Repository, o *SubmoduleUpdateOptions) error { +func (s *Submodule) doRecursiveUpdate(ctx context.Context, r *Repository, o *SubmoduleUpdateOptions) error { if o.RecurseSubmodules == NoRecurseSubmodules { return nil } @@ -236,7 +236,7 @@ func (s *Submodule) doRecursiveUpdate(r *Repository, o *SubmoduleUpdateOptions) *new = *o new.RecurseSubmodules-- - return l.Update(new) + return l.UpdateContext(ctx, new) } func (s *Submodule) fetchAndCheckout( diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go index cc6dc8907..450feb4ba 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go @@ -1,12 +1,17 @@ package merkletrie import ( + "errors" "fmt" "io" "github.com/go-git/go-git/v5/utils/merkletrie/noder" ) +var ( + ErrEmptyFileName = errors.New("empty filename in tree entry") +) + // Action values represent the kind of things a Change can represent: // insertion, deletions or modifications of files. 
type Action int @@ -121,6 +126,10 @@ func (l *Changes) AddRecursiveDelete(root noder.Path) error { type noderToChangeFn func(noder.Path) Change // NewInsert or NewDelete func (l *Changes) addRecursive(root noder.Path, ctor noderToChangeFn) error { + if root.String() == "" { + return ErrEmptyFileName + } + if !root.IsDir() { l.Add(ctor(root)) return nil diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go index 8090942dd..4ef2d9907 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go @@ -11,7 +11,7 @@ package merkletrie // corresponding changes and move the iterators further over both // trees. // -// The table bellow show all the possible comparison results, along +// The table below shows all the possible comparison results, along // with what changes should we produce and how to advance the // iterators. // diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go index 7bba0d03e..33800627d 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go @@ -29,6 +29,8 @@ type node struct { hash []byte children []noder.Noder isDir bool + mode os.FileMode + size int64 } // NewRootNode returns the root node based on a given billy.Filesystem. @@ -48,8 +50,15 @@ func NewRootNode( // difftree algorithm will detect changes in the contents of files and also in // their mode. // +// Please note that the hash is calculated on first invocation of Hash(), +// meaning that it will not update when the underlying file changes +// between invocations. +// // The hash of a directory is always a 24-bytes slice of zero values func (n *node) Hash() []byte { + if n.hash == nil { + n.calculateHash() + } return n.hash } @@ -121,81 +130,74 @@ func (n *node) calculateChildren() error { func (n *node) newChildNode(file os.FileInfo) (*node, error) { path := path.Join(n.path, file.Name()) - hash, err := n.calculateHash(path, file) - if err != nil { - return nil, err - } - node := &node{ fs: n.fs, submodules: n.submodules, path: path, - hash: hash, isDir: file.IsDir(), + size: file.Size(), + mode: file.Mode(), } - if hash, isSubmodule := n.submodules[path]; isSubmodule { - node.hash = append(hash[:], filemode.Submodule.Bytes()...) + if _, isSubmodule := n.submodules[path]; isSubmodule { node.isDir = false } return node, nil } -func (n *node) calculateHash(path string, file os.FileInfo) ([]byte, error) { - if file.IsDir() { - return make([]byte, 24), nil +func (n *node) calculateHash() { + if n.isDir { + n.hash = make([]byte, 24) + return + } + mode, err := filemode.NewFromOSFileMode(n.mode) + if err != nil { + n.hash = plumbing.ZeroHash[:] + return + } + if submoduleHash, isSubmodule := n.submodules[n.path]; isSubmodule { + n.hash = append(submoduleHash[:], filemode.Submodule.Bytes()...) 
+ return } - var hash plumbing.Hash - var err error - if file.Mode()&os.ModeSymlink != 0 { - hash, err = n.doCalculateHashForSymlink(path, file) + if n.mode&os.ModeSymlink != 0 { + hash = n.doCalculateHashForSymlink() } else { - hash, err = n.doCalculateHashForRegular(path, file) + hash = n.doCalculateHashForRegular() } - - if err != nil { - return nil, err - } - - mode, err := filemode.NewFromOSFileMode(file.Mode()) - if err != nil { - return nil, err - } - - return append(hash[:], mode.Bytes()...), nil + n.hash = append(hash[:], mode.Bytes()...) } -func (n *node) doCalculateHashForRegular(path string, file os.FileInfo) (plumbing.Hash, error) { - f, err := n.fs.Open(path) +func (n *node) doCalculateHashForRegular() plumbing.Hash { + f, err := n.fs.Open(n.path) if err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } defer f.Close() - h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) + h := plumbing.NewHasher(plumbing.BlobObject, n.size) if _, err := io.Copy(h, f); err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } - return h.Sum(), nil + return h.Sum() } -func (n *node) doCalculateHashForSymlink(path string, file os.FileInfo) (plumbing.Hash, error) { - target, err := n.fs.Readlink(path) +func (n *node) doCalculateHashForSymlink() plumbing.Hash { + target, err := n.fs.Readlink(n.path) if err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } - h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) + h := plumbing.NewHasher(plumbing.BlobObject, n.size) if _, err := h.Write([]byte(target)); err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } - return h.Sum(), nil + return h.Sum() } func (n *node) String() string { diff --git a/vendor/github.com/go-git/go-git/v5/utils/sync/bufio.go b/vendor/github.com/go-git/go-git/v5/utils/sync/bufio.go index 5009ea804..42f60f7ea 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/sync/bufio.go +++ b/vendor/github.com/go-git/go-git/v5/utils/sync/bufio.go @@ -13,7 +13,7 @@ var bufioReader = sync.Pool{ } // GetBufioReader returns a *bufio.Reader that is managed by a sync.Pool. -// Returns a bufio.Reader that is resetted with reader and ready for use. +// Returns a bufio.Reader that is reset with reader and ready for use. // // After use, the *bufio.Reader should be put back into the sync.Pool // by calling PutBufioReader. diff --git a/vendor/github.com/go-git/go-git/v5/utils/sync/bytes.go b/vendor/github.com/go-git/go-git/v5/utils/sync/bytes.go index dd06fc0bc..c67b97837 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/sync/bytes.go +++ b/vendor/github.com/go-git/go-git/v5/utils/sync/bytes.go @@ -35,7 +35,7 @@ func PutByteSlice(buf *[]byte) { } // GetBytesBuffer returns a *bytes.Buffer that is managed by a sync.Pool. -// Returns a buffer that is resetted and ready for use. +// Returns a buffer that is reset and ready for use. // // After use, the *bytes.Buffer should be put back into the sync.Pool // by calling PutBytesBuffer. diff --git a/vendor/github.com/go-git/go-git/v5/utils/sync/zlib.go b/vendor/github.com/go-git/go-git/v5/utils/sync/zlib.go index c61388595..edf674d85 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/sync/zlib.go +++ b/vendor/github.com/go-git/go-git/v5/utils/sync/zlib.go @@ -35,7 +35,7 @@ type ZLibReader struct { } // GetZlibReader returns a ZLibReader that is managed by a sync.Pool. 
-// Returns a ZLibReader that is resetted using a dictionary that is +// Returns a ZLibReader that is reset using a dictionary that is // also managed by a sync.Pool. // // After use, the ZLibReader should be put back into the sync.Pool @@ -58,7 +58,7 @@ func PutZlibReader(z ZLibReader) { } // GetZlibWriter returns a *zlib.Writer that is managed by a sync.Pool. -// Returns a writer that is resetted with w and ready for use. +// Returns a writer that is reset with w and ready for use. // // After use, the *zlib.Writer should be put back into the sync.Pool // by calling PutZlibWriter. diff --git a/vendor/github.com/go-git/go-git/v5/worktree.go b/vendor/github.com/go-git/go-git/v5/worktree.go index ad525c1a4..dded08e99 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree.go +++ b/vendor/github.com/go-git/go-git/v5/worktree.go @@ -25,11 +25,12 @@ import ( ) var ( - ErrWorktreeNotClean = errors.New("worktree is not clean") - ErrSubmoduleNotFound = errors.New("submodule not found") - ErrUnstagedChanges = errors.New("worktree contains unstaged changes") - ErrGitModulesSymlink = errors.New(gitmodulesFile + " is a symlink") - ErrNonFastForwardUpdate = errors.New("non-fast-forward update") + ErrWorktreeNotClean = errors.New("worktree is not clean") + ErrSubmoduleNotFound = errors.New("submodule not found") + ErrUnstagedChanges = errors.New("worktree contains unstaged changes") + ErrGitModulesSymlink = errors.New(gitmodulesFile + " is a symlink") + ErrNonFastForwardUpdate = errors.New("non-fast-forward update") + ErrRestoreWorktreeOnlyNotSupported = errors.New("worktree only is not supported") ) // Worktree represents a git worktree. @@ -139,7 +140,7 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { } if o.RecurseSubmodules != NoRecurseSubmodules { - return w.updateSubmodules(&SubmoduleUpdateOptions{ + return w.updateSubmodules(ctx, &SubmoduleUpdateOptions{ RecurseSubmodules: o.RecurseSubmodules, Auth: o.Auth, }) @@ -148,13 +149,13 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { return nil } -func (w *Worktree) updateSubmodules(o *SubmoduleUpdateOptions) error { +func (w *Worktree) updateSubmodules(ctx context.Context, o *SubmoduleUpdateOptions) error { s, err := w.Submodules() if err != nil { return err } o.Init = true - return s.Update(o) + return s.UpdateContext(ctx, o) } // Checkout switch branches or restore working tree files. 
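A brief sketch of what the context plumbing above buys callers: the context passed to PullContext now also bounds the recursive submodule updates rather than only the top-level fetch. The repository path, remote name, and timeout are illustrative assumptions:

```go
package main

import (
	"context"
	"log"
	"time"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen(".") // hypothetical repository with submodules
	if err != nil {
		log.Fatal(err)
	}
	w, err := repo.Worktree()
	if err != nil {
		log.Fatal(err)
	}

	// The deadline now cancels submodule updates as well, not just the
	// top-level pull.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	err = w.PullContext(ctx, &git.PullOptions{
		RemoteName:        "origin",
		RecurseSubmodules: git.DefaultSubmoduleRecursionDepth,
	})
	if err != nil && err != git.NoErrAlreadyUpToDate {
		log.Fatal(err)
	}
}
```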
@@ -227,20 +228,17 @@ func (w *Worktree) createBranch(opts *CheckoutOptions) error { } func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing.Hash, error) { - if !opts.Hash.IsZero() { - return opts.Hash, nil + hash := opts.Hash + if hash.IsZero() { + b, err := w.r.Reference(opts.Branch, true) + if err != nil { + return plumbing.ZeroHash, err + } + + hash = b.Hash() } - b, err := w.r.Reference(opts.Branch, true) - if err != nil { - return plumbing.ZeroHash, err - } - - if !b.Name().IsTag() { - return b.Hash(), nil - } - - o, err := w.r.Object(plumbing.AnyObject, b.Hash()) + o, err := w.r.Object(plumbing.AnyObject, hash) if err != nil { return plumbing.ZeroHash, err } @@ -248,7 +246,7 @@ func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing switch o := o.(type) { case *object.Tag: if o.TargetType != plumbing.CommitObject { - return plumbing.ZeroHash, fmt.Errorf("unsupported tag object target %q", o.TargetType) + return plumbing.ZeroHash, fmt.Errorf("%w: tag target %q", object.ErrUnsupportedObject, o.TargetType) } return o.Target, nil @@ -256,7 +254,7 @@ func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing return o.Hash, nil } - return plumbing.ZeroHash, fmt.Errorf("unsupported tag target %q", o.Type()) + return plumbing.ZeroHash, fmt.Errorf("%w: %q", object.ErrUnsupportedObject, o.Type()) } func (w *Worktree) setHEADToCommit(commit plumbing.Hash) error { @@ -310,13 +308,13 @@ func (w *Worktree) ResetSparsely(opts *ResetOptions, dirs []string) error { } if opts.Mode == MixedReset || opts.Mode == MergeReset || opts.Mode == HardReset { - if err := w.resetIndex(t, dirs); err != nil { + if err := w.resetIndex(t, dirs, opts.Files); err != nil { return err } } if opts.Mode == MergeReset || opts.Mode == HardReset { - if err := w.resetWorktree(t); err != nil { + if err := w.resetWorktree(t, opts.Files); err != nil { return err } } @@ -324,20 +322,52 @@ func (w *Worktree) ResetSparsely(opts *ResetOptions, dirs []string) error { return nil } +// Restore restores specified files in the working tree or stage with contents from +// a restore source. If a path is tracked but does not exist in the restore, +// source, it will be removed to match the source. +// +// If Staged and Worktree are true, then the restore source will be the index. +// If only Staged is true, then the restore source will be HEAD. +// If only Worktree is true or neither Staged nor Worktree are true, will +// result in ErrRestoreWorktreeOnlyNotSupported because restoring the working +// tree while leaving the stage untouched is not currently supported. +// +// Restore with no files specified will return ErrNoRestorePaths. +func (w *Worktree) Restore(o *RestoreOptions) error { + if err := o.Validate(); err != nil { + return err + } + + if o.Staged { + opts := &ResetOptions{ + Files: o.Files, + } + + if o.Worktree { + // If we are doing both Worktree and Staging then it is a hard reset + opts.Mode = HardReset + } else { + // If we are doing just staging then it is a mixed reset + opts.Mode = MixedReset + } + + return w.Reset(opts) + } + + return ErrRestoreWorktreeOnlyNotSupported +} + // Reset the worktree to a specified state. 
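A minimal sketch of the Restore API added above, under the assumption that the caller only wants to unstage specific paths (restore source HEAD). Restoring the working tree alone is rejected with ErrRestoreWorktreeOnlyNotSupported, and per the doc comment an empty Files list fails validation. The helper name is hypothetical:

```go
package example

import git "github.com/go-git/go-git/v5"

// unstage: with Staged set and Worktree unset, Restore performs a mixed
// reset limited to the given paths, i.e. it moves their index entries back
// to HEAD while leaving the files on disk alone.
func unstage(w *git.Worktree, paths ...string) error {
	return w.Restore(&git.RestoreOptions{
		Staged: true,
		Files:  paths,
	})
}
```

The same Files filter is what ResetOptions gains in the hunks above, so a plain Reset can now also be limited to a subset of paths.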
func (w *Worktree) Reset(opts *ResetOptions) error { return w.ResetSparsely(opts, nil) } -func (w *Worktree) resetIndex(t *object.Tree, dirs []string) error { +func (w *Worktree) resetIndex(t *object.Tree, dirs []string, files []string) error { idx, err := w.r.Storer.Index() - if len(dirs) > 0 { - idx.SkipUnless(dirs) - } - if err != nil { return err } + b := newIndexBuilder(idx) changes, err := w.diffTreeWithStaging(t, true) @@ -365,6 +395,13 @@ func (w *Worktree) resetIndex(t *object.Tree, dirs []string) error { name = ch.From.String() } + if len(files) > 0 { + contains := inFiles(files, name) + if !contains { + continue + } + } + b.Remove(name) if e == nil { continue @@ -379,10 +416,26 @@ func (w *Worktree) resetIndex(t *object.Tree, dirs []string) error { } b.Write(idx) + + if len(dirs) > 0 { + idx.SkipUnless(dirs) + } + return w.r.Storer.SetIndex(idx) } -func (w *Worktree) resetWorktree(t *object.Tree) error { +func inFiles(files []string, v string) bool { + v = filepath.Clean(v) + for _, s := range files { + if filepath.Clean(s) == v { + return true + } + } + + return false +} + +func (w *Worktree) resetWorktree(t *object.Tree, files []string) error { changes, err := w.diffStagingWithWorktree(true, false) if err != nil { return err @@ -398,6 +451,25 @@ func (w *Worktree) resetWorktree(t *object.Tree) error { if err := w.validChange(ch); err != nil { return err } + + if len(files) > 0 { + file := "" + if ch.From != nil { + file = ch.From.String() + } else if ch.To != nil { + file = ch.To.String() + } + + if file == "" { + continue + } + + contains := inFiles(files, file) + if !contains { + continue + } + } + if err := w.checkoutChange(ch, t, b); err != nil { return err } @@ -431,6 +503,10 @@ var worktreeDeny = map[string]struct{}{ func validPath(paths ...string) error { for _, p := range paths { parts := strings.FieldsFunc(p, func(r rune) bool { return (r == '\\' || r == '/') }) + if len(parts) == 0 { + return fmt.Errorf("invalid path: %q", p) + } + if _, denied := worktreeDeny[strings.ToLower(parts[0])]; denied { return fmt.Errorf("invalid path prefix: %q", p) } @@ -641,7 +717,7 @@ func (w *Worktree) checkoutChangeRegularFile(name string, return err } - return w.addIndexFromFile(name, e.Hash, idx) + return w.addIndexFromFile(name, e.Hash, f.Mode, idx) } return nil @@ -724,18 +800,13 @@ func (w *Worktree) addIndexFromTreeEntry(name string, f *object.TreeEntry, idx * return nil } -func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *indexBuilder) error { +func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, mode filemode.FileMode, idx *indexBuilder) error { idx.Remove(name) fi, err := w.Filesystem.Lstat(name) if err != nil { return err } - mode, err := filemode.NewFromOSFileMode(fi.Mode()) - if err != nil { - return err - } - e := &index.Entry{ Hash: h, Name: name, @@ -1057,7 +1128,7 @@ func rmFileAndDirsIfEmpty(fs billy.Filesystem, name string) error { dir := filepath.Dir(name) for { removed, err := removeDirIfEmpty(fs, dir) - if err != nil { + if err != nil && !os.IsNotExist(err) { return err } diff --git a/vendor/github.com/go-git/go-git/v5/worktree_commit.go b/vendor/github.com/go-git/go-git/v5/worktree_commit.go index eaa21c3f1..9b1988ae6 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_commit.go +++ b/vendor/github.com/go-git/go-git/v5/worktree_commit.go @@ -3,7 +3,9 @@ package git import ( "bytes" "errors" + "io" "path" + "regexp" "sort" "strings" @@ -14,6 +16,7 @@ import ( "github.com/go-git/go-git/v5/storage" 
"github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/packet" "github.com/go-git/go-billy/v5" ) @@ -21,6 +24,10 @@ var ( // ErrEmptyCommit occurs when a commit is attempted using a clean // working tree, with no changes to be committed. ErrEmptyCommit = errors.New("cannot create empty commit: clean working tree") + + // characters to be removed from user name and/or email before using them to build a commit object + // See https://git-scm.com/docs/git-commit#_commit_information + invalidCharactersRe = regexp.MustCompile(`[<>\n]`) ) // Commit stores the current contents of the index in a new commit along with @@ -36,36 +43,53 @@ func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error } } - var treeHash plumbing.Hash - if opts.Amend { head, err := w.r.Head() if err != nil { return plumbing.ZeroHash, err } - - t, err := w.r.getTreeFromCommitHash(head.Hash()) + headCommit, err := w.r.CommitObject(head.Hash()) if err != nil { return plumbing.ZeroHash, err } - treeHash = t.Hash - opts.Parents = []plumbing.Hash{head.Hash()} - } else { - idx, err := w.r.Storer.Index() + opts.Parents = nil + if len(headCommit.ParentHashes) != 0 { + opts.Parents = []plumbing.Hash{headCommit.ParentHashes[0]} + } + } + + idx, err := w.r.Storer.Index() + if err != nil { + return plumbing.ZeroHash, err + } + + // First handle the case of the first commit in the repository being empty. + if len(opts.Parents) == 0 && len(idx.Entries) == 0 && !opts.AllowEmptyCommits { + return plumbing.ZeroHash, ErrEmptyCommit + } + + h := &buildTreeHelper{ + fs: w.Filesystem, + s: w.r.Storer, + } + + treeHash, err := h.BuildTree(idx, opts) + if err != nil { + return plumbing.ZeroHash, err + } + + previousTree := plumbing.ZeroHash + if len(opts.Parents) > 0 { + parentCommit, err := w.r.CommitObject(opts.Parents[0]) if err != nil { return plumbing.ZeroHash, err } + previousTree = parentCommit.TreeHash + } - h := &buildTreeHelper{ - fs: w.Filesystem, - s: w.r.Storer, - } - - treeHash, err = h.BuildTree(idx, opts) - if err != nil { - return plumbing.ZeroHash, err - } + if treeHash == previousTree && !opts.AllowEmptyCommits { + return plumbing.ZeroHash, ErrEmptyCommit } commit, err := w.buildCommitObject(msg, opts, treeHash) @@ -118,19 +142,24 @@ func (w *Worktree) updateHEAD(commit plumbing.Hash) error { func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumbing.Hash) (plumbing.Hash, error) { commit := &object.Commit{ - Author: *opts.Author, - Committer: *opts.Committer, + Author: w.sanitize(*opts.Author), + Committer: w.sanitize(*opts.Committer), Message: msg, TreeHash: tree, ParentHashes: opts.Parents, } - if opts.SignKey != nil { - sig, err := w.buildCommitSignature(commit, opts.SignKey) + // Convert SignKey into a Signer if set. Existing Signer should take priority. 
+ signer := opts.Signer + if signer == nil && opts.SignKey != nil { + signer = &gpgSigner{key: opts.SignKey} + } + if signer != nil { + sig, err := signObject(signer, commit) if err != nil { return plumbing.ZeroHash, err } - commit.PGPSignature = sig + commit.PGPSignature = string(sig) } obj := w.r.Storer.NewEncodedObject() @@ -140,20 +169,25 @@ func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumb return w.r.Storer.SetEncodedObject(obj) } -func (w *Worktree) buildCommitSignature(commit *object.Commit, signKey *openpgp.Entity) (string, error) { - encoded := &plumbing.MemoryObject{} - if err := commit.Encode(encoded); err != nil { - return "", err - } - r, err := encoded.Reader() - if err != nil { - return "", err +func (w *Worktree) sanitize(signature object.Signature) object.Signature { + return object.Signature{ + Name: invalidCharactersRe.ReplaceAllString(signature.Name, ""), + Email: invalidCharactersRe.ReplaceAllString(signature.Email, ""), + When: signature.When, } +} + +type gpgSigner struct { + key *openpgp.Entity + cfg *packet.Config +} + +func (s *gpgSigner) Sign(message io.Reader) ([]byte, error) { var b bytes.Buffer - if err := openpgp.ArmoredDetachSign(&b, signKey, r, nil); err != nil { - return "", err + if err := openpgp.ArmoredDetachSign(&b, s.key, message, s.cfg); err != nil { + return nil, err } - return b.String(), nil + return b.Bytes(), nil } // buildTreeHelper converts a given index.Index file into multiple git objects @@ -170,10 +204,6 @@ type buildTreeHelper struct { // BuildTree builds the tree objects and push its to the storer, the hash // of the root tree is returned. func (h *buildTreeHelper) BuildTree(idx *index.Index, opts *CommitOptions) (plumbing.Hash, error) { - if len(idx.Entries) == 0 && (opts == nil || !opts.AllowEmptyCommits) { - return plumbing.ZeroHash, ErrEmptyCommit - } - const rootNode = "" h.trees = map[string]*object.Tree{rootNode: {}} h.entries = map[string]*object.TreeEntry{} @@ -263,4 +293,4 @@ func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tr return hash, nil } return h.s.SetEncodedObject(o) -} \ No newline at end of file +} diff --git a/vendor/github.com/go-git/go-git/v5/worktree_linux.go b/vendor/github.com/go-git/go-git/v5/worktree_linux.go index 6fcace2f9..f6b85fe3d 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_linux.go +++ b/vendor/github.com/go-git/go-git/v5/worktree_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package git @@ -21,6 +22,6 @@ func init() { } } -func isSymlinkWindowsNonAdmin(err error) bool { +func isSymlinkWindowsNonAdmin(_ error) bool { return false } diff --git a/vendor/github.com/go-git/go-git/v5/worktree_status.go b/vendor/github.com/go-git/go-git/v5/worktree_status.go index 730108754..7870d138d 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_status.go +++ b/vendor/github.com/go-git/go-git/v5/worktree_status.go @@ -29,10 +29,23 @@ var ( // ErrGlobNoMatches in an AddGlob if the glob pattern does not match any // files in the worktree. ErrGlobNoMatches = errors.New("glob pattern did not match any files") + // ErrUnsupportedStatusStrategy occurs when an invalid StatusStrategy is used + // when processing the Worktree status. + ErrUnsupportedStatusStrategy = errors.New("unsupported status strategy") ) // Status returns the working tree status. 
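For the worktree_commit.go changes above, a hedged sketch of the amend path: with this revision the amended commit reuses HEAD's first parent, and a commit whose tree equals its first parent's tree is rejected with ErrEmptyCommit unless AllowEmptyCommits is set. The helper name and message are assumptions:

```go
package example

import (
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
)

// amendLast replaces the current HEAD commit with one built from the
// present index contents, keeping HEAD's original parent.
func amendLast(w *git.Worktree, author object.Signature) (plumbing.Hash, error) {
	return w.Commit("amended message", &git.CommitOptions{
		Author: &author,
		Amend:  true,
	})
}
```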
func (w *Worktree) Status() (Status, error) { + return w.StatusWithOptions(StatusOptions{Strategy: defaultStatusStrategy}) +} + +// StatusOptions defines the options for Worktree.StatusWithOptions(). +type StatusOptions struct { + Strategy StatusStrategy +} + +// StatusWithOptions returns the working tree status. +func (w *Worktree) StatusWithOptions(o StatusOptions) (Status, error) { var hash plumbing.Hash ref, err := w.r.Head() @@ -44,11 +57,14 @@ func (w *Worktree) Status() (Status, error) { hash = ref.Hash() } - return w.status(hash) + return w.status(o.Strategy, hash) } -func (w *Worktree) status(commit plumbing.Hash) (Status, error) { - s := make(Status) +func (w *Worktree) status(ss StatusStrategy, commit plumbing.Hash) (Status, error) { + s, err := ss.new(w) + if err != nil { + return nil, err + } left, err := w.diffCommitWithStaging(commit, false) if err != nil { @@ -271,7 +287,7 @@ func diffTreeIsEquals(a, b noder.Hasher) bool { // no error is returned. When path is a file, the blob.Hash is returned. func (w *Worktree) Add(path string) (plumbing.Hash, error) { // TODO(mcuadros): deprecate in favor of AddWithOption in v6. - return w.doAdd(path, make([]gitignore.Pattern, 0)) + return w.doAdd(path, make([]gitignore.Pattern, 0), false) } func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, ignorePattern []gitignore.Pattern) (added bool, err error) { @@ -321,7 +337,7 @@ func (w *Worktree) AddWithOptions(opts *AddOptions) error { } if opts.All { - _, err := w.doAdd(".", w.Excludes) + _, err := w.doAdd(".", w.Excludes, false) return err } @@ -329,16 +345,11 @@ func (w *Worktree) AddWithOptions(opts *AddOptions) error { return w.AddGlob(opts.Glob) } - _, err := w.Add(opts.Path) + _, err := w.doAdd(opts.Path, make([]gitignore.Pattern, 0), opts.SkipStatus) return err } -func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern) (plumbing.Hash, error) { - s, err := w.Status() - if err != nil { - return plumbing.ZeroHash, err - } - +func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern, skipStatus bool) (plumbing.Hash, error) { idx, err := w.r.Storer.Index() if err != nil { return plumbing.ZeroHash, err @@ -348,6 +359,19 @@ func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern) (plumbi var added bool fi, err := w.Filesystem.Lstat(path) + + // status is required for doAddDirectory + var s Status + var err2 error + if !skipStatus || fi == nil || fi.IsDir() { + s, err2 = w.Status() + if err2 != nil { + return plumbing.ZeroHash, err2 + } + } + + path = filepath.Clean(path) + if err != nil || !fi.IsDir() { added, h, err = w.doAddFile(idx, s, path, ignorePattern) } else { @@ -421,8 +445,9 @@ func (w *Worktree) AddGlob(pattern string) error { // doAddFile create a new blob from path and update the index, added is true if // the file added is different from the index. 
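A small sketch of the new status entry points defined above, assuming a caller wants a reliable answer for a single tracked file; statusOf is a hypothetical helper. The Preload strategy seeds the map from the index, so unmodified tracked files are reported as Unmodified instead of being mistaken for untracked (the issue #119 behaviour of the default Empty strategy):

```go
package example

import git "github.com/go-git/go-git/v5"

// statusOf returns the staging/worktree state of a single path using the
// index-preloading strategy.
func statusOf(w *git.Worktree, path string) (git.FileStatus, error) {
	st, err := w.StatusWithOptions(git.StatusOptions{Strategy: git.Preload})
	if err != nil {
		return git.FileStatus{}, err
	}
	return *st.File(path), nil
}
```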
+// if s status is nil will skip the status check and update the index anyway func (w *Worktree) doAddFile(idx *index.Index, s Status, path string, ignorePattern []gitignore.Pattern) (added bool, h plumbing.Hash, err error) { - if s.File(path).Worktree == Unmodified { + if s != nil && s.File(path).Worktree == Unmodified { return false, h, nil } if len(ignorePattern) > 0 { @@ -481,7 +506,7 @@ func (w *Worktree) copyFileToStorage(path string) (hash plumbing.Hash, err error return w.r.Storer.SetEncodedObject(obj) } -func (w *Worktree) fillEncodedObjectFromFile(dst io.Writer, path string, fi os.FileInfo) (err error) { +func (w *Worktree) fillEncodedObjectFromFile(dst io.Writer, path string, _ os.FileInfo) (err error) { src, err := w.Filesystem.Open(path) if err != nil { return err @@ -496,7 +521,7 @@ func (w *Worktree) fillEncodedObjectFromFile(dst io.Writer, path string, fi os.F return err } -func (w *Worktree) fillEncodedObjectFromSymlink(dst io.Writer, path string, fi os.FileInfo) error { +func (w *Worktree) fillEncodedObjectFromSymlink(dst io.Writer, path string, _ os.FileInfo) error { target, err := w.Filesystem.Readlink(path) if err != nil { return err @@ -536,9 +561,11 @@ func (w *Worktree) doUpdateFileToIndex(e *index.Entry, filename string, h plumbi return err } - if e.Mode.IsRegular() { - e.Size = uint32(info.Size()) - } + // The entry size must always reflect the current state, otherwise + // it will cause go-git's Worktree.Status() to divert from "git status". + // The size of a symlink is the length of the path to the target. + // The size of Regular and Executable files is the size of the files. + e.Size = uint32(info.Size()) fillSystemInfo(e, info.Sys()) return nil diff --git a/vendor/github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace/grace.go b/vendor/github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace/grace.go index 8aaf18aaf..afdc6da6b 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace/grace.go +++ b/vendor/github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace/grace.go @@ -298,7 +298,7 @@ func (w *Watcher) TrapSignals() { // TODO: Ideally this would call exit() but properly return an error. The // exit() is problematic (i.e. racey) especiaily when orchestrating multiple -// reva services from some external runtime (like in the "ocis server" case +// reva services from some external runtime (like in the "opencloud server" case func gracefulShutdown(w *Watcher) { w.log.Info().Int("Timeout", w.gracefulShutdownTimeout).Msg("preparing for a graceful shutdown with deadline") go func() { @@ -336,7 +336,7 @@ func gracefulShutdown(w *Watcher) { // TODO: Ideally this would call exit() but properly return an error. The // exit() is problematic (i.e. 
racey) especiaily when orchestrating multiple -// reva services from some external runtime (like in the "ocis server" case +// reva services from some external runtime (like in the "opencloud server" case func hardShutdown(w *Watcher) { w.log.Info().Msg("preparing for hard shutdown, aborting all conns") for _, s := range w.ss { diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/grpc/services/storageprovider/storageprovider.go b/vendor/github.com/opencloud-eu/reva/v2/internal/grpc/services/storageprovider/storageprovider.go index 3eaf7a87b..5798613e0 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/grpc/services/storageprovider/storageprovider.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/grpc/services/storageprovider/storageprovider.go @@ -291,7 +291,6 @@ func (s *Service) InitiateFileDownload(ctx context.Context, req *provider.Initia // TODO(labkode): maybe add short-lived token? // We now simply point the client to the data server. // For example, https://data-server.example.org/home/docs/myfile.txt - // or ownclouds://data-server.example.org/home/docs/myfile.txt log := appctx.GetLogger(ctx) u := *s.dataServerURL log.Info().Str("data-server", u.String()).Interface("ref", req.Ref).Msg("file download") @@ -399,7 +398,7 @@ func (s *Service) InitiateFileUpload(ctx context.Context, req *provider.Initiate if req.Opaque.Map["Upload-Checksum"] != nil { metadata["checksum"] = string(req.Opaque.Map["Upload-Checksum"].Value) } - // ownCloud mtime to set for the uploaded file + // OpenCloud mtime to set for the uploaded file if req.Opaque.Map["X-OC-Mtime"] != nil { metadata["mtime"] = string(req.Opaque.Map["X-OC-Mtime"].Value) } @@ -430,7 +429,7 @@ func (s *Service) InitiateFileUpload(ctx context.Context, req *provider.Initiate // - it is also unassigned // - ends in 9 as the 409 conflict // - is near the 4xx errors about conditions: 415 Unsupported Media Type, 416 Range Not Satisfiable or 417 Expectation Failed - // owncloud only expects a 400 Bad request so InvalidArg is good enough for now + // OpenCloud only expects a 400 Bad request so InvalidArg is good enough for now // seealso errtypes.StatusChecksumMismatch case errtypes.PermissionDenied: st = status.NewPermissionDenied(ctx, err, "permission denied") diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/propfind/propfind.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/propfind/propfind.go index 359591dd6..9676a3a55 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/propfind/propfind.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/propfind/propfind.go @@ -1604,7 +1604,7 @@ func mdToPropResponse(ctx context.Context, pf *XML, md *provider.ResourceInfo, p // see everts stance on this https://stackoverflow.com/a/31621912, he points to http://tools.ietf.org/html/rfc4918#section-15.3 // > Purpose: Contains the Content-Length header returned by a GET without accept headers. // which only would make sense when eg. rendering a plain HTML filelisting when GETing a collection, - // which is not the case ... so we don't return it on collections. owncloud has oc:size for that + // which is not the case ... so we don't return it on collections. 
OpenCloud has oc:size for that // TODO we cannot find out if md.Size is set or not because ints in go default to 0 if md.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { appendToNotFound(prop.NotFound("d:getcontentlength")) @@ -1638,7 +1638,7 @@ func mdToPropResponse(ctx context.Context, pf *XML, md *provider.ResourceInfo, p if md.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { // always returns the current usage, // in oc10 there seems to be a bug that makes the size in webdav differ from the one in the user properties, not taking shares into account - // in ocis we plan to always mak the quota a property of the storage space + // in OpenCloud we plan to always mak the quota a property of the storage space appendToOK(prop.Escaped("d:quota-used-bytes", size)) } else { appendToNotFound(prop.NotFound("d:quota-used-bytes")) diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/put.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/put.go index d2c8f3dd3..17984855c 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/put.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/put.go @@ -246,7 +246,7 @@ func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Requ utils.AppendPlainToOpaque(opaque, net.HeaderUploadLength, strconv.FormatInt(length, 10)) - // curl -X PUT https://demo.owncloud.com/remote.php/webdav/testcs.bin -u demo:demo -d '123' -v -H 'OC-Checksum: SHA1:40bd001563085fc35165329ea1ff5c5ecbdbbeef' + // curl -X PUT https://demo.example.org/remote.php/webdav/testcs.bin -u demo:demo -d '123' -v -H 'OC-Checksum: SHA1:40bd001563085fc35165329ea1ff5c5ecbdbbeef' var cparts []string // TUS Upload-Checksum header takes precedence @@ -257,7 +257,7 @@ func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Requ w.WriteHeader(http.StatusBadRequest) return } - // Then try owncloud header + // Then try OpenCloud header } else if checksum := r.Header.Get(net.HeaderOCChecksum); checksum != "" { cparts = strings.SplitN(checksum, ":", 2) if len(cparts) != 2 { diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/tus.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/tus.go index 6ebfc4a33..f88dc2192 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/tus.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/tus.go @@ -121,7 +121,7 @@ func (s *svc) handleTusPost(ctx context.Context, w http.ResponseWriter, r *http. // r.Header.Get(net.HeaderOCChecksum) // TODO must be SHA1, ADLER32 or MD5 ... in capital letters???? 
- // curl -X PUT https://demo.owncloud.com/remote.php/webdav/testcs.bin -u demo:demo -d '123' -v -H 'OC-Checksum: SHA1:40bd001563085fc35165329ea1ff5c5ecbdbbeef' + // curl -X PUT https://demo.example.org/remote.php/webdav/testcs.bin -u demo:demo -d '123' -v -H 'OC-Checksum: SHA1:40bd001563085fc35165329ea1ff5c5ecbdbbeef' // TODO check Expect: 100-continue diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/sharees/sharees.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/sharees/sharees.go index ded81f959..ae49eed68 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/sharees/sharees.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/sharees/sharees.go @@ -35,7 +35,7 @@ import ( "github.com/opencloud-eu/reva/v2/pkg/storage/utils/templates" ) -// Handler implements the ownCloud sharing API +// Handler implements the open collaboration service sharing API type Handler struct { gatewayAddr string additionalInfoAttribute string diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/shares.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/shares.go index 1d1f2ab30..b47177420 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/shares.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/shares.go @@ -75,7 +75,7 @@ var ( errParsingSpaceReference = errors.New("could not parse space reference") ) -// Handler implements the shares part of the ownCloud sharing API +// Handler implements the shares part of the open collaboration service sharing API type Handler struct { gatewayAddr string machineAuthAPIKey string diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/cloud/user/user.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/cloud/user/user.go index eba385e4e..25fe550fa 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/cloud/user/user.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/cloud/user/user.go @@ -52,7 +52,7 @@ func (h *Handler) GetSelf(w http.ResponseWriter, r *http.Request) { // User holds user data type User struct { - ID string `json:"id" xml:"id"` // UserID in ocs is the owncloud internal username + ID string `json:"id" xml:"id"` // UserID in ocs is the username DisplayName string `json:"display-name" xml:"display-name"` // is used in ocs/v(1|2).php/cloud/user - yes this is different from the users endpoint Email string `json:"email" xml:"email"` UserType string `json:"user-type" xml:"user-type"` diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/metrics/driver/xcloud/xcloud.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/metrics/driver/xcloud/xcloud.go index 084169b59..93d7c0bc1 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/metrics/driver/xcloud/xcloud.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/metrics/driver/xcloud/xcloud.go @@ -59,7 +59,7 @@ type CloudDriver struct { } func (d *CloudDriver) refresh() error { - // endpoint example: https://mybox.com or https://mybox.com/owncloud + // endpoint example: https://mybox.com endpoint := 
fmt.Sprintf("%s/index.php/apps/sciencemesh/internal_metrics", d.instance) req, err := http.NewRequest("GET", endpoint, nil) diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/ocm/storage/received/ocm.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/ocm/storage/received/ocm.go index 172adf3df..075c7734f 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/ocm/storage/received/ocm.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/ocm/storage/received/ocm.go @@ -294,7 +294,7 @@ func convertStatToResourceInfo(ref *provider.Reference, f fs.FileInfo, share *oc } if t == provider.ResourceType_RESOURCE_TYPE_FILE { - // get SHA1 checksum from owncloud specific properties if available + // get SHA1 checksum from OpenCloud specific properties if available propstat := webdavFile.Sys().(gowebdav.Props) ri.Checksum = extractChecksum(propstat) } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/ocm/storage/received/upload.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/ocm/storage/received/upload.go index b723242b8..4aa0f18fc 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/ocm/storage/received/upload.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/ocm/storage/received/upload.go @@ -255,7 +255,7 @@ func (u *upload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (i // If the HTTP PATCH request gets interrupted in the middle (e.g. because // the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF. - // However, for the ocis driver it's not important whether the stream has ended + // However, for the ocm driver it's not important whether the stream has ended // on purpose or accidentally. if err != nil && err != io.ErrUnexpectedEOF { return n, err diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/cs3/cs3.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/cs3/cs3.go deleted file mode 100644 index 93071b31d..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/cs3/cs3.go +++ /dev/null @@ -1,620 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package cs3 - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - "path" - "strconv" - "strings" - "sync" - "time" - - "github.com/mitchellh/mapstructure" - "github.com/pkg/errors" - "golang.org/x/crypto/bcrypt" - "google.golang.org/protobuf/proto" - - gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" - user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" - link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - typespb "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" - "github.com/opencloud-eu/reva/v2/pkg/appctx" - "github.com/opencloud-eu/reva/v2/pkg/errtypes" - "github.com/opencloud-eu/reva/v2/pkg/publicshare" - "github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/registry" - "github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool" - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer" - indexerErrors "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/errors" - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/option" - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/metadata" - "github.com/opencloud-eu/reva/v2/pkg/storagespace" - "github.com/opencloud-eu/reva/v2/pkg/utils" -) - -func init() { - registry.Register("cs3", NewDefault) -} - -// Manager implements a publicshare manager using a cs3 storage backend -type Manager struct { - gatewayClient gateway.GatewayAPIClient - sync.RWMutex - - storage metadata.Storage - indexer indexer.Indexer - passwordHashCost int - - initialized bool -} - -type config struct { - GatewayAddr string `mapstructure:"gateway_addr"` - ProviderAddr string `mapstructure:"provider_addr"` - ServiceUserID string `mapstructure:"service_user_id"` - ServiceUserIdp string `mapstructure:"service_user_idp"` - MachineAuthAPIKey string `mapstructure:"machine_auth_apikey"` -} - -// NewDefault returns a new manager instance with default dependencies -func NewDefault(m map[string]interface{}) (publicshare.Manager, error) { - c := &config{} - if err := mapstructure.Decode(m, c); err != nil { - err = errors.Wrap(err, "error creating a new manager") - return nil, err - } - - s, err := metadata.NewCS3Storage(c.GatewayAddr, c.ProviderAddr, c.ServiceUserID, c.ServiceUserIdp, c.MachineAuthAPIKey) - if err != nil { - return nil, err - } - indexer := indexer.CreateIndexer(s) - - client, err := pool.GetGatewayServiceClient(c.GatewayAddr) - if err != nil { - return nil, err - } - - return New(client, s, indexer, bcrypt.DefaultCost) -} - -// New returns a new manager instance -func New(gatewayClient gateway.GatewayAPIClient, storage metadata.Storage, indexer indexer.Indexer, passwordHashCost int) (*Manager, error) { - return &Manager{ - gatewayClient: gatewayClient, - storage: storage, - indexer: indexer, - passwordHashCost: passwordHashCost, - initialized: false, - }, nil -} - -func (m *Manager) initialize() error { - if m.initialized { - return nil - } - - m.Lock() - defer m.Unlock() - - if m.initialized { // check if initialization happened while grabbing the lock - return nil - } - - err := m.storage.Init(context.Background(), "public-share-manager-metadata") - if err != nil { - return err - } - if err := m.storage.MakeDirIfNotExist(context.Background(), "publicshares"); err != nil { - return err - } - err = m.indexer.AddIndex(&link.PublicShare{}, option.IndexByField("Id.OpaqueId"), "Token", "publicshares", "unique", nil, true) - if err != nil { - return err - } - err = 
m.indexer.AddIndex(&link.PublicShare{}, option.IndexByFunc{ - Name: "Owner", - Func: indexOwnerFunc, - }, "Token", "publicshares", "non_unique", nil, true) - if err != nil { - return err - } - err = m.indexer.AddIndex(&link.PublicShare{}, option.IndexByFunc{ - Name: "Creator", - Func: indexCreatorFunc, - }, "Token", "publicshares", "non_unique", nil, true) - if err != nil { - return err - } - err = m.indexer.AddIndex(&link.PublicShare{}, option.IndexByFunc{ - Name: "ResourceId", - Func: indexResourceIDFunc, - }, "Token", "publicshares", "non_unique", nil, true) - if err != nil { - return err - } - m.initialized = true - return nil -} - -// Dump exports public shares to channels (e.g. during migration) -func (m *Manager) Dump(ctx context.Context, shareChan chan<- *publicshare.WithPassword) error { - if err := m.initialize(); err != nil { - return err - } - - pshares, err := m.storage.ListDir(ctx, "publicshares") - if err != nil { - return err - } - - for _, v := range pshares { - var local publicshare.WithPassword - ps, err := m.getByToken(ctx, v.Name) - if err != nil { - return err - } - local.Password = ps.Password - proto.Merge(&local.PublicShare, &ps.PublicShare) - - shareChan <- &local - } - - return nil -} - -// Load imports public shares and received shares from channels (e.g. during migration) -func (m *Manager) Load(ctx context.Context, shareChan <-chan *publicshare.WithPassword) error { - log := appctx.GetLogger(ctx) - if err := m.initialize(); err != nil { - return err - } - for ps := range shareChan { - if err := m.persist(context.Background(), ps); err != nil { - log.Error().Err(err).Interface("publicshare", ps).Msg("error loading public share") - } - } - return nil -} - -// CreatePublicShare creates a new public share -func (m *Manager) CreatePublicShare(ctx context.Context, u *user.User, ri *provider.ResourceInfo, g *link.Grant) (*link.PublicShare, error) { - if err := m.initialize(); err != nil { - return nil, err - } - - id := &link.PublicShareId{ - OpaqueId: utils.RandString(15), - } - - tkn := utils.RandString(15) - now := time.Now().UnixNano() - - displayName, quicklink := tkn, false - if ri.ArbitraryMetadata != nil { - metadataName, ok := ri.ArbitraryMetadata.Metadata["name"] - if ok { - displayName = metadataName - } - - quicklink, _ = strconv.ParseBool(ri.ArbitraryMetadata.Metadata["quicklink"]) - } - - var passwordProtected bool - password := g.Password - if password != "" { - h, err := bcrypt.GenerateFromPassword([]byte(password), m.passwordHashCost) - if err != nil { - return nil, errors.Wrap(err, "could not hash share password") - } - password = string(h) - passwordProtected = true - } - - createdAt := &typespb.Timestamp{ - Seconds: uint64(now / int64(time.Second)), - Nanos: uint32(now % int64(time.Second)), - } - - s := &publicshare.WithPassword{ - PublicShare: link.PublicShare{ - Id: id, - Owner: ri.GetOwner(), - Creator: u.Id, - ResourceId: ri.Id, - Token: tkn, - Permissions: g.Permissions, - Ctime: createdAt, - Mtime: createdAt, - PasswordProtected: passwordProtected, - Expiration: g.Expiration, - DisplayName: displayName, - Quicklink: quicklink, - }, - Password: password, - } - - err := m.persist(ctx, s) - if err != nil { - return nil, err - } - - return &s.PublicShare, nil -} - -// UpdatePublicShare updates an existing public share -func (m *Manager) UpdatePublicShare(ctx context.Context, u *user.User, req *link.UpdatePublicShareRequest) (*link.PublicShare, error) { - if err := m.initialize(); err != nil { - return nil, err - } - - ps, err := 
m.getWithPassword(ctx, req.Ref) - if err != nil { - return nil, err - } - - switch req.Update.Type { - case link.UpdatePublicShareRequest_Update_TYPE_DISPLAYNAME: - ps.PublicShare.DisplayName = req.Update.DisplayName - case link.UpdatePublicShareRequest_Update_TYPE_PERMISSIONS: - ps.PublicShare.Permissions = req.Update.Grant.Permissions - case link.UpdatePublicShareRequest_Update_TYPE_EXPIRATION: - ps.PublicShare.Expiration = req.Update.Grant.Expiration - case link.UpdatePublicShareRequest_Update_TYPE_PASSWORD: - if req.Update.Grant.Password == "" { - ps.Password = "" - ps.PublicShare.PasswordProtected = false - } else { - h, err := bcrypt.GenerateFromPassword([]byte(req.Update.Grant.Password), m.passwordHashCost) - if err != nil { - return nil, errors.Wrap(err, "could not hash share password") - } - ps.Password = string(h) - ps.PublicShare.PasswordProtected = true - } - default: - return nil, errtypes.BadRequest("no valid update type given") - } - - err = m.persist(ctx, ps) - if err != nil { - return nil, err - } - - return &ps.PublicShare, nil -} - -// GetPublicShare returns an existing public share -func (m *Manager) GetPublicShare(ctx context.Context, u *user.User, ref *link.PublicShareReference, sign bool) (*link.PublicShare, error) { - if err := m.initialize(); err != nil { - return nil, err - } - - ps, err := m.getWithPassword(ctx, ref) - if err != nil { - return nil, err - } - - if ps.PublicShare.PasswordProtected && sign { - err = publicshare.AddSignature(&ps.PublicShare, ps.Password) - if err != nil { - return nil, err - } - } - - return &ps.PublicShare, nil -} - -func (m *Manager) getWithPassword(ctx context.Context, ref *link.PublicShareReference) (*publicshare.WithPassword, error) { - switch { - case ref.GetToken() != "": - return m.getByToken(ctx, ref.GetToken()) - case ref.GetId().GetOpaqueId() != "": - return m.getByID(ctx, ref.GetId().GetOpaqueId()) - default: - return nil, errtypes.BadRequest("neither id nor token given") - } -} - -func (m *Manager) getByID(ctx context.Context, id string) (*publicshare.WithPassword, error) { - tokens, err := m.indexer.FindBy(&link.PublicShare{}, - indexer.NewField("Id.OpaqueId", id), - ) - if err != nil { - return nil, err - } - if len(tokens) == 0 { - return nil, errtypes.NotFound("publicshare with the given id not found") - } - return m.getByToken(ctx, tokens[0]) -} - -func (m *Manager) getByToken(ctx context.Context, token string) (*publicshare.WithPassword, error) { - fn := path.Join("publicshares", token) - data, err := m.storage.SimpleDownload(ctx, fn) - if err != nil { - return nil, err - } - - ps := &publicshare.WithPassword{} - err = json.Unmarshal(data, ps) - if err != nil { - return nil, err - } - id := storagespace.UpdateLegacyResourceID(ps.PublicShare.ResourceId) - ps.PublicShare.ResourceId = id - return ps, nil -} - -// ListPublicShares lists existing public shares matching the given filters -func (m *Manager) ListPublicShares(ctx context.Context, u *user.User, filters []*link.ListPublicSharesRequest_Filter, sign bool) ([]*link.PublicShare, error) { - if err := m.initialize(); err != nil { - return nil, err - } - - log := appctx.GetLogger(ctx) - var rIDs []*provider.ResourceId - if len(filters) != 0 { - grouped := publicshare.GroupFiltersByType(filters) - for _, g := range grouped { - for _, f := range g { - if f.GetResourceId() != nil { - rIDs = append(rIDs, f.GetResourceId()) - } - } - } - } - var ( - createdShareTokens []string - err error - ) - - // in spaces, always use the resourceId - if len(rIDs) != 0 { - for _, rID 
:= range rIDs { - shareTokens, err := m.indexer.FindBy(&link.PublicShare{}, - indexer.NewField("ResourceId", resourceIDToIndex(rID)), - ) - if err != nil { - return nil, err - } - createdShareTokens = append(createdShareTokens, shareTokens...) - } - } else { - // fallback for legacy use - createdShareTokens, err = m.indexer.FindBy(&link.PublicShare{}, - indexer.NewField("Owner", userIDToIndex(u.Id)), - indexer.NewField("Creator", userIDToIndex(u.Id)), - ) - if err != nil { - return nil, err - } - } - - // We use shareMem as a temporary lookup store to check which shares were - // already added. This is to prevent duplicates. - shareMem := make(map[string]struct{}) - result := []*link.PublicShare{} - for _, token := range createdShareTokens { - ps, err := m.getByToken(ctx, token) - if err != nil { - return nil, err - } - - if !publicshare.MatchesFilters(&ps.PublicShare, filters) { - continue - } - - if publicshare.IsExpired(&ps.PublicShare) { - ref := &link.PublicShareReference{ - Spec: &link.PublicShareReference_Id{ - Id: ps.PublicShare.Id, - }, - } - if err := m.RevokePublicShare(ctx, u, ref); err != nil { - log.Error().Err(err). - Str("public_share_token", ps.PublicShare.Token). - Str("public_share_id", ps.PublicShare.Id.OpaqueId). - Msg("failed to revoke expired public share") - } - continue - } - - if ps.PublicShare.PasswordProtected && sign { - err = publicshare.AddSignature(&ps.PublicShare, ps.Password) - if err != nil { - return nil, err - } - } - result = append(result, &ps.PublicShare) - shareMem[ps.PublicShare.Token] = struct{}{} - } - - // If a user requests to list shares which have not been created by them - // we have to explicitly fetch these shares and check if the user is - // allowed to list the shares. - // Only then can we add these shares to the result. - grouped := publicshare.GroupFiltersByType(filters) - idFilter, ok := grouped[link.ListPublicSharesRequest_Filter_TYPE_RESOURCE_ID] - if !ok { - return result, nil - } - - var tokens []string - if len(idFilter) > 0 { - idFilters := make([]indexer.Field, 0, len(idFilter)) - for _, filter := range idFilter { - resourceID := filter.GetResourceId() - idFilters = append(idFilters, indexer.NewField("ResourceId", resourceIDToIndex(resourceID))) - } - tokens, err = m.indexer.FindBy(&link.PublicShare{}, idFilters...) - if err != nil { - return nil, err - } - } - - // statMem is used as a local cache to prevent statting resources which - // already have been checked. - statMem := make(map[string]struct{}) - for _, token := range tokens { - if _, handled := shareMem[token]; handled { - // We don't want to add a share multiple times when we added it - // already. 
- continue - } - - s, err := m.getByToken(ctx, token) - if err != nil { - return nil, err - } - - if _, checked := statMem[resourceIDToIndex(s.PublicShare.GetResourceId())]; !checked { - sReq := &provider.StatRequest{ - Ref: &provider.Reference{ResourceId: s.PublicShare.GetResourceId()}, - } - sRes, err := m.gatewayClient.Stat(ctx, sReq) - if err != nil { - continue - } - if sRes.Status.Code != rpc.Code_CODE_OK { - continue - } - if !sRes.Info.PermissionSet.ListGrants { - continue - } - statMem[resourceIDToIndex(s.PublicShare.GetResourceId())] = struct{}{} - } - - if publicshare.MatchesFilters(&s.PublicShare, filters) { - result = append(result, &s.PublicShare) - shareMem[s.PublicShare.Token] = struct{}{} - } - } - return result, nil -} - -// RevokePublicShare revokes an existing public share -func (m *Manager) RevokePublicShare(ctx context.Context, u *user.User, ref *link.PublicShareReference) error { - if err := m.initialize(); err != nil { - return err - } - - ps, err := m.GetPublicShare(ctx, u, ref, false) - if err != nil { - return err - } - - err = m.storage.Delete(ctx, path.Join("publicshares", ps.Token)) - if err != nil { - if _, ok := err.(errtypes.NotFound); !ok { - return err - } - } - - return m.indexer.Delete(ps) -} - -// GetPublicShareByToken gets an existing public share in an unauthenticated context using either a password or a signature -func (m *Manager) GetPublicShareByToken(ctx context.Context, token string, auth *link.PublicShareAuthentication, sign bool) (*link.PublicShare, error) { - if err := m.initialize(); err != nil { - return nil, err - } - - ps, err := m.getByToken(ctx, token) - if err != nil { - return nil, err - } - - if publicshare.IsExpired(&ps.PublicShare) { - return nil, errtypes.NotFound("public share has expired") - } - - if ps.PublicShare.PasswordProtected { - if !publicshare.Authenticate(&ps.PublicShare, ps.Password, auth) { - return nil, errtypes.InvalidCredentials("access denied") - } - } - - return &ps.PublicShare, nil -} - -func indexOwnerFunc(v interface{}) (string, error) { - ps, ok := v.(*link.PublicShare) - if !ok { - return "", fmt.Errorf("given entity is not a public share") - } - return userIDToIndex(ps.Owner), nil -} - -func indexCreatorFunc(v interface{}) (string, error) { - ps, ok := v.(*link.PublicShare) - if !ok { - return "", fmt.Errorf("given entity is not a public share") - } - return userIDToIndex(ps.Creator), nil -} - -func indexResourceIDFunc(v interface{}) (string, error) { - ps, ok := v.(*link.PublicShare) - if !ok { - return "", fmt.Errorf("given entity is not a public share") - } - return resourceIDToIndex(ps.ResourceId), nil -} - -func userIDToIndex(id *user.UserId) string { - return url.QueryEscape(id.Idp + ":" + id.OpaqueId) -} - -func resourceIDToIndex(id *provider.ResourceId) string { - return strings.Join([]string{id.StorageId, id.OpaqueId}, "!") -} - -func (m *Manager) persist(ctx context.Context, ps *publicshare.WithPassword) error { - data, err := json.Marshal(ps) - if err != nil { - return err - } - - fn := path.Join("publicshares", ps.PublicShare.Token) - err = m.storage.SimpleUpload(ctx, fn, data) - if err != nil { - return err - } - - _, err = m.indexer.Add(&ps.PublicShare) - if err != nil { - if _, ok := err.(*indexerErrors.AlreadyExistsErr); ok { - return nil - } - err = m.indexer.Delete(&ps.PublicShare) - if err != nil { - return err - } - _, err = m.indexer.Add(&ps.PublicShare) - return err - } - - return nil -} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/loader/loader.go 
b/vendor/github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/loader/loader.go index ec9e2c0cf..b1b208551 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/loader/loader.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/loader/loader.go @@ -20,7 +20,6 @@ package loader import ( // Load core share manager drivers. - _ "github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/cs3" _ "github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/json" _ "github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/memory" _ "github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/owncloudsql" diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/share/manager/cs3/cs3.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/share/manager/cs3/cs3.go deleted file mode 100644 index 36b1411cd..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/share/manager/cs3/cs3.go +++ /dev/null @@ -1,887 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package cs3 - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - "path" - "strings" - "sync" - - gatewayv1beta1 "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" - groupv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1" - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - rpcv1beta1 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" - collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/google/uuid" - "github.com/mitchellh/mapstructure" - "github.com/opencloud-eu/reva/v2/pkg/appctx" - ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx" - "github.com/opencloud-eu/reva/v2/pkg/errtypes" - "github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool" - "github.com/opencloud-eu/reva/v2/pkg/share" - "github.com/opencloud-eu/reva/v2/pkg/share/manager/registry" - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer" - indexerErrors "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/errors" - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/option" - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/metadata" - "github.com/opencloud-eu/reva/v2/pkg/storagespace" - "github.com/opencloud-eu/reva/v2/pkg/utils" - "github.com/pkg/errors" - "google.golang.org/genproto/protobuf/field_mask" -) - -// Manager implements a share manager using a cs3 storage backend -type Manager struct { - gatewayClient gatewayv1beta1.GatewayAPIClient - - sync.RWMutex - storage metadata.Storage - indexer indexer.Indexer - - initialized bool -} - -// ReceivedShareMetadata hold the state information or a received share -type ReceivedShareMetadata struct { - State collaboration.ShareState `json:"state"` - MountPoint *provider.Reference `json:"mountpoint"` 
-} - -func init() { - registry.Register("cs3", NewDefault) -} - -type config struct { - GatewayAddr string `mapstructure:"gateway_addr"` - ProviderAddr string `mapstructure:"provider_addr"` - ServiceUserID string `mapstructure:"service_user_id"` - ServiceUserIdp string `mapstructure:"service_user_idp"` - MachineAuthAPIKey string `mapstructure:"machine_auth_apikey"` -} - -// NewDefault returns a new manager instance with default dependencies -func NewDefault(m map[string]interface{}) (share.Manager, error) { - c := &config{} - if err := mapstructure.Decode(m, c); err != nil { - err = errors.Wrap(err, "error creating a new manager") - return nil, err - } - - s, err := metadata.NewCS3Storage(c.GatewayAddr, c.ProviderAddr, c.ServiceUserID, c.ServiceUserIdp, c.MachineAuthAPIKey) - if err != nil { - return nil, err - } - indexer := indexer.CreateIndexer(s) - - client, err := pool.GetGatewayServiceClient(c.GatewayAddr) - if err != nil { - return nil, err - } - - return New(client, s, indexer) -} - -// New returns a new manager instance -func New(gatewayClient gatewayv1beta1.GatewayAPIClient, s metadata.Storage, indexer indexer.Indexer) (*Manager, error) { - return &Manager{ - gatewayClient: gatewayClient, - storage: s, - indexer: indexer, - initialized: false, - }, nil -} - -func (m *Manager) initialize() error { - if m.initialized { - return nil - } - - m.Lock() - defer m.Unlock() - - if m.initialized { // check if initialization happened while grabbing the lock - return nil - } - - err := m.storage.Init(context.Background(), "cs3-share-manager-metadata") - if err != nil { - return err - } - if err := m.storage.MakeDirIfNotExist(context.Background(), "shares"); err != nil { - return err - } - if err := m.storage.MakeDirIfNotExist(context.Background(), "metadata"); err != nil { - return err - } - err = m.indexer.AddIndex(&collaboration.Share{}, option.IndexByFunc{ - Name: "OwnerId", - Func: indexOwnerFunc, - }, "Id.OpaqueId", "shares", "non_unique", nil, true) - if err != nil { - return err - } - err = m.indexer.AddIndex(&collaboration.Share{}, option.IndexByFunc{ - Name: "CreatorId", - Func: indexCreatorFunc, - }, "Id.OpaqueId", "shares", "non_unique", nil, true) - if err != nil { - return err - } - err = m.indexer.AddIndex(&collaboration.Share{}, option.IndexByFunc{ - Name: "GranteeId", - Func: indexGranteeFunc, - }, "Id.OpaqueId", "shares", "non_unique", nil, true) - if err != nil { - return err - } - err = m.indexer.AddIndex(&collaboration.Share{}, option.IndexByFunc{ - Name: "ResourceId", - Func: indexResourceIDFunc, - }, "Id.OpaqueId", "shares", "non_unique", nil, true) - if err != nil { - return err - } - m.initialized = true - return nil -} - -// Load imports shares and received shares from channels (e.g. 
during migration) -func (m *Manager) Load(ctx context.Context, shareChan <-chan *collaboration.Share, receivedShareChan <-chan share.ReceivedShareWithUser) error { - log := appctx.GetLogger(ctx) - if err := m.initialize(); err != nil { - return err - } - - var mu sync.Mutex - var wg sync.WaitGroup - wg.Add(2) - go func() { - for s := range shareChan { - if s == nil { - continue - } - mu.Lock() - if err := m.persistShare(context.Background(), s); err != nil { - log.Error().Err(err).Interface("share", s).Msg("error persisting share") - } - mu.Unlock() - } - wg.Done() - }() - go func() { - for s := range receivedShareChan { - if s.ReceivedShare != nil && s.UserID != nil { - mu.Lock() - if err := m.persistReceivedShare(context.Background(), s.UserID, s.ReceivedShare); err != nil { - log.Error().Err(err).Interface("received share", s).Msg("error persisting received share") - } - mu.Unlock() - } - } - wg.Done() - }() - wg.Wait() - - return nil -} - -func (m *Manager) getMetadata(ctx context.Context, shareid, grantee string) ReceivedShareMetadata { - // use default values if the grantee didn't configure anything yet - metadata := ReceivedShareMetadata{ - State: collaboration.ShareState_SHARE_STATE_PENDING, - } - data, err := m.storage.SimpleDownload(ctx, path.Join("metadata", shareid, grantee)) - if err != nil { - return metadata - } - err = json.Unmarshal(data, &metadata) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("shareid", shareid).Msg("error fetching share") - } - return metadata -} - -// Dump exports shares and received shares to channels (e.g. during migration) -func (m *Manager) Dump(ctx context.Context, shareChan chan<- *collaboration.Share, receivedShareChan chan<- share.ReceivedShareWithUser) error { - log := appctx.GetLogger(ctx) - if err := m.initialize(); err != nil { - return err - } - - shareids, err := m.storage.ReadDir(ctx, "shares") - if err != nil { - return err - } - for _, shareid := range shareids { - var s *collaboration.Share - if s, err = m.getShareByID(ctx, shareid); err != nil { - log.Error().Err(err).Str("shareid", shareid).Msg("error fetching share") - continue - } - // dump share data - shareChan <- s - // dump grantee metadata that includes share state and mount path - grantees, err := m.storage.ReadDir(ctx, path.Join("metadata", s.Id.OpaqueId)) - if err != nil { - continue - } - for _, grantee := range grantees { - metadata := m.getMetadata(ctx, s.GetId().GetOpaqueId(), grantee) - g, err := indexToGrantee(grantee) - if err != nil || g.Type != provider.GranteeType_GRANTEE_TYPE_USER { - // ignore group grants, as every user has his own received state - continue - } - receivedShareChan <- share.ReceivedShareWithUser{ - UserID: g.GetUserId(), - ReceivedShare: &collaboration.ReceivedShare{ - Share: s, - State: metadata.State, - MountPoint: metadata.MountPoint, - }, - } - } - } - return nil -} - -// Share creates a new share -func (m *Manager) Share(ctx context.Context, md *provider.ResourceInfo, g *collaboration.ShareGrant) (*collaboration.Share, error) { - if err := m.initialize(); err != nil { - return nil, err - } - user := ctxpkg.ContextMustGetUser(ctx) - // do not allow share to myself or the owner if share is for a user - if g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_USER && - (utils.UserEqual(g.Grantee.GetUserId(), user.Id) || utils.UserEqual(g.Grantee.GetUserId(), md.Owner)) { - return nil, errtypes.BadRequest("cs3: owner/creator and grantee are the same") - } - ts := utils.TSNow() - - share := &collaboration.Share{ - Id: 
&collaboration.ShareId{ - OpaqueId: uuid.NewString(), - }, - ResourceId: md.Id, - Permissions: g.Permissions, - Grantee: g.Grantee, - Owner: md.Owner, - Creator: user.Id, - Ctime: ts, - Mtime: ts, - } - - err := m.persistShare(ctx, share) - return share, err -} - -func (m *Manager) persistShare(ctx context.Context, share *collaboration.Share) error { - data, err := json.Marshal(share) - if err != nil { - return err - } - - err = m.storage.SimpleUpload(ctx, shareFilename(share.Id.OpaqueId), data) - if err != nil { - return err - } - - metadataPath := path.Join("metadata", share.Id.OpaqueId) - err = m.storage.MakeDirIfNotExist(ctx, metadataPath) - if err != nil { - return err - } - - _, err = m.indexer.Add(share) - if _, ok := err.(*indexerErrors.AlreadyExistsErr); ok { - return nil - } - return err -} - -// GetShare gets the information for a share by the given ref. -func (m *Manager) GetShare(ctx context.Context, ref *collaboration.ShareReference) (*collaboration.Share, error) { - err := m.initialize() - if err != nil { - return nil, err - } - - var s *collaboration.Share - switch { - case ref.GetId() != nil: - s, err = m.getShareByID(ctx, ref.GetId().OpaqueId) - case ref.GetKey() != nil: - s, err = m.getShareByKey(ctx, ref.GetKey()) - default: - return nil, errtypes.BadRequest("neither share id nor key was given") - } - - if err != nil { - return nil, err - } - - // check if we are the owner or the grantee - user := ctxpkg.ContextMustGetUser(ctx) - if user.GetId().GetType() == userpb.UserType_USER_TYPE_SERVICE || share.IsCreatedByUser(s, user) || share.IsGrantedToUser(s, user) { - return s, nil - } - - return nil, errtypes.NotFound("not found") -} - -// Unshare deletes the share pointed by ref. -func (m *Manager) Unshare(ctx context.Context, ref *collaboration.ShareReference) error { - if err := m.initialize(); err != nil { - return err - } - share, err := m.GetShare(ctx, ref) - if err != nil { - return err - } - - err = m.storage.Delete(ctx, shareFilename(ref.GetId().OpaqueId)) - if err != nil { - if _, ok := err.(errtypes.NotFound); !ok { - return err - } - } - - return m.indexer.Delete(share) -} - -// ListShares returns the shares created by the user -func (m *Manager) ListShares(ctx context.Context, filters []*collaboration.Filter) ([]*collaboration.Share, error) { - if err := m.initialize(); err != nil { - return nil, err - } - - user, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - return nil, errtypes.UserRequired("error getting user from context") - } - var rIDs []*provider.ResourceId - if len(filters) != 0 { - grouped := share.GroupFiltersByType(filters) - for _, g := range grouped { - for _, f := range g { - if f.GetResourceId() != nil { - rIDs = append(rIDs, f.GetResourceId()) - } - } - } - } - var ( - createdShareIds []string - err error - ) - // in spaces, always use the resourceId - // We could have more than one resourceID - // which would form a logical OR - if len(rIDs) != 0 { - for _, rID := range rIDs { - shareIDs, err := m.indexer.FindBy(&collaboration.Share{}, - indexer.NewField("ResourceId", resourceIDToIndex(rID)), - ) - if err != nil { - return nil, err - } - createdShareIds = append(createdShareIds, shareIDs...) - } - } else { - createdShareIds, err = m.indexer.FindBy(&collaboration.Share{}, - indexer.NewField("OwnerId", userIDToIndex(user.Id)), - indexer.NewField("CreatorId", userIDToIndex(user.Id)), - ) - if err != nil { - return nil, err - } - } - - // We use shareMem as a temporary lookup store to check which shares were - // already added. 
This is to prevent duplicates. - shareMem := make(map[string]struct{}) - result := []*collaboration.Share{} - for _, id := range createdShareIds { - s, err := m.getShareByID(ctx, id) - if err != nil { - return nil, err - } - if share.MatchesFilters(s, filters) { - result = append(result, s) - shareMem[s.Id.OpaqueId] = struct{}{} - } - } - - // If a user requests to list shares which have not been created by them - // we have to explicitly fetch these shares and check if the user is - // allowed to list the shares. - // Only then can we add these shares to the result. - grouped := share.GroupFiltersByType(filters) - idFilter, ok := grouped[collaboration.Filter_TYPE_RESOURCE_ID] - if !ok { - return result, nil - } - - shareIDsByResourceID := make(map[string]*provider.ResourceId) - for _, filter := range idFilter { - resourceID := filter.GetResourceId() - shareIDs, err := m.indexer.FindBy(&collaboration.Share{}, - indexer.NewField("ResourceId", resourceIDToIndex(resourceID)), - ) - if err != nil { - continue - } - for _, shareID := range shareIDs { - shareIDsByResourceID[shareID] = resourceID - } - } - - // statMem is used as a local cache to prevent statting resources which - // already have been checked. - statMem := make(map[string]struct{}) - for shareID, resourceID := range shareIDsByResourceID { - if _, handled := shareMem[shareID]; handled { - // We don't want to add a share multiple times when we added it - // already. - continue - } - - if _, checked := statMem[resourceIDToIndex(resourceID)]; !checked { - sReq := &provider.StatRequest{ - Ref: &provider.Reference{ResourceId: resourceID}, - } - sRes, err := m.gatewayClient.Stat(ctx, sReq) - if err != nil { - continue - } - if sRes.Status.Code != rpcv1beta1.Code_CODE_OK { - continue - } - if !sRes.Info.PermissionSet.ListGrants { - continue - } - statMem[resourceIDToIndex(resourceID)] = struct{}{} - } - - s, err := m.getShareByID(ctx, shareID) - if err != nil { - return nil, err - } - if share.MatchesFilters(s, filters) { - result = append(result, s) - shareMem[s.Id.OpaqueId] = struct{}{} - } - } - - return result, nil -} - -// UpdateShare updates the mode of the given share. -func (m *Manager) UpdateShare(ctx context.Context, ref *collaboration.ShareReference, p *collaboration.SharePermissions, updated *collaboration.Share, fieldMask *field_mask.FieldMask) (*collaboration.Share, error) { - if err := m.initialize(); err != nil { - return nil, err - } - share, err := m.GetShare(ctx, ref) - if err != nil { - return nil, err - } - share.Permissions = p - - data, err := json.Marshal(share) - if err != nil { - return nil, err - } - - err = m.storage.SimpleUpload(ctx, shareFilename(share.Id.OpaqueId), data) - - return share, err -} - -// ListReceivedShares returns the list of shares the user has access to. 
-func (m *Manager) ListReceivedShares(ctx context.Context, filters []*collaboration.Filter, forUser *userpb.UserId) ([]*collaboration.ReceivedShare, error) { - if err := m.initialize(); err != nil { - return nil, err - } - - user, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - return nil, errtypes.UserRequired("error getting user from context") - } - - uid, groups := user.GetId(), user.GetGroups() - if user.GetId().GetType() == userpb.UserType_USER_TYPE_SERVICE { - u, err := utils.GetUser(forUser, m.gatewayClient) - if err != nil { - return nil, errtypes.BadRequest("user not found") - } - uid = forUser - groups = u.GetGroups() - } - result := []*collaboration.ReceivedShare{} - - ids, err := granteeToIndex(&provider.Grantee{ - Type: provider.GranteeType_GRANTEE_TYPE_USER, - Id: &provider.Grantee_UserId{UserId: uid}, - }) - if err != nil { - return nil, err - } - receivedIds, err := m.indexer.FindBy(&collaboration.Share{}, - indexer.NewField("GranteeId", ids), - ) - if err != nil { - return nil, err - } - for _, group := range groups { - index, err := granteeToIndex(&provider.Grantee{ - Type: provider.GranteeType_GRANTEE_TYPE_GROUP, - Id: &provider.Grantee_GroupId{GroupId: &groupv1beta1.GroupId{OpaqueId: group}}, - }) - if err != nil { - return nil, err - } - groupIds, err := m.indexer.FindBy(&collaboration.Share{}, - indexer.NewField("GranteeId", index), - ) - if err != nil { - return nil, err - } - receivedIds = append(receivedIds, groupIds...) - } - - for _, id := range receivedIds { - s, err := m.getShareByID(ctx, id) - if err != nil { - return nil, err - } - if !share.MatchesFilters(s, filters) { - continue - } - metadata, err := m.downloadMetadata(ctx, s) - if err != nil { - if _, ok := err.(errtypes.NotFound); !ok { - return nil, err - } - // use default values if the grantee didn't configure anything yet - metadata = ReceivedShareMetadata{ - State: collaboration.ShareState_SHARE_STATE_PENDING, - } - } - result = append(result, &collaboration.ReceivedShare{ - Share: s, - State: metadata.State, - MountPoint: metadata.MountPoint, - }) - } - return result, nil -} - -// GetReceivedShare returns the information for a received share. -func (m *Manager) GetReceivedShare(ctx context.Context, ref *collaboration.ShareReference) (*collaboration.ReceivedShare, error) { - if err := m.initialize(); err != nil { - return nil, err - } - - share, err := m.GetShare(ctx, ref) - if err != nil { - return nil, err - } - - metadata, err := m.downloadMetadata(ctx, share) - if err != nil { - if _, ok := err.(errtypes.NotFound); !ok { - return nil, err - } - // use default values if the grantee didn't configure anything yet - metadata = ReceivedShareMetadata{ - State: collaboration.ShareState_SHARE_STATE_PENDING, - } - } - return &collaboration.ReceivedShare{ - Share: share, - State: metadata.State, - MountPoint: metadata.MountPoint, - }, nil -} - -// UpdateReceivedShare updates the received share with share state. 
-func (m *Manager) UpdateReceivedShare(ctx context.Context, rshare *collaboration.ReceivedShare, fieldMask *field_mask.FieldMask, forUser *userpb.UserId) (*collaboration.ReceivedShare, error) { - if err := m.initialize(); err != nil { - return nil, err - } - - user, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - return nil, errtypes.UserRequired("error getting user from context") - } - - rs, err := m.GetReceivedShare(ctx, &collaboration.ShareReference{Spec: &collaboration.ShareReference_Id{Id: rshare.Share.Id}}) - if err != nil { - return nil, err - } - - for i := range fieldMask.Paths { - switch fieldMask.Paths[i] { - case "state": - rs.State = rshare.State - case "mount_point": - rs.MountPoint = rshare.MountPoint - case "hidden": - continue - default: - return nil, errtypes.NotSupported("updating " + fieldMask.Paths[i] + " is not supported") - } - } - - uid := user.GetId() - if user.GetId().GetType() == userpb.UserType_USER_TYPE_SERVICE { - uid = forUser - } - - err = m.persistReceivedShare(ctx, uid, rs) - if err != nil { - return nil, err - } - return rs, nil -} - -func (m *Manager) persistReceivedShare(ctx context.Context, userID *userpb.UserId, rs *collaboration.ReceivedShare) error { - err := m.persistShare(ctx, rs.Share) - if err != nil { - return err - } - - meta := ReceivedShareMetadata{ - State: rs.State, - MountPoint: rs.MountPoint, - } - data, err := json.Marshal(meta) - if err != nil { - return err - } - - fn, err := metadataFilename(rs.Share, userID) - if err != nil { - return err - } - return m.storage.SimpleUpload(ctx, fn, data) -} - -func (m *Manager) downloadMetadata(ctx context.Context, share *collaboration.Share) (ReceivedShareMetadata, error) { - user, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - return ReceivedShareMetadata{}, errtypes.UserRequired("error getting user from context") - } - - metadataFn, err := metadataFilename(share, user.Id) - if err != nil { - return ReceivedShareMetadata{}, err - } - data, err := m.storage.SimpleDownload(ctx, metadataFn) - if err != nil { - return ReceivedShareMetadata{}, err - } - metadata := ReceivedShareMetadata{} - err = json.Unmarshal(data, &metadata) - return metadata, err -} - -func (m *Manager) getShareByID(ctx context.Context, id string) (*collaboration.Share, error) { - data, err := m.storage.SimpleDownload(ctx, shareFilename(id)) - if err != nil { - return nil, err - } - - userShare := &collaboration.Share{ - Grantee: &provider.Grantee{Id: &provider.Grantee_UserId{}}, - } - err = json.Unmarshal(data, userShare) - if err == nil && userShare.Grantee.GetUserId() != nil { - userShare.ResourceId = storagespace.UpdateLegacyResourceID(userShare.GetResourceId()) - return userShare, nil - } - - groupShare := &collaboration.Share{ - Grantee: &provider.Grantee{Id: &provider.Grantee_GroupId{}}, - } - err = json.Unmarshal(data, groupShare) // try to unmarshal to a group share if the user share unmarshalling failed - if err == nil && groupShare.Grantee.GetGroupId() != nil { - groupShare.ResourceId = storagespace.UpdateLegacyResourceID(groupShare.GetResourceId()) - return groupShare, nil - } - - return nil, errtypes.InternalError("failed to unmarshal share data") -} - -func (m *Manager) getShareByKey(ctx context.Context, key *collaboration.ShareKey) (*collaboration.Share, error) { - ownerIds, err := m.indexer.FindBy(&collaboration.Share{}, - indexer.NewField("OwnerId", userIDToIndex(key.Owner)), - ) - if err != nil { - return nil, err - } - granteeIndex, err := granteeToIndex(key.Grantee) - if err != nil { - return nil, err - } - 
granteeIds, err := m.indexer.FindBy(&collaboration.Share{}, - indexer.NewField("GranteeId", granteeIndex), - ) - if err != nil { - return nil, err - } - - ids := intersectSlices(ownerIds, granteeIds) - for _, id := range ids { - share, err := m.getShareByID(ctx, id) - if err != nil { - return nil, err - } - if utils.ResourceIDEqual(share.ResourceId, key.ResourceId) { - return share, nil - } - } - return nil, errtypes.NotFound("share not found") -} - -func shareFilename(id string) string { - return path.Join("shares", id) -} - -func metadataFilename(s *collaboration.Share, g interface{}) (string, error) { - var granteePart string - switch v := g.(type) { - case *userpb.UserId: - granteePart = url.QueryEscape("user:" + v.Idp + ":" + v.OpaqueId) - case *provider.Grantee: - var err error - granteePart, err = granteeToIndex(v) - if err != nil { - return "", err - } - } - return path.Join("metadata", s.Id.OpaqueId, granteePart), nil -} - -func indexOwnerFunc(v interface{}) (string, error) { - share, ok := v.(*collaboration.Share) - if !ok { - return "", fmt.Errorf("given entity is not a share") - } - return userIDToIndex(share.Owner), nil -} - -func indexCreatorFunc(v interface{}) (string, error) { - share, ok := v.(*collaboration.Share) - if !ok { - return "", fmt.Errorf("given entity is not a share") - } - return userIDToIndex(share.Creator), nil -} - -func userIDToIndex(id *userpb.UserId) string { - return url.QueryEscape(id.Idp + ":" + id.OpaqueId) -} - -func indexGranteeFunc(v interface{}) (string, error) { - share, ok := v.(*collaboration.Share) - if !ok { - return "", fmt.Errorf("given entity is not a share") - } - return granteeToIndex(share.Grantee) -} - -func indexResourceIDFunc(v interface{}) (string, error) { - share, ok := v.(*collaboration.Share) - if !ok { - return "", fmt.Errorf("given entity is not a share") - } - return resourceIDToIndex(share.ResourceId), nil -} - -func resourceIDToIndex(id *provider.ResourceId) string { - return strings.Join([]string{id.SpaceId, id.OpaqueId}, "!") -} - -func granteeToIndex(grantee *provider.Grantee) (string, error) { - switch { - case grantee.Type == provider.GranteeType_GRANTEE_TYPE_USER: - return url.QueryEscape("user:" + grantee.GetUserId().Idp + ":" + grantee.GetUserId().OpaqueId), nil - case grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP: - return url.QueryEscape("group:" + grantee.GetGroupId().OpaqueId), nil - default: - return "", fmt.Errorf("unknown grantee type") - } -} - -// indexToGrantee tries to unparse a grantee in a metadata dir -// unfortunately, it is just concatenated by :, causing nasty corner cases -func indexToGrantee(name string) (*provider.Grantee, error) { - unescaped, err := url.QueryUnescape(name) - if err != nil { - return nil, err - } - parts := strings.SplitN(unescaped, ":", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid grantee %s", unescaped) - } - switch parts[0] { - case "user": - lastInd := strings.LastIndex(parts[1], ":") - return &provider.Grantee{ - Type: provider.GranteeType_GRANTEE_TYPE_USER, - Id: &provider.Grantee_UserId{ - UserId: &userpb.UserId{ - Idp: parts[1][:lastInd], - OpaqueId: parts[1][lastInd+1:], - }, - }, - }, nil - case "group": - return &provider.Grantee{ - Type: provider.GranteeType_GRANTEE_TYPE_GROUP, - Id: &provider.Grantee_GroupId{ - GroupId: &groupv1beta1.GroupId{ - OpaqueId: parts[1], - }, - }, - }, nil - default: - return nil, fmt.Errorf("invalid grantee %s", unescaped) - } -} - -func intersectSlices(a, b []string) []string { - aMap := map[string]bool{} - for 
_, s := range a { - aMap[s] = true - } - result := []string{} - for _, s := range b { - if _, ok := aMap[s]; ok { - result = append(result, s) - } - } - return result -} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/share/manager/loader/loader.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/share/manager/loader/loader.go index 94b411c17..c6ef3e18b 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/share/manager/loader/loader.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/share/manager/loader/loader.go @@ -20,7 +20,6 @@ package loader import ( // Load core share manager drivers. - _ "github.com/opencloud-eu/reva/v2/pkg/share/manager/cs3" _ "github.com/opencloud-eu/reva/v2/pkg/share/manager/json" _ "github.com/opencloud-eu/reva/v2/pkg/share/manager/jsoncs3" _ "github.com/opencloud-eu/reva/v2/pkg/share/manager/memory" diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/cephfs/upload.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/cephfs/upload.go index 42c607ec4..4b76da800 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/cephfs/upload.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/cephfs/upload.go @@ -323,7 +323,7 @@ func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.R // If the HTTP PATCH request gets interrupted in the middle (e.g. because // the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF. - // However, for OwnCloudStore it's not important whether the stream has ended + // However, for the driver it's not important whether the stream has ended // on purpose or accidentally. if err != nil { if err != io.ErrUnexpectedEOF { diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/blobstore/blobstore.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/blobstore/blobstore.go index 9fb4840d2..1268385e4 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/blobstore/blobstore.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/blobstore/blobstore.go @@ -1,4 +1,5 @@ // Copyright 2018-2021 CERN +// Copyright 2025 OpenCloud GmbH // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -22,6 +23,7 @@ import ( "bufio" "io" "os" + "path/filepath" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node" "github.com/pkg/errors" @@ -40,7 +42,7 @@ func New(root string) (*Blobstore, error) { } // Upload stores some data in the blobstore under the given key -func (bs *Blobstore) Upload(node *node.Node, source string) error { +func (bs *Blobstore) Upload(node *node.Node, source, copyTarget string) error { path := node.InternalPath() // preserve the mtime of the file @@ -48,7 +50,7 @@ func (bs *Blobstore) Upload(node *node.Node, source string) error { file, err := os.Open(source) if err != nil { - return errors.Wrap(err, "Decomposedfs: oCIS blobstore: Can not open source file to upload") + return errors.Wrap(err, "Decomposedfs: posix blobstore: Can not open source file to upload") } defer file.Close() @@ -63,15 +65,35 @@ func (bs *Blobstore) Upload(node *node.Node, source string) error { if err != nil { return errors.Wrapf(err, "could not write blob '%s'", node.InternalPath()) } - err = w.Flush() if err != nil { return err } - - if fi != nil { - return os.Chtimes(path, fi.ModTime(), fi.ModTime()) + err = os.Chtimes(path, fi.ModTime(), fi.ModTime()) + if err != nil { + return err } + + if copyTarget != "" { + // also "upload" the file to a local path, e.g. for keeping the "current" version of the file + err := os.MkdirAll(filepath.Dir(copyTarget), 0700) + if err != nil { + return err + } + + file.Seek(0, 0) + copyFile, err := os.OpenFile(copyTarget, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) + if err != nil { + return errors.Wrapf(err, "could not open copy target '%s' for writing", copyTarget) + } + defer copyFile.Close() + + _, err = copyFile.ReadFrom(file) + if err != nil { + return errors.Wrapf(err, "could not write blob copy of '%s' to '%s'", node.InternalPath(), copyTarget) + } + } + return nil } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/lookup.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/lookup.go index 0e42a3f1d..0806202df 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/lookup.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/lookup.go @@ -1,4 +1,5 @@ // Copyright 2018-2021 CERN +// Copyright 2025 OpenCloud GmbH // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
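For illustration, a minimal sketch (not part of the vendored code) of what the new copyTarget argument on the posix Blobstore.Upload in the hunk above appears to add: besides the normal blob write, a local "current" copy of the uploaded source is kept when copyTarget is non-empty, so a revision can later be generated from it. The helper name uploadWithCurrentCopy is hypothetical; only the os/filepath calls mirror the diff.

package sketch

import (
	"io"
	"os"
	"path/filepath"
)

// uploadWithCurrentCopy is a hypothetical stand-in for the Upload shown above:
// after the blob has been written, it also writes a local "current" copy of
// the source file when copyTarget is non-empty.
func uploadWithCurrentCopy(source, copyTarget string) error {
	src, err := os.Open(source)
	if err != nil {
		return err
	}
	defer src.Close()

	// ... the actual blobstore write happens here in the real driver ...

	if copyTarget == "" {
		return nil // keeping a "current" copy is optional
	}
	if err := os.MkdirAll(filepath.Dir(copyTarget), 0700); err != nil {
		return err
	}
	dst, err := os.OpenFile(copyTarget, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	defer dst.Close()

	// rewind, since the blobstore write consumed the reader
	if _, err := src.Seek(0, io.SeekStart); err != nil {
		return err
	}
	_, err = io.Copy(dst, src)
	return err
}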
@@ -45,8 +46,11 @@ import ( var tracer trace.Tracer +const RevisionsDir = ".oc-nodes" + var _spaceTypePersonal = "personal" var _spaceTypeProject = "project" +var _currentSuffix = ".current" func init() { tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/pkg/decomposedfs/lookup") @@ -310,11 +314,39 @@ func (lu *Lookup) InternalRoot() string { // InternalPath returns the internal path for a given ID func (lu *Lookup) InternalPath(spaceID, nodeID string) string { + if strings.Contains(nodeID, node.RevisionIDDelimiter) { + spaceRoot, _ := lu.IDCache.Get(context.Background(), spaceID, spaceID) + if len(spaceRoot) == 0 { + return "" + } + return filepath.Join(spaceRoot, RevisionsDir, Pathify(nodeID, 4, 2)) + } + path, _ := lu.IDCache.Get(context.Background(), spaceID, nodeID) return path } +// VersionPath returns the path to the version of the node +func (lu *Lookup) VersionPath(spaceID, nodeID, version string) string { + spaceRoot, _ := lu.IDCache.Get(context.Background(), spaceID, spaceID) + if len(spaceRoot) == 0 { + return "" + } + + return filepath.Join(spaceRoot, RevisionsDir, Pathify(nodeID, 4, 2)+node.RevisionIDDelimiter+version) +} + +// VersionPath returns the "current" path of the node +func (lu *Lookup) CurrentPath(spaceID, nodeID string) string { + spaceRoot, _ := lu.IDCache.Get(context.Background(), spaceID, spaceID) + if len(spaceRoot) == 0 { + return "" + } + + return filepath.Join(spaceRoot, RevisionsDir, Pathify(nodeID, 4, 2)+_currentSuffix) +} + // // ReferenceFromAttr returns a CS3 reference from xattr of a node. // // Supported formats are: "cs3:storageid/nodeid" // func ReferenceFromAttr(b []byte) (*provider.Reference, error) { diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options/options.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options/options.go index 492d80f9e..b2ee6f02b 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options/options.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options/options.go @@ -33,6 +33,12 @@ type Options struct { ScanDebounceDelay time.Duration `mapstructure:"scan_debounce_delay"` + // Allows generating revisions from changes done to the local storage. + // Note: This basically doubles the number of bytes stored on disk because + // a copy of the current version of a file is kept available for generating + // a revision when the file is changed. + EnableFSRevisions bool `mapstructure:"enable_fs_revisions"` + WatchFS bool `mapstructure:"watch_fs"` WatchType string `mapstructure:"watch_type"` WatchPath string `mapstructure:"watch_path"` diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/posix.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/posix.go index fac79df36..c1429fd23 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/posix.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/posix.go @@ -103,7 +103,13 @@ func New(m map[string]interface{}, stream events.Stream, log *zerolog.Logger) (s return nil, fmt.Errorf("the posix driver requires a shared id cache, e.g. 
nats-js-kv or redis") } - tp, err := tree.New(lu, bs, um, trashbin, o, stream, store.Create( + permissionsSelector, err := pool.PermissionsSelector(o.PermissionsSVC, pool.WithTLSMode(o.PermTLSMode)) + if err != nil { + return nil, err + } + p := permissions.NewPermissions(node.NewPermissions(lu), permissionsSelector) + + tp, err := tree.New(lu, bs, um, trashbin, p, o, stream, store.Create( store.Store(o.IDCache.Store), store.TTL(o.IDCache.TTL), store.Size(o.IDCache.Size), @@ -117,20 +123,13 @@ func New(m map[string]interface{}, stream events.Stream, log *zerolog.Logger) (s return nil, err } - permissionsSelector, err := pool.PermissionsSelector(o.PermissionsSVC, pool.WithTLSMode(o.PermTLSMode)) - if err != nil { - return nil, err - } - - p := permissions.NewPermissions(node.NewPermissions(lu), permissionsSelector) - aspects := aspects.Aspects{ Lookup: lu, Tree: tp, Permissions: p, EventStream: stream, UserMapper: um, - DisableVersioning: true, + DisableVersioning: false, Trashbin: trashbin, } @@ -203,19 +202,19 @@ func (fs *posixFS) GetUpload(ctx context.Context, id string) (upload tusd.Upload // To implement the termination extension as specified in https://tus.io/protocols/resumable-upload.html#termination // the storage needs to implement AsTerminatableUpload func (fs *posixFS) AsTerminatableUpload(up tusd.Upload) tusd.TerminatableUpload { - return up.(*upload.OcisSession) + return up.(*upload.DecomposedFsSession) } // AsLengthDeclarableUpload returns a LengthDeclarableUpload // To implement the creation-defer-length extension as specified in https://tus.io/protocols/resumable-upload.html#creation // the storage needs to implement AsLengthDeclarableUpload func (fs *posixFS) AsLengthDeclarableUpload(up tusd.Upload) tusd.LengthDeclarableUpload { - return up.(*upload.OcisSession) + return up.(*upload.DecomposedFsSession) } // AsConcatableUpload returns a ConcatableUpload // To implement the concatenation extension as specified in https://tus.io/protocols/resumable-upload.html#concatenation // the storage needs to implement AsConcatableUpload func (fs *posixFS) AsConcatableUpload(up tusd.Upload) tusd.ConcatableUpload { - return up.(*upload.OcisSession) + return up.(*upload.DecomposedFsSession) } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/timemanager/timemanager.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/timemanager/timemanager.go index 8e2516c43..005a1abae 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/timemanager/timemanager.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/timemanager/timemanager.go @@ -33,7 +33,8 @@ type Manager struct { } // OverrideMtime overrides the modification time (mtime) of a node with the specified time. 
-func (m *Manager) OverrideMtime(ctx context.Context, n *node.Node, _ *node.Attributes, mtime time.Time) error { +func (m *Manager) OverrideMtime(ctx context.Context, n *node.Node, attrs *node.Attributes, mtime time.Time) error { + attrs.SetTime(prefixes.MTimeAttr, mtime) return os.Chtimes(n.InternalPath(), mtime, mtime) } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go index 52f404a85..e84d7fed1 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go @@ -1,4 +1,5 @@ // Copyright 2018-2021 CERN +// Copyright 2025 OpenCloud GmbH // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -21,6 +22,7 @@ package tree import ( "context" "fmt" + "io" "io/fs" "os" "path/filepath" @@ -67,7 +69,7 @@ type queueItem struct { timer *time.Timer } -const dirtyFlag = "user.ocis.dirty" +const dirtyFlag = "user.oc.dirty" // NewScanDebouncer returns a new SpaceDebouncer instance func NewScanDebouncer(d time.Duration, f func(item scanItem)) *ScanDebouncer { @@ -375,7 +377,24 @@ func (t *Tree) assimilate(item scanItem) error { } // check for the id attribute again after grabbing the lock, maybe the file was assimilated/created by us in the meantime - id, err = t.lookup.MetadataBackend().Get(context.Background(), item.Path, prefixes.IDAttr) + md, err := t.lookup.MetadataBackend().All(context.Background(), item.Path) + if err != nil { + return err + } + attrs := node.Attributes(md) + + // compare metadata mtime with actual mtime. if it matches we can skip the assimilation because the file was handled by us + mtime, err := attrs.Time(prefixes.MTimeAttr) + if err == nil { + fi, err := os.Stat(item.Path) + if err == nil { + if mtime.Equal(fi.ModTime()) { + return nil + } + } + } + + id = attrs[prefixes.IDAttr] if err == nil { previousPath, ok := t.lookup.(*lookup.Lookup).GetCachedID(context.Background(), spaceID, string(id)) previousParentID, _ := t.lookup.MetadataBackend().Get(context.Background(), item.Path, prefixes.ParentidAttr) @@ -576,11 +595,62 @@ assimilate: } attributes[prefixes.PropagationAttr] = []byte("1") } else { + attributes.SetString(prefixes.BlobIDAttr, uuid.NewString()) + attributes.SetInt64(prefixes.BlobsizeAttr, fi.Size()) attributes.SetInt64(prefixes.TypeAttr, int64(provider.ResourceType_RESOURCE_TYPE_FILE)) } n := node.New(spaceID, id, parentID, filepath.Base(path), fi.Size(), "", provider.ResourceType_RESOURCE_TYPE_FILE, nil, t.lookup) n.SpaceRoot = &node.Node{SpaceID: spaceID, ID: spaceID} + + go func() { + // Copy the previous current version to a revision + currentPath := t.lookup.(*lookup.Lookup).CurrentPath(n.SpaceID, n.ID) + stat, err := os.Stat(currentPath) + if err != nil { + t.log.Error().Err(err).Str("path", path).Str("currentPath", currentPath).Msg("could not stat current path") + return + } + revisionPath := t.lookup.VersionPath(n.SpaceID, n.ID, stat.ModTime().UTC().Format(time.RFC3339Nano)) + + err = os.Rename(currentPath, revisionPath) + if err != nil { + t.log.Error().Err(err).Str("path", path).Str("revisionPath", revisionPath).Msg("could not create revision") + return + } + + // Copy the new version to the current version + w, err := os.OpenFile(currentPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) + if err != nil { + 
t.log.Error().Err(err).Str("path", path).Str("currentPath", currentPath).Msg("could not open current path for writing") + return + } + defer w.Close() + r, err := os.OpenFile(n.InternalPath(), os.O_RDONLY, 0600) + if err != nil { + t.log.Error().Err(err).Str("path", path).Msg("could not open file for reading") + return + } + defer r.Close() + + _, err = io.Copy(w, r) + if err != nil { + t.log.Error().Err(err).Str("currentPath", currentPath).Str("path", path).Msg("could not copy new version to current version") + return + } + + err = t.lookup.CopyMetadata(context.Background(), n.InternalPath(), currentPath, func(attributeName string, value []byte) (newValue []byte, copy bool) { + return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || + attributeName == prefixes.TypeAttr || + attributeName == prefixes.BlobIDAttr || + attributeName == prefixes.BlobsizeAttr + }, false) + if err != nil { + t.log.Error().Err(err).Str("currentPath", currentPath).Str("path", path).Msg("failed to copy xattrs to 'current' file") + return + } + }() + err = t.Propagate(context.Background(), n, 0) if err != nil { return nil, errors.Wrap(err, "failed to propagate") @@ -623,7 +693,7 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error { sizes := make(map[string]int64) err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { // skip lock and upload files - if isLockFile(path) { + if isInternal(path) || isLockFile(path) { return nil } if isTrash(path) || t.isUpload(path) { diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/gpfsfilauditloggingwatcher.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/gpfsfilauditloggingwatcher.go index 681a7910c..c00f93474 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/gpfsfilauditloggingwatcher.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/gpfsfilauditloggingwatcher.go @@ -82,7 +82,7 @@ start: w.log.Error().Err(err).Str("line", line).Msg("error unmarshalling line") continue } - if isLockFile(ev.Path) || isTrash(ev.Path) || w.tree.isUpload(ev.Path) { + if w.tree.isIgnored(ev.Path) { continue } go func() { diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/gpfswatchfolderwatcher.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/gpfswatchfolderwatcher.go index 5d05bcd11..6c952d579 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/gpfswatchfolderwatcher.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/gpfswatchfolderwatcher.go @@ -46,7 +46,7 @@ func NewGpfsWatchFolderWatcher(tree *Tree, kafkaBrokers []string, log *zerolog.L func (w *GpfsWatchFolderWatcher) Watch(topic string) { r := kafka.NewReader(kafka.ReaderConfig{ Brokers: w.brokers, - GroupID: "ocis-posixfs", + GroupID: "opencloud-posixfs", Topic: topic, }) @@ -62,7 +62,7 @@ func (w *GpfsWatchFolderWatcher) Watch(topic string) { continue } - if isLockFile(lwev.Path) || isTrash(lwev.Path) || w.tree.isUpload(lwev.Path) { + if w.tree.isIgnored(lwev.Path) { continue } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher.go index 660cdbdac..1251851f9 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher.go @@ -63,7 +63,7 @@ 
func (iw *InotifyWatcher) Watch(path string) { for { select { case event := <-events: - if isLockFile(event.Filename) || isTrash(event.Filename) || iw.tree.isUpload(event.Filename) { + if iw.tree.isIgnored(event.Filename) { continue } for _, e := range event.Events { diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/revisions.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/revisions.go new file mode 100644 index 000000000..bbb5498f2 --- /dev/null +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/revisions.go @@ -0,0 +1,338 @@ +// Copyright 2018-2021 CERN +// Copyright 2025 OpenCloud GmbH +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package tree + +import ( + "context" + "io" + "os" + "path/filepath" + "strings" + "time" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/pkg/errors" + "github.com/rogpeppe/go-internal/lockedfile" + + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/errtypes" + "github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup" + "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes" + "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node" + "github.com/opencloud-eu/reva/v2/pkg/storagespace" + "github.com/opencloud-eu/reva/v2/pkg/utils" +) + +// Revision entries are stored inside the revisions directory in .oc-nodes, sharded by the node ids. +// The `.REV.` indicates it is a revision and what follows is a timestamp, so multiple versions +// can be kept in the same location. 
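To make the layout described in the comment above concrete, here is a small standalone sketch of how a revision path and the matching "current" path could look, using the RevisionsDir (".oc-nodes") and ".current" suffix from the lookup changes and the ".REV." delimiter named in the comment. The space root and node id are made up, and the assumption that Pathify(nodeID, 4, 2) shards the first eight characters into four two-character directories is inferred from its arguments, not verified here.

package main

import (
	"fmt"
	"path/filepath"
	"time"
)

func main() {
	// All values below are made up for illustration.
	spaceRoot := "/srv/opencloud/space-root"
	nodeID := "4c510ada-c86b-4815-8820-42cdf82c3d51"
	version := time.Date(2025, 2, 13, 8, 59, 13, 0, time.UTC).Format(time.RFC3339Nano)

	// Assumed sharding: Pathify(nodeID, 4, 2) splits the first eight
	// characters into four two-character directories.
	sharded := filepath.Join(nodeID[0:2], nodeID[2:4], nodeID[4:6], nodeID[6:8], nodeID[8:])

	revisionPath := filepath.Join(spaceRoot, ".oc-nodes", sharded+".REV."+version)
	currentPath := filepath.Join(spaceRoot, ".oc-nodes", sharded+".current")

	fmt.Println(revisionPath)
	// /srv/opencloud/space-root/.oc-nodes/4c/51/0a/da/-c86b-4815-8820-42cdf82c3d51.REV.2025-02-13T08:59:13Z
	fmt.Println(currentPath)
	// /srv/opencloud/space-root/.oc-nodes/4c/51/0a/da/-c86b-4815-8820-42cdf82c3d51.current
}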
+ +// CreateRevision creates a new version of the node +func (tp *Tree) CreateRevision(ctx context.Context, n *node.Node, version string, f *lockedfile.File) (string, error) { + versionPath := tp.lookup.VersionPath(n.SpaceID, n.ID, version) + + err := os.MkdirAll(filepath.Dir(versionPath), 0700) + if err != nil { + return "", err + } + + // copy file content to version node + sf, err := os.OpenFile(n.InternalPath(), os.O_RDONLY, 0) + if err != nil { + return "", err + } + defer sf.Close() + vf, err := os.OpenFile(versionPath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + return "", err + } + defer vf.Close() + if _, err := io.Copy(vf, sf); err != nil { + return "", err + } + + // copy blob metadata to version node + if err := tp.lookup.CopyMetadataWithSourceLock(ctx, n.InternalPath(), versionPath, func(attributeName string, value []byte) (newValue []byte, copy bool) { + return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || + attributeName == prefixes.TypeAttr || + attributeName == prefixes.BlobIDAttr || + attributeName == prefixes.BlobsizeAttr || + attributeName == prefixes.MTimeAttr + }, f, true); err != nil { + return "", err + } + + return versionPath, nil +} + +func (tp *Tree) ListRevisions(ctx context.Context, ref *provider.Reference) (revisions []*provider.FileVersion, err error) { + _, span := tracer.Start(ctx, "ListRevisions") + defer span.End() + var n *node.Node + if n, err = tp.lookup.NodeFromResource(ctx, ref); err != nil { + return + } + if !n.Exists { + err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) + return + } + + rp, err := tp.permissions.AssemblePermissions(ctx, n) + switch { + case err != nil: + return nil, err + case !rp.ListFileVersions: + f, _ := storagespace.FormatReference(ref) + if rp.Stat { + return nil, errtypes.PermissionDenied(f) + } + return nil, errtypes.NotFound(f) + } + + revisions = []*provider.FileVersion{} + versionGlob := tp.lookup.VersionPath(n.SpaceID, n.ID, "*") + if items, err := filepath.Glob(versionGlob); err == nil { + for i := range items { + if tp.lookup.MetadataBackend().IsMetaFile(items[i]) || strings.HasSuffix(items[i], ".mlock") { + continue + } + + if fi, err := os.Stat(items[i]); err == nil { + parts := strings.SplitN(fi.Name(), node.RevisionIDDelimiter, 2) + if len(parts) != 2 { + appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("invalid revision name, skipping") + continue + } + mtime := fi.ModTime() + rev := &provider.FileVersion{ + Key: n.ID + node.RevisionIDDelimiter + parts[1], + Mtime: uint64(mtime.Unix()), + } + _, blobSize, err := tp.lookup.ReadBlobIDAndSizeAttr(ctx, items[i], nil) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("error reading blobsize xattr, using 0") + } + rev.Size = uint64(blobSize) + etag, err := node.CalculateEtag(n.ID, mtime) + if err != nil { + return nil, errors.Wrapf(err, "error calculating etag") + } + rev.Etag = etag + revisions = append(revisions, rev) + } + } + } + // maybe we need to sort the list by key + /* + sort.Slice(revisions, func(i, j int) bool { + return revisions[i].Key > revisions[j].Key + }) + */ + + return +} + +// DownloadRevision returns a reader for the specified revision +// FIXME the CS3 api should explicitly allow initiating revision and trash download, a related issue is https://github.com/cs3org/reva/issues/1813 +func (tp *Tree) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string, openReaderFunc func(md *provider.ResourceInfo) bool) 
(*provider.ResourceInfo, io.ReadCloser, error) { + _, span := tracer.Start(ctx, "DownloadRevision") + defer span.End() + log := appctx.GetLogger(ctx) + + // verify revision key format + kp := strings.SplitN(revisionKey, node.RevisionIDDelimiter, 2) + if len(kp) != 2 { + log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") + return nil, nil, errtypes.NotFound(revisionKey) + } + log.Debug().Str("revisionKey", revisionKey).Msg("DownloadRevision") + + spaceID := ref.ResourceId.SpaceId + // check if the node is available and has not been deleted + n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], false, nil, false) + if err != nil { + return nil, nil, err + } + if !n.Exists { + err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) + return nil, nil, err + } + + rp, err := tp.permissions.AssemblePermissions(ctx, n) + switch { + case err != nil: + return nil, nil, err + case !rp.ListFileVersions || !rp.InitiateFileDownload: // TODO add explicit permission in the CS3 api? + f, _ := storagespace.FormatReference(ref) + if rp.Stat { + return nil, nil, errtypes.PermissionDenied(f) + } + return nil, nil, errtypes.NotFound(f) + } + + contentPath := tp.lookup.InternalPath(spaceID, revisionKey) + + _, blobsize, err := tp.lookup.ReadBlobIDAndSizeAttr(ctx, contentPath, nil) + if err != nil { + return nil, nil, errors.Wrapf(err, "Decomposedfs: could not read blob id and size for revision '%s' of node '%s'", kp[1], n.ID) + } + + revisionNode := node.New(spaceID, revisionKey, n.ParentID, n.Name, blobsize, "", provider.ResourceType_RESOURCE_TYPE_FILE, n.Owner(), tp.lookup) + + ri, err := n.AsResourceInfo(ctx, rp, nil, []string{"size", "mimetype", "etag"}, true) + if err != nil { + return nil, nil, err + } + + // update resource info with revision data + mtime, err := time.Parse(time.RFC3339Nano, kp[1]) + if err != nil { + return nil, nil, errors.Wrapf(err, "Decomposedfs: could not parse mtime for revision '%s' of node '%s'", kp[1], n.ID) + } + ri.Size = uint64(blobsize) + ri.Mtime = utils.TimeToTS(mtime) + ri.Etag, err = node.CalculateEtag(n.ID, mtime) + if err != nil { + return nil, nil, errors.Wrapf(err, "error calculating etag for revision '%s' of node '%s'", kp[1], n.ID) + } + + var reader io.ReadCloser + if openReaderFunc(ri) { + reader, err = tp.ReadBlob(revisionNode) + if err != nil { + return nil, nil, errors.Wrapf(err, "Decomposedfs: could not download blob of revision '%s' for node '%s'", n.ID, revisionKey) + } + } + return ri, reader, nil +} + +func (tp *Tree) getRevisionNode(ctx context.Context, ref *provider.Reference, revisionKey string, hasPermission func(*provider.ResourcePermissions) bool) (*node.Node, error) { + _, span := tracer.Start(ctx, "getRevisionNode") + defer span.End() + log := appctx.GetLogger(ctx) + + // verify revision key format + kp := strings.SplitN(revisionKey, node.RevisionIDDelimiter, 2) + if len(kp) != 2 { + log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") + return nil, errtypes.NotFound(revisionKey) + } + log.Debug().Str("revisionKey", revisionKey).Msg("DownloadRevision") + + spaceID := ref.ResourceId.SpaceId + // check if the node is available and has not been deleted + n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], false, nil, false) + if err != nil { + return nil, err + } + if !n.Exists { + err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) + return nil, err + } + + p, err := tp.permissions.AssemblePermissions(ctx, n) + switch { + case err != nil: + return nil, err + case !hasPermission(p): + return 
nil, errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) + } + + // Set space owner in context + storagespace.ContextSendSpaceOwnerID(ctx, n.SpaceOwnerOrManager(ctx)) + + return n, nil +} + +func (tp *Tree) RestoreRevision(ctx context.Context, spaceID, nodeID, source string) error { + target := tp.lookup.InternalPath(spaceID, nodeID) + rf, err := os.Open(source) + if err != nil { + return err + } + defer rf.Close() + + wf, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return err + } + defer wf.Close() + wf.Truncate(0) + + if _, err := io.Copy(wf, rf); err != nil { + return err + } + + err = tp.lookup.CopyMetadata(ctx, source, target, func(attributeName string, value []byte) (newValue []byte, copy bool) { + return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || + attributeName == prefixes.TypeAttr || + attributeName == prefixes.BlobIDAttr || + attributeName == prefixes.BlobsizeAttr + }, false) + if err != nil { + return errtypes.InternalError("failed to copy blob xattrs to old revision to node: " + err.Error()) + } + + // always set the node mtime to the current time + mtime := time.Now() + os.Chtimes(target, mtime, mtime) + err = tp.lookup.MetadataBackend().SetMultiple(ctx, target, + map[string][]byte{ + prefixes.MTimeAttr: []byte(mtime.UTC().Format(time.RFC3339Nano)), + }, + false) + if err != nil { + return errtypes.InternalError("failed to set mtime attribute on node: " + err.Error()) + } + + // update "current" revision + if tp.options.EnableFSRevisions { + currentPath := tp.lookup.(*lookup.Lookup).CurrentPath(spaceID, nodeID) + w, err := os.OpenFile(currentPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) + if err != nil { + tp.log.Error().Err(err).Str("currentPath", currentPath).Str("source", source).Msg("could not open current path for writing") + return err + } + defer w.Close() + r, err := os.OpenFile(source, os.O_RDONLY, 0600) + if err != nil { + tp.log.Error().Err(err).Str("currentPath", currentPath).Str("source", source).Msg("could not open file for reading") + return err + } + defer r.Close() + + _, err = io.Copy(w, r) + if err != nil { + tp.log.Error().Err(err).Str("currentPath", currentPath).Str("source", source).Msg("could not copy new version to current version") + return err + } + err = tp.lookup.CopyMetadata(ctx, source, currentPath, func(attributeName string, value []byte) (newValue []byte, copy bool) { + return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || + attributeName == prefixes.TypeAttr || + attributeName == prefixes.BlobIDAttr || + attributeName == prefixes.BlobsizeAttr + }, false) + if err != nil { + return errtypes.InternalError("failed to copy xattrs to 'current' file: " + err.Error()) + } + } + + return nil +} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go index b67bb589f..7b75c6633 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go @@ -1,4 +1,5 @@ // Copyright 2018-2021 CERN +// Copyright 2025 OpenCloud GmbH // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -27,6 +28,7 @@ import ( "path/filepath" "regexp" "strings" + "time" "github.com/google/uuid" "github.com/pkg/errors" @@ -48,6 +50,7 @@ import ( "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node" + "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/permissions" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/usermapper" "github.com/opencloud-eu/reva/v2/pkg/utils" @@ -61,7 +64,7 @@ func init() { // Blobstore defines an interface for storing blobs in a blobstore type Blobstore interface { - Upload(node *node.Node, source string) error + Upload(node *node.Node, source, copyTarget string) error Download(node *node.Node) (io.ReadCloser, error) Delete(node *node.Node) error } @@ -78,10 +81,11 @@ type scanItem struct { // Tree manages a hierarchical tree type Tree struct { - lookup node.PathLookup - blobstore Blobstore - trashbin *trashbin.Trashbin - propagator propagator.Propagator + lookup node.PathLookup + blobstore Blobstore + trashbin *trashbin.Trashbin + propagator propagator.Propagator + permissions permissions.Permissions options *options.Options @@ -99,17 +103,18 @@ type Tree struct { type PermissionCheckFunc func(rp *provider.ResourcePermissions) bool // New returns a new instance of Tree -func New(lu node.PathLookup, bs Blobstore, um usermapper.Mapper, trashbin *trashbin.Trashbin, o *options.Options, es events.Stream, cache store.Store, log *zerolog.Logger) (*Tree, error) { +func New(lu node.PathLookup, bs Blobstore, um usermapper.Mapper, trashbin *trashbin.Trashbin, permissions permissions.Permissions, o *options.Options, es events.Stream, cache store.Store, log *zerolog.Logger) (*Tree, error) { scanQueue := make(chan scanItem) t := &Tree{ - lookup: lu, - blobstore: bs, - userMapper: um, - trashbin: trashbin, - options: o, - idCache: cache, - propagator: propagator.New(lu, &o.Options, log), - scanQueue: scanQueue, + lookup: lu, + blobstore: bs, + userMapper: um, + trashbin: trashbin, + permissions: permissions, + options: o, + idCache: cache, + propagator: propagator.New(lu, &o.Options, log), + scanQueue: scanQueue, scanDebouncer: NewScanDebouncer(o.ScanDebounceDelay, func(item scanItem) { scanQueue <- item }), @@ -220,7 +225,7 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool, if err := os.MkdirAll(filepath.Dir(nodePath), 0700); err != nil { return errors.Wrap(err, "Decomposedfs: error creating node") } - _, err = os.Create(nodePath) + f, err := os.Create(nodePath) if err != nil { return errors.Wrap(err, "Decomposedfs: error creating node") } @@ -235,10 +240,14 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool, if err != nil { return err } - err = os.Chtimes(nodePath, nodeMTime, nodeMTime) + t.lookup.TimeManager().OverrideMtime(ctx, n, &attributes, nodeMTime) + } else { + fi, err := f.Stat() if err != nil { return err } + mtime := fi.ModTime() + attributes[prefixes.MTimeAttr] = []byte(mtime.UTC().Format(time.RFC3339Nano)) } err = n.SetXattrsWithContext(ctx, attributes, false) @@ -397,7 +406,7 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro g.Go(func() error { defer close(work) for _, name := range names { - if isLockFile(name) || isTrash(name) { + if isInternal(name) || isLockFile(name) || isTrash(name) { 
continue } @@ -676,7 +685,24 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err // WriteBlob writes a blob to the blobstore func (t *Tree) WriteBlob(node *node.Node, source string) error { - return t.blobstore.Upload(node, source) + var currentPath string + var err error + + if t.options.EnableFSRevisions { + currentPath = t.lookup.(*lookup.Lookup).CurrentPath(node.SpaceID, node.ID) + + defer func() { + _ = t.lookup.CopyMetadata(context.Background(), node.InternalPath(), currentPath, func(attributeName string, value []byte) (newValue []byte, copy bool) { + return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || + attributeName == prefixes.TypeAttr || + attributeName == prefixes.BlobIDAttr || + attributeName == prefixes.BlobsizeAttr + }, false) + }() + } + + err = t.blobstore.Upload(node, source, currentPath) + return err } // ReadBlob reads a blob from the blobstore @@ -845,14 +871,22 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) ( return } -func isLockFile(path string) bool { - return strings.HasSuffix(path, ".lock") || strings.HasSuffix(path, ".flock") || strings.HasSuffix(path, ".mlock") -} - -func isTrash(path string) bool { - return strings.HasSuffix(path, ".trashinfo") || strings.HasSuffix(path, ".trashitem") +func (t *Tree) isIgnored(path string) bool { + return isLockFile(path) || isTrash(path) || t.isUpload(path) || isInternal(path) } func (t *Tree) isUpload(path string) bool { return strings.HasPrefix(path, t.options.UploadDirectory) } + +func isInternal(path string) bool { + return strings.Contains(path, lookup.RevisionsDir) +} + +func isLockFile(path string) bool { + return strings.HasSuffix(path, ".lock") || strings.HasSuffix(path, ".flock") || strings.HasSuffix(path, ".mlock") +} + +func isTrash(path string) bool { + return strings.HasSuffix(path, ".trashinfo") || strings.HasSuffix(path, ".trashitem") || strings.Contains(path, ".Trash") +} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/decomposedfs.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/decomposedfs.go index a5583d6b3..e3cee5e95 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/decomposedfs.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/decomposedfs.go @@ -90,7 +90,7 @@ func init() { tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs") } -// Session is the interface that OcisSession implements. By combining tus.Upload, +// Session is the interface that DecomposedfsSession implements. By combining tus.Upload, // storage.UploadSession and custom functions we can reuse the same struct throughout // the whole upload lifecycle. 
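Aside (not part of the patch): the isLockFile and isTrash helpers shown in the posix tree hunk above classify directory entries purely by name, so the scanner and ListFolder can skip them without any extra stat calls. Below is a minimal standalone sketch of that matching with example inputs; the isInternal check is omitted because it depends on lookup.RevisionsDir.

package main

import (
	"fmt"
	"strings"
)

// isLockFile mirrors the suffix matching the posix tree uses for lock files.
func isLockFile(path string) bool {
	return strings.HasSuffix(path, ".lock") ||
		strings.HasSuffix(path, ".flock") ||
		strings.HasSuffix(path, ".mlock")
}

// isTrash mirrors the trash matching: trashinfo/trashitem suffixes or a
// ".Trash" component somewhere in the path.
func isTrash(path string) bool {
	return strings.HasSuffix(path, ".trashinfo") ||
		strings.HasSuffix(path, ".trashitem") ||
		strings.Contains(path, ".Trash")
}

func main() {
	for _, p := range []string{
		"report.docx",
		"report.docx.lock",
		"node-uuid.trashitem",
		"space/.Trash/old.txt",
	} {
		fmt.Printf("%-25s lock=%v trash=%v\n", p, isLockFile(p), isTrash(p))
	}
}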
// @@ -104,9 +104,9 @@ type Session interface { } type SessionStore interface { - New(ctx context.Context) *upload.OcisSession - List(ctx context.Context) ([]*upload.OcisSession, error) - Get(ctx context.Context, id string) (*upload.OcisSession, error) + New(ctx context.Context) *upload.DecomposedFsSession + List(ctx context.Context) ([]*upload.DecomposedFsSession, error) + Get(ctx context.Context, id string) (*upload.DecomposedFsSession, error) Cleanup(ctx context.Context, session upload.Session, revertNodeMetadata, keepUpload, unmarkPostprocessing bool) } @@ -151,7 +151,13 @@ func NewDefault(m map[string]interface{}, bs tree.Blobstore, es events.Stream, l return nil, fmt.Errorf("unknown metadata backend %s, only 'messagepack' or 'xattrs' (default) supported", o.MetadataBackend) } - tp := tree.New(lu, bs, o, store.Create( + permissionsSelector, err := pool.PermissionsSelector(o.PermissionsSVC, pool.WithTLSMode(o.PermTLSMode)) + if err != nil { + return nil, err + } + p := permissions.NewPermissions(node.NewPermissions(lu), permissionsSelector) + + tp := tree.New(lu, bs, o, p, store.Create( store.Store(o.IDCache.Store), store.TTL(o.IDCache.TTL), store.Size(o.IDCache.Size), @@ -162,15 +168,10 @@ func NewDefault(m map[string]interface{}, bs tree.Blobstore, es events.Stream, l store.Authentication(o.IDCache.AuthUsername, o.IDCache.AuthPassword), ), log) - permissionsSelector, err := pool.PermissionsSelector(o.PermissionsSVC, pool.WithTLSMode(o.PermTLSMode)) - if err != nil { - return nil, err - } - aspects := aspects.Aspects{ Lookup: lu, Tree: tp, - Permissions: permissions.NewPermissions(node.NewPermissions(lu), permissionsSelector), + Permissions: p, EventStream: es, DisableVersioning: o.DisableVersioning, Trashbin: &DecomposedfsTrashbin{}, diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup/lookup.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup/lookup.go index 79ef9ebb4..74224a000 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup/lookup.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup/lookup.go @@ -309,6 +309,11 @@ func (lu *Lookup) InternalPath(spaceID, nodeID string) string { return filepath.Join(lu.Options.Root, "spaces", Pathify(spaceID, 1, 2), "nodes", Pathify(nodeID, 4, 2)) } +// VersionPath returns the internal path for a version of a node +func (lu *Lookup) VersionPath(spaceID, nodeID, version string) string { + return lu.InternalPath(spaceID, nodeID) + node.RevisionIDDelimiter + version +} + // // ReferenceFromAttr returns a CS3 reference from xattr of a node. // // Supported formats are: "cs3:storageid/nodeid" // func ReferenceFromAttr(b []byte) (*provider.Reference, error) { diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/ocis_prefix.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/oc_prefix.go similarity index 76% rename from vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/ocis_prefix.go rename to vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/oc_prefix.go index b51a0487b..d6133594a 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/ocis_prefix.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/oc_prefix.go @@ -20,10 +20,10 @@ package prefixes -// The default namespace for ocis. 
As non root users can only manipulate -// the user. namespace, which is what is used to store ownCloud specific -// metadata. To prevent name collisions with other apps, we are going to -// introduce a sub namespace "user.ocis." +// The default namespace for decomposedfs. As non root users can only +// manipulate the user. namespace, which is what is used to store decomposedfs +// specific metadata. To prevent name collisions with other apps, we are going +// to introduce a sub namespace "user.oc." const ( OcPrefix string = "user.oc." ) diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/ocis_prefix_freebsd.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/oc_prefix_freebsd.go similarity index 97% rename from vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/ocis_prefix_freebsd.go rename to vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/oc_prefix_freebsd.go index c601a0839..cc16a696e 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/ocis_prefix_freebsd.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/oc_prefix_freebsd.go @@ -24,5 +24,5 @@ package prefixes // and will fail with invalid argument when you try to start an xattr name with user. or system. // For that reason we drop the superfluous user. prefix for FreeBSD specifically. const ( - OcisPrefix string = "ocis." + OcPrefix string = "oc." ) diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/prefixes.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/prefixes.go index 0a5517e4f..a0f39e955 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/prefixes.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes/prefixes.go @@ -19,13 +19,13 @@ package prefixes // Declare a list of xattr keys -// TODO the below comment is currently copied from the owncloud driver, revisit + // Currently,extended file attributes have four separated // namespaces (user, trusted, security and system) followed by a dot. // A non root user can only manipulate the user. namespace, which is what -// we will use to store ownCloud specific metadata. To prevent name +// we will use to store decomposedfs specific metadata. To prevent name // collisions with other apps We are going to introduce a sub namespace -// "user.ocis." in the xattrs_prefix*.go files. +// "user.oc." in the xattrs_prefix*.go files. const ( TypeAttr string = OcPrefix + "type" IDAttr string = OcPrefix + "id" @@ -59,7 +59,7 @@ const ( // a temporary etag for a folder that is removed when the mtime propagation happens TmpEtagAttr string = OcPrefix + "tmp.etag" ReferenceAttr string = OcPrefix + "cs3.ref" // arbitrary metadata - ChecksumPrefix string = OcPrefix + "cs." // followed by the algorithm, eg. ocis.cs.sha1 + ChecksumPrefix string = OcPrefix + "cs." // followed by the algorithm, eg. 
oc.cs.sha1 TrashOriginAttr string = OcPrefix + "trash.origin" // trash origin // we use a single attribute to enable or disable propagation of both: synctime and treesize @@ -71,7 +71,7 @@ const ( MTimeAttr string = OcPrefix + "mtime" // the tree modification time of the tree below this node, // propagated when synctime_accounting is true and - // user.ocis.propagation=1 is set + // user.oc.propagation=1 is set // stored as a readable time.RFC3339Nano TreeMTimeAttr string = OcPrefix + "tmtime" @@ -82,7 +82,7 @@ const ( // the size of the tree below this node, // propagated when treesize_accounting is true and - // user.ocis.propagation=1 is set + // user.oc.propagation=1 is set // stored as uint64, little endian TreesizeAttr string = OcPrefix + "treesize" diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go index c3f11d8df..58cbbf126 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go @@ -124,6 +124,7 @@ type Tree interface { PurgeRecycleItemFunc(ctx context.Context, spaceid, key, purgePath string) (*Node, func() error, error) InitNewNode(ctx context.Context, n *Node, fsize uint64) (metadata.UnlockFunc, error) + RestoreRevision(ctx context.Context, spaceID, nodeID, sourcePath string) (err error) WriteBlob(node *Node, source string) error ReadBlob(node *Node) (io.ReadCloser, error) @@ -132,6 +133,10 @@ type Tree interface { BuildSpaceIDIndexEntry(spaceID, nodeID string) string ResolveSpaceIDIndexEntry(spaceID, entry string) (string, string, error) + CreateRevision(ctx context.Context, n *Node, version string, f *lockedfile.File) (string, error) + ListRevisions(ctx context.Context, ref *provider.Reference) ([]*provider.FileVersion, error) + DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string, openReaderFunc func(md *provider.ResourceInfo) bool) (*provider.ResourceInfo, io.ReadCloser, error) + Propagate(ctx context.Context, node *Node, sizeDiff int64) (err error) } @@ -147,6 +152,7 @@ type PathLookup interface { InternalRoot() string InternalPath(spaceID, nodeID string) string + VersionPath(spaceID, nodeID, version string) string Path(ctx context.Context, n *Node, hasPermission PermissionFunc) (path string, err error) MetadataBackend() metadata.Backend TimeManager() TimeManager @@ -356,7 +362,7 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis // use the actual node for the metadata lookup nodeID = kp[0] // remember revision for blob metadata - revisionSuffix = RevisionIDDelimiter + kp[1] + revisionSuffix = kp[1] } } @@ -372,8 +378,8 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis // append back revision to nodeid, even when returning a not existing node defer func() { // when returning errors n is nil - if n != nil { - n.ID += revisionSuffix + if n != nil && revisionSuffix != "" { + n.ID += RevisionIDDelimiter + revisionSuffix } }() @@ -402,7 +408,8 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis return nil, err } } else { - n.BlobID, n.Blobsize, err = lu.ReadBlobIDAndSizeAttr(ctx, nodePath+revisionSuffix, nil) + versionPath := lu.VersionPath(spaceID, nodeID, revisionSuffix) + n.BlobID, n.Blobsize, err = lu.ReadBlobIDAndSizeAttr(ctx, versionPath, nil) if err != nil { return nil, err } diff --git 
a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/xattrs.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/xattrs.go index c31ebfde6..42bc5562a 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/xattrs.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/xattrs.go @@ -66,6 +66,11 @@ func (md Attributes) Time(key string) (time.Time, error) { return time.Parse(time.RFC3339Nano, string(md[key])) } +// SetTime sets a time value +func (md Attributes) SetTime(key string, t time.Time) { + md[key] = []byte(t.UTC().Format(time.RFC3339Nano)) +} + // SetXattrs sets multiple extended attributes on the write-through cache/node func (n *Node) SetXattrsWithContext(ctx context.Context, attribs map[string][]byte, acquireLock bool) (err error) { _, span := tracer.Start(ctx, "SetXattrsWithContext") diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options/options.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options/options.go index b43e85891..773be10e8 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options/options.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options/options.go @@ -47,7 +47,7 @@ type Options struct { // Options specific to the async propagator AsyncPropagatorOptions AsyncPropagatorOptions `mapstructure:"async_propagator_options"` - // ocis fs works on top of a dir of uuid nodes + // decomposedfs fs works on top of a dir of uuid nodes Root string `mapstructure:"root"` // the upload directory where uploads in progress are stored @@ -59,7 +59,7 @@ type Options struct { // ProjectLayout describes the relative path from the storage's root node to the project spaces root directory. ProjectLayout string `mapstructure:"project_layout"` - // propagate mtime changes as tmtime (tree modification time) to the parent directory when user.ocis.propagation=1 is set on a node + // propagate mtime changes as tmtime (tree modification time) to the parent directory when user.oc.propagation=1 is set on a node TreeTimeAccounting bool `mapstructure:"treetime_accounting"` // propagate size changes as treesize diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/recycle.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/recycle.go index 01fc868ba..5b9222e59 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/recycle.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/recycle.go @@ -58,7 +58,7 @@ func (tb *DecomposedfsTrashbin) Setup(fs storage.FS) error { // The deleted file is kept in the same location/dir as the original node. This prevents deletes // from triggering cross storage moves when the trash is accidentally stored on another partition, // because the admin mounted a different partition there. -// For an efficient listing of deleted nodes the ocis storage driver maintains a 'trash' folder +// For an efficient listing of deleted nodes the decomposedfs storage driver maintains a 'trash' folder // with symlinks to trash files for every storagespace. 
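Aside (not part of the patch): a standalone sketch of the trash layout the comment above describes. The deleted node keeps its original location, and a per-space trash directory only holds symlinks to it, so deletes never move data across partitions and listing the recycle bin is a single directory read. All paths and names here are illustrative, not the real decomposedfs on-disk layout.

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	root, err := os.MkdirTemp("", "trash-sketch")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(root)

	nodesDir := filepath.Join(root, "nodes")
	trashDir := filepath.Join(root, "trash")
	for _, d := range []string{nodesDir, trashDir} {
		if err := os.MkdirAll(d, 0700); err != nil {
			log.Fatal(err)
		}
	}

	// the "deleted" file stays in place next to the other nodes
	nodePath := filepath.Join(nodesDir, "node-uuid.trashitem")
	if err := os.WriteFile(nodePath, []byte("content"), 0600); err != nil {
		log.Fatal(err)
	}

	// the trash index is just a symlink, so nothing is copied or moved
	if err := os.Symlink(nodePath, filepath.Join(trashDir, "node-uuid")); err != nil {
		log.Fatal(err)
	}

	// listing the recycle bin becomes a cheap directory read plus readlink
	entries, err := os.ReadDir(trashDir)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		target, _ := os.Readlink(filepath.Join(trashDir, e.Name()))
		fmt.Println(e.Name(), "->", target)
	}
}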
// ListRecycle returns the list of available recycle items diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/revisions.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/revisions.go index f24eb6f96..282103076 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/revisions.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/revisions.go @@ -27,7 +27,6 @@ import ( "time" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/pkg/errors" "github.com/rogpeppe/go-internal/lockedfile" "github.com/opencloud-eu/reva/v2/pkg/appctx" @@ -35,7 +34,6 @@ import ( "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node" "github.com/opencloud-eu/reva/v2/pkg/storagespace" - "github.com/opencloud-eu/reva/v2/pkg/utils" ) // Revision entries are stored inside the node folder and start with the same uuid as the current version. @@ -46,146 +44,14 @@ import ( // We can add a background process to move old revisions to a slower storage // and replace the revision file with a symbolic link in the future, if necessary. -// ListRevisions lists the revisions of the given resource func (fs *Decomposedfs) ListRevisions(ctx context.Context, ref *provider.Reference) (revisions []*provider.FileVersion, err error) { - _, span := tracer.Start(ctx, "ListRevisions") - defer span.End() - var n *node.Node - if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return - } - - rp, err := fs.p.AssemblePermissions(ctx, n) - switch { - case err != nil: - return nil, err - case !rp.ListFileVersions: - f, _ := storagespace.FormatReference(ref) - if rp.Stat { - return nil, errtypes.PermissionDenied(f) - } - return nil, errtypes.NotFound(f) - } - - revisions = []*provider.FileVersion{} - np := n.InternalPath() - if items, err := filepath.Glob(np + node.RevisionIDDelimiter + "*"); err == nil { - for i := range items { - if fs.lu.MetadataBackend().IsMetaFile(items[i]) || strings.HasSuffix(items[i], ".mlock") { - continue - } - - if fi, err := os.Stat(items[i]); err == nil { - parts := strings.SplitN(fi.Name(), node.RevisionIDDelimiter, 2) - if len(parts) != 2 { - appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("invalid revision name, skipping") - continue - } - mtime := fi.ModTime() - rev := &provider.FileVersion{ - Key: n.ID + node.RevisionIDDelimiter + parts[1], - Mtime: uint64(mtime.Unix()), - } - _, blobSize, err := fs.lu.ReadBlobIDAndSizeAttr(ctx, items[i], nil) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("error reading blobsize xattr, using 0") - } - rev.Size = uint64(blobSize) - etag, err := node.CalculateEtag(n.ID, mtime) - if err != nil { - return nil, errors.Wrapf(err, "error calculating etag") - } - rev.Etag = etag - revisions = append(revisions, rev) - } - } - } - // maybe we need to sort the list by key - /* - sort.Slice(revisions, func(i, j int) bool { - return revisions[i].Key > revisions[j].Key - }) - */ - - return + return fs.tp.ListRevisions(ctx, ref) } // DownloadRevision returns a reader for the specified revision // FIXME the CS3 api should explicitly allow initiating revision and trash download, a related issue is https://github.com/cs3org/reva/issues/1813 func (fs *Decomposedfs) DownloadRevision(ctx context.Context, ref 
*provider.Reference, revisionKey string, openReaderFunc func(md *provider.ResourceInfo) bool) (*provider.ResourceInfo, io.ReadCloser, error) { - _, span := tracer.Start(ctx, "DownloadRevision") - defer span.End() - log := appctx.GetLogger(ctx) - - // verify revision key format - kp := strings.SplitN(revisionKey, node.RevisionIDDelimiter, 2) - if len(kp) != 2 { - log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") - return nil, nil, errtypes.NotFound(revisionKey) - } - log.Debug().Str("revisionKey", revisionKey).Msg("DownloadRevision") - - spaceID := ref.ResourceId.SpaceId - // check if the node is available and has not been deleted - n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0], false, nil, false) - if err != nil { - return nil, nil, err - } - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return nil, nil, err - } - - rp, err := fs.p.AssemblePermissions(ctx, n) - switch { - case err != nil: - return nil, nil, err - case !rp.ListFileVersions || !rp.InitiateFileDownload: // TODO add explicit permission in the CS3 api? - f, _ := storagespace.FormatReference(ref) - if rp.Stat { - return nil, nil, errtypes.PermissionDenied(f) - } - return nil, nil, errtypes.NotFound(f) - } - - contentPath := fs.lu.InternalPath(spaceID, revisionKey) - - blobid, blobsize, err := fs.lu.ReadBlobIDAndSizeAttr(ctx, contentPath, nil) - if err != nil { - return nil, nil, errors.Wrapf(err, "Decomposedfs: could not read blob id and size for revision '%s' of node '%s'", kp[1], n.ID) - } - - revisionNode := node.Node{SpaceID: spaceID, BlobID: blobid, Blobsize: blobsize} // blobsize is needed for the s3ng blobstore - - ri, err := n.AsResourceInfo(ctx, rp, nil, []string{"size", "mimetype", "etag"}, true) - if err != nil { - return nil, nil, err - } - - // update resource info with revision data - mtime, err := time.Parse(time.RFC3339Nano, kp[1]) - if err != nil { - return nil, nil, errors.Wrapf(err, "Decomposedfs: could not parse mtime for revision '%s' of node '%s'", kp[1], n.ID) - } - ri.Size = uint64(blobsize) - ri.Mtime = utils.TimeToTS(mtime) - ri.Etag, err = node.CalculateEtag(n.ID, mtime) - if err != nil { - return nil, nil, errors.Wrapf(err, "error calculating etag for revision '%s' of node '%s'", kp[1], n.ID) - } - - var reader io.ReadCloser - if openReaderFunc(ri) { - reader, err = fs.tp.ReadBlob(&revisionNode) - if err != nil { - return nil, nil, errors.Wrapf(err, "Decomposedfs: could not download blob of revision '%s' for node '%s'", n.ID, revisionKey) - } - } - return ri, reader, nil + return fs.tp.DownloadRevision(ctx, ref, revisionKey, openReaderFunc) } // RestoreRevision restores the specified revision of the resource @@ -250,67 +116,15 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer return err } - // revisions are stored alongside the actual file, so a rename can be efficient and does not cross storage / partition boundaries - newRevisionPath := fs.lu.InternalPath(spaceID, kp[0]+node.RevisionIDDelimiter+mtime.UTC().Format(time.RFC3339Nano)) - - // touch new revision - if _, err := os.Create(newRevisionPath); err != nil { + // create a revision of the current node + if _, err := fs.tp.CreateRevision(ctx, n, mtime.UTC().Format(time.RFC3339Nano), f); err != nil { return err } - defer func() { - if returnErr != nil { - if err := os.Remove(newRevisionPath); err != nil { - log.Error().Err(err).Str("revision", filepath.Base(newRevisionPath)).Msg("could not clean up revision node") - } - if err := 
fs.lu.MetadataBackend().Purge(ctx, newRevisionPath); err != nil { - log.Error().Err(err).Str("revision", filepath.Base(newRevisionPath)).Msg("could not clean up revision node") - } - } - }() - // copy blob metadata from node to new revision node - err = fs.lu.CopyMetadataWithSourceLock(ctx, nodePath, newRevisionPath, func(attributeName string, value []byte) (newValue []byte, copy bool) { - return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || // for checksums - attributeName == prefixes.TypeAttr || - attributeName == prefixes.BlobIDAttr || - attributeName == prefixes.BlobsizeAttr || - attributeName == prefixes.MTimeAttr // FIXME somewhere I mix up the revision time and the mtime, causing the restore to overwrite the other existing revisien - }, f, true) - if err != nil { - return errtypes.InternalError("failed to copy blob xattrs to version node: " + err.Error()) - } - - // remember mtime from node as new revision mtime - if err = os.Chtimes(newRevisionPath, mtime, mtime); err != nil { - return errtypes.InternalError("failed to change mtime of version node") - } - - // update blob id in node - - // copy blob metadata from restored revision to node + // restore revision restoredRevisionPath := fs.lu.InternalPath(spaceID, revisionKey) - err = fs.lu.CopyMetadata(ctx, restoredRevisionPath, nodePath, func(attributeName string, value []byte) (newValue []byte, copy bool) { - return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || - attributeName == prefixes.TypeAttr || - attributeName == prefixes.BlobIDAttr || - attributeName == prefixes.BlobsizeAttr - }, false) - if err != nil { - return errtypes.InternalError("failed to copy blob xattrs to old revision to node: " + err.Error()) - } - // always set the node mtime to the current time - err = fs.lu.MetadataBackend().SetMultiple(ctx, nodePath, - map[string][]byte{ - prefixes.MTimeAttr: []byte(time.Now().UTC().Format(time.RFC3339Nano)), - }, - false) - if err != nil { - return errtypes.InternalError("failed to set mtime attribute on node: " + err.Error()) - } - - revisionSize, err := fs.lu.MetadataBackend().GetInt64(ctx, restoredRevisionPath, prefixes.BlobsizeAttr) - if err != nil { - return errtypes.InternalError("failed to read blob size xattr from old revision") + if err := fs.tp.RestoreRevision(ctx, spaceID, kp[0], restoredRevisionPath); err != nil { + return err } // drop old revision @@ -329,6 +143,10 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer // revision 5, current 10 (restore a smaller blob) -> 5-10 = -5 // revision 10, current 5 (restore a bigger blob) -> 10-5 = +5 + revisionSize, err := fs.lu.MetadataBackend().GetInt64(ctx, nodePath, prefixes.BlobsizeAttr) + if err != nil { + return errtypes.InternalError("failed to read blob size xattr from old revision") + } sizeDiff := revisionSize - n.Blobsize return fs.tp.Propagate(ctx, n, sizeDiff) diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/spaces.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/spaces.go index e36e399fa..c1cd66696 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/spaces.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/spaces.go @@ -767,7 +767,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De return err } - bid := m["user.ocis.blobid"] + bid := m["user.oc.blobid"] if string(bid) == "" { return nil } @@ -807,7 +807,7 @@ func (fs *Decomposedfs) 
DeleteStorageSpace(ctx context.Context, req *provider.De } // the value of `target` depends on the implementation: -// - for ocis/s3ng it is the relative link to the space root +// - for decomposedfs/s3ng it is the relative link to the space root // - for the posixfs it is the node id func (fs *Decomposedfs) updateIndexes(ctx context.Context, grantee *provider.Grantee, spaceType, spaceID, nodeID string) error { target := fs.tp.BuildSpaceIDIndexEntry(spaceID, nodeID) diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/revisions.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/revisions.go new file mode 100644 index 000000000..1b0f84c82 --- /dev/null +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/revisions.go @@ -0,0 +1,299 @@ +// Copyright 2018-2021 CERN +// Copyright 2025 OpenCloud GmbH +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package tree + +import ( + "context" + "io" + "os" + "path/filepath" + "strings" + "time" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/pkg/errors" + "github.com/rogpeppe/go-internal/lockedfile" + + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/errtypes" + "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes" + "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node" + "github.com/opencloud-eu/reva/v2/pkg/storagespace" + "github.com/opencloud-eu/reva/v2/pkg/utils" +) + +// Revision entries are stored inside the node folder and start with the same uuid as the current version. +// The `.REV.` indicates it is a revision and what follows is a timestamp, so multiple versions +// can be kept in the same location as the current file content. This prevents new fileuploads +// to trigger cross storage moves when revisions accidentally are stored on another partition, +// because the admin mounted a different partition there. +// We can add a background process to move old revisions to a slower storage +// and replace the revision file with a symbolic link in the future, if necessary. 
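Aside (not part of the patch): a standalone sketch of the revision key scheme the comment above describes. A revision file sits next to the current node content and is named "<node id><delimiter><RFC3339Nano timestamp>"; the ".REV." delimiter is taken from the comment and stands in for node.RevisionIDDelimiter, and the internal path used below is illustrative only.

package main

import (
	"fmt"
	"strings"
	"time"
)

// revisionIDDelimiter stands in for node.RevisionIDDelimiter (".REV." per the comment above).
const revisionIDDelimiter = ".REV."

// versionPath mirrors the idea behind Lookup.VersionPath: the revision lives
// alongside the node's internal path, suffixed with the delimiter and a timestamp.
func versionPath(internalPath, version string) string {
	return internalPath + revisionIDDelimiter + version
}

// revisionTimestamp parses the timestamp back out of a revision key, the same
// way ListRevisions and DownloadRevision split on the delimiter.
func revisionTimestamp(revisionKey string) (time.Time, error) {
	parts := strings.SplitN(revisionKey, revisionIDDelimiter, 2)
	if len(parts) != 2 {
		return time.Time{}, fmt.Errorf("malformed revision key %q", revisionKey)
	}
	return time.Parse(time.RFC3339Nano, parts[1])
}

func main() {
	version := time.Now().UTC().Format(time.RFC3339Nano)

	// illustrative internal path, not the real decomposedfs layout
	fmt.Println(versionPath("/data/spaces/sp/ace-id/nodes/node-uuid", version))

	mtime, err := revisionTimestamp("node-uuid" + revisionIDDelimiter + version)
	fmt.Println(mtime, err)
}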
+ +// CreateVersion creates a new version of the node +func (tp *Tree) CreateRevision(ctx context.Context, n *node.Node, version string, f *lockedfile.File) (string, error) { + versionPath := tp.lookup.VersionPath(n.SpaceID, n.ID, version) + + err := os.MkdirAll(filepath.Dir(versionPath), 0700) + if err != nil { + return "", err + } + + // create version node + vf, err := os.OpenFile(versionPath, os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return "", err + } + defer vf.Close() + + // copy blob metadata to version node + if err := tp.lookup.CopyMetadataWithSourceLock(ctx, n.InternalPath(), versionPath, func(attributeName string, value []byte) (newValue []byte, copy bool) { + return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || + attributeName == prefixes.TypeAttr || + attributeName == prefixes.BlobIDAttr || + attributeName == prefixes.BlobsizeAttr || + attributeName == prefixes.MTimeAttr + }, f, true); err != nil { + return "", err + } + + return versionPath, nil +} + +func (tp *Tree) ListRevisions(ctx context.Context, ref *provider.Reference) (revisions []*provider.FileVersion, err error) { + _, span := tracer.Start(ctx, "ListRevisions") + defer span.End() + var n *node.Node + if n, err = tp.lookup.NodeFromResource(ctx, ref); err != nil { + return + } + if !n.Exists { + err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) + return + } + + rp, err := tp.permissions.AssemblePermissions(ctx, n) + switch { + case err != nil: + return nil, err + case !rp.ListFileVersions: + f, _ := storagespace.FormatReference(ref) + if rp.Stat { + return nil, errtypes.PermissionDenied(f) + } + return nil, errtypes.NotFound(f) + } + + revisions = []*provider.FileVersion{} + np := n.InternalPath() + if items, err := filepath.Glob(np + node.RevisionIDDelimiter + "*"); err == nil { + for i := range items { + if tp.lookup.MetadataBackend().IsMetaFile(items[i]) || strings.HasSuffix(items[i], ".mlock") { + continue + } + + if fi, err := os.Stat(items[i]); err == nil { + parts := strings.SplitN(fi.Name(), node.RevisionIDDelimiter, 2) + if len(parts) != 2 { + appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("invalid revision name, skipping") + continue + } + mtime := fi.ModTime() + rev := &provider.FileVersion{ + Key: n.ID + node.RevisionIDDelimiter + parts[1], + Mtime: uint64(mtime.Unix()), + } + _, blobSize, err := tp.lookup.ReadBlobIDAndSizeAttr(ctx, items[i], nil) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("error reading blobsize xattr, using 0") + } + rev.Size = uint64(blobSize) + etag, err := node.CalculateEtag(n.ID, mtime) + if err != nil { + return nil, errors.Wrapf(err, "error calculating etag") + } + rev.Etag = etag + revisions = append(revisions, rev) + } + } + } + // maybe we need to sort the list by key + /* + sort.Slice(revisions, func(i, j int) bool { + return revisions[i].Key > revisions[j].Key + }) + */ + + return +} + +// DownloadRevision returns a reader for the specified revision +// FIXME the CS3 api should explicitly allow initiating revision and trash download, a related issue is https://github.com/cs3org/reva/issues/1813 +func (tp *Tree) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string, openReaderFunc func(md *provider.ResourceInfo) bool) (*provider.ResourceInfo, io.ReadCloser, error) { + _, span := tracer.Start(ctx, "DownloadRevision") + defer span.End() + log := appctx.GetLogger(ctx) + + // verify revision key format + kp := strings.SplitN(revisionKey, 
node.RevisionIDDelimiter, 2) + if len(kp) != 2 { + log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") + return nil, nil, errtypes.NotFound(revisionKey) + } + log.Debug().Str("revisionKey", revisionKey).Msg("DownloadRevision") + + spaceID := ref.ResourceId.SpaceId + // check if the node is available and has not been deleted + n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], false, nil, false) + if err != nil { + return nil, nil, err + } + if !n.Exists { + err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) + return nil, nil, err + } + + rp, err := tp.permissions.AssemblePermissions(ctx, n) + switch { + case err != nil: + return nil, nil, err + case !rp.ListFileVersions || !rp.InitiateFileDownload: // TODO add explicit permission in the CS3 api? + f, _ := storagespace.FormatReference(ref) + if rp.Stat { + return nil, nil, errtypes.PermissionDenied(f) + } + return nil, nil, errtypes.NotFound(f) + } + + contentPath := tp.lookup.InternalPath(spaceID, revisionKey) + + blobid, blobsize, err := tp.lookup.ReadBlobIDAndSizeAttr(ctx, contentPath, nil) + if err != nil { + return nil, nil, errors.Wrapf(err, "Decomposedfs: could not read blob id and size for revision '%s' of node '%s'", kp[1], n.ID) + } + + revisionNode := node.Node{SpaceID: spaceID, BlobID: blobid, Blobsize: blobsize} // blobsize is needed for the s3ng blobstore + + ri, err := n.AsResourceInfo(ctx, rp, nil, []string{"size", "mimetype", "etag"}, true) + if err != nil { + return nil, nil, err + } + + // update resource info with revision data + mtime, err := time.Parse(time.RFC3339Nano, kp[1]) + if err != nil { + return nil, nil, errors.Wrapf(err, "Decomposedfs: could not parse mtime for revision '%s' of node '%s'", kp[1], n.ID) + } + ri.Size = uint64(blobsize) + ri.Mtime = utils.TimeToTS(mtime) + ri.Etag, err = node.CalculateEtag(n.ID, mtime) + if err != nil { + return nil, nil, errors.Wrapf(err, "error calculating etag for revision '%s' of node '%s'", kp[1], n.ID) + } + + var reader io.ReadCloser + if openReaderFunc(ri) { + reader, err = tp.ReadBlob(&revisionNode) + if err != nil { + return nil, nil, errors.Wrapf(err, "Decomposedfs: could not download blob of revision '%s' for node '%s'", n.ID, revisionKey) + } + } + return ri, reader, nil +} + +// DeleteRevision deletes the specified revision of the resource +func (tp *Tree) DeleteRevision(ctx context.Context, ref *provider.Reference, revisionKey string) error { + _, span := tracer.Start(ctx, "DeleteRevision") + defer span.End() + n, err := tp.getRevisionNode(ctx, ref, revisionKey, func(rp *provider.ResourcePermissions) bool { + return rp.RestoreFileVersion + }) + if err != nil { + return err + } + + if err := os.RemoveAll(tp.lookup.InternalPath(n.SpaceID, revisionKey)); err != nil { + return err + } + + return tp.DeleteBlob(n) +} + +func (tp *Tree) getRevisionNode(ctx context.Context, ref *provider.Reference, revisionKey string, hasPermission func(*provider.ResourcePermissions) bool) (*node.Node, error) { + _, span := tracer.Start(ctx, "getRevisionNode") + defer span.End() + log := appctx.GetLogger(ctx) + + // verify revision key format + kp := strings.SplitN(revisionKey, node.RevisionIDDelimiter, 2) + if len(kp) != 2 { + log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") + return nil, errtypes.NotFound(revisionKey) + } + log.Debug().Str("revisionKey", revisionKey).Msg("DownloadRevision") + + spaceID := ref.ResourceId.SpaceId + // check if the node is available and has not been deleted + n, err := node.ReadNode(ctx, tp.lookup, 
spaceID, kp[0], false, nil, false) + if err != nil { + return nil, err + } + if !n.Exists { + err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) + return nil, err + } + + p, err := tp.permissions.AssemblePermissions(ctx, n) + switch { + case err != nil: + return nil, err + case !hasPermission(p): + return nil, errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) + } + + // Set space owner in context + storagespace.ContextSendSpaceOwnerID(ctx, n.SpaceOwnerOrManager(ctx)) + + return n, nil +} + +func (tp *Tree) RestoreRevision(ctx context.Context, spaceID, nodeID, source string) error { + target := tp.lookup.InternalPath(spaceID, nodeID) + err := tp.lookup.CopyMetadata(ctx, source, target, func(attributeName string, value []byte) (newValue []byte, copy bool) { + return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || + attributeName == prefixes.TypeAttr || + attributeName == prefixes.BlobIDAttr || + attributeName == prefixes.BlobsizeAttr + }, false) + if err != nil { + return errtypes.InternalError("failed to copy blob xattrs to old revision to node: " + err.Error()) + } + // always set the node mtime to the current time + err = tp.lookup.MetadataBackend().SetMultiple(ctx, target, + map[string][]byte{ + prefixes.MTimeAttr: []byte(time.Now().UTC().Format(time.RFC3339Nano)), + }, + false) + if err != nil { + return errtypes.InternalError("failed to set mtime attribute on node: " + err.Error()) + } + + return nil +} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/tree.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/tree.go index 5c71ff7bd..8c61a27b3 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/tree.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/tree.go @@ -39,6 +39,7 @@ import ( "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options" + "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/permissions" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator" "github.com/opencloud-eu/reva/v2/pkg/utils" "github.com/pkg/errors" @@ -64,9 +65,10 @@ type Blobstore interface { // Tree manages a hierarchical tree type Tree struct { - lookup node.PathLookup - blobstore Blobstore - propagator propagator.Propagator + lookup node.PathLookup + blobstore Blobstore + propagator propagator.Propagator + permissions permissions.Permissions options *options.Options @@ -77,13 +79,14 @@ type Tree struct { type PermissionCheckFunc func(rp *provider.ResourcePermissions) bool // New returns a new instance of Tree -func New(lu node.PathLookup, bs Blobstore, o *options.Options, cache store.Store, log *zerolog.Logger) *Tree { +func New(lu node.PathLookup, bs Blobstore, o *options.Options, p permissions.Permissions, cache store.Store, log *zerolog.Logger) *Tree { return &Tree{ - lookup: lu, - blobstore: bs, - options: o, - idCache: cache, - propagator: propagator.New(lu, o, log), + lookup: lu, + blobstore: bs, + options: o, + permissions: p, + idCache: cache, + propagator: propagator.New(lu, o, log), } } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload.go index f26a15b99..ed70486a7 100644 --- 
a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload.go @@ -54,7 +54,7 @@ func (fs *Decomposedfs) Upload(ctx context.Context, req storage.UploadRequest, u return &provider.ResourceInfo{}, errors.Wrap(err, "Decomposedfs: error retrieving upload") } - session := up.(*upload.OcisSession) + session := up.(*upload.DecomposedFsSession) ctx = session.Context(ctx) @@ -371,13 +371,13 @@ func (fs *Decomposedfs) GetUpload(ctx context.Context, id string) (tusd.Upload, // ListUploadSessions returns the upload sessions for the given filter func (fs *Decomposedfs) ListUploadSessions(ctx context.Context, filter storage.UploadSessionFilter) ([]storage.UploadSession, error) { - var sessions []*upload.OcisSession + var sessions []*upload.DecomposedFsSession if filter.ID != nil && *filter.ID != "" { session, err := fs.sessionStore.Get(ctx, *filter.ID) if err != nil { return nil, err } - sessions = []*upload.OcisSession{session} + sessions = []*upload.DecomposedFsSession{session} } else { var err error sessions, err = fs.sessionStore.List(ctx) @@ -418,19 +418,19 @@ func (fs *Decomposedfs) ListUploadSessions(ctx context.Context, filter storage.U // To implement the termination extension as specified in https://tus.io/protocols/resumable-upload.html#termination // the storage needs to implement AsTerminatableUpload func (fs *Decomposedfs) AsTerminatableUpload(up tusd.Upload) tusd.TerminatableUpload { - return up.(*upload.OcisSession) + return up.(*upload.DecomposedFsSession) } // AsLengthDeclarableUpload returns a LengthDeclarableUpload // To implement the creation-defer-length extension as specified in https://tus.io/protocols/resumable-upload.html#creation // the storage needs to implement AsLengthDeclarableUpload func (fs *Decomposedfs) AsLengthDeclarableUpload(up tusd.Upload) tusd.LengthDeclarableUpload { - return up.(*upload.OcisSession) + return up.(*upload.DecomposedFsSession) } // AsConcatableUpload returns a ConcatableUpload // To implement the concatenation extension as specified in https://tus.io/protocols/resumable-upload.html#concatenation // the storage needs to implement AsConcatableUpload func (fs *Decomposedfs) AsConcatableUpload(up tusd.Upload) tusd.ConcatableUpload { - return up.(*upload.OcisSession) + return up.(*upload.DecomposedFsSession) } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/session.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/session.go index 5804f43c3..d7223a3b1 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/session.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/session.go @@ -38,15 +38,15 @@ import ( "github.com/opencloud-eu/reva/v2/pkg/utils" ) -// OcisSession extends tus upload lifecycle with postprocessing steps. -type OcisSession struct { - store OcisStore +// DecomposedFsSession extends tus upload lifecycle with postprocessing steps. 
+type DecomposedFsSession struct { + store DecomposedFsStore // for now, we keep the json files in the uploads folder info tusd.FileInfo } // Context returns a context with the user, logger and lockid used when initiating the upload session -func (s *OcisSession) Context(ctx context.Context) context.Context { // restore logger from file info +func (s *DecomposedFsSession) Context(ctx context.Context) context.Context { // restore logger from file info sub := s.store.log.With().Int("pid", os.Getpid()).Logger() ctx = appctx.WithLogger(ctx, &sub) ctx = ctxpkg.ContextSetLockID(ctx, s.lockID()) @@ -54,10 +54,10 @@ func (s *OcisSession) Context(ctx context.Context) context.Context { // restore return ctxpkg.ContextSetInitiator(ctx, s.InitiatorID()) } -func (s *OcisSession) lockID() string { +func (s *DecomposedFsSession) lockID() string { return s.info.MetaData["lockid"] } -func (s *OcisSession) executantUser() *userpb.User { +func (s *DecomposedFsSession) executantUser() *userpb.User { var o *typespb.Opaque _ = json.Unmarshal([]byte(s.info.Storage["UserOpaque"]), &o) return &userpb.User{ @@ -73,7 +73,7 @@ func (s *OcisSession) executantUser() *userpb.User { } // Purge deletes the upload session metadata and written binary data -func (s *OcisSession) Purge(ctx context.Context) error { +func (s *DecomposedFsSession) Purge(ctx context.Context) error { _, span := tracer.Start(ctx, "Purge") defer span.End() sessionPath := sessionPath(s.store.root, s.info.ID) @@ -87,7 +87,7 @@ func (s *OcisSession) Purge(ctx context.Context) error { } // TouchBin creates a file to contain the binary data. It's size will be used to keep track of the tus upload offset. -func (s *OcisSession) TouchBin() error { +func (s *DecomposedFsSession) TouchBin() error { file, err := os.OpenFile(s.binPath(), os.O_CREATE|os.O_WRONLY, defaultFilePerm) if err != nil { return err @@ -98,7 +98,7 @@ func (s *OcisSession) TouchBin() error { // Persist writes the upload session metadata to disk // events can update the scan outcome and the finished event might read an empty file because of race conditions // so we need to lock the file while writing and use atomic writes -func (s *OcisSession) Persist(ctx context.Context) error { +func (s *DecomposedFsSession) Persist(ctx context.Context) error { _, span := tracer.Start(ctx, "Persist") defer span.End() sessionPath := sessionPath(s.store.root, s.info.ID) @@ -116,27 +116,27 @@ func (s *OcisSession) Persist(ctx context.Context) error { } // ToFileInfo returns tus compatible FileInfo so the tus handler can access the upload offset -func (s *OcisSession) ToFileInfo() tusd.FileInfo { +func (s *DecomposedFsSession) ToFileInfo() tusd.FileInfo { return s.info } // ProviderID returns the provider id -func (s *OcisSession) ProviderID() string { +func (s *DecomposedFsSession) ProviderID() string { return s.info.MetaData["providerID"] } // SpaceID returns the space id -func (s *OcisSession) SpaceID() string { +func (s *DecomposedFsSession) SpaceID() string { return s.info.Storage["SpaceRoot"] } // NodeID returns the node id -func (s *OcisSession) NodeID() string { +func (s *DecomposedFsSession) NodeID() string { return s.info.Storage["NodeId"] } // NodeParentID returns the nodes parent id -func (s *OcisSession) NodeParentID() string { +func (s *DecomposedFsSession) NodeParentID() string { return s.info.Storage["NodeParentId"] } @@ -148,62 +148,62 @@ func (s *OcisSession) NodeParentID() string { // A node should be created as part of InitiateUpload. 
When listing a directory // we can decide if we want to skip the entry, or expose uploed progress // information. But that is a bigger change and might involve client work. -func (s *OcisSession) NodeExists() bool { +func (s *DecomposedFsSession) NodeExists() bool { return s.info.Storage["NodeExists"] == "true" } // HeaderIfMatch returns the if-match header for the upload session -func (s *OcisSession) HeaderIfMatch() string { +func (s *DecomposedFsSession) HeaderIfMatch() string { return s.info.MetaData["if-match"] } // HeaderIfNoneMatch returns the if-none-match header for the upload session -func (s *OcisSession) HeaderIfNoneMatch() string { +func (s *DecomposedFsSession) HeaderIfNoneMatch() string { return s.info.MetaData["if-none-match"] } // HeaderIfUnmodifiedSince returns the if-unmodified-since header for the upload session -func (s *OcisSession) HeaderIfUnmodifiedSince() string { +func (s *DecomposedFsSession) HeaderIfUnmodifiedSince() string { return s.info.MetaData["if-unmodified-since"] } // Node returns the node for the session -func (s *OcisSession) Node(ctx context.Context) (*node.Node, error) { +func (s *DecomposedFsSession) Node(ctx context.Context) (*node.Node, error) { return node.ReadNode(ctx, s.store.lu, s.SpaceID(), s.info.Storage["NodeId"], false, nil, true) } // ID returns the upload session id -func (s *OcisSession) ID() string { +func (s *DecomposedFsSession) ID() string { return s.info.ID } // Filename returns the name of the node which is not the same as the name af the file being uploaded for legacy chunked uploads -func (s *OcisSession) Filename() string { +func (s *DecomposedFsSession) Filename() string { return s.info.Storage["NodeName"] } // Chunk returns the chunk name when a legacy chunked upload was started -func (s *OcisSession) Chunk() string { +func (s *DecomposedFsSession) Chunk() string { return s.info.Storage["Chunk"] } // SetMetadata is used to fill the upload metadata that will be exposed to the end user -func (s *OcisSession) SetMetadata(key, value string) { +func (s *DecomposedFsSession) SetMetadata(key, value string) { s.info.MetaData[key] = value } // SetStorageValue is used to set metadata only relevant for the upload session implementation -func (s *OcisSession) SetStorageValue(key, value string) { +func (s *DecomposedFsSession) SetStorageValue(key, value string) { s.info.Storage[key] = value } // SetSize will set the upload size of the underlying tus info. -func (s *OcisSession) SetSize(size int64) { +func (s *DecomposedFsSession) SetSize(size int64) { s.info.Size = size } // SetSizeIsDeferred is uset to change the SizeIsDeferred property of the underlying tus info. -func (s *OcisSession) SetSizeIsDeferred(value bool) { +func (s *DecomposedFsSession) SetSizeIsDeferred(value bool) { s.info.SizeIsDeferred = value } @@ -227,23 +227,23 @@ func (s *OcisSession) SetSizeIsDeferred(value bool) { // // I think we can safely determine the path later, right before emitting the // event. And maybe make it configurable, because only audit needs it, anyway. 
-func (s *OcisSession) Dir() string { +func (s *DecomposedFsSession) Dir() string { return s.info.Storage["Dir"] } // Size returns the upload size -func (s *OcisSession) Size() int64 { +func (s *DecomposedFsSession) Size() int64 { return s.info.Size } // SizeDiff returns the size diff that was calculated after postprocessing -func (s *OcisSession) SizeDiff() int64 { +func (s *DecomposedFsSession) SizeDiff() int64 { sizeDiff, _ := strconv.ParseInt(s.info.MetaData["sizeDiff"], 10, 64) return sizeDiff } // Reference returns a reference that can be used to access the uploaded resource -func (s *OcisSession) Reference() provider.Reference { +func (s *DecomposedFsSession) Reference() provider.Reference { return provider.Reference{ ResourceId: &provider.ResourceId{ StorageId: s.info.MetaData["providerID"], @@ -255,7 +255,7 @@ func (s *OcisSession) Reference() provider.Reference { } // Executant returns the id of the user that initiated the upload session -func (s *OcisSession) Executant() userpb.UserId { +func (s *DecomposedFsSession) Executant() userpb.UserId { return userpb.UserId{ Type: userpb.UserType(userpb.UserType_value[s.info.Storage["UserType"]]), Idp: s.info.Storage["Idp"], @@ -264,7 +264,7 @@ func (s *OcisSession) Executant() userpb.UserId { } // SetExecutant is used to remember the user that initiated the upload session -func (s *OcisSession) SetExecutant(u *userpb.User) { +func (s *DecomposedFsSession) SetExecutant(u *userpb.User) { s.info.Storage["Idp"] = u.GetId().GetIdp() s.info.Storage["UserId"] = u.GetId().GetOpaqueId() s.info.Storage["UserType"] = utils.UserTypeToString(u.GetId().Type) @@ -276,12 +276,12 @@ func (s *OcisSession) SetExecutant(u *userpb.User) { } // Offset returns the current upload offset -func (s *OcisSession) Offset() int64 { +func (s *DecomposedFsSession) Offset() int64 { return s.info.Offset } // SpaceOwner returns the id of the space owner -func (s *OcisSession) SpaceOwner() *userpb.UserId { +func (s *DecomposedFsSession) SpaceOwner() *userpb.UserId { return &userpb.UserId{ // idp and type do not seem to be consumed and the node currently only stores the user id anyway OpaqueId: s.info.Storage["SpaceOwnerOrManager"], @@ -289,7 +289,7 @@ func (s *OcisSession) SpaceOwner() *userpb.UserId { } // Expires returns the time the upload session expires -func (s *OcisSession) Expires() time.Time { +func (s *DecomposedFsSession) Expires() time.Time { var t time.Time if value, ok := s.info.MetaData["expires"]; ok { t, _ = utils.MTimeToTime(value) @@ -298,7 +298,7 @@ func (s *OcisSession) Expires() time.Time { } // MTime returns the mtime to use for the uploaded file -func (s *OcisSession) MTime() time.Time { +func (s *DecomposedFsSession) MTime() time.Time { var t time.Time if value, ok := s.info.MetaData["mtime"]; ok { t, _ = utils.MTimeToTime(value) @@ -307,29 +307,29 @@ func (s *OcisSession) MTime() time.Time { } // IsProcessing returns true if all bytes have been received. The session then has entered postprocessing state. -func (s *OcisSession) IsProcessing() bool { +func (s *DecomposedFsSession) IsProcessing() bool { // We might need a more sophisticated way to determine processing status soon return s.info.Size == s.info.Offset && s.info.MetaData["scanResult"] == "" } // binPath returns the path to the file storing the binary data. 
-func (s *OcisSession) binPath() string { +func (s *DecomposedFsSession) binPath() string { return filepath.Join(s.store.root, "uploads", s.info.ID) } // InitiatorID returns the id of the initiating client -func (s *OcisSession) InitiatorID() string { +func (s *DecomposedFsSession) InitiatorID() string { return s.info.MetaData["initiatorid"] } // SetScanData sets virus scan data to the upload session -func (s *OcisSession) SetScanData(result string, date time.Time) { +func (s *DecomposedFsSession) SetScanData(result string, date time.Time) { s.info.MetaData["scanResult"] = result s.info.MetaData["scanDate"] = date.Format(time.RFC3339) } // ScanData returns the virus scan data -func (s *OcisSession) ScanData() (string, time.Time) { +func (s *DecomposedFsSession) ScanData() (string, time.Time) { date := s.info.MetaData["scanDate"] if date == "" { return "", time.Time{} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/store.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/store.go index 63745190d..2574cf240 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/store.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/store.go @@ -56,8 +56,8 @@ type PermissionsChecker interface { AssemblePermissions(ctx context.Context, n *node.Node) (ap provider.ResourcePermissions, err error) } -// OcisStore manages upload sessions -type OcisStore struct { +// DecomposedFsStore manages upload sessions +type DecomposedFsStore struct { fs storage.FS lu node.PathLookup tp node.Tree @@ -70,9 +70,9 @@ type OcisStore struct { log *zerolog.Logger } -// NewSessionStore returns a new OcisStore -func NewSessionStore(fs storage.FS, aspects aspects.Aspects, root string, async bool, tknopts options.TokenOptions, log *zerolog.Logger) *OcisStore { - return &OcisStore{ +// NewSessionStore returns a new DecomposedFsStore +func NewSessionStore(fs storage.FS, aspects aspects.Aspects, root string, async bool, tknopts options.TokenOptions, log *zerolog.Logger) *DecomposedFsStore { + return &DecomposedFsStore{ fs: fs, lu: aspects.Lookup, tp: aspects.Tree, @@ -87,13 +87,13 @@ func NewSessionStore(fs storage.FS, aspects aspects.Aspects, root string, async } // New returns a new upload session -func (store OcisStore) New(ctx context.Context) *OcisSession { - return &OcisSession{ +func (store DecomposedFsStore) New(ctx context.Context) *DecomposedFsSession { + return &DecomposedFsSession{ store: store, info: tusd.FileInfo{ ID: uuid.New().String(), Storage: map[string]string{ - "Type": "OCISStore", + "Type": "DecomposedFsStore", }, MetaData: tusd.MetaData{}, }, @@ -101,8 +101,8 @@ func (store OcisStore) New(ctx context.Context) *OcisSession { } // List lists all upload sessions -func (store OcisStore) List(ctx context.Context) ([]*OcisSession, error) { - uploads := []*OcisSession{} +func (store DecomposedFsStore) List(ctx context.Context) ([]*DecomposedFsSession, error) { + uploads := []*DecomposedFsSession{} infoFiles, err := filepath.Glob(filepath.Join(store.root, "uploads", "*.info")) if err != nil { return nil, err @@ -122,14 +122,14 @@ func (store OcisStore) List(ctx context.Context) ([]*OcisSession, error) { } // Get returns the upload session for the given upload id -func (store OcisStore) Get(ctx context.Context, id string) (*OcisSession, error) { +func (store DecomposedFsStore) Get(ctx context.Context, id string) (*DecomposedFsSession, error) { sessionPath := sessionPath(store.root, id) match 
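The store keeps one `<id>.info` file per upload session under `<root>/uploads`, next to the binary data at `binPath`; `List` simply globs those info files. A self-contained sketch of that listing pattern, with the on-disk layout treated as an assumption for illustration:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// listSessionIDs sketches the List pattern from the diff: one "<id>.info"
// file per upload session under "<root>/uploads".
func listSessionIDs(root string) ([]string, error) {
	infoFiles, err := filepath.Glob(filepath.Join(root, "uploads", "*.info"))
	if err != nil {
		return nil, err
	}
	ids := make([]string, 0, len(infoFiles))
	for _, f := range infoFiles {
		ids = append(ids, strings.TrimSuffix(filepath.Base(f), ".info"))
	}
	return ids, nil
}

func main() {
	ids, err := listSessionIDs("/var/lib/storage") // hypothetical root
	fmt.Println(ids, err)
}
```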
:= _idRegexp.FindStringSubmatch(sessionPath) if match == nil || len(match) < 2 { return nil, fmt.Errorf("invalid upload path") } - session := OcisSession{ + session := DecomposedFsSession{ store: store, info: tusd.FileInfo{}, } @@ -174,7 +174,7 @@ type Session interface { } // Cleanup cleans upload metadata, binary data and processing status as necessary -func (store OcisStore) Cleanup(ctx context.Context, session Session, revertNodeMetadata, keepUpload, unmarkPostprocessing bool) { +func (store DecomposedFsStore) Cleanup(ctx context.Context, session Session, revertNodeMetadata, keepUpload, unmarkPostprocessing bool) { ctx, span := tracer.Start(session.Context(ctx), "Cleanup") defer span.End() session.Cleanup(revertNodeMetadata, !keepUpload, !keepUpload) @@ -198,7 +198,7 @@ func (store OcisStore) Cleanup(ctx context.Context, session Session, revertNodeM // CreateNodeForUpload will create the target node for the Upload // TODO move this to the node package as NodeFromUpload? // should we in InitiateUpload create the node first? and then the upload? -func (store OcisStore) CreateNodeForUpload(ctx context.Context, session *OcisSession, initAttrs node.Attributes) (*node.Node, error) { +func (store DecomposedFsStore) CreateNodeForUpload(ctx context.Context, session *DecomposedFsSession, initAttrs node.Attributes) (*node.Node, error) { ctx, span := tracer.Start(session.Context(ctx), "CreateNodeForUpload") defer span.End() n := node.New( @@ -301,7 +301,7 @@ func (store OcisStore) CreateNodeForUpload(ctx context.Context, session *OcisSes return n, nil } -func (store OcisStore) updateExistingNode(ctx context.Context, session *OcisSession, n *node.Node, spaceID string, fsize uint64) (metadata.UnlockFunc, error) { +func (store DecomposedFsStore) updateExistingNode(ctx context.Context, session *DecomposedFsSession, n *node.Node, spaceID string, fsize uint64) (metadata.UnlockFunc, error) { _, span := tracer.Start(ctx, "updateExistingNode") defer span.End() targetPath := n.InternalPath() @@ -364,10 +364,8 @@ func (store OcisStore) updateExistingNode(ctx context.Context, session *OcisSess } if !store.disableVersioning { - versionPath := session.store.lu.InternalPath(spaceID, n.ID+node.RevisionIDDelimiter+oldNodeMtime.UTC().Format(time.RFC3339Nano)) - - // create version node - _, err := os.OpenFile(versionPath, os.O_CREATE|os.O_EXCL, 0600) + span.AddEvent("CreateVersion") + versionPath, err := session.store.tp.CreateRevision(ctx, n, oldNodeMtime.UTC().Format(time.RFC3339Nano), f) if err != nil { if !errors.Is(err, os.ErrExist) { return unlock, err @@ -389,22 +387,11 @@ func (store OcisStore) updateExistingNode(ctx context.Context, session *OcisSess } // clean revision file - span.AddEvent("os.Create") - if _, err := os.Create(versionPath); err != nil { + if versionPath, err = session.store.tp.CreateRevision(ctx, n, oldNodeMtime.UTC().Format(time.RFC3339Nano), f); err != nil { return unlock, err } } - // copy blob metadata to version node - if err := store.lu.CopyMetadataWithSourceLock(ctx, targetPath, versionPath, func(attributeName string, value []byte) (newValue []byte, copy bool) { - return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || - attributeName == prefixes.TypeAttr || - attributeName == prefixes.BlobIDAttr || - attributeName == prefixes.BlobsizeAttr || - attributeName == prefixes.MTimeAttr - }, f, true); err != nil { - return unlock, err - } session.info.MetaData["versionsPath"] = versionPath // keep mtime from previous version span.AddEvent("os.Chtimes") @@ -418,7 +405,7 
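The hunk above replaces the manual creation of a version file (and the explicit copying of checksum/blob xattrs) with a single `tp.CreateRevision` call on the tree. The revision key is still derived from the previous node mtime. A sketch of that naming scheme only; the delimiter value is an assumption, since the diff refers to `node.RevisionIDDelimiter` without showing it:

```go
package main

import (
	"fmt"
	"time"
)

// revisionIDDelimiter is an illustrative assumption; the real constant lives in the node package.
const revisionIDDelimiter = ".REV."

// revisionName sketches how the revision key passed to CreateRevision is derived above:
// the node id plus the previous mtime rendered as RFC3339Nano in UTC.
func revisionName(nodeID string, oldMtime time.Time) string {
	return nodeID + revisionIDDelimiter + oldMtime.UTC().Format(time.RFC3339Nano)
}

func main() {
	mtime := time.Date(2025, 2, 13, 8, 59, 13, 0, time.UTC)
	fmt.Println(revisionName("node-123", mtime))
}
```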
@@ func (store OcisStore) updateExistingNode(ctx context.Context, session *OcisSess return unlock, nil } -func validateChecksums(ctx context.Context, n *node.Node, session *OcisSession, versionPath string) error { +func validateChecksums(ctx context.Context, n *node.Node, session *DecomposedFsSession, versionPath string) error { for _, t := range []string{"md5", "sha1", "adler32"} { key := prefixes.ChecksumPrefix + t diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go index 6acfc58b5..4bc526b3e 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go @@ -60,7 +60,7 @@ func init() { } // WriteChunk writes the stream from the reader to the given offset of the upload -func (session *OcisSession) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) { +func (session *DecomposedFsSession) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) { ctx, span := tracer.Start(session.Context(ctx), "WriteChunk") defer span.End() _, subspan := tracer.Start(ctx, "os.OpenFile") @@ -81,7 +81,7 @@ func (session *OcisSession) WriteChunk(ctx context.Context, offset int64, src io // If the HTTP PATCH request gets interrupted in the middle (e.g. because // the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF. - // However, for the ocis driver it's not important whether the stream has ended + // However, for the decompsedfs driver it's not important whether the stream has ended // on purpose or accidentally. if err != nil && err != io.ErrUnexpectedEOF { return n, err @@ -95,12 +95,12 @@ func (session *OcisSession) WriteChunk(ctx context.Context, offset int64, src io } // GetInfo returns the FileInfo -func (session *OcisSession) GetInfo(_ context.Context) (tusd.FileInfo, error) { +func (session *DecomposedFsSession) GetInfo(_ context.Context) (tusd.FileInfo, error) { return session.ToFileInfo(), nil } // GetReader returns an io.Reader for the upload -func (session *OcisSession) GetReader(ctx context.Context) (io.ReadCloser, error) { +func (session *DecomposedFsSession) GetReader(ctx context.Context) (io.ReadCloser, error) { _, span := tracer.Start(session.Context(ctx), "GetReader") defer span.End() return os.Open(session.binPath()) @@ -109,7 +109,7 @@ func (session *OcisSession) GetReader(ctx context.Context) (io.ReadCloser, error // FinishUpload finishes an upload and moves the file to the internal destination // implements tusd.DataStore interface // returns tusd errors -func (session *OcisSession) FinishUpload(ctx context.Context) error { +func (session *DecomposedFsSession) FinishUpload(ctx context.Context) error { err := session.FinishUploadDecomposed(ctx) // we need to return a tusd error here to make the tusd handler return the correct status code @@ -125,7 +125,7 @@ func (session *OcisSession) FinishUpload(ctx context.Context) error { // FinishUploadDecomposed finishes an upload and moves the file to the internal destination // retures errtypes errors -func (session *OcisSession) FinishUploadDecomposed(ctx context.Context) error { +func (session *DecomposedFsSession) FinishUploadDecomposed(ctx context.Context) error { ctx, span := tracer.Start(session.Context(ctx), "FinishUpload") defer span.End() log := appctx.GetLogger(ctx) @@ -239,13 +239,13 @@ func (session 
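`WriteChunk` above appends the request body to the upload's binary file and deliberately tolerates `io.ErrUnexpectedEOF`, because an interrupted PATCH (for example a paused upload) is expected and resumable. A minimal sketch of that tolerant copy loop, assuming a plain file path rather than the real session:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// writeChunk appends the stream to the upload's binary file; an interrupted
// request (io.ErrUnexpectedEOF) is not treated as fatal so the upload can resume.
func writeChunk(binPath string, src io.Reader) (int64, error) {
	f, err := os.OpenFile(binPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o600)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	n, err := io.Copy(f, src)
	if err != nil && err != io.ErrUnexpectedEOF {
		return n, err // a real transfer error
	}
	return n, nil // partial data is kept; the client can continue later
}

func main() {
	n, err := writeChunk(os.TempDir()+"/upload.bin", strings.NewReader("hello"))
	fmt.Println(n, err)
}
```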
*OcisSession) FinishUploadDecomposed(ctx context.Context) error { } // Terminate terminates the upload -func (session *OcisSession) Terminate(_ context.Context) error { +func (session *DecomposedFsSession) Terminate(_ context.Context) error { session.Cleanup(true, true, true) return nil } // DeclareLength updates the upload length information -func (session *OcisSession) DeclareLength(ctx context.Context, length int64) error { +func (session *DecomposedFsSession) DeclareLength(ctx context.Context, length int64) error { session.info.Size = length session.info.SizeIsDeferred = false return session.store.um.RunInBaseScope(func() error { @@ -254,7 +254,7 @@ func (session *OcisSession) DeclareLength(ctx context.Context, length int64) err } // ConcatUploads concatenates multiple uploads -func (session *OcisSession) ConcatUploads(_ context.Context, uploads []tusd.Upload) (err error) { +func (session *DecomposedFsSession) ConcatUploads(_ context.Context, uploads []tusd.Upload) (err error) { file, err := os.OpenFile(session.binPath(), os.O_WRONLY|os.O_APPEND, defaultFilePerm) if err != nil { return err @@ -262,7 +262,7 @@ func (session *OcisSession) ConcatUploads(_ context.Context, uploads []tusd.Uplo defer file.Close() for _, partialUpload := range uploads { - fileUpload := partialUpload.(*OcisSession) + fileUpload := partialUpload.(*DecomposedFsSession) src, err := os.Open(fileUpload.binPath()) if err != nil { @@ -279,7 +279,7 @@ func (session *OcisSession) ConcatUploads(_ context.Context, uploads []tusd.Uplo } // Finalize finalizes the upload (eg moves the file to the internal destination) -func (session *OcisSession) Finalize(ctx context.Context) (err error) { +func (session *DecomposedFsSession) Finalize(ctx context.Context) (err error) { ctx, span := tracer.Start(session.Context(ctx), "Finalize") defer span.End() @@ -305,7 +305,7 @@ func checkHash(expected string, h hash.Hash) error { return nil } -func (session *OcisSession) removeNode(ctx context.Context) { +func (session *DecomposedFsSession) removeNode(ctx context.Context) { n, err := session.Node(ctx) if err != nil { appctx.GetLogger(ctx).Error().Str("session", session.ID()).Err(err).Msg("getting node from session failed") @@ -317,7 +317,7 @@ func (session *OcisSession) removeNode(ctx context.Context) { } // cleanup cleans up after the upload is finished -func (session *OcisSession) Cleanup(revertNodeMetadata, cleanBin, cleanInfo bool) { +func (session *DecomposedFsSession) Cleanup(revertNodeMetadata, cleanBin, cleanInfo bool) { ctx := session.Context(context.Background()) if revertNodeMetadata { @@ -370,7 +370,7 @@ func (session *OcisSession) Cleanup(revertNodeMetadata, cleanBin, cleanInfo bool } // URL returns a url to download an upload -func (session *OcisSession) URL(_ context.Context) (string, error) { +func (session *DecomposedFsSession) URL(_ context.Context) (string, error) { type transferClaims struct { jwt.RegisteredClaims Target string `json:"target"` diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/registry/spaces/Readme.md b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/registry/spaces/Readme.md index 4fa3741a5..52e970488 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/registry/spaces/Readme.md +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/registry/spaces/Readme.md @@ -41,7 +41,7 @@ It also carries filters that are sent with a ListStorageSpaces call to a storage * a display name, that is assigned by the owner or managers, eg. project names or 'Phils Home' for personal spaces. 
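`ConcatUploads` above stitches tus partial uploads together by appending each partial upload's binary file to the final session's file, in order. A self-contained sketch of that loop over plain file paths (the real code works on sessions and their `binPath`):

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// concatFiles sketches the concatenation loop from the diff: each partial
// upload's binary file is appended, in order, to the destination file.
func concatFiles(dst string, parts ...string) error {
	out, err := os.OpenFile(dst, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o600)
	if err != nil {
		return err
	}
	defer out.Close()

	for _, p := range parts {
		src, err := os.Open(p)
		if err != nil {
			return err
		}
		if _, err := io.Copy(out, src); err != nil {
			src.Close()
			return err
		}
		src.Close()
	}
	return nil
}

func main() {
	// hypothetical paths for illustration
	fmt.Println(concatFiles("/tmp/final.bin", "/tmp/part1.bin", "/tmp/part2.bin"))
}
```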
They are not unique * an alias that is human readable and unique per user. It is used when listing paths on the CS3 global names as well as oc10 `/webdav` and `/dav/files/{username}` endpoints -5. on the ocis `/dav/spaces/{spaceid}/` endpoint the alias is actually not used because navigation happens by `{spaceid}` +5. on the OpenCloud `/dav/spaces/{spaceid}/` endpoint the alias is actually not used because navigation happens by `{spaceid}` 6. Every user has their own list of path to spaceid mappings, like one config file per user. ## consequences for storage providers diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/chunking/chunking.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/chunking/chunking.go index 97c21e030..80513937a 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/chunking/chunking.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/chunking/chunking.go @@ -160,7 +160,7 @@ func (c *ChunkHandler) saveChunk(path string, r io.ReadCloser) (bool, string, er // there are still some chunks to be uploaded. // we return CodeUploadIsPartial to notify upper layers that the upload is still // not complete and requires more actions. - // This code is needed to notify the owncloud webservice that the upload has not yet been + // This code is needed to notify the OpenCloud webservice that the upload has not yet been // completed and needs to continue uploading chunks. if len(chunks) < chunkInfo.TotalChunks { return false, "", nil diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/errors/errors.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/errors/errors.go deleted file mode 100644 index 696ac13dd..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/errors/errors.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package errors - -import ( - "fmt" - - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/option" -) - -// AlreadyExistsErr implements the Error interface. -type AlreadyExistsErr struct { - TypeName, Value string - IndexBy option.IndexBy -} - -func (e *AlreadyExistsErr) Error() string { - return fmt.Sprintf("%s with %s=%s does already exist", e.TypeName, e.IndexBy.String(), e.Value) -} - -// IsAlreadyExists implements the IsAlreadyExists interface. -func (e *AlreadyExistsErr) IsAlreadyExists() {} - -// NotFoundErr implements the Error interface. -type NotFoundErr struct { - TypeName, Value string - IndexBy option.IndexBy -} - -func (e *NotFoundErr) Error() string { - return fmt.Sprintf("%s with %s=%s not found", e.TypeName, e.IndexBy.String(), e.Value) -} - -// IsNotFound implements the IsNotFound interface. 
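The chunking hunk above touches the legacy (oc-chunking v1) path: until every expected chunk has been stored, `saveChunk` reports the upload as incomplete so upper layers can answer with "upload is partial" and the client keeps sending chunks. A tiny sketch of that completeness check, with `chunkInfo` as a hypothetical stand-in for the parsed chunk metadata:

```go
package main

import "fmt"

// chunkInfo is a hypothetical stand-in for the parsed legacy chunk metadata.
type chunkInfo struct {
	TotalChunks int
}

// uploadComplete mirrors the check described above: the upload is only
// complete once every expected chunk has been stored.
func uploadComplete(storedChunks int, ci chunkInfo) bool {
	return storedChunks >= ci.TotalChunks
}

func main() {
	ci := chunkInfo{TotalChunks: 3}
	fmt.Println(uploadComplete(2, ci)) // false -> respond "upload is partial"
	fmt.Println(uploadComplete(3, ci)) // true  -> assemble the final file
}
```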
-func (e *NotFoundErr) IsNotFound() {} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/helper.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/helper.go deleted file mode 100644 index 242f321ee..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/helper.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package indexer - -// dedup removes duplicate values in given slice -func dedup(s []string) []string { - var out []string - exists := make(map[string]bool) - for _, ss := range s { - if _, ok := exists[ss]; !ok { - out = append(out, ss) - exists[ss] = true - } - } - return out -} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index/autoincrement.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index/autoincrement.go deleted file mode 100644 index 3e0f30f51..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index/autoincrement.go +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package index - -import ( - "context" - "os" - "path" - "path/filepath" - "sort" - "strconv" - "strings" - - idxerrs "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/errors" - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/option" - metadata "github.com/opencloud-eu/reva/v2/pkg/storage/utils/metadata" -) - -// Autoincrement are fields for an index of type autoincrement. -type Autoincrement struct { - indexBy option.IndexBy - typeName string - filesDir string - indexBaseDir string - indexRootDir string - - bound *option.Bound - storage metadata.Storage -} - -// NewAutoincrementIndex instantiates a new AutoincrementIndex instance. 
-func NewAutoincrementIndex(storage metadata.Storage, o ...option.Option) Index { - opts := &option.Options{} - for _, opt := range o { - opt(opts) - } - - u := &Autoincrement{ - storage: storage, - indexBy: opts.IndexBy, - typeName: opts.TypeName, - filesDir: opts.FilesDir, - bound: opts.Bound, - indexBaseDir: path.Join(opts.Prefix, "index."+storage.Backend()), - indexRootDir: path.Join(opts.Prefix, "index."+storage.Backend(), strings.Join([]string{"autoincrement", opts.TypeName, opts.IndexBy.String()}, ".")), - } - - return u -} - -// Init initializes an autoincrement index. -func (idx *Autoincrement) Init() error { - if err := idx.storage.MakeDirIfNotExist(context.Background(), idx.indexBaseDir); err != nil { - return err - } - - return idx.storage.MakeDirIfNotExist(context.Background(), idx.indexRootDir) -} - -// Lookup exact lookup by value. -func (idx *Autoincrement) Lookup(v string) ([]string, error) { - return idx.LookupCtx(context.Background(), v) -} - -// LookupCtx retieves multiple exact values and allows passing in a context -func (idx *Autoincrement) LookupCtx(ctx context.Context, values ...string) ([]string, error) { - var allValues map[string]struct{} - if len(values) != 1 { - // prefetch all values with one request - entries, err := idx.storage.ReadDir(context.Background(), idx.indexRootDir) - if err != nil { - return nil, err - } - // convert known values to set - allValues = make(map[string]struct{}, len(entries)) - for _, e := range entries { - allValues[path.Base(e)] = struct{}{} - } - } - - // convert requested values to set - valueSet := make(map[string]struct{}, len(values)) - for _, v := range values { - valueSet[v] = struct{}{} - } - - var matches = []string{} - for v := range valueSet { - if _, ok := allValues[v]; ok || len(allValues) == 0 { - oldname, err := idx.storage.ResolveSymlink(context.Background(), path.Join(idx.indexRootDir, v)) - if err != nil { - continue - } - matches = append(matches, oldname) - } - } - - if len(matches) == 0 { - var v string - switch len(values) { - case 0: - v = "none" - case 1: - v = values[0] - default: - v = "multiple" - } - return nil, &idxerrs.NotFoundErr{TypeName: idx.typeName, IndexBy: idx.indexBy, Value: v} - } - - return matches, nil -} - -// Add a new value to the index. -func (idx *Autoincrement) Add(id, v string) (string, error) { - var newName string - if v == "" { - next, err := idx.next() - if err != nil { - return "", err - } - newName = path.Join(idx.indexRootDir, strconv.Itoa(next)) - } else { - newName = path.Join(idx.indexRootDir, v) - } - if err := idx.storage.CreateSymlink(context.Background(), id, newName); err != nil { - if os.IsExist(err) { - return "", &idxerrs.AlreadyExistsErr{TypeName: idx.typeName, IndexBy: idx.indexBy, Value: v} - } - - return "", err - } - - return newName, nil -} - -// Remove a value v from an index. -func (idx *Autoincrement) Remove(_ string, v string) error { - if v == "" { - return nil - } - searchPath := path.Join(idx.indexRootDir, v) - _, err := idx.storage.ResolveSymlink(context.Background(), searchPath) - if err != nil { - if os.IsNotExist(err) { - err = &idxerrs.NotFoundErr{TypeName: idx.typeName, IndexBy: idx.indexBy, Value: v} - } - - return err - } - - deletePath := path.Join(idx.indexRootDir, v) - return idx.storage.Delete(context.Background(), deletePath) -} - -// Update index from to . 
-func (idx *Autoincrement) Update(id, oldV, newV string) error { - if err := idx.Remove(id, oldV); err != nil { - return err - } - - _, err := idx.Add(id, newV) - return err -} - -// Search allows for glob search on the index. -func (idx *Autoincrement) Search(pattern string) ([]string, error) { - paths, err := idx.storage.ReadDir(context.Background(), idx.indexRootDir) - if err != nil { - return nil, err - } - - searchPath := idx.indexRootDir - matches := make([]string, 0) - for _, p := range paths { - if found, err := filepath.Match(pattern, path.Base(p)); found { - if err != nil { - return nil, err - } - - oldPath, err := idx.storage.ResolveSymlink(context.Background(), path.Join(searchPath, path.Base(p))) - if err != nil { - return nil, err - } - matches = append(matches, oldPath) - } - } - - return matches, nil -} - -// CaseInsensitive undocumented. -func (idx *Autoincrement) CaseInsensitive() bool { - return false -} - -// IndexBy undocumented. -func (idx *Autoincrement) IndexBy() option.IndexBy { - return idx.indexBy -} - -// TypeName undocumented. -func (idx *Autoincrement) TypeName() string { - return idx.typeName -} - -// FilesDir undocumented. -func (idx *Autoincrement) FilesDir() string { - return idx.filesDir -} - -func (idx *Autoincrement) next() (int, error) { - paths, err := idx.storage.ReadDir(context.Background(), idx.indexRootDir) - - if err != nil { - return -1, err - } - - if len(paths) == 0 { - return int(idx.bound.Lower), nil - } - - sort.Slice(paths, func(i, j int) bool { - a, _ := strconv.Atoi(path.Base(paths[i])) - b, _ := strconv.Atoi(path.Base(paths[j])) - return a < b - }) - - latest, err := strconv.Atoi(path.Base(paths[len(paths)-1])) // would returning a string be a better interface? - if err != nil { - return -1, err - } - - if int64(latest) < idx.bound.Lower { - return int(idx.bound.Lower), nil - } - - return latest + 1, nil -} - -// Delete deletes the index folder from its storage. -func (idx *Autoincrement) Delete() error { - return idx.storage.Delete(context.Background(), idx.indexRootDir) -} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index/index.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index/index.go deleted file mode 100644 index d5085e570..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index/index.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package index - -import ( - "context" - - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/option" -) - -// Index can be implemented to create new indexer-strategies. See Unique for example. 
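The deleted autoincrement index above derives the next value in `next()` by reading the existing entry names, sorting them numerically, and returning one past the highest, never going below the configured lower bound. A self-contained sketch of that logic over a plain slice of names (error handling simplified to fall back to the lower bound):

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
)

// nextID sketches the autoincrement logic of the deleted index: entry names
// are interpreted as integers and the next value is one past the highest,
// but never below the configured lower bound.
func nextID(existing []string, lowerBound int) int {
	if len(existing) == 0 {
		return lowerBound
	}
	sort.Slice(existing, func(i, j int) bool {
		a, _ := strconv.Atoi(existing[i])
		b, _ := strconv.Atoi(existing[j])
		return a < b
	})
	latest, err := strconv.Atoi(existing[len(existing)-1])
	if err != nil || latest < lowerBound {
		return lowerBound
	}
	return latest + 1
}

func main() {
	fmt.Println(nextID(nil, 1000))                      // 1000
	fmt.Println(nextID([]string{"1000", "1001"}, 1000)) // 1002
}
```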
-// Each indexer implementation is bound to one data-column (IndexBy) and a data-type (TypeName) -type Index interface { - Init() error - Lookup(v string) ([]string, error) - LookupCtx(ctx context.Context, v ...string) ([]string, error) - Add(id, v string) (string, error) - Remove(id string, v string) error - Update(id, oldV, newV string) error - Search(pattern string) ([]string, error) - CaseInsensitive() bool - IndexBy() option.IndexBy - TypeName() string - FilesDir() string - Delete() error // Delete deletes the index folder from its storage. -} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index/non_unique.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index/non_unique.go deleted file mode 100644 index 1bf7f9b73..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index/non_unique.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package index - -import ( - "context" - "os" - "path" - "path/filepath" - "strings" - - idxerrs "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/errors" - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/option" - metadata "github.com/opencloud-eu/reva/v2/pkg/storage/utils/metadata" -) - -// NonUnique are fields for an index of type non_unique. -type NonUnique struct { - caseInsensitive bool - indexBy option.IndexBy - typeName string - filesDir string - indexBaseDir string - indexRootDir string - - storage metadata.Storage -} - -// NewNonUniqueIndexWithOptions instantiates a new NonUniqueIndex instance. -// /tmp/ocis/accounts/index.cs3/Pets/Bro* -// ├── Brown/ -// │ └── rebef-123 -> /tmp/testfiles-395764020/pets/rebef-123 -// ├── Green/ -// │ ├── goefe-789 -> /tmp/testfiles-395764020/pets/goefe-789 -// │ └── xadaf-189 -> /tmp/testfiles-395764020/pets/xadaf-189 -// └── White/ -// | └── wefwe-456 -> /tmp/testfiles-395764020/pets/wefwe-456 -func NewNonUniqueIndexWithOptions(storage metadata.Storage, o ...option.Option) Index { - opts := &option.Options{} - for _, opt := range o { - opt(opts) - } - - return &NonUnique{ - storage: storage, - caseInsensitive: opts.CaseInsensitive, - indexBy: opts.IndexBy, - typeName: opts.TypeName, - filesDir: opts.FilesDir, - indexBaseDir: path.Join(opts.Prefix, "index."+storage.Backend()), - indexRootDir: path.Join(opts.Prefix, "index."+storage.Backend(), strings.Join([]string{"non_unique", opts.TypeName, opts.IndexBy.String()}, ".")), - } -} - -// Init initializes a non_unique index. -func (idx *NonUnique) Init() error { - if err := idx.storage.MakeDirIfNotExist(context.Background(), idx.indexBaseDir); err != nil { - return err - } - - return idx.storage.MakeDirIfNotExist(context.Background(), idx.indexRootDir) -} - -// Lookup exact lookup by value. 
-func (idx *NonUnique) Lookup(v string) ([]string, error) { - return idx.LookupCtx(context.Background(), v) -} - -// LookupCtx retieves multiple exact values and allows passing in a context -func (idx *NonUnique) LookupCtx(ctx context.Context, values ...string) ([]string, error) { - // prefetch all values with one request - entries, err := idx.storage.ReadDir(context.Background(), idx.indexRootDir) - if err != nil { - return nil, err - } - // convert known values to set - allValues := make(map[string]struct{}, len(entries)) - for _, e := range entries { - allValues[path.Base(e)] = struct{}{} - } - - // convert requested values to set - valueSet := make(map[string]struct{}, len(values)) - if idx.caseInsensitive { - for _, v := range values { - valueSet[strings.ToLower(v)] = struct{}{} - } - } else { - for _, v := range values { - valueSet[v] = struct{}{} - } - } - - var matches = map[string]struct{}{} - for v := range valueSet { - if _, ok := allValues[v]; ok { - children, err := idx.storage.ReadDir(context.Background(), filepath.Join(idx.indexRootDir, v)) - if err != nil { - continue - } - for _, c := range children { - matches[path.Base(c)] = struct{}{} - } - } - } - - if len(matches) == 0 { - var v string - switch len(values) { - case 0: - v = "none" - case 1: - v = values[0] - default: - v = "multiple" - } - return nil, &idxerrs.NotFoundErr{TypeName: idx.typeName, IndexBy: idx.indexBy, Value: v} - } - - ret := make([]string, 0, len(matches)) - for m := range matches { - ret = append(ret, m) - } - return ret, nil -} - -// Add a new value to the index. -func (idx *NonUnique) Add(id, v string) (string, error) { - if v == "" { - return "", nil - } - if idx.caseInsensitive { - v = strings.ToLower(v) - } - - newName := path.Join(idx.indexRootDir, v) - if err := idx.storage.MakeDirIfNotExist(context.Background(), newName); err != nil { - return "", err - } - - if err := idx.storage.CreateSymlink(context.Background(), id, path.Join(newName, id)); err != nil { - if os.IsExist(err) { - return "", &idxerrs.AlreadyExistsErr{TypeName: idx.typeName, IndexBy: idx.indexBy, Value: v} - } - - return "", err - } - - return newName, nil -} - -// Remove a value v from an index. -func (idx *NonUnique) Remove(id string, v string) error { - if v == "" { - return nil - } - if idx.caseInsensitive { - v = strings.ToLower(v) - } - - deletePath := path.Join(idx.indexRootDir, v, id) - err := idx.storage.Delete(context.Background(), deletePath) - if err != nil { - return err - } - - toStat := path.Join(idx.indexRootDir, v) - infos, err := idx.storage.ReadDir(context.Background(), toStat) - if err != nil { - return err - } - - if len(infos) == 0 { - deletePath = path.Join(idx.indexRootDir, v) - err := idx.storage.Delete(context.Background(), deletePath) - if err != nil { - return err - } - } - - return nil -} - -// Update index from to . -func (idx *NonUnique) Update(id, oldV, newV string) error { - if idx.caseInsensitive { - oldV = strings.ToLower(oldV) - newV = strings.ToLower(newV) - } - - if err := idx.Remove(id, oldV); err != nil { - return err - } - - if _, err := idx.Add(id, newV); err != nil { - return err - } - - return nil -} - -// Search allows for glob search on the index. 
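The non-unique index that is being removed here uses the directory layout shown in the tree diagram above: one directory per indexed value, one symlink per document id inside it, with `Remove` pruning value directories that become empty. A local-filesystem sketch of the `Add` step; paths and the symlink target layout are illustrative assumptions, the real code goes through the metadata storage abstraction:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// addNonUnique sketches the layout from the tree diagram above:
// <indexRoot>/<value>/<id> -> <filesDir>/<id>
func addNonUnique(indexRoot, value, id, filesDir string) error {
	valueDir := filepath.Join(indexRoot, value)
	if err := os.MkdirAll(valueDir, 0o700); err != nil {
		return err
	}
	// e.g. /tmp/index.non_unique.Pets.Color/Green/goefe-789 -> /tmp/pets/goefe-789
	return os.Symlink(filepath.Join(filesDir, id), filepath.Join(valueDir, id))
}

func main() {
	err := addNonUnique("/tmp/index.non_unique.Pets.Color", "Green", "goefe-789", "/tmp/pets")
	fmt.Println(err)
}
```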
-func (idx *NonUnique) Search(pattern string) ([]string, error) { - if idx.caseInsensitive { - pattern = strings.ToLower(pattern) - } - - foldersMatched := make([]string, 0) - matches := make([]string, 0) - paths, err := idx.storage.ReadDir(context.Background(), idx.indexRootDir) - - if err != nil { - return nil, err - } - - for _, p := range paths { - if found, err := filepath.Match(pattern, path.Base(p)); found { - if err != nil { - return nil, err - } - - foldersMatched = append(foldersMatched, p) - } - } - - for i := range foldersMatched { - paths, _ := idx.storage.ReadDir(context.Background(), foldersMatched[i]) - - for _, p := range paths { - matches = append(matches, path.Base(p)) - } - } - - if len(matches) == 0 { - return nil, &idxerrs.NotFoundErr{TypeName: idx.typeName, IndexBy: idx.indexBy, Value: pattern} - } - - return matches, nil -} - -// CaseInsensitive undocumented. -func (idx *NonUnique) CaseInsensitive() bool { - return idx.caseInsensitive -} - -// IndexBy undocumented. -func (idx *NonUnique) IndexBy() option.IndexBy { - return idx.indexBy -} - -// TypeName undocumented. -func (idx *NonUnique) TypeName() string { - return idx.typeName -} - -// FilesDir undocumented. -func (idx *NonUnique) FilesDir() string { - return idx.filesDir -} - -// Delete deletes the index folder from its storage. -func (idx *NonUnique) Delete() error { - return idx.storage.Delete(context.Background(), idx.indexRootDir) -} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index/unique.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index/unique.go deleted file mode 100644 index c67b9d49c..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index/unique.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package index - -import ( - "context" - "os" - "path" - "path/filepath" - "strings" - - idxerrs "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/errors" - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/option" - metadata "github.com/opencloud-eu/reva/v2/pkg/storage/utils/metadata" -) - -// Unique are fields for an index of type unique. -type Unique struct { - caseInsensitive bool - indexBy option.IndexBy - typeName string - filesDir string - indexBaseDir string - indexRootDir string - - storage metadata.Storage -} - -// NewUniqueIndexWithOptions instantiates a new UniqueIndex instance. Init() should be -// called afterward to ensure correct on-disk structure. 
-func NewUniqueIndexWithOptions(storage metadata.Storage, o ...option.Option) Index { - opts := &option.Options{} - for _, opt := range o { - opt(opts) - } - - u := &Unique{ - storage: storage, - caseInsensitive: opts.CaseInsensitive, - indexBy: opts.IndexBy, - typeName: opts.TypeName, - filesDir: opts.FilesDir, - indexBaseDir: path.Join(opts.Prefix, "index."+storage.Backend()), - indexRootDir: path.Join(opts.Prefix, "index."+storage.Backend(), strings.Join([]string{"unique", opts.TypeName, opts.IndexBy.String()}, ".")), - } - - return u -} - -// Init initializes a unique index. -func (idx *Unique) Init() error { - if err := idx.storage.MakeDirIfNotExist(context.Background(), idx.indexBaseDir); err != nil { - return err - } - - return idx.storage.MakeDirIfNotExist(context.Background(), idx.indexRootDir) -} - -// Lookup exact lookup by value. -func (idx *Unique) Lookup(v string) ([]string, error) { - return idx.LookupCtx(context.Background(), v) -} - -// LookupCtx retieves multiple exact values and allows passing in a context -func (idx *Unique) LookupCtx(ctx context.Context, values ...string) ([]string, error) { - var allValues map[string]struct{} - if len(values) != 1 { - // prefetch all values with one request - entries, err := idx.storage.ReadDir(context.Background(), idx.indexRootDir) - if err != nil { - return nil, err - } - // convert known values to set - allValues = make(map[string]struct{}, len(entries)) - for _, e := range entries { - allValues[path.Base(e)] = struct{}{} - } - } - - // convert requested values to set - valueSet := make(map[string]struct{}, len(values)) - if idx.caseInsensitive { - for _, v := range values { - valueSet[strings.ToLower(v)] = struct{}{} - } - } else { - for _, v := range values { - valueSet[v] = struct{}{} - } - } - - var matches = make([]string, 0) - for v := range valueSet { - if _, ok := allValues[v]; ok || len(allValues) == 0 { - oldname, err := idx.storage.ResolveSymlink(context.Background(), path.Join(idx.indexRootDir, v)) - if err != nil { - continue - } - matches = append(matches, oldname) - } - } - - if len(matches) == 0 { - var v string - switch len(values) { - case 0: - v = "none" - case 1: - v = values[0] - default: - v = "multiple" - } - return nil, &idxerrs.NotFoundErr{TypeName: idx.typeName, IndexBy: idx.indexBy, Value: v} - } - - return matches, nil -} - -// Add adds a value to the index, returns the path to the root-document -func (idx *Unique) Add(id, v string) (string, error) { - if v == "" { - return "", nil - } - if idx.caseInsensitive { - v = strings.ToLower(v) - } - target := path.Join(idx.filesDir, id) - newName := path.Join(idx.indexRootDir, v) - if err := idx.storage.CreateSymlink(context.Background(), target, newName); err != nil { - if os.IsExist(err) { - return "", &idxerrs.AlreadyExistsErr{TypeName: idx.typeName, IndexBy: idx.indexBy, Value: v} - } - - return "", err - } - - return newName, nil -} - -// Remove a value v from an index. -func (idx *Unique) Remove(_ string, v string) error { - if v == "" { - return nil - } - if idx.caseInsensitive { - v = strings.ToLower(v) - } - searchPath := path.Join(idx.indexRootDir, v) - _, err := idx.storage.ResolveSymlink(context.Background(), searchPath) - if err != nil { - if os.IsNotExist(err) { - err = &idxerrs.NotFoundErr{TypeName: idx.typeName, IndexBy: idx.indexBy, Value: v} - } - - return err - } - - deletePath := path.Join(idx.indexRootDir, v) - return idx.storage.Delete(context.Background(), deletePath) -} - -// Update index from to . 
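The unique variant above gets its uniqueness guarantee directly from the filesystem: the symlink is named after the indexed value, so a second `Add` with the same value fails with an "already exists" error. A local-filesystem sketch of that idea, again bypassing the metadata storage abstraction used by the real code:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// addUnique sketches the deleted Unique.Add: the symlink named after the
// value can only be created once, so duplicates surface as os.IsExist.
func addUnique(indexRoot, value, id, filesDir string) (string, error) {
	link := filepath.Join(indexRoot, value)
	if err := os.Symlink(filepath.Join(filesDir, id), link); err != nil {
		if os.IsExist(err) {
			return "", fmt.Errorf("%s already indexed", value)
		}
		return "", err
	}
	return link, nil
}

func main() {
	p, err := addUnique("/tmp/index.unique.users.UserName", "jdoe", "user-1", "/tmp/users")
	fmt.Println(p, err)
}
```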
-func (idx *Unique) Update(id, oldV, newV string) error { - if idx.caseInsensitive { - oldV = strings.ToLower(oldV) - newV = strings.ToLower(newV) - } - - if err := idx.Remove(id, oldV); err != nil { - return err - } - - if _, err := idx.Add(id, newV); err != nil { - return err - } - - return nil -} - -// Search allows for glob search on the index. -func (idx *Unique) Search(pattern string) ([]string, error) { - if idx.caseInsensitive { - pattern = strings.ToLower(pattern) - } - - paths, err := idx.storage.ReadDir(context.Background(), idx.indexRootDir) - if err != nil { - return nil, err - } - - searchPath := idx.indexRootDir - matches := make([]string, 0) - for _, p := range paths { - if found, err := filepath.Match(pattern, path.Base(p)); found { - if err != nil { - return nil, err - } - - oldPath, err := idx.storage.ResolveSymlink(context.Background(), path.Join(searchPath, path.Base(p))) - if err != nil { - return nil, err - } - matches = append(matches, oldPath) - } - } - - if len(matches) == 0 { - return nil, &idxerrs.NotFoundErr{TypeName: idx.typeName, IndexBy: idx.indexBy, Value: pattern} - } - - return matches, nil -} - -// CaseInsensitive undocumented. -func (idx *Unique) CaseInsensitive() bool { - return idx.caseInsensitive -} - -// IndexBy undocumented. -func (idx *Unique) IndexBy() option.IndexBy { - return idx.indexBy -} - -// TypeName undocumented. -func (idx *Unique) TypeName() string { - return idx.typeName -} - -// FilesDir undocumented. -func (idx *Unique) FilesDir() string { - return idx.filesDir -} - -// Delete deletes the index folder from its storage. -func (idx *Unique) Delete() error { - return idx.storage.Delete(context.Background(), idx.indexRootDir) -} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/indexer.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/indexer.go deleted file mode 100644 index a2e9cba83..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/indexer.go +++ /dev/null @@ -1,469 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -// Package indexer provides symlink-based indexer for on-disk document-directories. -package indexer - -import ( - "context" - "errors" - "fmt" - "path" - "strings" - - "github.com/CiscoM31/godata" - "github.com/iancoleman/strcase" - - "github.com/opencloud-eu/reva/v2/pkg/errtypes" - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index" - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/option" - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/metadata" - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/sync" -) - -// Indexer is a facade to configure and query over multiple indices. 
-type Indexer interface { - AddIndex(t interface{}, indexBy option.IndexBy, pkName, entityDirName, indexType string, bound *option.Bound, caseInsensitive bool) error - Add(t interface{}) ([]IdxAddResult, error) - FindBy(t interface{}, fields ...Field) ([]string, error) - Delete(t interface{}) error -} - -// Field combines the name and value of an indexed field. -type Field struct { - Name string - Value string -} - -// NewField is a utility function to create a new Field. -func NewField(name, value string) Field { - return Field{Name: name, Value: value} -} - -// StorageIndexer is the indexer implementation using metadata storage -type StorageIndexer struct { - storage metadata.Storage - indices typeMap - mu sync.NamedRWMutex -} - -// IdxAddResult represents the result of an Add call on an index -type IdxAddResult struct { - Field, Value string -} - -// CreateIndexer creates a new Indexer. -func CreateIndexer(storage metadata.Storage) Indexer { - return &StorageIndexer{ - storage: storage, - indices: typeMap{}, - mu: sync.NewNamedRWMutex(), - } -} - -// Reset takes care of deleting all indices from storage and from the internal map of indices -func (i *StorageIndexer) Reset() error { - for j := range i.indices { - for _, indices := range i.indices[j].IndicesByField { - for _, idx := range indices { - err := idx.Delete() - if err != nil { - return err - } - } - } - delete(i.indices, j) - } - - return nil -} - -// AddIndex adds a new index to the indexer receiver. -func (i *StorageIndexer) AddIndex(t interface{}, indexBy option.IndexBy, pkName, entityDirName, indexType string, bound *option.Bound, caseInsensitive bool) error { - var idx index.Index - - var f func(metadata.Storage, ...option.Option) index.Index - switch indexType { - case "unique": - f = index.NewUniqueIndexWithOptions - case "non_unique": - f = index.NewNonUniqueIndexWithOptions - case "autoincrement": - f = index.NewAutoincrementIndex - default: - return fmt.Errorf("invalid index type: %s", indexType) - } - idx = f( - i.storage, - option.CaseInsensitive(caseInsensitive), - option.WithBounds(bound), - option.WithIndexBy(indexBy), - option.WithTypeName(getTypeFQN(t)), - ) - - i.indices.addIndex(getTypeFQN(t), pkName, idx) - return idx.Init() -} - -// Add a new entry to the indexer -func (i *StorageIndexer) Add(t interface{}) ([]IdxAddResult, error) { - typeName := getTypeFQN(t) - - i.mu.Lock(typeName) - defer i.mu.Unlock(typeName) - - var results []IdxAddResult - if fields, ok := i.indices[typeName]; ok { - for _, indices := range fields.IndicesByField { - for _, idx := range indices { - pkVal, err := valueOf(t, option.IndexByField(fields.PKFieldName)) - if err != nil { - return []IdxAddResult{}, err - } - idxByVal, err := valueOf(t, idx.IndexBy()) - if err != nil { - return []IdxAddResult{}, err - } - value, err := idx.Add(pkVal, idxByVal) - if err != nil { - return []IdxAddResult{}, err - } - if value == "" { - continue - } - results = append(results, IdxAddResult{Field: idx.IndexBy().String(), Value: value}) - } - } - } - - return results, nil -} - -// FindBy finds a value on an index by fields. -// If multiple fields are given then they are handled like an or condition. 
-func (i *StorageIndexer) FindBy(t interface{}, queryFields ...Field) ([]string, error) { - typeName := getTypeFQN(t) - - i.mu.RLock(typeName) - defer i.mu.RUnlock(typeName) - - resultPaths := make(map[string]struct{}) - if fields, ok := i.indices[typeName]; ok { - for fieldName, queryFields := range groupFieldsByName(queryFields) { - idxes := fields.IndicesByField[strcase.ToCamel(fieldName)] - values := make([]string, 0, len(queryFields)) - for _, f := range queryFields { - values = append(values, f.Value) - } - for _, idx := range idxes { - res, err := idx.LookupCtx(context.Background(), values...) - if err != nil { - if _, ok := err.(errtypes.IsNotFound); ok { - continue - } - - if err != nil { - return nil, err - } - } - for _, r := range res { - resultPaths[path.Base(r)] = struct{}{} - } - } - } - } - - result := make([]string, 0, len(resultPaths)) - for p := range resultPaths { - result = append(result, path.Base(p)) - } - - return result, nil -} - -// groupFieldsByName groups the given filters and returns a map using the filter type as the key. -func groupFieldsByName(queryFields []Field) map[string][]Field { - grouped := make(map[string][]Field) - for _, f := range queryFields { - grouped[f.Name] = append(grouped[f.Name], f) - } - return grouped -} - -// Delete deletes all indexed fields of a given type t on the Indexer. -func (i *StorageIndexer) Delete(t interface{}) error { - typeName := getTypeFQN(t) - - i.mu.Lock(typeName) - defer i.mu.Unlock(typeName) - - if fields, ok := i.indices[typeName]; ok { - for _, indices := range fields.IndicesByField { - for _, idx := range indices { - pkVal, err := valueOf(t, option.IndexByField(fields.PKFieldName)) - if err != nil { - return err - } - idxByVal, err := valueOf(t, idx.IndexBy()) - if err != nil { - return err - } - if err := idx.Remove(pkVal, idxByVal); err != nil { - return err - } - } - } - } - - return nil -} - -// FindByPartial allows for glob search across all indexes. -func (i *StorageIndexer) FindByPartial(t interface{}, field string, pattern string) ([]string, error) { - typeName := getTypeFQN(t) - - i.mu.RLock(typeName) - defer i.mu.RUnlock(typeName) - - resultPaths := make([]string, 0) - if fields, ok := i.indices[typeName]; ok { - for _, idx := range fields.IndicesByField[strcase.ToCamel(field)] { - res, err := idx.Search(pattern) - if err != nil { - if _, ok := err.(errtypes.IsNotFound); ok { - continue - } - - if err != nil { - return nil, err - } - } - - resultPaths = append(resultPaths, res...) - - } - } - - result := make([]string, 0, len(resultPaths)) - for _, v := range resultPaths { - result = append(result, path.Base(v)) - } - - return result, nil - -} - -// Update updates all indexes on a value to a value . 
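`FindBy` above first groups the query fields by name so that several values for the same field are resolved as one OR lookup per index. A tiny self-contained sketch of that grouping step, with a local `field` type standing in for the indexer's `Field`:

```go
package main

import "fmt"

// field mirrors the shape of the deleted indexer's Field type.
type field struct{ Name, Value string }

// groupByName mirrors the grouping step above: filters on the same field are
// collected so each index can be queried once with all requested values.
func groupByName(fields []field) map[string][]string {
	grouped := map[string][]string{}
	for _, f := range fields {
		grouped[f.Name] = append(grouped[f.Name], f.Value)
	}
	return grouped
}

func main() {
	q := []field{{"Mail", "a@example.org"}, {"Mail", "b@example.org"}, {"UserName", "jdoe"}}
	fmt.Println(groupByName(q)) // map[Mail:[a@example.org b@example.org] UserName:[jdoe]]
}
```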
-func (i *StorageIndexer) Update(from, to interface{}) error { - typeNameFrom := getTypeFQN(from) - - i.mu.Lock(typeNameFrom) - defer i.mu.Unlock(typeNameFrom) - - if typeNameTo := getTypeFQN(to); typeNameFrom != typeNameTo { - return fmt.Errorf("update types do not match: from %v to %v", typeNameFrom, typeNameTo) - } - - if fields, ok := i.indices[typeNameFrom]; ok { - for fName, indices := range fields.IndicesByField { - oldV, err := valueOf(from, option.IndexByField(fName)) - if err != nil { - return err - } - newV, err := valueOf(to, option.IndexByField(fName)) - if err != nil { - return err - } - pkVal, err := valueOf(from, option.IndexByField(fields.PKFieldName)) - if err != nil { - return err - } - for _, idx := range indices { - if oldV == newV { - continue - } - if oldV == "" { - if _, err := idx.Add(pkVal, newV); err != nil { - return err - } - continue - } - if newV == "" { - if err := idx.Remove(pkVal, oldV); err != nil { - return err - } - continue - } - if err := idx.Update(pkVal, oldV, newV); err != nil { - return err - } - } - } - } - - return nil -} - -// Query parses an OData query into something our indexer.Index understands and resolves it. -func (i *StorageIndexer) Query(ctx context.Context, t interface{}, q string) ([]string, error) { - query, err := godata.ParseFilterString(ctx, q) - if err != nil { - return nil, err - } - - tree := newQueryTree() - if err := buildTreeFromOdataQuery(query.Tree, &tree); err != nil { - return nil, err - } - - results := make([]string, 0) - if err := i.resolveTree(t, &tree, &results); err != nil { - return nil, err - } - - return results, nil -} - -// t is used to infer the indexed field names. When building an index search query, field names have to respect Golang -// conventions and be in PascalCase. For a better overview on this contemplate reading the reflection package under the -// indexer directory. Traversal of the tree happens in a pre-order fashion. -// TODO implement logic for `and` operators. -func (i *StorageIndexer) resolveTree(t interface{}, tree *queryTree, partials *[]string) error { - if partials == nil { - return errors.New("return value cannot be nil: partials") - } - - if tree.left != nil { - _ = i.resolveTree(t, tree.left, partials) - } - - if tree.right != nil { - _ = i.resolveTree(t, tree.right, partials) - } - - // by the time we're here we reached a leaf node. - if tree.token != nil { - switch tree.token.filterType { - case "FindBy": - operand, err := sanitizeInput(tree.token.operands) - if err != nil { - return err - } - - field := Field{Name: operand.field, Value: operand.value} - r, err := i.FindBy(t, field) - if err != nil { - return err - } - - *partials = append(*partials, r...) - case "FindByPartial": - operand, err := sanitizeInput(tree.token.operands) - if err != nil { - return err - } - - r, err := i.FindByPartial(t, operand.field, fmt.Sprintf("%v*", operand.value)) - if err != nil { - return err - } - - *partials = append(*partials, r...) - default: - return fmt.Errorf("unsupported filter: %v", tree.token.filterType) - } - } - - *partials = dedup(*partials) - return nil -} - -type indexerTuple struct { - field, value string -} - -// sanitizeInput returns a tuple of fieldName + value to be applied on indexer.Index filters. 
-func sanitizeInput(operands []string) (*indexerTuple, error) { - if len(operands) != 2 { - return nil, fmt.Errorf("invalid number of operands for filter function: got %v expected 2", len(operands)) - } - - // field names are Go public types and by design they are in PascalCase, therefore we need to adhere to this rules. - // for further information on this have a look at the reflection package. - f := strcase.ToCamel(operands[0]) - - // remove single quotes from value. - v := strings.ReplaceAll(operands[1], "'", "") - return &indexerTuple{ - field: f, - value: v, - }, nil -} - -// buildTreeFromOdataQuery builds an indexer.queryTree out of a GOData ParseNode. The purpose of this intermediate tree -// is to transform godata operators and functions into supported operations on our index. At the time of this writing -// we only support `FindBy` and `FindByPartial` queries as these are the only implemented filters on indexer.Index(es). -func buildTreeFromOdataQuery(root *godata.ParseNode, tree *queryTree) error { - if root.Token.Type == godata.ExpressionTokenFunc { // i.e "startswith", "contains" - switch root.Token.Value { - case "startswith": - token := token{ - operator: root.Token.Value, - filterType: "FindByPartial", - // TODO sanitize the number of operands it the expected one. - operands: []string{ - root.Children[0].Token.Value, // field name, i.e: Name - root.Children[1].Token.Value, // field value, i.e: Jac - }, - } - - tree.insert(&token) - default: - return errors.New("operation not supported") - } - } - - if root.Token.Type == godata.ExpressionTokenLogical { - switch root.Token.Value { - case "or": - tree.insert(&token{operator: root.Token.Value}) - for _, child := range root.Children { - if err := buildTreeFromOdataQuery(child, tree.left); err != nil { - return err - } - } - case "eq": - tree.insert(&token{ - operator: root.Token.Value, - filterType: "FindBy", - operands: []string{ - root.Children[0].Token.Value, - root.Children[1].Token.Value, - }, - }) - for _, child := range root.Children { - if err := buildTreeFromOdataQuery(child, tree.left); err != nil { - return err - } - } - default: - return errors.New("operator not supported") - } - } - return nil -} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/map.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/map.go deleted file mode 100644 index 546759c46..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/map.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package indexer - -import ( - "github.com/iancoleman/strcase" - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index" -) - -// typeMap stores the indexer layout at runtime. 
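The deleted `Query`/`buildTreeFromOdataQuery` code above translates an OData filter into index lookups: `eq` becomes a `FindBy`, `startswith` becomes a `FindByPartial` glob, and `or` unions the results. As a rough illustration of the parse tree it walks, here is a small sketch that prints the godata tree for such a filter; it assumes only the `ParseFilterString`, `Tree`, `Token`, and `Children` shapes visible in the deleted code:

```go
package main

import (
	"context"
	"fmt"

	"github.com/CiscoM31/godata"
)

// printTree walks the godata parse tree, showing the shape that the deleted
// buildTreeFromOdataQuery translated into index lookups.
func printTree(n *godata.ParseNode, depth int) {
	if n == nil {
		return
	}
	fmt.Printf("%*s%s\n", depth*2, "", n.Token.Value)
	for _, c := range n.Children {
		printTree(c, depth+1)
	}
}

func main() {
	q, err := godata.ParseFilterString(context.Background(),
		"startswith(UserName,'jd') or Mail eq 'jdoe@example.org'")
	if err != nil {
		panic(err)
	}
	printTree(q.Tree, 0)
}
```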
- -type typeMap map[tName]typeMapping -type tName = string -type fieldName = string - -type typeMapping struct { - PKFieldName string - IndicesByField map[fieldName][]index.Index -} - -func (m typeMap) addIndex(typeName string, pkName string, idx index.Index) { - if val, ok := m[typeName]; ok { - val.IndicesByField[strcase.ToCamel(idx.IndexBy().String())] = append(val.IndicesByField[strcase.ToCamel(idx.IndexBy().String())], idx) - return - } - m[typeName] = typeMapping{ - PKFieldName: pkName, - IndicesByField: map[string][]index.Index{ - strcase.ToCamel(idx.IndexBy().String()): {idx}, - }, - } -} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/option/option.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/option/option.go deleted file mode 100644 index d609e8049..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/option/option.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package option - -import ( - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/metadata" -) - -// Option defines a single option function. -type Option func(o *Options) - -// IndexBy defines how the data is being indexed -type IndexBy interface { - String() string -} - -// IndexByField represents the field that's being used to index the data by -type IndexByField string - -// String returns a string representation -func (ibf IndexByField) String() string { - return string(ibf) -} - -// IndexByFunc represents a function that's being used to index the data by -type IndexByFunc struct { - Name string - Func func(v interface{}) (string, error) -} - -// String returns a string representation -func (ibf IndexByFunc) String() string { - return ibf.Name -} - -// Bound represents a lower and upper bound range for an index. -// todo: if we would like to provide an upper bound then we would need to deal with ranges, in which case this is why the -// upper bound attribute is here. -type Bound struct { - Lower, Upper int64 -} - -// Options defines the available options for this package. -type Options struct { - CaseInsensitive bool - Bound *Bound - - TypeName string - IndexBy IndexBy - FilesDir string - Prefix string - - Storage metadata.Storage -} - -// CaseInsensitive sets the CaseInsensitive field. -func CaseInsensitive(val bool) Option { - return func(o *Options) { - o.CaseInsensitive = val - } -} - -// WithBounds sets the Bounds field. -func WithBounds(val *Bound) Option { - return func(o *Options) { - o.Bound = val - } -} - -// WithTypeName sets the TypeName option. -func WithTypeName(val string) Option { - return func(o *Options) { - o.TypeName = val - } -} - -// WithIndexBy sets the option IndexBy. 
-func WithIndexBy(val IndexBy) Option { - return func(o *Options) { - o.IndexBy = val - } -} - -// WithFilesDir sets the option FilesDir. -func WithFilesDir(val string) Option { - return func(o *Options) { - o.FilesDir = val - } -} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/query_tree.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/query_tree.go deleted file mode 100644 index 037e9b825..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/query_tree.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package indexer - -type queryTree struct { - token *token - root bool - left *queryTree - right *queryTree -} - -// token to be resolved by the index -type token struct { - operator string // original OData operator. i.e: 'startswith', `or`, `and`. - filterType string // equivalent operator from OData -> indexer i.e FindByPartial or FindBy. - operands []string -} - -// newQueryTree constructs a new tree with a root node. -func newQueryTree() queryTree { - return queryTree{ - root: true, - } -} - -// insert populates first the LHS of the tree first, if this is not possible it fills the RHS. -func (t *queryTree) insert(tkn *token) { - if t != nil && t.root { - t.left = &queryTree{token: tkn} - return - } - - if t.left == nil { - t.left = &queryTree{token: tkn} - return - } - - if t.left != nil && t.right == nil { - t.right = &queryTree{token: tkn} - return - } -} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/reflect.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/reflect.go deleted file mode 100644 index 03f2a0fab..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/reflect.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package indexer - -import ( - "errors" - "fmt" - "path" - "reflect" - "strconv" - "strings" - - "github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/option" -) - -func getType(v interface{}) (reflect.Value, error) { - rv := reflect.ValueOf(v) - for rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface { - rv = rv.Elem() - } - if !rv.IsValid() { - return reflect.Value{}, errors.New("failed to read value via reflection") - } - - return rv, nil -} - -func getTypeFQN(t interface{}) string { - typ, _ := getType(t) - typeName := path.Join(typ.Type().PkgPath(), typ.Type().Name()) - typeName = strings.ReplaceAll(typeName, "/", ".") - return typeName -} - -func valueOf(v interface{}, indexBy option.IndexBy) (string, error) { - switch idxBy := indexBy.(type) { - case option.IndexByField: - return valueOfField(v, string(idxBy)) - case option.IndexByFunc: - return idxBy.Func(v) - default: - return "", fmt.Errorf("unknown indexBy type") - } -} - -func valueOfField(v interface{}, field string) (string, error) { - parts := strings.Split(field, ".") - for i, part := range parts { - r := reflect.ValueOf(v) - if r.Kind() == reflect.Ptr { - r = r.Elem() - } - f := reflect.Indirect(r).FieldByName(part) - if f.Kind() == reflect.Ptr { - f = f.Elem() - } - - switch { - case f.Kind() == reflect.Struct && i != len(parts)-1: - v = f.Interface() - case f.Kind() == reflect.String: - return f.String(), nil - case f.IsZero(): - return "", nil - default: - return strconv.Itoa(int(f.Int())), nil - } - } - return "", nil -} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/localfs/upload.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/localfs/upload.go index 1e482e034..6e625794a 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/localfs/upload.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/localfs/upload.go @@ -312,7 +312,7 @@ func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.R // If the HTTP PATCH request gets interrupted in the middle (e.g. because // the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF. - // However, for OwnCloudStore it's not important whether the stream has ended + // However, for the driver it's not important whether the stream has ended // on purpose or accidentally. if err != nil { if err != io.ErrUnexpectedEOF { diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/sync/mutex.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/sync/mutex.go deleted file mode 100644 index f9ce47228..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/sync/mutex.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
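The removed valueOfField walks a dot-separated field path through nested structs via reflection to produce the string that gets indexed. A minimal stand-alone sketch of the same lookup; the Account and ID types are hypothetical stand-ins:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type ID struct{ OpaqueId string }

type Account struct {
	Id   *ID
	Mail string
}

// fieldByPath resolves a dot-separated path the way valueOfField did:
// dereference pointers, descend into struct fields, return the final string.
func fieldByPath(v interface{}, path string) string {
	r := reflect.ValueOf(v)
	for _, part := range strings.Split(path, ".") {
		if r.Kind() == reflect.Ptr {
			r = r.Elem()
		}
		r = r.FieldByName(part)
	}
	if r.Kind() == reflect.Ptr {
		r = r.Elem()
	}
	return r.String()
}

func main() {
	a := &Account{Id: &ID{OpaqueId: "4c510ada"}, Mail: "marie@example.org"}
	fmt.Println(fieldByPath(a, "Id.OpaqueId")) // 4c510ada
	fmt.Println(fieldByPath(a, "Mail"))        // marie@example.org
}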
- -package sync - -import ( - "sync" -) - -// NamedRWMutex works the same as RWMutex, the only difference is that it stores mutexes in a map and reuses them. -// It's handy if you want to write-lock, write-unlock, read-lock and read-unlock for specific names only. -type NamedRWMutex struct { - pool sync.Pool - mus sync.Map -} - -// NewNamedRWMutex returns a new instance of NamedRWMutex. -func NewNamedRWMutex() NamedRWMutex { - return NamedRWMutex{pool: sync.Pool{New: func() interface{} { - return new(sync.RWMutex) - }}} -} - -// Lock locks rw for writing. -func (m *NamedRWMutex) Lock(name string) { - m.loadOrStore(name).Lock() -} - -// Unlock unlocks rw for writing. -func (m *NamedRWMutex) Unlock(name string) { - m.loadOrStore(name).Unlock() -} - -// RLock locks rw for reading. -func (m *NamedRWMutex) RLock(name string) { - m.loadOrStore(name).RLock() -} - -// RUnlock undoes a single RLock call. -func (m *NamedRWMutex) RUnlock(name string) { - m.loadOrStore(name).RUnlock() -} - -func (m *NamedRWMutex) loadOrStore(name string) *sync.RWMutex { - pmu := m.pool.Get() - mmu, loaded := m.mus.LoadOrStore(name, pmu) - if loaded { - m.pool.Put(pmu) - } - - return mmu.(*sync.RWMutex) -} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/sync/sync.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/sync/sync.go deleted file mode 100644 index 83a87b953..000000000 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/utils/sync/sync.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018-2022 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package sync - -import "sync" - -var ( - // ParsingViperConfig addresses the fact that config parsing using Viper is not thread safe. - ParsingViperConfig sync.Mutex -) diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/store/store.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/store/store.go index 52f8d8682..ebd2936a2 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/store/store.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/store/store.go @@ -135,7 +135,7 @@ func Create(opts ...microstore.Option) microstore.Store { append(opts, natsjs.NatsOptions(natsOptions), // always pass in properly initialized default nats options natsjs.DefaultTTL(ttl))..., - ) // TODO test with ocis nats + ) // TODO test with OpenCloud nats case TypeNatsJSKV: // NOTE: nats needs a DefaultTTL option as it does not support per Write TTL ... 
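The removed NamedRWMutex is what StorageIndexer.Update locks on per type name earlier in this diff. A minimal sketch of the same per-name locking pattern using only sync.Map (the sync.Pool in the removed code is an allocation optimisation, not required for correctness):

package main

import (
	"fmt"
	"sync"
)

// namedRWMutex hands out one RWMutex per name, created lazily and shared by
// every caller that asks for the same name.
type namedRWMutex struct{ mus sync.Map }

func (m *namedRWMutex) get(name string) *sync.RWMutex {
	v, _ := m.mus.LoadOrStore(name, &sync.RWMutex{})
	return v.(*sync.RWMutex)
}

func main() {
	var m namedRWMutex
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			mu := m.get("accounts.Account") // writers on the same type name serialise; other names are unaffected
			mu.Lock()
			defer mu.Unlock()
			fmt.Println("updating index entry", i)
		}(i)
	}
	wg.Wait()
}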
ttl, _ := options.Context.Value(ttlContextKey{}).(time.Duration) diff --git a/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/check.go b/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/check.go index 0580ef3ec..8c9f692ef 100644 --- a/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/check.go +++ b/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/check.go @@ -1,28 +1,17 @@ package inotifywaitgo import ( - "bufio" "os/exec" ) -// Function to checkDependencies if inotifywait is installed +// CheckDependencies verifies if inotifywait is installed. func checkDependencies() (bool, error) { - cmd := exec.Command("bash", "-c", "which inotifywait") - stdout, err := cmd.StdoutPipe() + path, err := exec.LookPath("inotifywait") if err != nil { return false, err } - if err := cmd.Start(); err != nil { - return false, err - } - - // Read the output of inotifywait and split it into lines - scanner := bufio.NewScanner(stdout) - for scanner.Scan() { - line := scanner.Text() - if line != "" { - return true, nil - } + if path != "" { + return true, nil } return false, nil } diff --git a/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/command.go b/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/command.go index 0729762da..40944d475 100644 --- a/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/command.go +++ b/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/command.go @@ -6,7 +6,7 @@ import ( "strings" ) -func GenerateBashCommands(s *Settings) ([]string, error) { +func GenerateShellCommands(s *Settings) ([]string, error) { if s.Options == nil { return nil, errors.New(OPT_NIL) } @@ -34,27 +34,27 @@ func GenerateBashCommands(s *Settings) ([]string, error) { if !Contains(VALID_EVENTS, int(event)) { return nil, errors.New(INVALID_EVENT) } - baseCmd = append(baseCmd, "-e ", EVENT_MAP[int(event)]) + baseCmd = append(baseCmd, "-e", EVENT_MAP[int(event)]) } } baseCmd = append(baseCmd, s.Dir) - // remove spaces on all elements + // Trim spaces on all elements var outCmd []string for _, v := range baseCmd { outCmd = append(outCmd, strings.TrimSpace(v)) } if s.Verbose { - fmt.Println("baseCmd:", outCmd) + fmt.Println("Generated command:", strings.Join(outCmd, " ")) } return outCmd, nil } // Contains checks if a slice contains an item -func Contains[T string | int](slice []T, item T) bool { +func Contains[T comparable](slice []T, item T) bool { for _, v := range slice { if v == item { return true diff --git a/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/killer.go b/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/killer.go index d81ff0eae..a135f9df2 100644 --- a/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/killer.go +++ b/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/killer.go @@ -3,6 +3,6 @@ package inotifywaitgo import "os/exec" func killOthers() error { - cmd := exec.Command("bash", "-c", "pkill inotifywait").Run() - return cmd + cmd := exec.Command("pkill", "inotifywait") + return cmd.Run() } diff --git a/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/watcher.go b/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/watcher.go index 652933196..c77c30d17 100644 --- a/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/watcher.go +++ b/vendor/github.com/pablodz/inotifywaitgo/inotifywaitgo/watcher.go @@ -10,18 +10,16 @@ import ( "strings" ) -// Function that starts watching a path for new files and returns the file name (abspath) when a new file is finished writing +// WatchPath starts watching a path for new files and returns the file 
name (abspath) when a new file is finished writing. func WatchPath(s *Settings) { // Check if inotifywait is installed - ok, err := checkDependencies() - if !ok || err != nil { + if ok, err := checkDependencies(); !ok || err != nil { s.ErrorChan <- fmt.Errorf(NOT_INSTALLED) return } - // check if dir exists - _, err = os.Stat(s.Dir) - if os.IsNotExist(err) { + // Check if the directory exists + if _, err := os.Stat(s.Dir); os.IsNotExist(err) { s.ErrorChan <- fmt.Errorf(DIR_NOT_EXISTS) return } @@ -31,8 +29,8 @@ func WatchPath(s *Settings) { killOthers() } - // Generate bash command - cmdString, err := GenerateBashCommands(s) + // Generate shell command + cmdString, err := GenerateShellCommands(s) if err != nil { s.ErrorChan <- err return @@ -45,6 +43,7 @@ func WatchPath(s *Settings) { s.ErrorChan <- err return } + if err := cmd.Start(); err != nil { s.ErrorChan <- err return @@ -53,52 +52,69 @@ func WatchPath(s *Settings) { // Read the output of inotifywait and split it into lines scanner := bufio.NewScanner(stdout) for scanner.Scan() { - log.Println(scanner.Text()) line := scanner.Text() + log.Println(line) - r := csv.NewReader(strings.NewReader(line)) - - parts, err := r.Read() + parts, err := parseLine(line) if err != nil || len(parts) < 2 { s.ErrorChan <- fmt.Errorf(INVALID_OUTPUT) continue } - // Extract the input file name from the inotifywait output - prefix := parts[0] - file := parts[2] + prefix, file := parts[0], parts[2] + eventStrs := strings.Split(parts[1], ",") - eventsStr := strings.Split(parts[1], ",") if s.Verbose { - for _, eventStr := range eventsStr { + for _, eventStr := range eventStrs { log.Printf("eventStr: <%s>, <%s>", eventStr, line) } } - var eventsEvents []EVENT - isDir := false - for _, eventStr := range eventsStr { - if eventStr == FlagIsdir { - isDir = true - continue - } - - eventStr = strings.ToLower(eventStr) - event, ok := EVENT_MAP_REVERSE[eventStr] - if !ok { - s.ErrorChan <- fmt.Errorf("invalid eventStr: <%s>, <%s>", eventStr, line) - continue - } - eventsEvents = append(eventsEvents, EVENT(event)) + events, isDir := parseEvents(eventStrs, line, s) + if events == nil { + continue } event := FileEvent{ Filename: prefix + file, - Events: eventsEvents, + Events: events, IsDir: isDir, } // Send the file name to the channel s.FileEvents <- event } + + if err := scanner.Err(); err != nil { + s.ErrorChan <- err + } +} + +// parseLine parses a line of inotifywait output. +func parseLine(line string) ([]string, error) { + r := csv.NewReader(strings.NewReader(line)) + return r.Read() +} + +// parseEvents parses event strings into EVENT types. 
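The checkDependencies rewrite above drops the spawned bash -c "which inotifywait" in favour of exec.LookPath, which resolves the binary on PATH without forking a shell or scanning pipe output. A minimal sketch of that probe:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// exec.LookPath searches PATH directly; no shell, no scanner over stdout.
	path, err := exec.LookPath("inotifywait")
	if err != nil {
		fmt.Println("inotifywait not installed:", err)
		return
	}
	fmt.Println("found inotifywait at", path)
}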
+func parseEvents(eventStrs []string, line string, s *Settings) ([]EVENT, bool) { + var events []EVENT + isDir := false + + for _, eventStr := range eventStrs { + if eventStr == FlagIsdir { + isDir = true + continue + } + + eventStr = strings.ToLower(eventStr) + event, ok := EVENT_MAP_REVERSE[eventStr] + if !ok { + s.ErrorChan <- fmt.Errorf("invalid eventStr: <%s>, <%s>", eventStr, line) + return nil, false + } + events = append(events, EVENT(event)) + } + + return events, isDir } diff --git a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm index 99761296f..4230fba01 100644 --- a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm +++ b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm @@ -1,4 +1,4 @@ -FROM golang:1.20@sha256:2edf6aab2d57644f3fe7407132a0d1770846867465a39c2083770cf62734b05d +FROM golang:1.23@sha256:51a6466e8dbf3e00e422eb0f7a97ac450b2d57b33617bbe8d2ee0bddcd9d0d37 ENV GOOS=linux ENV GOARCH=arm @@ -10,7 +10,6 @@ ENV PKG_CONFIG_PATH=/usr/lib/arm-linux-gnueabihf/pkgconfig RUN dpkg --add-architecture armhf \ && apt update \ && apt install -y --no-install-recommends \ - upx \ gcc-arm-linux-gnueabihf \ libc6-dev-armhf-cross \ pkg-config \ diff --git a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 index 66bd09474..59928252a 100644 --- a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 +++ b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 @@ -1,4 +1,4 @@ -FROM golang:1.20@sha256:2edf6aab2d57644f3fe7407132a0d1770846867465a39c2083770cf62734b05d +FROM golang:1.23@sha256:51a6466e8dbf3e00e422eb0f7a97ac450b2d57b33617bbe8d2ee0bddcd9d0d37 ENV GOOS=linux ENV GOARCH=arm64 diff --git a/vendor/github.com/pjbgf/sha1cd/LICENSE b/vendor/github.com/pjbgf/sha1cd/LICENSE index 261eeb9e9..c8ff622ff 100644 --- a/vendor/github.com/pjbgf/sha1cd/LICENSE +++ b/vendor/github.com/pjbgf/sha1cd/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2023 pjbgf Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/pjbgf/sha1cd/Makefile b/vendor/github.com/pjbgf/sha1cd/Makefile index b24f2cbad..278a109d8 100644 --- a/vendor/github.com/pjbgf/sha1cd/Makefile +++ b/vendor/github.com/pjbgf/sha1cd/Makefile @@ -32,8 +32,7 @@ build-nocgo: cross-build: build-arm build-arm64 build-nocgo generate: - go run sha1cdblock_amd64_asm.go -out sha1cdblock_amd64.s - sed -i 's;&\samd64;&\n// +build !noasm,gc,amd64;g' sha1cdblock_amd64.s + go generate -x ./... verify: generate git diff --exit-code diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cd.go b/vendor/github.com/pjbgf/sha1cd/sha1cd.go index a69e480ee..509569f66 100644 --- a/vendor/github.com/pjbgf/sha1cd/sha1cd.go +++ b/vendor/github.com/pjbgf/sha1cd/sha1cd.go @@ -20,6 +20,8 @@ import ( shared "github.com/pjbgf/sha1cd/internal" ) +//go:generate go run -C asm . -out ../sha1cdblock_amd64.s -pkg $GOPACKAGE + func init() { crypto.RegisterHash(crypto.SHA1, New) } diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s index 86f9821ca..e5e213a52 100644 --- a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s +++ b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s @@ -1,7 +1,6 @@ -// Code generated by command: go run sha1cdblock_amd64_asm.go -out sha1cdblock_amd64.s. DO NOT EDIT. 
+// Code generated by command: go run asm.go -out ../sha1cdblock_amd64.s -pkg sha1cd. DO NOT EDIT. //go:build !noasm && gc && amd64 -// +build !noasm,gc,amd64 #include "textflag.h" diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/doc.go b/vendor/github.com/pjbgf/sha1cd/ubc/ubc.go similarity index 68% rename from vendor/github.com/pjbgf/sha1cd/ubc/doc.go rename to vendor/github.com/pjbgf/sha1cd/ubc/ubc.go index 0090e36b9..b0b4d76e3 100644 --- a/vendor/github.com/pjbgf/sha1cd/ubc/doc.go +++ b/vendor/github.com/pjbgf/sha1cd/ubc/ubc.go @@ -1,3 +1,5 @@ // ubc package provides ways for SHA1 blocks to be checked for // Unavoidable Bit Conditions that arise from crypto analysis attacks. package ubc + +//go:generate go run -C asm . -out ../ubc_amd64.s -pkg $GOPACKAGE diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.go b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.go new file mode 100644 index 000000000..09159bb5b --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.go @@ -0,0 +1,14 @@ +//go:build !noasm && gc && amd64 +// +build !noasm,gc,amd64 + +package ubc + +func CalculateDvMaskAMD64(W [80]uint32) uint32 + +// Check takes as input an expanded message block and verifies the unavoidable bitconditions +// for all listed DVs. It returns a dvmask where each bit belonging to a DV is set if all +// unavoidable bitconditions for that DV have been met. +// Thus, one needs to do the recompression check for each DV that has its bit set. +func CalculateDvMask(W [80]uint32) uint32 { + return CalculateDvMaskAMD64(W) +} diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.s b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.s new file mode 100644 index 000000000..c77ea77ec --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.s @@ -0,0 +1,1897 @@ +// Code generated by command: go run asm.go -out ../ubc_amd64.s -pkg ubc. DO NOT EDIT. 
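CalculateDvMask, declared in ubc_amd64.go just above, expects the fully expanded 80-word message schedule, so a caller runs the standard SHA-1 expansion on the 16-word block first. A minimal sketch of driving it, assuming the vendored github.com/pjbgf/sha1cd/ubc package and an all-zero block purely to exercise the call:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"

	"github.com/pjbgf/sha1cd/ubc"
)

func main() {
	block := make([]byte, 64) // one 512-bit message block (all zero, illustration only)
	var W [80]uint32
	for i := 0; i < 16; i++ {
		W[i] = binary.BigEndian.Uint32(block[i*4:])
	}
	// Standard SHA-1 message expansion: W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]).
	for i := 16; i < 80; i++ {
		W[i] = bits.RotateLeft32(W[i-3]^W[i-8]^W[i-14]^W[i-16], 1)
	}
	mask := ubc.CalculateDvMask(W)
	fmt.Printf("dvmask: %#08x\n", mask) // each set bit selects a DV that still needs the recompression check
}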
+ +//go:build !noasm && gc && amd64 + +#include "textflag.h" + +// func CalculateDvMaskAMD64(W [80]uint32) uint32 +TEXT ·CalculateDvMaskAMD64(SB), NOSPLIT, $0-324 + MOVL $0xffffffff, AX + + // (((((W[44] ^ W[45]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_I_51_0_bit | DV_I_52_0_bit | DV_II_45_0_bit | DV_II_46_0_bit | DV_II_50_0_bit | DV_II_51_0_bit)) + MOVL W_44+176(FP), CX + MOVL W_45+180(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xfd7c5f7f, CX + ANDL CX, AX + + // mask &= (((((W[49] ^ W[50]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_II_45_0_bit | DV_II_50_0_bit | DV_II_51_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) + MOVL W_49+196(FP), CX + MOVL W_50+200(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x3d7efff7, CX + ANDL CX, AX + + // mask &= (((((W[48] ^ W[49]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_52_0_bit | DV_II_49_0_bit | DV_II_50_0_bit | DV_II_54_0_bit | DV_II_55_0_bit)) + MOVL W_48+192(FP), CX + MOVL W_49+196(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x9f5f7ffb, CX + ANDL CX, AX + + // mask &= ((((W[47] ^ (W[50] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) + MOVL W_47+188(FP), CX + MOVL W_50+200(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0x7dfedddf, CX + ANDL CX, AX + + // mask &= (((((W[47] ^ W[48]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_51_0_bit | DV_II_48_0_bit | DV_II_49_0_bit | DV_II_53_0_bit | DV_II_54_0_bit)) + MOVL W_47+188(FP), CX + MOVL W_48+192(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xcfcfdffd, CX + ANDL CX, AX + + // mask &= (((((W[46] >> 4) ^ (W[49] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_50_0_bit | DV_II_55_0_bit)) + MOVL W_46+184(FP), CX + SHRL $0x04, CX + MOVL W_49+196(FP), DX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xbf7f7777, CX + ANDL CX, AX + + // mask &= (((((W[46] ^ W[47]) >> 29) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_50_0_bit | DV_II_47_0_bit | DV_II_48_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) + MOVL W_46+184(FP), CX + MOVL W_47+188(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xe7e7f7fe, CX + ANDL CX, AX + + // mask &= (((((W[45] >> 4) ^ (W[48] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_49_0_bit | DV_II_54_0_bit)) + MOVL W_45+180(FP), CX + SHRL $0x04, CX + MOVL W_48+192(FP), DX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xdfdfdddb, CX + ANDL CX, AX + + // mask &= (((((W[45] ^ W[46]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_51_0_bit | DV_II_52_0_bit)) + MOVL W_45+180(FP), CX + MOVL W_46+184(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xf5f57dff, CX + ANDL CX, AX + + // mask &= (((((W[44] >> 4) ^ (W[47] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_53_0_bit)) + MOVL W_44+176(FP), CX + SHRL $0x04, CX + MOVL W_47+188(FP), DX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xefeff775, CX + ANDL CX, AX + + // mask &= (((((W[43] >> 4) ^ (W[46] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_52_0_bit)) + MOVL W_43+172(FP), CX + SHRL $0x04, CX + MOVL W_46+184(FP), DX + 
SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xf7f7fdda, CX + ANDL CX, AX + + // mask &= (((((W[43] ^ W[44]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_I_50_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_49_0_bit | DV_II_50_0_bit)) + MOVL W_43+172(FP), CX + MOVL W_44+176(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xff5ed7df, CX + ANDL CX, AX + + // mask &= (((((W[42] >> 4) ^ (W[45] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_51_0_bit)) + MOVL W_42+168(FP), CX + SHRL $0x04, CX + MOVL W_45+180(FP), DX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xfdfd7f75, CX + ANDL CX, AX + + // mask &= (((((W[41] >> 4) ^ (W[44] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_50_0_bit)) + MOVL W_41+164(FP), CX + SHRL $0x04, CX + MOVL W_44+176(FP), DX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xff7edfda, CX + ANDL CX, AX + + // mask &= (((((W[40] ^ W[41]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_47_0_bit | DV_I_48_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_56_0_bit)) + MOVL W_40+160(FP), CX + MOVL W_41+164(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x7ff5ff5d, CX + ANDL CX, AX + + // mask &= (((((W[54] ^ W[55]) >> 29) & 1) - 1) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_50_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) + MOVL W_54+216(FP), CX + MOVL W_55+220(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x3f77dfff, CX + ANDL CX, AX + + // mask &= (((((W[53] ^ W[54]) >> 29) & 1) - 1) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_49_0_bit | DV_II_54_0_bit | DV_II_55_0_bit)) + MOVL W_53+212(FP), CX + MOVL W_54+216(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x9fddf7ff, CX + ANDL CX, AX + + // mask &= (((((W[52] ^ W[53]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit | DV_II_53_0_bit | DV_II_54_0_bit)) + MOVL W_52+208(FP), CX + MOVL W_53+212(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xcfeefdff, CX + ANDL CX, AX + + // mask &= ((((W[50] ^ (W[53] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_48_0_bit | DV_II_54_0_bit)) + MOVL W_50+200(FP), CX + MOVL W_53+212(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xdfed77ff, CX + ANDL CX, AX + + // mask &= (((((W[50] ^ W[51]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_II_46_0_bit | DV_II_51_0_bit | DV_II_52_0_bit | DV_II_56_0_bit)) + MOVL W_50+200(FP), CX + MOVL W_51+204(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x75fdffdf, CX + ANDL CX, AX + + // mask &= ((((W[49] ^ (W[52] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_47_0_bit | DV_II_53_0_bit)) + MOVL W_49+196(FP), CX + MOVL W_52+208(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xeff6ddff, CX + ANDL CX, AX + + // mask &= ((((W[48] ^ (W[51] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_52_0_bit)) + MOVL W_48+192(FP), CX + MOVL W_51+204(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xf7fd777f, CX + ANDL CX, AX + + // mask &= (((((W[42] ^ W[43]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_I_50_0_bit | 
DV_II_48_0_bit | DV_II_49_0_bit)) + MOVL W_42+168(FP), CX + MOVL W_43+172(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xffcff5f7, CX + ANDL CX, AX + + // mask &= (((((W[41] ^ W[42]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_48_0_bit)) + MOVL W_41+164(FP), CX + MOVL W_42+168(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xffe7fd7b, CX + ANDL CX, AX + + // mask &= (((((W[40] >> 4) ^ (W[43] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_50_0_bit | DV_II_49_0_bit | DV_II_56_0_bit)) + MOVL W_40+160(FP), CX + MOVL W_43+172(FP), DX + SHRL $0x04, CX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x7fdff7f5, CX + ANDL CX, AX + + // mask &= (((((W[39] >> 4) ^ (W[42] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_49_0_bit | DV_II_48_0_bit | DV_II_55_0_bit)) + MOVL W_39+156(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x04, CX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xbfeffdfa, CX + ANDL CX, AX + + // if (mask & (DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 { + // mask &= (((((W[38] >> 4) ^ (W[41] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) + // } + TESTL $0xa0080082, AX + JE f1 + MOVL W_38+152(FP), CX + MOVL W_41+164(FP), DX + SHRL $0x04, CX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x5ff7ff7d, CX + ANDL CX, AX + +f1: + // mask &= (((((W[37] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_47_0_bit | DV_II_46_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) + MOVL W_37+148(FP), CX + MOVL W_40+160(FP), DX + SHRL $0x04, CX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xaffdffde, CX + ANDL CX, AX + + // if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) != 0 { + // mask &= (((((W[55] ^ W[56]) >> 29) & 1) - 1) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) + // } + TESTL $0x82108000, AX + JE f2 + MOVL W_55+220(FP), CX + MOVL W_56+224(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x7def7fff, CX + ANDL CX, AX + +f2: + // if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) != 0 { + // mask &= ((((W[52] ^ (W[55] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) + // } + TESTL $0x80908000, AX + JE f3 + MOVL W_52+208(FP), CX + MOVL W_55+220(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0x7f6f7fff, CX + ANDL CX, AX + +f3: + // if (mask & (DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) != 0 { + // mask &= ((((W[51] ^ (W[54] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) + // } + TESTL $0x40282000, AX + JE f4 + MOVL W_51+204(FP), CX + MOVL W_54+216(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xbfd7dfff, CX + ANDL CX, AX + +f4: + // if (mask & (DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) != 0 { + // mask &= (((((W[51] ^ W[52]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) + // } + TESTL $0x18080080, AX + JE f5 + MOVL W_51+204(FP), CX + MOVL W_52+208(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xe7f7ff7f, CX + ANDL CX, AX + +f5: + // if (mask 
& (DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) != 0 { + // mask &= (((((W[36] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) + // } + TESTL $0x00110208, AX + JE f6 + MOVL W_36+144(FP), CX + SHRL $0x04, CX + MOVL W_40+160(FP), DX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xffeefdf7, CX + ANDL CX, AX + +f6: + // if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) != 0 { + // mask &= ((0 - (((W[53] ^ W[56]) >> 29) & 1)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) + // } + TESTL $0x00308000, AX + JE f7 + MOVL W_53+212(FP), CX + MOVL W_56+224(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xffcf7fff, CX + ANDL CX, AX + +f7: + // if (mask & (DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) != 0 { + // mask &= ((0 - (((W[51] ^ W[54]) >> 29) & 1)) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) + // } + TESTL $0x000a0800, AX + JE f8 + MOVL W_51+204(FP), CX + MOVL W_54+216(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xfff5f7ff, CX + ANDL CX, AX + +f8: + // if (mask & (DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) != 0 { + // mask &= ((0 - (((W[50] ^ W[52]) >> 29) & 1)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) + // } + TESTL $0x00012200, AX + JE f9 + MOVL W_50+200(FP), CX + MOVL W_52+208(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xfffeddff, CX + ANDL CX, AX + +f9: + // if (mask & (DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) != 0 { + // mask &= ((0 - (((W[49] ^ W[51]) >> 29) & 1)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) + // } + TESTL $0x00008880, AX + JE f10 + MOVL W_49+196(FP), CX + MOVL W_51+204(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xffff777f, CX + ANDL CX, AX + +f10: + // if (mask & (DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) != 0 { + // mask &= ((0 - (((W[48] ^ W[50]) >> 29) & 1)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) + // } + TESTL $0x00002220, AX + JE f11 + MOVL W_48+192(FP), CX + MOVL W_50+200(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xffffdddf, CX + ANDL CX, AX + +f11: + // if (mask & (DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) != 0 { + // mask &= ((0 - (((W[47] ^ W[49]) >> 29) & 1)) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) + // } + TESTL $0x00000888, AX + JE f12 + MOVL W_47+188(FP), CX + MOVL W_49+196(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xfffff777, CX + ANDL CX, AX + +f12: + // if (mask & (DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) != 0 { + // mask &= ((0 - (((W[46] ^ W[48]) >> 29) & 1)) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) + // } + TESTL $0x00000224, AX + JE f13 + MOVL W_46+184(FP), CX + MOVL W_48+192(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xfffffddb, CX + ANDL CX, AX + +f13: + // mask &= ((((W[45] ^ W[47]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit | DV_I_51_2_bit)) + MOVL W_45+180(FP), CX + MOVL W_47+188(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + SUBL $0x00000040, CX + ORL $0xffffbbbf, CX + ANDL CX, AX + + // if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) != 0 { + // mask &= ((0 - (((W[45] ^ W[47]) >> 29) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) + // } + TESTL $0x0000008a, AX + JE f14 + MOVL W_45+180(FP), CX + MOVL W_47+188(FP), DX + XORL DX, CX + SHRL $0x1d, CX 
+ ANDL $0x00000001, CX + NEGL CX + ORL $0xffffff75, CX + ANDL CX, AX + +f14: + // mask &= (((((W[44] ^ W[46]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit | DV_I_50_2_bit)) + MOVL W_44+176(FP), CX + MOVL W_46+184(FP), DX + XORL DX, CX + SHRL $0x06, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xffffeeef, CX + ANDL CX, AX + + // if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) != 0 { + // mask &= ((0 - (((W[44] ^ W[46]) >> 29) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) + // } + TESTL $0x00000025, AX + JE f15 + MOVL W_44+176(FP), CX + MOVL W_46+184(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xffffffda, CX + ANDL CX, AX + +f15: + // mask &= ((0 - ((W[41] ^ (W[42] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_II_46_2_bit | DV_II_51_2_bit)) + MOVL W_41+164(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xfbfbfeff, CX + ANDL CX, AX + + // mask &= ((0 - ((W[40] ^ (W[41] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_51_2_bit | DV_II_50_2_bit)) + MOVL W_40+160(FP), CX + MOVL W_41+164(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xfeffbfbf, CX + ANDL CX, AX + + // if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) != 0 { + // mask &= ((0 - (((W[40] ^ W[42]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) + // } + TESTL $0x8000000a, AX + JE f16 + MOVL W_40+160(FP), CX + MOVL W_42+168(FP), DX + XORL DX, CX + SHRL $0x04, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0x7ffffff5, CX + ANDL CX, AX + +f16: + // mask &= ((0 - ((W[39] ^ (W[40] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_50_2_bit | DV_II_49_2_bit)) + MOVL W_39+156(FP), CX + MOVL W_40+160(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xffbfefef, CX + ANDL CX, AX + + // if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) != 0 { + // mask &= ((0 - (((W[39] ^ W[41]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) + // } + TESTL $0x40000005, AX + JE f17 + MOVL W_39+156(FP), CX + MOVL W_41+164(FP), DX + XORL DX, CX + SHRL $0x04, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xbffffffa, CX + ANDL CX, AX + +f17: + // if (mask & (DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 { + // mask &= ((0 - (((W[38] ^ W[40]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) + // } + TESTL $0xa0000002, AX + JE f18 + MOVL W_38+152(FP), CX + MOVL W_40+160(FP), DX + XORL DX, CX + SHRL $0x04, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0x5ffffffd, CX + ANDL CX, AX + +f18: + // if (mask & (DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) != 0 { + // mask &= ((0 - (((W[37] ^ W[39]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) + // } + TESTL $0x50000001, AX + JE f19 + MOVL W_37+148(FP), CX + MOVL W_39+156(FP), DX + XORL DX, CX + SHRL $0x04, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xaffffffe, CX + ANDL CX, AX + +f19: + // mask &= ((0 - ((W[36] ^ (W[37] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_50_2_bit | DV_II_46_2_bit)) + MOVL W_36+144(FP), CX + MOVL W_37+148(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xfffbefbf, CX + ANDL CX, AX + + // if (mask & (DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) != 0 { + // mask &= (((((W[35] >> 4) ^ (W[39] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) + // } + TESTL $0x00080084, AX + JE f20 + MOVL W_35+140(FP), CX + MOVL W_39+156(FP), DX + SHRL $0x04, CX + SHRL $0x1d, DX + XORL DX, CX + ANDL 
$0x00000001, CX + SUBL $0x00000001, CX + ORL $0xfff7ff7b, CX + ANDL CX, AX + +f20: + // if (mask & (DV_I_48_0_bit | DV_II_48_0_bit)) != 0 { + // mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 0))) | ^(DV_I_48_0_bit | DV_II_48_0_bit)) + // } + TESTL $0x00100080, AX + JE f21 + MOVL W_63+252(FP), CX + MOVL W_64+256(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xffefff7f, CX + ANDL CX, AX + +f21: + // if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 { + // mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 1))) | ^(DV_I_45_0_bit | DV_II_45_0_bit)) + // } + TESTL $0x00010004, AX + JE f22 + MOVL W_63+252(FP), CX + MOVL W_64+256(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xfffefffb, CX + ANDL CX, AX + +f22: + // if (mask & (DV_I_47_0_bit | DV_II_47_0_bit)) != 0 { + // mask &= ((0 - ((W[62] ^ (W[63] >> 5)) & (1 << 0))) | ^(DV_I_47_0_bit | DV_II_47_0_bit)) + // } + TESTL $0x00080020, AX + JE f23 + MOVL W_62+248(FP), CX + MOVL W_63+252(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xfff7ffdf, CX + ANDL CX, AX + +f23: + // if (mask & (DV_I_46_0_bit | DV_II_46_0_bit)) != 0 { + // mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 0))) | ^(DV_I_46_0_bit | DV_II_46_0_bit)) + // } + TESTL $0x00020008, AX + JE f24 + MOVL W_61+244(FP), CX + MOVL W_62+248(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xfffdfff7, CX + ANDL CX, AX + +f24: + // mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 2))) | ^(DV_I_46_2_bit | DV_II_46_2_bit)) + MOVL W_61+244(FP), CX + MOVL W_62+248(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000004, CX + NEGL CX + ORL $0xfffbffef, CX + ANDL CX, AX + + // if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 { + // mask &= ((0 - ((W[60] ^ (W[61] >> 5)) & (1 << 0))) | ^(DV_I_45_0_bit | DV_II_45_0_bit)) + // } + TESTL $0x00010004, AX + JE f25 + MOVL W_60+240(FP), CX + MOVL W_61+244(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xfffefffb, CX + ANDL CX, AX + +f25: + // if (mask & (DV_II_51_0_bit | DV_II_54_0_bit)) != 0 { + // mask &= (((((W[58] ^ W[59]) >> 29) & 1) - 1) | ^(DV_II_51_0_bit | DV_II_54_0_bit)) + // } + TESTL $0x22000000, AX + JE f26 + MOVL W_58+232(FP), CX + MOVL W_59+236(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + SUBL $0x00000001, CX + ORL $0xddffffff, CX + ANDL CX, AX + +f26: + // if (mask & (DV_II_50_0_bit | DV_II_53_0_bit)) != 0 { + // mask &= (((((W[57] ^ W[58]) >> 29) & 1) - 1) | ^(DV_II_50_0_bit | DV_II_53_0_bit)) + // } + TESTL $0x10800000, AX + JE f27 + MOVL W_57+228(FP), CX + MOVL W_58+232(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + SUBL $0x00000001, CX + ORL $0xef7fffff, CX + ANDL CX, AX + +f27: + // if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 { + // mask &= ((((W[56] ^ (W[59] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_52_0_bit | DV_II_54_0_bit)) + // } + TESTL $0x28000000, AX + JE f28 + MOVL W_56+224(FP), CX + MOVL W_59+236(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xd7ffffff, CX + ANDL CX, AX + +f28: + // if (mask & (DV_II_51_0_bit | DV_II_52_0_bit)) != 0 { + // mask &= ((0 - (((W[56] ^ W[59]) >> 29) & 1)) | ^(DV_II_51_0_bit | DV_II_52_0_bit)) + // } + TESTL $0x0a000000, AX + JE f29 + MOVL W_56+224(FP), CX + MOVL W_59+236(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xf5ffffff, CX + ANDL CX, AX + +f29: + // if (mask & (DV_II_49_0_bit | DV_II_52_0_bit)) != 0 { + // mask &= 
(((((W[56] ^ W[57]) >> 29) & 1) - 1) | ^(DV_II_49_0_bit | DV_II_52_0_bit)) + // } + TESTL $0x08200000, AX + JE f30 + MOVL W_56+224(FP), CX + MOVL W_57+228(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + SUBL $0x00000001, CX + ORL $0xf7dfffff, CX + ANDL CX, AX + +f30: + // if (mask & (DV_II_51_0_bit | DV_II_53_0_bit)) != 0 { + // mask &= ((((W[55] ^ (W[58] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_51_0_bit | DV_II_53_0_bit)) + // } + TESTL $0x12000000, AX + JE f31 + MOVL W_55+220(FP), CX + MOVL W_58+232(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xedffffff, CX + ANDL CX, AX + +f31: + // if (mask & (DV_II_50_0_bit | DV_II_52_0_bit)) != 0 { + // mask &= ((((W[54] ^ (W[57] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_50_0_bit | DV_II_52_0_bit)) + // } + TESTL $0x08800000, AX + JE f32 + MOVL W_54+216(FP), CX + MOVL W_57+228(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xf77fffff, CX + ANDL CX, AX + +f32: + // if (mask & (DV_II_49_0_bit | DV_II_51_0_bit)) != 0 { + // mask &= ((((W[53] ^ (W[56] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_49_0_bit | DV_II_51_0_bit)) + // } + TESTL $0x02200000, AX + JE f33 + MOVL W_53+212(FP), CX + MOVL W_56+224(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xfddfffff, CX + ANDL CX, AX + +f33: + // mask &= ((((W[51] ^ (W[50] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_46_2_bit)) + MOVL W_51+204(FP), CX + MOVL W_50+200(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + SUBL $0x00000002, CX + ORL $0xfffbefff, CX + ANDL CX, AX + + // mask &= ((((W[48] ^ W[50]) & (1 << 6)) - (1 << 6)) | ^(DV_I_50_2_bit | DV_II_46_2_bit)) + MOVL W_48+192(FP), CX + MOVL W_50+200(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + SUBL $0x00000040, CX + ORL $0xfffbefff, CX + ANDL CX, AX + + // if (mask & (DV_I_51_0_bit | DV_I_52_0_bit)) != 0 { + // mask &= ((0 - (((W[48] ^ W[55]) >> 29) & 1)) | ^(DV_I_51_0_bit | DV_I_52_0_bit)) + // } + TESTL $0x0000a000, AX + JE f34 + MOVL W_48+192(FP), CX + MOVL W_55+220(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xffff5fff, CX + ANDL CX, AX + +f34: + // mask &= ((((W[47] ^ W[49]) & (1 << 6)) - (1 << 6)) | ^(DV_I_49_2_bit | DV_I_51_2_bit)) + MOVL W_47+188(FP), CX + MOVL W_49+196(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + SUBL $0x00000040, CX + ORL $0xffffbbff, CX + ANDL CX, AX + + // mask &= ((((W[48] ^ (W[47] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_47_2_bit | DV_II_51_2_bit)) + MOVL W_48+192(FP), CX + MOVL W_47+188(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + SUBL $0x00000002, CX + ORL $0xfbffffbf, CX + ANDL CX, AX + + // mask &= ((((W[46] ^ W[48]) & (1 << 6)) - (1 << 6)) | ^(DV_I_48_2_bit | DV_I_50_2_bit)) + MOVL W_46+184(FP), CX + MOVL W_48+192(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + SUBL $0x00000040, CX + ORL $0xffffeeff, CX + ANDL CX, AX + + // mask &= ((((W[47] ^ (W[46] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_46_2_bit | DV_II_50_2_bit)) + MOVL W_47+188(FP), CX + MOVL W_46+184(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + SUBL $0x00000002, CX + ORL $0xfeffffef, CX + ANDL CX, AX + + // mask &= ((0 - ((W[44] ^ (W[45] >> 5)) & (1 << 1))) | ^(DV_I_51_2_bit | DV_II_49_2_bit)) + MOVL W_44+176(FP), CX + MOVL W_45+180(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xffbfbfff, CX + ANDL CX, AX + + // mask &= ((((W[43] ^ W[45]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | 
DV_I_49_2_bit)) + MOVL W_43+172(FP), CX + MOVL W_45+180(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + SUBL $0x00000040, CX + ORL $0xfffffbbf, CX + ANDL CX, AX + + // mask &= (((((W[42] ^ W[44]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit)) + MOVL W_42+168(FP), CX + MOVL W_44+176(FP), DX + XORL DX, CX + SHRL $0x06, CX + ANDL $0x00000001, CX + SUBL $0x00000001, CX + ORL $0xfffffeef, CX + ANDL CX, AX + + // mask &= ((((W[43] ^ (W[42] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_II_46_2_bit | DV_II_51_2_bit)) + MOVL W_43+172(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + SUBL $0x00000002, CX + ORL $0xfbfbffff, CX + ANDL CX, AX + + // mask &= ((((W[42] ^ (W[41] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_51_2_bit | DV_II_50_2_bit)) + MOVL W_42+168(FP), CX + MOVL W_41+164(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + SUBL $0x00000002, CX + ORL $0xfeffbfff, CX + ANDL CX, AX + + // mask &= ((((W[41] ^ (W[40] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_49_2_bit)) + MOVL W_41+164(FP), CX + MOVL W_40+160(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + SUBL $0x00000002, CX + ORL $0xffbfefff, CX + ANDL CX, AX + + // if (mask & (DV_I_52_0_bit | DV_II_51_0_bit)) != 0 { + // mask &= ((((W[39] ^ (W[43] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_51_0_bit)) + // } + TESTL $0x02008000, AX + JE f35 + MOVL W_39+156(FP), CX + MOVL W_43+172(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xfdff7fff, CX + ANDL CX, AX + +f35: + // if (mask & (DV_I_51_0_bit | DV_II_50_0_bit)) != 0 { + // mask &= ((((W[38] ^ (W[42] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_50_0_bit)) + // } + TESTL $0x00802000, AX + JE f36 + MOVL W_38+152(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xff7fdfff, CX + ANDL CX, AX + +f36: + // if (mask & (DV_I_48_2_bit | DV_I_51_2_bit)) != 0 { + // mask &= ((0 - ((W[37] ^ (W[38] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_I_51_2_bit)) + // } + TESTL $0x00004100, AX + JE f37 + MOVL W_37+148(FP), CX + MOVL W_38+152(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xffffbeff, CX + ANDL CX, AX + +f37: + // if (mask & (DV_I_50_0_bit | DV_II_49_0_bit)) != 0 { + // mask &= ((((W[37] ^ (W[41] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_II_49_0_bit)) + // } + TESTL $0x00200800, AX + JE f38 + MOVL W_37+148(FP), CX + MOVL W_41+164(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xffdff7ff, CX + ANDL CX, AX + +f38: + // if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 { + // mask &= ((0 - ((W[36] ^ W[38]) & (1 << 4))) | ^(DV_II_52_0_bit | DV_II_54_0_bit)) + // } + TESTL $0x28000000, AX + JE f39 + MOVL W_36+144(FP), CX + MOVL W_38+152(FP), DX + XORL DX, CX + ANDL $0x00000010, CX + NEGL CX + ORL $0xd7ffffff, CX + ANDL CX, AX + +f39: + // mask &= ((0 - ((W[35] ^ (W[36] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_49_2_bit)) + MOVL W_35+140(FP), CX + MOVL W_36+144(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xfffffbef, CX + ANDL CX, AX + + // if (mask & (DV_I_51_0_bit | DV_II_47_0_bit)) != 0 { + // mask &= ((((W[35] ^ (W[39] >> 25)) & (1 << 3)) - (1 << 3)) | ^(DV_I_51_0_bit | DV_II_47_0_bit)) + // } + TESTL $0x00082000, AX + JE f40 + MOVL W_35+140(FP), CX + MOVL W_39+156(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + SUBL $0x00000008, CX + ORL $0xfff7dfff, 
CX + ANDL CX, AX + +f40: + // if mask != 0 + TESTL $0x00000000, AX + JNE end + + // if (mask & DV_I_43_0_bit) != 0 { + // if not((W[61]^(W[62]>>5))&(1<<1)) != 0 || + // not(not((W[59]^(W[63]>>25))&(1<<5))) != 0 || + // not((W[58]^(W[63]>>30))&(1<<0)) != 0 { + // mask &= ^DV_I_43_0_bit + // } + // } + BTL $0x00, AX + JNC f41_skip + MOVL W_61+244(FP), CX + MOVL W_62+248(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + CMPL CX, $0x00000000 + JE f41_in + MOVL W_59+236(FP), CX + MOVL W_63+252(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000020, CX + CMPL CX, $0x00000000 + JNE f41_in + MOVL W_58+232(FP), CX + MOVL W_63+252(FP), DX + SHRL $0x1e, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + CMPL CX, $0x00000000 + JE f41_in + JMP f41_skip + +f41_in: + ANDL $0xfffffffe, AX + +f41_skip: + // if (mask & DV_I_44_0_bit) != 0 { + // if not((W[62]^(W[63]>>5))&(1<<1)) != 0 || + // not(not((W[60]^(W[64]>>25))&(1<<5))) != 0 || + // not((W[59]^(W[64]>>30))&(1<<0)) != 0 { + // mask &= ^DV_I_44_0_bit + // } + // } + BTL $0x01, AX + JNC f42_skip + MOVL W_62+248(FP), CX + MOVL W_63+252(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + CMPL CX, $0x00000000 + JE f42_in + MOVL W_60+240(FP), CX + MOVL W_64+256(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000020, CX + CMPL CX, $0x00000000 + JNE f42_in + MOVL W_59+236(FP), CX + MOVL W_64+256(FP), DX + SHRL $0x1e, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + CMPL CX, $0x00000000 + JE f42_in + JMP f42_skip + +f42_in: + ANDL $0xfffffffd, AX + +f42_skip: + // if (mask & DV_I_46_2_bit) != 0 { + // mask &= ((^((W[40] ^ W[42]) >> 2)) | ^DV_I_46_2_bit) + // } + BTL $0x04, AX + JNC f43 + MOVL W_40+160(FP), CX + MOVL W_42+168(FP), DX + XORL DX, CX + SHRL $0x02, CX + NOTL CX + ORL $0xffffffef, CX + ANDL CX, AX + +f43: + // if (mask & DV_I_47_2_bit) != 0 { + // if not((W[62]^(W[63]>>5))&(1<<2)) != 0 || + // not(not((W[41]^W[43])&(1<<6))) != 0 { + // mask &= ^DV_I_47_2_bit + // } + // } + BTL $0x06, AX + JNC f44_skip + MOVL W_62+248(FP), CX + MOVL W_63+252(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000004, CX + NEGL CX + CMPL CX, $0x00000000 + JE f44_in + MOVL W_41+164(FP), CX + MOVL W_43+172(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f44_in + JMP f44_skip + +f44_in: + ANDL $0xffffffbf, AX + +f44_skip: + // if (mask & DV_I_48_2_bit) != 0 { + // if not((W[63]^(W[64]>>5))&(1<<2)) != 0 || + // not(not((W[48]^(W[49]<<5))&(1<<6))) != 0 { + // mask &= ^DV_I_48_2_bit + // } + // } + BTL $0x08, AX + JNC f45_skip + MOVL W_63+252(FP), CX + MOVL W_64+256(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000004, CX + NEGL CX + CMPL CX, $0x00000000 + JE f45_in + MOVL W_48+192(FP), CX + MOVL W_49+196(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f45_in + JMP f45_skip + +f45_in: + ANDL $0xfffffeff, AX + +f45_skip: + // if (mask & DV_I_49_2_bit) != 0 { + // if not(not((W[49]^(W[50]<<5))&(1<<6))) != 0 || + // not((W[42]^W[50])&(1<<1)) != 0 || + // not(not((W[39]^(W[40]<<5))&(1<<6))) != 0 || + // not((W[38]^W[40])&(1<<1)) != 0 { + // mask &= ^DV_I_49_2_bit + // } + // } + BTL $0x0a, AX + JNC f46_skip + MOVL W_49+196(FP), CX + MOVL W_50+200(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f46_in + MOVL W_42+168(FP), CX + MOVL W_50+200(FP), DX + XORL DX, CX + ANDL $0x00000002, CX + CMPL CX, $0x00000000 + JE f46_in + MOVL W_39+156(FP), CX + MOVL W_40+160(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL 
$0x00000040, CX + CMPL CX, $0x00000000 + JNE f46_in + MOVL W_38+152(FP), CX + MOVL W_40+160(FP), DX + XORL DX, CX + ANDL $0x00000002, CX + CMPL CX, $0x00000000 + JE f46_in + JMP f46_skip + +f46_in: + ANDL $0xfffffbff, AX + +f46_skip: + // if (mask & DV_I_50_0_bit) != 0 { + // mask &= (((W[36] ^ W[37]) << 7) | ^DV_I_50_0_bit) + // } + BTL $0x0b, AX + JNC f47 + MOVL W_36+144(FP), CX + MOVL W_37+148(FP), DX + XORL DX, CX + SHLL $0x07, CX + ORL $0xfffff7ff, CX + ANDL CX, AX + +f47: + // if (mask & DV_I_50_2_bit) != 0 { + // mask &= (((W[43] ^ W[51]) << 11) | ^DV_I_50_2_bit) + // } + BTL $0x0c, AX + JNC f48 + MOVL W_43+172(FP), CX + MOVL W_51+204(FP), DX + XORL DX, CX + SHLL $0x0b, CX + ORL $0xffffefff, CX + ANDL CX, AX + +f48: + // if (mask & DV_I_51_0_bit) != 0 { + // mask &= (((W[37] ^ W[38]) << 9) | ^DV_I_51_0_bit) + // } + BTL $0x0d, AX + JNC f49 + MOVL W_37+148(FP), CX + MOVL W_38+152(FP), DX + XORL DX, CX + SHLL $0x09, CX + ORL $0xffffdfff, CX + ANDL CX, AX + +f49: + // if (mask & DV_I_51_2_bit) != 0 { + // if not(not((W[51]^(W[52]<<5))&(1<<6))) != 0 || + // not(not((W[49]^W[51])&(1<<6))) != 0 || + // not(not((W[37]^(W[37]>>5))&(1<<1))) != 0 || + // not(not((W[35]^(W[39]>>25))&(1<<5))) != 0 { + // mask &= ^DV_I_51_2_bit + // } + // } + BTL $0x0e, AX + JNC f50_skip + MOVL W_51+204(FP), CX + MOVL W_52+208(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f50_in + MOVL W_49+196(FP), CX + MOVL W_51+204(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f50_in + MOVL W_37+148(FP), CX + MOVL W_37+148(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + CMPL CX, $0x00000000 + JNE f50_in + MOVL W_35+140(FP), CX + MOVL W_39+156(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000020, CX + CMPL CX, $0x00000000 + JNE f50_in + JMP f50_skip + +f50_in: + ANDL $0xffffbfff, AX + +f50_skip: + // if (mask & DV_I_52_0_bit) != 0 { + // mask &= (((W[38] ^ W[39]) << 11) | ^DV_I_52_0_bit) + // } + BTL $0x0f, AX + JNC f51 + MOVL W_38+152(FP), CX + MOVL W_39+156(FP), DX + XORL DX, CX + SHLL $0x0b, CX + ORL $0xffff7fff, CX + ANDL CX, AX + +f51: + // if (mask & DV_II_46_2_bit) != 0 { + // mask &= (((W[47] ^ W[51]) << 17) | ^DV_II_46_2_bit) + // } + TESTL $0x00040000, AX + BTL $0x12, AX + JNC f52 + MOVL W_47+188(FP), CX + MOVL W_51+204(FP), DX + XORL DX, CX + SHLL $0x11, CX + ORL $0xfffbffff, CX + ANDL CX, AX + +f52: + // if (mask & DV_II_48_0_bit) != 0 { + // if not(not((W[36]^(W[40]>>25))&(1<<3))) != 0 || + // not((W[35]^(W[40]<<2))&(1<<30)) != 0 { + // mask &= ^DV_II_48_0_bit + // } + // } + BTL $0x14, AX + JNC f53_skip + MOVL W_36+144(FP), CX + MOVL W_40+160(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f53_in + MOVL W_35+140(FP), CX + MOVL W_40+160(FP), DX + SHLL $0x02, DX + XORL DX, CX + ANDL $0x40000000, CX + CMPL CX, $0x00000000 + JNE f53_in + JMP f53_skip + +f53_in: + ANDL $0xffefffff, AX + +f53_skip: + // if (mask & DV_II_49_0_bit) != 0 { + // if not(not((W[37]^(W[41]>>25))&(1<<3))) != 0 || + // not((W[36]^(W[41]<<2))&(1<<30)) != 0 { + // mask &= ^DV_II_49_0_bit + // } + // } + BTL $0x15, AX + JNC f54_skip + MOVL W_37+148(FP), CX + MOVL W_41+164(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f54_in + MOVL W_36+144(FP), CX + MOVL W_41+164(FP), DX + SHLL $0x02, DX + XORL DX, CX + ANDL $0x40000000, CX + CMPL CX, $0x00000000 + JNE f54_in + JMP f54_skip + +f54_in: + ANDL $0xffdfffff, AX + +f54_skip: + // if (mask & DV_II_49_2_bit) != 0 { + // if 
not(not((W[53]^(W[54]<<5))&(1<<6))) != 0 || + // not(not((W[51]^W[53])&(1<<6))) != 0 || + // not((W[50]^W[54])&(1<<1)) != 0 || + // not(not((W[45]^(W[46]<<5))&(1<<6))) != 0 || + // not(not((W[37]^(W[41]>>25))&(1<<5))) != 0 || + // not((W[36]^(W[41]>>30))&(1<<0)) != 0 { + // mask &= ^DV_II_49_2_bit + // } + // } + BTL $0x16, AX + JNC f55_skip + MOVL W_53+212(FP), CX + MOVL W_54+216(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f55_in + MOVL W_51+204(FP), CX + MOVL W_53+212(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f55_in + MOVL W_50+200(FP), CX + MOVL W_54+216(FP), DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + CMPL CX, $0x00000000 + JE f55_in + MOVL W_45+180(FP), CX + MOVL W_46+184(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f55_in + MOVL W_37+148(FP), CX + MOVL W_41+164(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000020, CX + CMPL CX, $0x00000000 + JNE f55_in + MOVL W_36+144(FP), CX + MOVL W_41+164(FP), DX + SHRL $0x1e, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + CMPL CX, $0x00000000 + JE f55_in + JMP f55_skip + +f55_in: + ANDL $0xffbfffff, AX + +f55_skip: + // if (mask & DV_II_50_0_bit) != 0 { + // if not((W[55]^W[58])&(1<<29)) != 0 || + // not(not((W[38]^(W[42]>>25))&(1<<3))) != 0 || + // not((W[37]^(W[42]<<2))&(1<<30)) != 0 { + // mask &= ^DV_II_50_0_bit + // } + // } + BTL $0x17, AX + JNC f56_skip + MOVL W_55+220(FP), CX + MOVL W_58+232(FP), DX + XORL DX, CX + ANDL $0x20000000, CX + NEGL CX + CMPL CX, $0x00000000 + JE f56_in + MOVL W_38+152(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f56_in + MOVL W_37+148(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x02, DX + XORL DX, CX + ANDL $0x40000000, CX + NEGL CX + CMPL CX, $0x00000000 + JE f56_in + JMP f56_skip + +f56_in: + ANDL $0xff7fffff, AX + +f56_skip: + // if (mask & DV_II_50_2_bit) != 0 { + // if not(not((W[54]^(W[55]<<5))&(1<<6))) != 0 || + // not(not((W[52]^W[54])&(1<<6))) != 0 || + // not((W[51]^W[55])&(1<<1)) != 0 || + // not((W[45]^W[47])&(1<<1)) != 0 || + // not(not((W[38]^(W[42]>>25))&(1<<5))) != 0 || + // not((W[37]^(W[42]>>30))&(1<<0)) != 0 { + // mask &= ^DV_II_50_2_bit + // } + // } + BTL $0x18, AX + JNC f57_skip + MOVL W_54+216(FP), CX + MOVL W_55+220(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f57_in + MOVL W_52+208(FP), CX + MOVL W_54+216(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f57_in + MOVL W_51+204(FP), CX + MOVL W_55+220(FP), DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + CMPL CX, $0x00000000 + JE f57_in + MOVL W_45+180(FP), CX + MOVL W_47+188(FP), DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + CMPL CX, $0x00000000 + JE f57_in + MOVL W_38+152(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000020, CX + CMPL CX, $0x00000000 + JNE f57_in + MOVL W_37+148(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x1e, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + CMPL CX, $0x00000000 + JE f57_in + JMP f57_skip + +f57_in: + ANDL $0xfeffffff, AX + +f57_skip: + // if (mask & DV_II_51_0_bit) != 0 { + // if not(not((W[39]^(W[43]>>25))&(1<<3))) != 0 || + // not((W[38]^(W[43]<<2))&(1<<30)) != 0 { + // mask &= ^DV_II_51_0_bit + // } + // } + BTL $0x19, AX + JNC f58_skip + MOVL W_39+156(FP), CX + MOVL W_43+172(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f58_in + MOVL 
W_38+152(FP), CX + MOVL W_43+172(FP), DX + SHLL $0x02, DX + XORL DX, CX + ANDL $0x40000000, CX + NEGL CX + CMPL CX, $0x00000000 + JE f58_in + JMP f58_skip + +f58_in: + ANDL $0xfdffffff, AX + +f58_skip: + // if (mask & DV_II_51_2_bit) != 0 { + // if not(not((W[55]^(W[56]<<5))&(1<<6))) != 0 || + // not(not((W[53]^W[55])&(1<<6))) != 0 || + // not((W[52]^W[56])&(1<<1)) != 0 || + // not((W[46]^W[48])&(1<<1)) != 0 || + // not(not((W[39]^(W[43]>>25))&(1<<5))) != 0 || + // not((W[38]^(W[43]>>30))&(1<<0)) != 0 { + // mask &= ^DV_II_51_2_bit + // } + // } + BTL $0x1a, AX + JNC f59_skip + MOVL W_55+220(FP), CX + MOVL W_56+224(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f59_in + MOVL W_53+212(FP), CX + MOVL W_55+220(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f59_in + MOVL W_52+208(FP), CX + MOVL W_56+224(FP), DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + CMPL CX, $0x00000000 + JE f59_in + MOVL W_46+184(FP), CX + MOVL W_48+192(FP), DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + CMPL CX, $0x00000000 + JE f59_in + MOVL W_39+156(FP), CX + MOVL W_43+172(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000020, CX + CMPL CX, $0x00000000 + JNE f59_in + MOVL W_38+152(FP), CX + MOVL W_43+172(FP), DX + SHRL $0x1e, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + CMPL CX, $0x00000000 + JE f59_in + JMP f59_skip + +f59_in: + ANDL $0xfbffffff, AX + +f59_skip: + // if (mask & DV_II_52_0_bit) != 0 { + // if not(not((W[59]^W[60])&(1<<29))) != 0 || + // not(not((W[40]^(W[44]>>25))&(1<<3))) != 0 || + // not(not((W[40]^(W[44]>>25))&(1<<4))) != 0 || + // not((W[39]^(W[44]<<2))&(1<<30)) != 0 { + // mask &= ^DV_II_52_0_bit + // } + // } + BTL $0x1b, AX + JNC f60_skip + MOVL W_59+236(FP), CX + MOVL W_60+240(FP), DX + XORL DX, CX + ANDL $0x20000000, CX + CMPL CX, $0x00000000 + JNE f60_in + MOVL W_40+160(FP), CX + MOVL W_44+176(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f60_in + MOVL W_40+160(FP), CX + MOVL W_44+176(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f60_in + MOVL W_39+156(FP), CX + MOVL W_44+176(FP), DX + SHLL $0x02, DX + XORL DX, CX + ANDL $0x40000000, CX + NEGL CX + CMPL CX, $0x00000000 + JE f60_in + JMP f60_skip + +f60_in: + ANDL $0xf7ffffff, AX + +f60_skip: + // if (mask & DV_II_53_0_bit) != 0 { + // if not((W[58]^W[61])&(1<<29)) != 0 || + // not(not((W[57]^(W[61]>>25))&(1<<4))) != 0 || + // not(not((W[41]^(W[45]>>25))&(1<<3))) != 0 || + // not(not((W[41]^(W[45]>>25))&(1<<4))) != 0 { + // mask &= ^DV_II_53_0_bit + // } + // } + BTL $0x1c, AX + JNC f61_skip + MOVL W_58+232(FP), CX + MOVL W_61+244(FP), DX + XORL DX, CX + ANDL $0x20000000, CX + NEGL CX + CMPL CX, $0x00000000 + JE f61_in + MOVL W_57+228(FP), CX + MOVL W_61+244(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f61_in + MOVL W_41+164(FP), CX + MOVL W_45+180(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f61_in + MOVL W_41+164(FP), CX + MOVL W_45+180(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f61_in + JMP f61_skip + +f61_in: + ANDL $0xefffffff, AX + +f61_skip: + // if (mask & DV_II_54_0_bit) != 0 { + // if not(not((W[58]^(W[62]>>25))&(1<<4))) != 0 || + // not(not((W[42]^(W[46]>>25))&(1<<3))) != 0 || + // not(not((W[42]^(W[46]>>25))&(1<<4))) != 0 { + // mask &= ^DV_II_54_0_bit + // } + // } + BTL $0x1d, AX + JNC f62_skip + MOVL 
W_58+232(FP), CX + MOVL W_62+248(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f62_in + MOVL W_42+168(FP), CX + MOVL W_46+184(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f62_in + MOVL W_42+168(FP), CX + MOVL W_46+184(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f62_in + JMP f62_skip + +f62_in: + ANDL $0xdfffffff, AX + +f62_skip: + // if (mask & DV_II_55_0_bit) != 0 { + // if not(not((W[59]^(W[63]>>25))&(1<<4))) != 0 || + // not(not((W[57]^(W[59]>>25))&(1<<4))) != 0 || + // not(not((W[43]^(W[47]>>25))&(1<<3))) != 0 || + // not(not((W[43]^(W[47]>>25))&(1<<4))) != 0 { + // mask &= ^DV_II_55_0_bit + // } + // } + BTL $0x1e, AX + JNC f63_skip + MOVL W_59+236(FP), CX + MOVL W_63+252(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f63_in + MOVL W_57+228(FP), CX + MOVL W_59+236(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f63_in + MOVL W_43+172(FP), CX + MOVL W_47+188(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f63_in + MOVL W_43+172(FP), CX + MOVL W_47+188(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f63_in + JMP f63_skip + +f63_in: + ANDL $0xbfffffff, AX + +f63_skip: + // if (mask & DV_II_56_0_bit) != 0 { + // if not(not((W[60]^(W[64]>>25))&(1<<4))) != 0 || + // not(not((W[44]^(W[48]>>25))&(1<<3))) != 0 || + // not(not((W[44]^(W[48]>>25))&(1<<4))) != 0 { + // mask &= ^DV_II_56_0_bit + // } + // } + BTL $0x1f, AX + JNC f64_skip + MOVL W_60+240(FP), CX + MOVL W_64+256(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f64_in + MOVL W_44+176(FP), CX + MOVL W_48+192(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f64_in + MOVL W_44+176(FP), CX + MOVL W_48+192(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f64_in + JMP f64_skip + +f64_in: + ANDL $0x7fffffff, AX + +f64_skip: +end: + MOVL AX, ret+320(FP) + RET diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/check.go b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_generic.go similarity index 99% rename from vendor/github.com/pjbgf/sha1cd/ubc/check.go rename to vendor/github.com/pjbgf/sha1cd/ubc/ubc_generic.go index 167a5558f..ee95bd52d 100644 --- a/vendor/github.com/pjbgf/sha1cd/ubc/check.go +++ b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_generic.go @@ -25,7 +25,7 @@ type DvInfo struct { // for all listed DVs. It returns a dvmask where each bit belonging to a DV is set if all // unavoidable bitconditions for that DV have been met. // Thus, one needs to do the recompression check for each DV that has its bit set. 
-func CalculateDvMask(W [80]uint32) uint32 { +func CalculateDvMaskGeneric(W [80]uint32) uint32 { mask := uint32(0xFFFFFFFF) mask &= (((((W[44] ^ W[45]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_I_51_0_bit | DV_I_52_0_bit | DV_II_45_0_bit | DV_II_46_0_bit | DV_II_50_0_bit | DV_II_51_0_bit)) mask &= (((((W[49] ^ W[50]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_II_45_0_bit | DV_II_50_0_bit | DV_II_51_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_noasm.go b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_noasm.go new file mode 100644 index 000000000..48d6dff16 --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_noasm.go @@ -0,0 +1,12 @@ +//go:build !amd64 || noasm || !gc +// +build !amd64 noasm !gc + +package ubc + +// Check takes as input an expanded message block and verifies the unavoidable bitconditions +// for all listed DVs. It returns a dvmask where each bit belonging to a DV is set if all +// unavoidable bitconditions for that DV have been met. +// Thus, one needs to do the recompression check for each DV that has its bit set. +func CalculateDvMask(W [80]uint32) uint32 { + return CalculateDvMaskGeneric(W) +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go index 4f7b42488..915d5090d 100644 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -34,8 +34,6 @@ const ( DiffInsert Operation = 1 // DiffEqual item represents an equal diff. DiffEqual Operation = 0 - //IndexSeparator is used to seperate the array indexes in an index string - IndexSeparator = "," ) // Diff represents one diff operation @@ -406,14 +404,11 @@ func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { hydrated := make([]Diff, 0, len(diffs)) for _, aDiff := range diffs { - chars := strings.Split(aDiff.Text, IndexSeparator) - text := make([]string, len(chars)) + runes := []rune(aDiff.Text) + text := make([]string, len(runes)) - for i, r := range chars { - i1, err := strconv.Atoi(r) - if err == nil { - text[i] = lineArray[i1] - } + for i, r := range runes { + text[i] = lineArray[runeToInt(r)] } aDiff.Text = strings.Join(text, "") diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go index 44c435954..eb727bb59 100644 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -9,11 +9,16 @@ package diffmatchpatch import ( - "strconv" + "fmt" "strings" "unicode/utf8" ) +const UNICODE_INVALID_RANGE_START = 0xD800 +const UNICODE_INVALID_RANGE_END = 0xDFFF +const UNICODE_INVALID_RANGE_DELTA = UNICODE_INVALID_RANGE_END - UNICODE_INVALID_RANGE_START + 1 +const UNICODE_RANGE_MAX = 0x10FFFF + // unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. // In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. 
var unescaper = strings.NewReplacer( @@ -93,14 +98,93 @@ func intArrayToString(ns []uint32) string { return "" } - indexSeparator := IndexSeparator[0] - - // Appr. 3 chars per num plus the comma. - b := []byte{} + b := []rune{} for _, n := range ns { - b = strconv.AppendInt(b, int64(n), 10) - b = append(b, indexSeparator) + b = append(b, intToRune(n)) } - b = b[:len(b)-1] return string(b) } + +// These constants define the number of bits representable +// in 1,2,3,4 byte utf8 sequences, respectively. +const ONE_BYTE_BITS = 7 +const TWO_BYTE_BITS = 11 +const THREE_BYTE_BITS = 16 +const FOUR_BYTE_BITS = 21 + +// Helper for getting a sequence of bits from an integer. +func getBits(i uint32, cnt byte, from byte) byte { + return byte((i >> from) & ((1 << cnt) - 1)) +} + +// Converts an integer in the range 0~1112060 into a rune. +// Based on the ranges table in https://en.wikipedia.org/wiki/UTF-8 +func intToRune(i uint32) rune { + if i < (1 << ONE_BYTE_BITS) { + return rune(i) + } + + if i < (1 << TWO_BYTE_BITS) { + r, size := utf8.DecodeRune([]byte{0b11000000 | getBits(i, 5, 6), 0b10000000 | getBits(i, 6, 0)}) + if size != 2 || r == utf8.RuneError { + panic(fmt.Sprintf("Error encoding an int %d with size 2, got rune %v and size %d", size, r, i)) + } + return r + } + + // Last -3 here needed because for some reason 3rd to last codepoint 65533 in this range + // was returning utf8.RuneError during encoding. + if i < ((1 << THREE_BYTE_BITS) - UNICODE_INVALID_RANGE_DELTA - 3) { + if i >= UNICODE_INVALID_RANGE_START { + i += UNICODE_INVALID_RANGE_DELTA + } + + r, size := utf8.DecodeRune([]byte{0b11100000 | getBits(i, 4, 12), 0b10000000 | getBits(i, 6, 6), 0b10000000 | getBits(i, 6, 0)}) + if size != 3 || r == utf8.RuneError { + panic(fmt.Sprintf("Error encoding an int %d with size 3, got rune %v and size %d", size, r, i)) + } + return r + } + + if i < (1<= UNICODE_INVALID_RANGE_END { + return result - UNICODE_INVALID_RANGE_DELTA + } + + return result + } + + if size == 4 { + result := uint32(bytes[0]&0b111)<<18 | uint32(bytes[1]&0b111111)<<12 | uint32(bytes[2]&0b111111)<<6 | uint32(bytes[3]&0b111111) + return result - UNICODE_INVALID_RANGE_DELTA - 3 + } + + panic(fmt.Sprintf("Unexpected state decoding rune=%v size=%d", r, size)) +} diff --git a/vendor/github.com/skeema/knownhosts/CONTRIBUTING.md b/vendor/github.com/skeema/knownhosts/CONTRIBUTING.md new file mode 100644 index 000000000..9624f8276 --- /dev/null +++ b/vendor/github.com/skeema/knownhosts/CONTRIBUTING.md @@ -0,0 +1,36 @@ +# Contributing to skeema/knownhosts + +Thank you for your interest in contributing! This document provides guidelines for submitting pull requests. + +### Link to an issue + +Before starting the pull request process, initial discussion should take place on a GitHub issue first. For bug reports, the issue should track the open bug and confirm it is reproducible. For feature requests, the issue should cover why the feature is necessary. + +In the issue comments, discuss your suggested approach for a fix/implementation, and please wait to get feedback before opening a pull request. + +### Test coverage + +In general, please provide reasonably thorough test coverage. Whenever possible, your PR should aim to match or improve the overall test coverage percentage of the package. You can run tests and check coverage locally using `go test -cover`. We also have CI automation in GitHub Actions which will comment on each pull request with a coverage percentage. 
+ +That said, it is fine to submit an initial draft / work-in-progress PR without coverage, if you are waiting on implementation feedback before writing the tests. + +We intentionally avoid hard-coding SSH keys or known_hosts files into the test logic. Instead, the tests generate new keys and then use them to generate a known_hosts file, which is then cached/reused for that overall test run, in order to keep performance reasonable. + +### Documentation + +Exported types require doc comments. The linter CI step will catch this if missing. + +### Backwards compatibility + +Because this package is imported by [nearly 7000 repos on GitHub](https://github.com/skeema/knownhosts/network/dependents), we must be very strict about backwards compatibility of exported symbols and function signatures. + +Backwards compatibility can be very tricky in some situations. In this case, a maintainer may need to add additional commits to your branch to adjust the approach. Please do not take offense if this occurs; it is sometimes simply faster to implement a refactor on our end directly. When the PR/branch is merged, a merge commit will be used, to ensure your commits appear as-is in the repo history and are still properly credited to you. + +### Avoid rewriting core x/crypto/ssh/knownhosts logic + +skeema/knownhosts is intended to be a relatively thin *wrapper* around x/crypto/ssh/knownhosts, without duplicating or re-implementing the core known_hosts file parsing and host key handling logic. Importers of this package should be confident that it can be used as a nearly-drop-in replacement for x/crypto/ssh/knownhosts without introducing substantial risk, security flaws, parser differentials, or unexpected behavior changes. + +To solve shortcomings in x/crypto/ssh/knownhosts, we try to come up with workarounds that still utilize x/crypto/ssh/knownhosts functionality whenever possible. + +Some bugs in x/crypto/ssh/knownhosts do require re-reading the known_hosts file here to solve, but we make that *optional* by offering separate constructors/types with and without that behavior. + diff --git a/vendor/github.com/skeema/knownhosts/NOTICE b/vendor/github.com/skeema/knownhosts/NOTICE index 619a5a7ea..a92cb34d6 100644 --- a/vendor/github.com/skeema/knownhosts/NOTICE +++ b/vendor/github.com/skeema/knownhosts/NOTICE @@ -1,4 +1,4 @@ -Copyright 2023 Skeema LLC and the Skeema Knownhosts authors +Copyright 2024 Skeema LLC and the Skeema Knownhosts authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/skeema/knownhosts/README.md b/vendor/github.com/skeema/knownhosts/README.md index 85339bc03..046bc0edc 100644 --- a/vendor/github.com/skeema/knownhosts/README.md +++ b/vendor/github.com/skeema/knownhosts/README.md @@ -1,31 +1,33 @@ # knownhosts: enhanced Golang SSH known_hosts management [![build status](https://img.shields.io/github/actions/workflow/status/skeema/knownhosts/tests.yml?branch=main)](https://github.com/skeema/knownhosts/actions) +[![code coverage](https://img.shields.io/coveralls/skeema/knownhosts.svg)](https://coveralls.io/r/skeema/knownhosts) [![godoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/skeema/knownhosts) > This repo is brought to you by [Skeema](https://github.com/skeema/skeema), a > declarative pure-SQL schema management system for MySQL and MariaDB. 
Our -> premium products include extensive [SSH tunnel](https://www.skeema.io/docs/options/#ssh) +> premium products include extensive [SSH tunnel](https://www.skeema.io/docs/features/ssh/) > functionality, which internally makes use of this package. Go provides excellent functionality for OpenSSH known_hosts files in its external package [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts). -However, that package is somewhat low-level, making it difficult to implement full known_hosts management similar to command-line `ssh`'s behavior for `StrictHostKeyChecking=no` configuration. +However, that package is somewhat low-level, making it difficult to implement full known_hosts management similar to OpenSSH's command-line behavior. Additionally, [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) has several known issues in edge cases, some of which have remained open for multiple years. -This repo ([github.com/skeema/knownhosts](https://github.com/skeema/knownhosts)) is a thin wrapper package around [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts), adding the following functionality: +Package [github.com/skeema/knownhosts](https://github.com/skeema/knownhosts) provides a *thin wrapper* around [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts), adding the following improvements and fixes without duplicating its core logic: * Look up known_hosts public keys for any given host -* Auto-populate ssh.ClientConfig.HostKeyAlgorithms easily based on known_hosts, providing a solution for [golang/go#29286](https://github.com/golang/go/issues/29286) +* Auto-populate ssh.ClientConfig.HostKeyAlgorithms easily based on known_hosts, providing a solution for [golang/go#29286](https://github.com/golang/go/issues/29286). (This also properly handles cert algorithms for hosts using CA keys when [using the NewDB constructor](#enhancements-requiring-extra-parsing) added in skeema/knownhosts v1.3.0.) +* Properly match wildcard hostname known_hosts entries regardless of port number, providing a solution for [golang/go#52056](https://github.com/golang/go/issues/52056). (Added in v1.3.0; requires [using the NewDB constructor](#enhancements-requiring-extra-parsing)) * Write new known_hosts entries to an io.Writer * Properly format/normalize new known_hosts entries containing ipv6 addresses, providing a solution for [golang/go#53463](https://github.com/golang/go/issues/53463) -* Determine if an ssh.HostKeyCallback's error corresponds to a host whose key has changed (indicating potential MitM attack) vs a host that just isn't known yet +* Easily determine if an ssh.HostKeyCallback's error corresponds to a host whose key has changed (indicating potential MitM attack) vs a host that just isn't known yet ## How host key lookup works Although [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) doesn't directly expose a way to query its known_host map, we use a subtle trick to do so: invoke the HostKeyCallback with a valid host but a bogus key. The resulting KeyError allows us to determine which public keys are actually present for that host. -By using this technique, [github.com/skeema/knownhosts](https://github.com/skeema/knownhosts) doesn't need to duplicate or re-implement any of the actual known_hosts management from [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts). 
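
The lookup trick described above can be shown in a few lines of standalone Go. This is a minimal, hypothetical sketch rather than code from the vendored change: the known_hosts path and the host `yourserver.com:22` are placeholders, and `bogusKey` is an illustrative stand-in for the package's unexported `fakePublicKey`. It invokes the x/crypto callback with a key that can never match and then reads the keys the callback wanted from the resulting `KeyError`:

```golang
package main

import (
	"errors"
	"fmt"
	"net"

	"golang.org/x/crypto/ssh"
	xknownhosts "golang.org/x/crypto/ssh/knownhosts"
)

// bogusKey is a throwaway ssh.PublicKey that can never match a real
// known_hosts entry, so the host key callback fails with a KeyError
// listing the keys it wanted for the host.
type bogusKey struct{}

func (bogusKey) Type() string                            { return "fake-key" }
func (bogusKey) Marshal() []byte                         { return []byte("fake") }
func (bogusKey) Verify(_ []byte, _ *ssh.Signature) error { return errors.New("bogus key") }

func main() {
	// Illustrative path and host; adjust as needed.
	cb, err := xknownhosts.New("/home/myuser/.ssh/known_hosts")
	if err != nil {
		panic(err)
	}

	// Invoke the callback with a placeholder address and the bogus key.
	addr := &net.TCPAddr{IP: net.IPv4zero, Port: 22}
	cbErr := cb("yourserver.com:22", addr, bogusKey{})

	// On a key mismatch, x/crypto returns *knownhosts.KeyError whose Want
	// field lists every public key recorded for that host.
	var keyErr *xknownhosts.KeyError
	if errors.As(cbErr, &keyErr) {
		for _, k := range keyErr.Want {
			fmt.Printf("%s:%d %s\n", k.Filename, k.Line, k.Key.Type())
		}
	}
}
```
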
+By using this technique, [github.com/skeema/knownhosts](https://github.com/skeema/knownhosts) doesn't need to duplicate any of the core known_hosts host-lookup logic from [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts). ## Populating ssh.ClientConfig.HostKeyAlgorithms based on known_hosts @@ -42,20 +44,33 @@ import ( ) func sshConfigForHost(hostWithPort string) (*ssh.ClientConfig, error) { - kh, err := knownhosts.New("/home/myuser/.ssh/known_hosts") + kh, err := knownhosts.NewDB("/home/myuser/.ssh/known_hosts") if err != nil { return nil, err } config := &ssh.ClientConfig{ User: "myuser", Auth: []ssh.AuthMethod{ /* ... */ }, - HostKeyCallback: kh.HostKeyCallback(), // or, equivalently, use ssh.HostKeyCallback(kh) + HostKeyCallback: kh.HostKeyCallback(), HostKeyAlgorithms: kh.HostKeyAlgorithms(hostWithPort), } return config, nil } ``` +## Enhancements requiring extra parsing + +Originally, this package did not re-read/re-parse the known_hosts files at all, relying entirely on [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) for all known_hosts file reading and processing. This package only offered a constructor called `New`, returning a host key callback, identical to the call pattern of [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) but with extra methods available on the callback type. + +However, a couple shortcomings in [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) cannot possibly be solved without re-reading the known_hosts file. Therefore, as of v1.3.0 of this package, we now offer an alternative constructor `NewDB`, which does an additional read of the known_hosts file (after the one from [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts)), in order to detect: + +* @cert-authority lines, so that we can correctly return cert key algorithms instead of normal host key algorithms when appropriate +* host pattern wildcards, so that we can match OpenSSH's behavior for non-standard port numbers, unlike how [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) normally treats them + +Aside from *detecting* these special cases, this package otherwise still directly uses [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) for host lookups and all other known_hosts file processing. We do **not** fork or re-implement those core behaviors of [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts). + +The performance impact of this extra known_hosts read should be minimal, as the file should typically be in the filesystem cache already from the original read by [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts). That said, users who wish to avoid the extra read can stay with the `New` constructor, which intentionally retains its pre-v1.3.0 behavior as-is. However, the extra fixes for @cert-authority and host pattern wildcards will not be enabled in that case. + ## Writing new known_hosts entries If you wish to mimic the behavior of OpenSSH's `StrictHostKeyChecking=no` or `StrictHostKeyChecking=ask`, this package provides a few functions to simplify this task. 
For example: @@ -63,7 +78,7 @@ If you wish to mimic the behavior of OpenSSH's `StrictHostKeyChecking=no` or `St ```golang sshHost := "yourserver.com:22" khPath := "/home/myuser/.ssh/known_hosts" -kh, err := knownhosts.New(khPath) +kh, err := knownhosts.NewDB(khPath) if err != nil { log.Fatal("Failed to read known_hosts: ", err) } @@ -71,7 +86,8 @@ if err != nil { // Create a custom permissive hostkey callback which still errors on hosts // with changed keys, but allows unknown hosts and adds them to known_hosts cb := ssh.HostKeyCallback(func(hostname string, remote net.Addr, key ssh.PublicKey) error { - err := kh(hostname, remote, key) + innerCallback := kh.HostKeyCallback() + err := innerCallback(hostname, remote, key) if knownhosts.IsHostKeyChanged(err) { return fmt.Errorf("REMOTE HOST IDENTIFICATION HAS CHANGED for host %s! This may indicate a MitM attack.", hostname) } else if knownhosts.IsHostUnknown(err) { @@ -100,7 +116,7 @@ config := &ssh.ClientConfig{ ## License -**Source code copyright 2023 Skeema LLC and the Skeema Knownhosts authors** +**Source code copyright 2024 Skeema LLC and the Skeema Knownhosts authors** ```text Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/vendor/github.com/skeema/knownhosts/knownhosts.go b/vendor/github.com/skeema/knownhosts/knownhosts.go index c2fb51605..2b7536e0d 100644 --- a/vendor/github.com/skeema/knownhosts/knownhosts.go +++ b/vendor/github.com/skeema/knownhosts/knownhosts.go @@ -3,11 +3,14 @@ package knownhosts import ( + "bufio" + "bytes" "encoding/base64" "errors" "fmt" "io" "net" + "os" "sort" "strings" @@ -15,14 +18,265 @@ import ( xknownhosts "golang.org/x/crypto/ssh/knownhosts" ) -// HostKeyCallback wraps ssh.HostKeyCallback with an additional method to -// perform host key algorithm lookups from the known_hosts entries. +// HostKeyDB wraps logic in golang.org/x/crypto/ssh/knownhosts with additional +// behaviors, such as the ability to perform host key/algorithm lookups from +// known_hosts entries. +type HostKeyDB struct { + callback ssh.HostKeyCallback + isCert map[string]bool // keyed by "filename:line" + isWildcard map[string]bool // keyed by "filename:line" +} + +// NewDB creates a HostKeyDB from the given OpenSSH known_hosts file(s). It +// reads and parses the provided files one additional time (beyond logic in +// golang.org/x/crypto/ssh/knownhosts) in order to: +// +// - Handle CA lines properly and return ssh.CertAlgo* values when calling the +// HostKeyAlgorithms method, for use in ssh.ClientConfig.HostKeyAlgorithms +// - Allow * wildcards in hostnames to match on non-standard ports, providing +// a workaround for https://github.com/golang/go/issues/52056 in order to +// align with OpenSSH's wildcard behavior +// +// When supplying multiple files, their order does not matter. +func NewDB(files ...string) (*HostKeyDB, error) { + cb, err := xknownhosts.New(files...) + if err != nil { + return nil, err + } + hkdb := &HostKeyDB{ + callback: cb, + isCert: make(map[string]bool), + isWildcard: make(map[string]bool), + } + + // Re-read each file a single time, looking for @cert-authority lines. 
The + // logic for reading the file is designed to mimic hostKeyDB.Read from + // golang.org/x/crypto/ssh/knownhosts + for _, filename := range files { + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + scanner := bufio.NewScanner(f) + lineNum := 0 + for scanner.Scan() { + lineNum++ + line := scanner.Bytes() + line = bytes.TrimSpace(line) + // Does the line start with "@cert-authority" followed by whitespace? + if len(line) > 15 && bytes.HasPrefix(line, []byte("@cert-authority")) && (line[15] == ' ' || line[15] == '\t') { + mapKey := fmt.Sprintf("%s:%d", filename, lineNum) + hkdb.isCert[mapKey] = true + line = bytes.TrimSpace(line[16:]) + } + // truncate line to just the host pattern field + if i := bytes.IndexAny(line, "\t "); i >= 0 { + line = line[:i] + } + // Does the host pattern contain a * wildcard and no specific port? + if i := bytes.IndexRune(line, '*'); i >= 0 && !bytes.Contains(line[i:], []byte("]:")) { + mapKey := fmt.Sprintf("%s:%d", filename, lineNum) + hkdb.isWildcard[mapKey] = true + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("knownhosts: %s:%d: %w", filename, lineNum, err) + } + } + return hkdb, nil +} + +// HostKeyCallback returns an ssh.HostKeyCallback. This can be used directly in +// ssh.ClientConfig.HostKeyCallback, as shown in the example for NewDB. +// Alternatively, you can wrap it with an outer callback to potentially handle +// appending a new entry to the known_hosts file; see example in WriteKnownHost. +func (hkdb *HostKeyDB) HostKeyCallback() ssh.HostKeyCallback { + // Either NewDB found no wildcard host patterns, or hkdb was created from + // HostKeyCallback.ToDB in which case we didn't scan known_hosts for them: + // return the callback (which came from x/crypto/ssh/knownhosts) as-is + if len(hkdb.isWildcard) == 0 { + return hkdb.callback + } + + // If we scanned for wildcards and found at least one, return a wrapped + // callback with extra behavior: if the host lookup found no matches, and the + // host arg had a non-standard port, re-do the lookup on standard port 22. If + // that second call returns a *xknownhosts.KeyError, filter down any resulting + // Want keys to known wildcard entries. + f := func(hostname string, remote net.Addr, key ssh.PublicKey) error { + callbackErr := hkdb.callback(hostname, remote, key) + if callbackErr == nil || IsHostKeyChanged(callbackErr) { // hostname has known_host entries as-is + return callbackErr + } + justHost, port, splitErr := net.SplitHostPort(hostname) + if splitErr != nil || port == "" || port == "22" { // hostname already using standard port + return callbackErr + } + // If we reach here, the port was non-standard and no known_host entries + // were found for the non-standard port. Try again with standard port. 
+ if tcpAddr, ok := remote.(*net.TCPAddr); ok && tcpAddr.Port != 22 { + remote = &net.TCPAddr{ + IP: tcpAddr.IP, + Port: 22, + Zone: tcpAddr.Zone, + } + } + callbackErr = hkdb.callback(justHost+":22", remote, key) + var keyErr *xknownhosts.KeyError + if errors.As(callbackErr, &keyErr) && len(keyErr.Want) > 0 { + wildcardKeys := make([]xknownhosts.KnownKey, 0, len(keyErr.Want)) + for _, wantKey := range keyErr.Want { + if hkdb.isWildcard[fmt.Sprintf("%s:%d", wantKey.Filename, wantKey.Line)] { + wildcardKeys = append(wildcardKeys, wantKey) + } + } + callbackErr = &xknownhosts.KeyError{ + Want: wildcardKeys, + } + } + return callbackErr + } + return ssh.HostKeyCallback(f) +} + +// PublicKey wraps ssh.PublicKey with an additional field, to identify +// whether the key corresponds to a certificate authority. +type PublicKey struct { + ssh.PublicKey + Cert bool +} + +// HostKeys returns a slice of known host public keys for the supplied host:port +// found in the known_hosts file(s), or an empty slice if the host is not +// already known. For hosts that have multiple known_hosts entries (for +// different key types), the result will be sorted by known_hosts filename and +// line number. +// If hkdb was originally created by calling NewDB, the Cert boolean field of +// each result entry reports whether the key corresponded to a @cert-authority +// line. If hkdb was NOT obtained from NewDB, then Cert will always be false. +func (hkdb *HostKeyDB) HostKeys(hostWithPort string) (keys []PublicKey) { + var keyErr *xknownhosts.KeyError + placeholderAddr := &net.TCPAddr{IP: []byte{0, 0, 0, 0}} + placeholderPubKey := &fakePublicKey{} + var kkeys []xknownhosts.KnownKey + callback := hkdb.HostKeyCallback() + if hkcbErr := callback(hostWithPort, placeholderAddr, placeholderPubKey); errors.As(hkcbErr, &keyErr) { + kkeys = append(kkeys, keyErr.Want...) + knownKeyLess := func(i, j int) bool { + if kkeys[i].Filename < kkeys[j].Filename { + return true + } + return (kkeys[i].Filename == kkeys[j].Filename && kkeys[i].Line < kkeys[j].Line) + } + sort.Slice(kkeys, knownKeyLess) + keys = make([]PublicKey, len(kkeys)) + for n := range kkeys { + keys[n] = PublicKey{ + PublicKey: kkeys[n].Key, + } + if len(hkdb.isCert) > 0 { + keys[n].Cert = hkdb.isCert[fmt.Sprintf("%s:%d", kkeys[n].Filename, kkeys[n].Line)] + } + } + } + return keys +} + +// HostKeyAlgorithms returns a slice of host key algorithms for the supplied +// host:port found in the known_hosts file(s), or an empty slice if the host +// is not already known. The result may be used in ssh.ClientConfig's +// HostKeyAlgorithms field, either as-is or after filtering (if you wish to +// ignore or prefer particular algorithms). For hosts that have multiple +// known_hosts entries (of different key types), the result will be sorted by +// known_hosts filename and line number. +// If hkdb was originally created by calling NewDB, any @cert-authority lines +// in the known_hosts file will properly be converted to the corresponding +// ssh.CertAlgo* values. +func (hkdb *HostKeyDB) HostKeyAlgorithms(hostWithPort string) (algos []string) { + // We ensure that algos never contains duplicates. This is done for robustness + // even though currently golang.org/x/crypto/ssh/knownhosts never exposes + // multiple keys of the same type. This way our behavior here is unaffected + // even if https://github.com/golang/go/issues/28870 is implemented, for + // example by https://github.com/golang/crypto/pull/254. 
+ hostKeys := hkdb.HostKeys(hostWithPort) + seen := make(map[string]struct{}, len(hostKeys)) + addAlgo := func(typ string, cert bool) { + if cert { + typ = keyTypeToCertAlgo(typ) + } + if _, already := seen[typ]; !already { + algos = append(algos, typ) + seen[typ] = struct{}{} + } + } + for _, key := range hostKeys { + typ := key.Type() + if typ == ssh.KeyAlgoRSA { + // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, + // not public key formats, so they can't appear as a PublicKey.Type. + // The corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2. + addAlgo(ssh.KeyAlgoRSASHA512, key.Cert) + addAlgo(ssh.KeyAlgoRSASHA256, key.Cert) + } + addAlgo(typ, key.Cert) + } + return algos +} + +func keyTypeToCertAlgo(keyType string) string { + switch keyType { + case ssh.KeyAlgoRSA: + return ssh.CertAlgoRSAv01 + case ssh.KeyAlgoRSASHA256: + return ssh.CertAlgoRSASHA256v01 + case ssh.KeyAlgoRSASHA512: + return ssh.CertAlgoRSASHA512v01 + case ssh.KeyAlgoDSA: + return ssh.CertAlgoDSAv01 + case ssh.KeyAlgoECDSA256: + return ssh.CertAlgoECDSA256v01 + case ssh.KeyAlgoSKECDSA256: + return ssh.CertAlgoSKECDSA256v01 + case ssh.KeyAlgoECDSA384: + return ssh.CertAlgoECDSA384v01 + case ssh.KeyAlgoECDSA521: + return ssh.CertAlgoECDSA521v01 + case ssh.KeyAlgoED25519: + return ssh.CertAlgoED25519v01 + case ssh.KeyAlgoSKED25519: + return ssh.CertAlgoSKED25519v01 + } + return "" +} + +// HostKeyCallback wraps ssh.HostKeyCallback with additional methods to +// perform host key and algorithm lookups from the known_hosts entries. It is +// otherwise identical to ssh.HostKeyCallback, and does not introduce any file- +// parsing behavior beyond what is in golang.org/x/crypto/ssh/knownhosts. +// +// In most situations, use HostKeyDB and its constructor NewDB instead of using +// the HostKeyCallback type. The HostKeyCallback type is only provided for +// backwards compatibility with older versions of this package, as well as for +// very strict situations where any extra known_hosts file-parsing is +// undesirable. +// +// Methods of HostKeyCallback do not provide any special treatment for +// @cert-authority lines, which will (incorrectly) look like normal non-CA host +// keys. Additionally, HostKeyCallback lacks the fix for applying * wildcard +// known_host entries to all ports, like OpenSSH's behavior. type HostKeyCallback ssh.HostKeyCallback -// New creates a host key callback from the given OpenSSH host key files. The +// New creates a HostKeyCallback from the given OpenSSH known_hosts file(s). The // returned value may be used in ssh.ClientConfig.HostKeyCallback by casting it // to ssh.HostKeyCallback, or using its HostKeyCallback method. Otherwise, it // operates the same as the New function in golang.org/x/crypto/ssh/knownhosts. +// When supplying multiple files, their order does not matter. +// +// In most situations, you should avoid this function, as the returned value +// lacks several enhanced behaviors. See doc comment for HostKeyCallback for +// more information. Instead, most callers should use NewDB to create a +// HostKeyDB, which includes these enhancements. func New(files ...string) (HostKeyCallback, error) { cb, err := xknownhosts.New(files...) return HostKeyCallback(cb), err @@ -34,31 +288,39 @@ func (hkcb HostKeyCallback) HostKeyCallback() ssh.HostKeyCallback { return ssh.HostKeyCallback(hkcb) } +// ToDB converts the receiver into a HostKeyDB. 
However, the returned HostKeyDB +// lacks the enhanced behaviors described in the doc comment for NewDB: proper +// CA support, and wildcard matching on nonstandard ports. +// +// It is generally preferable to create a HostKeyDB by using NewDB. The ToDB +// method is only provided for situations in which the calling code needs to +// make the extra NewDB behaviors optional / user-configurable, perhaps for +// reasons of performance or code trust (since NewDB reads the known_host file +// an extra time, which may be undesirable in some strict situations). This way, +// callers can conditionally create a non-enhanced HostKeyDB by using New and +// ToDB. See code example. +func (hkcb HostKeyCallback) ToDB() *HostKeyDB { + // This intentionally leaves the isCert and isWildcard map fields as nil, as + // there is no way to retroactively populate them from just a HostKeyCallback. + // Methods of HostKeyDB will skip any related enhanced behaviors accordingly. + return &HostKeyDB{callback: ssh.HostKeyCallback(hkcb)} +} + // HostKeys returns a slice of known host public keys for the supplied host:port // found in the known_hosts file(s), or an empty slice if the host is not // already known. For hosts that have multiple known_hosts entries (for // different key types), the result will be sorted by known_hosts filename and // line number. -func (hkcb HostKeyCallback) HostKeys(hostWithPort string) (keys []ssh.PublicKey) { - var keyErr *xknownhosts.KeyError - placeholderAddr := &net.TCPAddr{IP: []byte{0, 0, 0, 0}} - placeholderPubKey := &fakePublicKey{} - var kkeys []xknownhosts.KnownKey - if hkcbErr := hkcb(hostWithPort, placeholderAddr, placeholderPubKey); errors.As(hkcbErr, &keyErr) { - kkeys = append(kkeys, keyErr.Want...) - knownKeyLess := func(i, j int) bool { - if kkeys[i].Filename < kkeys[j].Filename { - return true - } - return (kkeys[i].Filename == kkeys[j].Filename && kkeys[i].Line < kkeys[j].Line) - } - sort.Slice(kkeys, knownKeyLess) - keys = make([]ssh.PublicKey, len(kkeys)) - for n := range kkeys { - keys[n] = kkeys[n].Key - } +// In the returned values, there is no way to distinguish between CA keys +// (known_hosts lines beginning with @cert-authority) and regular keys. To do +// so, see NewDB and HostKeyDB.HostKeys instead. +func (hkcb HostKeyCallback) HostKeys(hostWithPort string) []ssh.PublicKey { + annotatedKeys := hkcb.ToDB().HostKeys(hostWithPort) + rawKeys := make([]ssh.PublicKey, len(annotatedKeys)) + for n, ak := range annotatedKeys { + rawKeys[n] = ak.PublicKey } - return keys + return rawKeys } // HostKeyAlgorithms returns a slice of host key algorithms for the supplied @@ -68,35 +330,29 @@ func (hkcb HostKeyCallback) HostKeys(hostWithPort string) (keys []ssh.PublicKey) // ignore or prefer particular algorithms). For hosts that have multiple // known_hosts entries (for different key types), the result will be sorted by // known_hosts filename and line number. +// The returned values will not include ssh.CertAlgo* values. If any +// known_hosts lines had @cert-authority prefixes, their original key algo will +// be returned instead. For proper CA support, see NewDB and +// HostKeyDB.HostKeyAlgorithms instead. func (hkcb HostKeyCallback) HostKeyAlgorithms(hostWithPort string) (algos []string) { - // We ensure that algos never contains duplicates. This is done for robustness - // even though currently golang.org/x/crypto/ssh/knownhosts never exposes - // multiple keys of the same type. 
This way our behavior here is unaffected - // even if https://github.com/golang/go/issues/28870 is implemented, for - // example by https://github.com/golang/crypto/pull/254. - hostKeys := hkcb.HostKeys(hostWithPort) - seen := make(map[string]struct{}, len(hostKeys)) - for _, key := range hostKeys { - typ := key.Type() - if _, already := seen[typ]; !already { - algos = append(algos, typ) - seen[typ] = struct{}{} - } - } - return algos + return hkcb.ToDB().HostKeyAlgorithms(hostWithPort) } // HostKeyAlgorithms is a convenience function for performing host key algorithm // lookups on an ssh.HostKeyCallback directly. It is intended for use in code // paths that stay with the New method of golang.org/x/crypto/ssh/knownhosts -// rather than this package's New method. +// rather than this package's New or NewDB methods. +// The returned values will not include ssh.CertAlgo* values. If any +// known_hosts lines had @cert-authority prefixes, their original key algo will +// be returned instead. For proper CA support, see NewDB and +// HostKeyDB.HostKeyAlgorithms instead. func HostKeyAlgorithms(cb ssh.HostKeyCallback, hostWithPort string) []string { return HostKeyCallback(cb).HostKeyAlgorithms(hostWithPort) } // IsHostKeyChanged returns a boolean indicating whether the error indicates // the host key has changed. It is intended to be called on the error returned -// from invoking a HostKeyCallback to check whether an SSH host is known. +// from invoking a host key callback, to check whether an SSH host is known. func IsHostKeyChanged(err error) bool { var keyErr *xknownhosts.KeyError return errors.As(err, &keyErr) && len(keyErr.Want) > 0 @@ -104,7 +360,7 @@ func IsHostKeyChanged(err error) bool { // IsHostUnknown returns a boolean indicating whether the error represents an // unknown host. It is intended to be called on the error returned from invoking -// a HostKeyCallback to check whether an SSH host is known. +// a host key callback to check whether an SSH host is known. func IsHostUnknown(err error) bool { var keyErr *xknownhosts.KeyError return errors.As(err, &keyErr) && len(keyErr.Want) == 0 @@ -144,11 +400,12 @@ func Line(addresses []string, key ssh.PublicKey) string { }, " ") } -// WriteKnownHost writes a known_hosts line to writer for the supplied hostname, +// WriteKnownHost writes a known_hosts line to w for the supplied hostname, // remote, and key. This is useful when writing a custom hostkey callback which -// wraps a callback obtained from knownhosts.New to provide additional -// known_hosts management functionality. The hostname, remote, and key typically -// correspond to the callback's args. +// wraps a callback obtained from this package to provide additional known_hosts +// management functionality. The hostname, remote, and key typically correspond +// to the callback's args. This function does not support writing +// @cert-authority lines. func WriteKnownHost(w io.Writer, hostname string, remote net.Addr, key ssh.PublicKey) error { // Always include hostname; only also include remote if it isn't a zero value // and doesn't normalize to the same string as hostname. @@ -167,6 +424,14 @@ func WriteKnownHost(w io.Writer, hostname string, remote net.Addr, key ssh.Publi return err } +// WriteKnownHostCA writes a @cert-authority line to w for the supplied host +// name/pattern and key. 
+func WriteKnownHostCA(w io.Writer, hostPattern string, key ssh.PublicKey) error { + encodedKey := base64.StdEncoding.EncodeToString(key.Marshal()) + _, err := fmt.Fprintf(w, "@cert-authority %s %s %s\n", hostPattern, key.Type(), encodedKey) + return err +} + // fakePublicKey is used as part of the work-around for // https://github.com/golang/go/issues/29286 type fakePublicKey struct{} diff --git a/vendor/modules.txt b/vendor/modules.txt index bb3613f6f..9423814dc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -47,8 +47,8 @@ github.com/Nerzal/gocloak/v13/pkg/jwx # github.com/OneOfOne/xxhash v1.2.8 ## explicit; go 1.11 github.com/OneOfOne/xxhash -# github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 -## explicit; go 1.13 +# github.com/ProtonMail/go-crypto v1.1.5 +## explicit; go 1.17 github.com/ProtonMail/go-crypto/bitcurves github.com/ProtonMail/go-crypto/brainpool github.com/ProtonMail/go-crypto/eax @@ -59,6 +59,8 @@ github.com/ProtonMail/go-crypto/openpgp/aes/keywrap github.com/ProtonMail/go-crypto/openpgp/armor github.com/ProtonMail/go-crypto/openpgp/ecdh github.com/ProtonMail/go-crypto/openpgp/ecdsa +github.com/ProtonMail/go-crypto/openpgp/ed25519 +github.com/ProtonMail/go-crypto/openpgp/ed448 github.com/ProtonMail/go-crypto/openpgp/eddsa github.com/ProtonMail/go-crypto/openpgp/elgamal github.com/ProtonMail/go-crypto/openpgp/errors @@ -67,6 +69,8 @@ github.com/ProtonMail/go-crypto/openpgp/internal/ecc github.com/ProtonMail/go-crypto/openpgp/internal/encoding github.com/ProtonMail/go-crypto/openpgp/packet github.com/ProtonMail/go-crypto/openpgp/s2k +github.com/ProtonMail/go-crypto/openpgp/x25519 +github.com/ProtonMail/go-crypto/openpgp/x448 # github.com/RoaringBitmap/roaring v1.9.3 ## explicit; go 1.14 github.com/RoaringBitmap/roaring @@ -350,8 +354,8 @@ github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1 github.com/cs3org/go-cs3apis/cs3/storage/registry/v1beta1 github.com/cs3org/go-cs3apis/cs3/tx/v1beta1 github.com/cs3org/go-cs3apis/cs3/types/v1beta1 -# github.com/cyphar/filepath-securejoin v0.2.4 -## explicit; go 1.13 +# github.com/cyphar/filepath-securejoin v0.3.6 +## explicit; go 1.18 github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit @@ -448,16 +452,16 @@ github.com/go-git/gcfg github.com/go-git/gcfg/scanner github.com/go-git/gcfg/token github.com/go-git/gcfg/types -# github.com/go-git/go-billy/v5 v5.5.0 -## explicit; go 1.19 +# github.com/go-git/go-billy/v5 v5.6.2 +## explicit; go 1.21 github.com/go-git/go-billy/v5 github.com/go-git/go-billy/v5/helper/chroot github.com/go-git/go-billy/v5/helper/polyfill github.com/go-git/go-billy/v5/memfs github.com/go-git/go-billy/v5/osfs github.com/go-git/go-billy/v5/util -# github.com/go-git/go-git/v5 v5.11.0 -## explicit; go 1.19 +# github.com/go-git/go-git/v5 v5.13.2 +## explicit; go 1.21 github.com/go-git/go-git/v5 github.com/go-git/go-git/v5/config github.com/go-git/go-git/v5/internal/path_util @@ -1185,8 +1189,8 @@ github.com/open-policy-agent/opa/v1/types github.com/open-policy-agent/opa/v1/util github.com/open-policy-agent/opa/v1/util/decoding github.com/open-policy-agent/opa/v1/version -# github.com/opencloud-eu/reva/v2 v2.27.3-0.20250127153848-a84e6c39c206 -## explicit; go 1.22.7 +# github.com/opencloud-eu/reva/v2 v2.27.3-0.20250213085913-418bbbbeed54 +## explicit; go 1.23.1 github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace github.com/opencloud-eu/reva/v2/cmd/revad/runtime 
github.com/opencloud-eu/reva/v2/internal/grpc/interceptors/appctx @@ -1401,7 +1405,6 @@ github.com/opencloud-eu/reva/v2/pkg/preferences/loader github.com/opencloud-eu/reva/v2/pkg/preferences/memory github.com/opencloud-eu/reva/v2/pkg/preferences/registry github.com/opencloud-eu/reva/v2/pkg/publicshare -github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/cs3 github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/json github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/json/persistence github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/json/persistence/cs3 @@ -1432,7 +1435,6 @@ github.com/opencloud-eu/reva/v2/pkg/share/cache github.com/opencloud-eu/reva/v2/pkg/share/cache/warmup/cbox github.com/opencloud-eu/reva/v2/pkg/share/cache/warmup/loader github.com/opencloud-eu/reva/v2/pkg/share/cache/warmup/registry -github.com/opencloud-eu/reva/v2/pkg/share/manager/cs3 github.com/opencloud-eu/reva/v2/pkg/share/manager/json github.com/opencloud-eu/reva/v2/pkg/share/manager/jsoncs3 github.com/opencloud-eu/reva/v2/pkg/share/manager/jsoncs3/providercache @@ -1544,14 +1546,9 @@ github.com/opencloud-eu/reva/v2/pkg/storage/utils/downloader github.com/opencloud-eu/reva/v2/pkg/storage/utils/eosfs github.com/opencloud-eu/reva/v2/pkg/storage/utils/filelocks github.com/opencloud-eu/reva/v2/pkg/storage/utils/grants -github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer -github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/errors -github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/index -github.com/opencloud-eu/reva/v2/pkg/storage/utils/indexer/option github.com/opencloud-eu/reva/v2/pkg/storage/utils/localfs github.com/opencloud-eu/reva/v2/pkg/storage/utils/metadata github.com/opencloud-eu/reva/v2/pkg/storage/utils/middleware -github.com/opencloud-eu/reva/v2/pkg/storage/utils/sync github.com/opencloud-eu/reva/v2/pkg/storage/utils/templates github.com/opencloud-eu/reva/v2/pkg/storage/utils/walker github.com/opencloud-eu/reva/v2/pkg/storagespace @@ -1594,8 +1591,8 @@ github.com/owncloud/libre-graph-api-go # github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c ## explicit; go 1.12 github.com/oxtoacart/bpool -# github.com/pablodz/inotifywaitgo v0.0.7 -## explicit; go 1.21 +# github.com/pablodz/inotifywaitgo v0.0.9 +## explicit; go 1.23.1 github.com/pablodz/inotifywaitgo/inotifywaitgo # github.com/patrickmn/go-cache v2.1.0+incompatible ## explicit @@ -1610,8 +1607,8 @@ github.com/pierrec/lz4/v4/internal/lz4block github.com/pierrec/lz4/v4/internal/lz4errors github.com/pierrec/lz4/v4/internal/lz4stream github.com/pierrec/lz4/v4/internal/xxh32 -# github.com/pjbgf/sha1cd v0.3.0 -## explicit; go 1.19 +# github.com/pjbgf/sha1cd v0.3.2 +## explicit; go 1.21 github.com/pjbgf/sha1cd github.com/pjbgf/sha1cd/internal github.com/pjbgf/sha1cd/ubc @@ -1760,8 +1757,8 @@ github.com/segmentio/ksuid # github.com/sercand/kuberesolver/v5 v5.1.1 ## explicit; go 1.18 github.com/sercand/kuberesolver/v5 -# github.com/sergi/go-diff v1.3.1 -## explicit; go 1.12 +# github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 +## explicit; go 1.13 github.com/sergi/go-diff/diffmatchpatch # github.com/sethvargo/go-password v0.3.1 ## explicit; go 1.21 @@ -1788,7 +1785,7 @@ github.com/shurcooL/vfsgen # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/skeema/knownhosts v1.2.1 +# github.com/skeema/knownhosts v1.3.0 ## explicit; go 1.17 github.com/skeema/knownhosts # github.com/spacewander/go-suffix-tree v0.0.0-20191010040751-0865e368c784