Merge pull request #900 from opencloud-eu/dependabot/go_modules/github.com/blevesearch/bleve/v2-2.5.1

build(deps): bump github.com/blevesearch/bleve/v2 from 2.5.0 to 2.5.1
Authored by Ralf Haferkamp on 2025-05-21 10:47:35 +02:00; committed by GitHub
100 changed files with 4027 additions and 1463 deletions

go.mod

@@ -11,7 +11,7 @@ require (
github.com/Nerzal/gocloak/v13 v13.9.0
github.com/bbalet/stopwords v1.0.0
github.com/beevik/etree v1.5.1
github.com/blevesearch/bleve/v2 v2.5.0
github.com/blevesearch/bleve/v2 v2.5.1
github.com/cenkalti/backoff v2.2.1+incompatible
github.com/coreos/go-oidc/v3 v3.14.1
github.com/cs3org/go-cs3apis v0.0.0-20241105092511-3ad35d174fc1
@@ -132,23 +132,23 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/bitly/go-simplejson v0.5.0 // indirect
github.com/bits-and-blooms/bitset v1.22.0 // indirect
github.com/blevesearch/bleve_index_api v1.2.7 // indirect
github.com/blevesearch/geo v0.1.20 // indirect
github.com/blevesearch/bleve_index_api v1.2.8 // indirect
github.com/blevesearch/geo v0.2.3 // indirect
github.com/blevesearch/go-faiss v1.0.25 // indirect
github.com/blevesearch/go-porterstemmer v1.0.3 // indirect
github.com/blevesearch/gtreap v0.1.1 // indirect
github.com/blevesearch/mmap-go v1.0.4 // indirect
github.com/blevesearch/scorch_segment_api/v2 v2.3.9 // indirect
github.com/blevesearch/scorch_segment_api/v2 v2.3.10 // indirect
github.com/blevesearch/segment v0.9.1 // indirect
github.com/blevesearch/snowballstem v0.9.0 // indirect
github.com/blevesearch/upsidedown_store_api v1.0.2 // indirect
github.com/blevesearch/vellum v1.1.0 // indirect
github.com/blevesearch/zapx/v11 v11.4.1 // indirect
github.com/blevesearch/zapx/v12 v12.4.1 // indirect
github.com/blevesearch/zapx/v13 v13.4.1 // indirect
github.com/blevesearch/zapx/v14 v14.4.1 // indirect
github.com/blevesearch/zapx/v15 v15.4.1 // indirect
github.com/blevesearch/zapx/v16 v16.2.2 // indirect
github.com/blevesearch/zapx/v11 v11.4.2 // indirect
github.com/blevesearch/zapx/v12 v12.4.2 // indirect
github.com/blevesearch/zapx/v13 v13.4.2 // indirect
github.com/blevesearch/zapx/v14 v14.4.2 // indirect
github.com/blevesearch/zapx/v15 v15.4.2 // indirect
github.com/blevesearch/zapx/v16 v16.2.3 // indirect
github.com/bluele/gcache v0.0.2 // indirect
github.com/bombsimon/logrusr/v3 v3.1.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
@@ -211,7 +211,6 @@ require (
github.com/gofrs/flock v0.12.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/gomodule/redigo v1.9.2 // indirect

go.sum

@@ -146,12 +146,12 @@ github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6
github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4=
github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blevesearch/bleve/v2 v2.5.0 h1:HzYqBy/5/M9Ul9ESEmXzN/3Jl7YpmWBdHM/+zzv/3k4=
github.com/blevesearch/bleve/v2 v2.5.0/go.mod h1:PcJzTPnEynO15dCf9isxOga7YFRa/cMSsbnRwnszXUk=
github.com/blevesearch/bleve_index_api v1.2.7 h1:c8r9vmbaYQroAMSGag7zq5gEVPiuXrUQDqfnj7uYZSY=
github.com/blevesearch/bleve_index_api v1.2.7/go.mod h1:rKQDl4u51uwafZxFrPD1R7xFOwKnzZW7s/LSeK4lgo0=
github.com/blevesearch/geo v0.1.20 h1:paaSpu2Ewh/tn5DKn/FB5SzvH0EWupxHEIwbCk/QPqM=
github.com/blevesearch/geo v0.1.20/go.mod h1:DVG2QjwHNMFmjo+ZgzrIq2sfCh6rIHzy9d9d0B59I6w=
github.com/blevesearch/bleve/v2 v2.5.1 h1:cc/O++W2Hcjp1SU5ETHeE+QYWv2oV88ldYEPowdmg8M=
github.com/blevesearch/bleve/v2 v2.5.1/go.mod h1:9g/wnbWKm9AgXrU8Ecqi+IDdqjUHWymwkQRDg+5tafU=
github.com/blevesearch/bleve_index_api v1.2.8 h1:Y98Pu5/MdlkRyLM0qDHostYo7i+Vv1cDNhqTeR4Sy6Y=
github.com/blevesearch/bleve_index_api v1.2.8/go.mod h1:rKQDl4u51uwafZxFrPD1R7xFOwKnzZW7s/LSeK4lgo0=
github.com/blevesearch/geo v0.2.3 h1:K9/vbGI9ehlXdxjxDRJtoAMt7zGAsMIzc6n8zWcwnhg=
github.com/blevesearch/geo v0.2.3/go.mod h1:K56Q33AzXt2YExVHGObtmRSFYZKYGv0JEN5mdacJJR8=
github.com/blevesearch/go-faiss v1.0.25 h1:lel1rkOUGbT1CJ0YgzKwC7k+XH0XVBHnCVWahdCXk4U=
github.com/blevesearch/go-faiss v1.0.25/go.mod h1:OMGQwOaRRYxrmeNdMrXJPvVx8gBnvE5RYrr0BahNnkk=
github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo=
@@ -160,8 +160,8 @@ github.com/blevesearch/gtreap v0.1.1 h1:2JWigFrzDMR+42WGIN/V2p0cUvn4UP3C4Q5nmaZG
github.com/blevesearch/gtreap v0.1.1/go.mod h1:QaQyDRAT51sotthUWAH4Sj08awFSSWzgYICSZ3w0tYk=
github.com/blevesearch/mmap-go v1.0.4 h1:OVhDhT5B/M1HNPpYPBKIEJaD0F3Si+CrEKULGCDPWmc=
github.com/blevesearch/mmap-go v1.0.4/go.mod h1:EWmEAOmdAS9z/pi/+Toxu99DnsbhG1TIxUoRmJw/pSs=
github.com/blevesearch/scorch_segment_api/v2 v2.3.9 h1:X6nJXnNHl7nasXW+U6y2Ns2Aw8F9STszkYkyBfQ+p0o=
github.com/blevesearch/scorch_segment_api/v2 v2.3.9/go.mod h1:IrzspZlVjhf4X29oJiEhBxEteTqOY9RlYlk1lCmYHr4=
github.com/blevesearch/scorch_segment_api/v2 v2.3.10 h1:Yqk0XD1mE0fDZAJXTjawJ8If/85JxnLd8v5vG/jWE/s=
github.com/blevesearch/scorch_segment_api/v2 v2.3.10/go.mod h1:Z3e6ChN3qyN35yaQpl00MfI5s8AxUJbpTR/DL8QOQ+8=
github.com/blevesearch/segment v0.9.1 h1:+dThDy+Lvgj5JMxhmOVlgFfkUtZV2kw49xax4+jTfSU=
github.com/blevesearch/segment v0.9.1/go.mod h1:zN21iLm7+GnBHWTao9I+Au/7MBiL8pPFtJBJTsk6kQw=
github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s=
@@ -170,18 +170,18 @@ github.com/blevesearch/upsidedown_store_api v1.0.2 h1:U53Q6YoWEARVLd1OYNc9kvhBMG
github.com/blevesearch/upsidedown_store_api v1.0.2/go.mod h1:M01mh3Gpfy56Ps/UXHjEO/knbqyQ1Oamg8If49gRwrQ=
github.com/blevesearch/vellum v1.1.0 h1:CinkGyIsgVlYf8Y2LUQHvdelgXr6PYuvoDIajq6yR9w=
github.com/blevesearch/vellum v1.1.0/go.mod h1:QgwWryE8ThtNPxtgWJof5ndPfx0/YMBh+W2weHKPw8Y=
github.com/blevesearch/zapx/v11 v11.4.1 h1:qFCPlFbsEdwbbckJkysptSQOsHn4s6ZOHL5GMAIAVHA=
github.com/blevesearch/zapx/v11 v11.4.1/go.mod h1:qNOGxIqdPC1MXauJCD9HBG487PxviTUUbmChFOAosGs=
github.com/blevesearch/zapx/v12 v12.4.1 h1:K77bhypII60a4v8mwvav7r4IxWA8qxhNjgF9xGdb9eQ=
github.com/blevesearch/zapx/v12 v12.4.1/go.mod h1:QRPrlPOzAxBNMI0MkgdD+xsTqx65zbuPr3Ko4Re49II=
github.com/blevesearch/zapx/v13 v13.4.1 h1:EnkEMZFUK0lsW/jOJJF2xOcp+W8TjEsyeN5BeAZEYYE=
github.com/blevesearch/zapx/v13 v13.4.1/go.mod h1:e6duBMlCvgbH9rkzNMnUa9hRI9F7ri2BRcHfphcmGn8=
github.com/blevesearch/zapx/v14 v14.4.1 h1:G47kGCshknBZzZAtjcnIAMn3oNx8XBLxp8DMq18ogyE=
github.com/blevesearch/zapx/v14 v14.4.1/go.mod h1:O7sDxiaL2r2PnCXbhh1Bvm7b4sP+jp4unE9DDPWGoms=
github.com/blevesearch/zapx/v15 v15.4.1 h1:B5IoTMUCEzFdc9FSQbhVOxAY+BO17c05866fNruiI7g=
github.com/blevesearch/zapx/v15 v15.4.1/go.mod h1:b/MreHjYeQoLjyY2+UaM0hGZZUajEbE0xhnr1A2/Q6Y=
github.com/blevesearch/zapx/v16 v16.2.2 h1:MifKJVRTEhMTgSlle2bDRTb39BGc9jXFRLPZc6r0Rzk=
github.com/blevesearch/zapx/v16 v16.2.2/go.mod h1:B9Pk4G1CqtErgQV9DyCSA9Lb7WZe4olYfGw7fVDZ4sk=
github.com/blevesearch/zapx/v11 v11.4.2 h1:l46SV+b0gFN+Rw3wUI1YdMWdSAVhskYuvxlcgpQFljs=
github.com/blevesearch/zapx/v11 v11.4.2/go.mod h1:4gdeyy9oGa/lLa6D34R9daXNUvfMPZqUYjPwiLmekwc=
github.com/blevesearch/zapx/v12 v12.4.2 h1:fzRbhllQmEMUuAQ7zBuMvKRlcPA5ESTgWlDEoB9uQNE=
github.com/blevesearch/zapx/v12 v12.4.2/go.mod h1:TdFmr7afSz1hFh/SIBCCZvcLfzYvievIH6aEISCte58=
github.com/blevesearch/zapx/v13 v13.4.2 h1:46PIZCO/ZuKZYgxI8Y7lOJqX3Irkc3N8W82QTK3MVks=
github.com/blevesearch/zapx/v13 v13.4.2/go.mod h1:knK8z2NdQHlb5ot/uj8wuvOq5PhDGjNYQQy0QDnopZk=
github.com/blevesearch/zapx/v14 v14.4.2 h1:2SGHakVKd+TrtEqpfeq8X+So5PShQ5nW6GNxT7fWYz0=
github.com/blevesearch/zapx/v14 v14.4.2/go.mod h1:rz0XNb/OZSMjNorufDGSpFpjoFKhXmppH9Hi7a877D8=
github.com/blevesearch/zapx/v15 v15.4.2 h1:sWxpDE0QQOTjyxYbAVjt3+0ieu8NCE0fDRaFxEsp31k=
github.com/blevesearch/zapx/v15 v15.4.2/go.mod h1:1pssev/59FsuWcgSnTa0OeEpOzmhtmr/0/11H0Z8+Nw=
github.com/blevesearch/zapx/v16 v16.2.3 h1:7Y0r+a3diEvlazsncexq1qoFOcBd64xwMS7aDm4lo1s=
github.com/blevesearch/zapx/v16 v16.2.3/go.mod h1:wVJ+GtURAaRG9KQAMNYyklq0egV+XJlGcXNCE0OFjjA=
github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
@@ -458,8 +458,6 @@ github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXe
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo=
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=


@@ -143,7 +143,27 @@ func NewGeoShapeFieldFromBytes(name string, arrayPositions []uint64,
func NewGeoShapeFieldWithIndexingOptions(name string, arrayPositions []uint64,
coordinates [][][][]float64, typ string,
options index.FieldIndexingOptions) *GeoShapeField {
shape, encodedValue, err := geo.NewGeoJsonShape(coordinates, typ)
shape := &geojson.GeoShape{
Coordinates: coordinates,
Type: typ,
}
return NewGeoShapeFieldFromShapeWithIndexingOptions(name,
arrayPositions, shape, options)
}
func NewGeoShapeFieldFromShapeWithIndexingOptions(name string, arrayPositions []uint64,
geoShape *geojson.GeoShape, options index.FieldIndexingOptions) *GeoShapeField {
var shape index.GeoJSON
var encodedValue []byte
var err error
if geoShape.Type == geo.CircleType {
shape, encodedValue, err = geo.NewGeoCircleShape(geoShape.Center, geoShape.Radius)
} else {
shape, encodedValue, err = geo.NewGeoJsonShape(geoShape.Coordinates, geoShape.Type)
}
if err != nil {
return nil
}
@@ -158,7 +178,9 @@ func NewGeoShapeFieldWithIndexingOptions(name string, arrayPositions []uint64,
return nil
}
options = options | DefaultGeoShapeIndexingOptions
// docvalues are always enabled for geoshape fields, even if the
// indexing options are set to not include docvalues.
options = options | index.DocValues
return &GeoShapeField{
shape: shape,
@@ -174,7 +196,26 @@ func NewGeoShapeFieldWithIndexingOptions(name string, arrayPositions []uint64,
func NewGeometryCollectionFieldWithIndexingOptions(name string,
arrayPositions []uint64, coordinates [][][][][]float64, types []string,
options index.FieldIndexingOptions) *GeoShapeField {
shape, encodedValue, err := geo.NewGeometryCollection(coordinates, types)
if len(coordinates) != len(types) {
return nil
}
shapes := make([]*geojson.GeoShape, len(types))
for i := range coordinates {
shapes[i] = &geojson.GeoShape{
Coordinates: coordinates[i],
Type: types[i],
}
}
return NewGeometryCollectionFieldFromShapesWithIndexingOptions(name,
arrayPositions, shapes, options)
}
func NewGeometryCollectionFieldFromShapesWithIndexingOptions(name string,
arrayPositions []uint64, geoShapes []*geojson.GeoShape,
options index.FieldIndexingOptions) *GeoShapeField {
shape, encodedValue, err := geo.NewGeometryCollectionFromShapes(geoShapes)
if err != nil {
return nil
}
@@ -189,7 +230,9 @@ func NewGeometryCollectionFieldWithIndexingOptions(name string,
return nil
}
options = options | DefaultGeoShapeIndexingOptions
// docvalues are always enabled for geoshape fields, even if the
// indexing options are set to not include docvalues.
options = options | index.DocValues
return &GeoShapeField{
shape: shape,
@@ -205,32 +248,15 @@ func NewGeometryCollectionFieldWithIndexingOptions(name string,
func NewGeoCircleFieldWithIndexingOptions(name string, arrayPositions []uint64,
centerPoint []float64, radius string,
options index.FieldIndexingOptions) *GeoShapeField {
shape, encodedValue, err := geo.NewGeoCircleShape(centerPoint, radius)
if err != nil {
return nil
shape := &geojson.GeoShape{
Center: centerPoint,
Radius: radius,
Type: geo.CircleType,
}
// extra glue bytes to work around the term splitting logic from interfering
// the custom encoding of the geoshape coordinates inside the docvalues.
encodedValue = append(geo.GlueBytes, append(encodedValue, geo.GlueBytes...)...)
// get the byte value for the circle.
value, err := shape.Value()
if err != nil {
return nil
}
options = options | DefaultGeoShapeIndexingOptions
return &GeoShapeField{
shape: shape,
name: name,
arrayPositions: arrayPositions,
options: options,
encodedValue: encodedValue,
value: value,
numPlainTextBytes: uint64(len(value)),
}
return NewGeoShapeFieldFromShapeWithIndexingOptions(name,
arrayPositions, shape, options)
}
// GeoShape is an implementation of the index.GeoShapeField interface.
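The refactor above funnels every shape, circles included, through a single shape-based constructor. A minimal sketch of calling the new entry point from outside the package, assuming only the APIs visible in this diff (the field name and coordinates are illustrative):

package main

import (
	"fmt"

	"github.com/blevesearch/bleve/v2/document"
	"github.com/blevesearch/bleve/v2/geo"
	"github.com/blevesearch/geo/geojson"
	index "github.com/blevesearch/bleve_index_api"
)

func main() {
	// A circle is described by its center (lon, lat) and a radius string;
	// the constructor dispatches on Type, so circles share the common path.
	shape := &geojson.GeoShape{
		Type:   geo.CircleType,
		Center: []float64{2.2945, 48.8584}, // illustrative lon, lat
		Radius: "100m",
	}
	field := document.NewGeoShapeFieldFromShapeWithIndexingOptions(
		"location", nil, shape, index.IndexField)
	if field == nil {
		fmt.Println("invalid shape")
		return
	}
	fmt.Println("built geoshape field:", field.Name())
}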


@@ -109,7 +109,6 @@ func NewVectorField(name string, arrayPositions []uint64,
func NewVectorFieldWithIndexingOptions(name string, arrayPositions []uint64,
vector []float32, dims int, similarity, vectorIndexOptimizedFor string,
options index.FieldIndexingOptions) *VectorField {
options = options | DefaultVectorIndexingOptions
return &VectorField{
name: name,


@@ -274,4 +274,10 @@ First, all of this geo code is a Go adaptation of the [Lucene 5.3.2 sandbox geo
- All of the APIs will use float64 for lon/lat values.
- When describing a point in function arguments or return values, we always use the order lon, lat.
- High level APIs will use TopLeft and BottomRight to describe bounding boxes. This may not map cleanly to min/max lon/lat when crossing the dateline. The lower level APIs will use min/max lon/lat and require the higher-level code to split boxes accordingly.
- Points and MultiPoints may only contain Points and MultiPoints.
- LineStrings and MultiLineStrings may only contain Points and MultiPoints.
- Polygons or MultiPolygons intersecting Polygons and MultiPolygons may return arbitrary results when the overlap is only an edge or a vertex.
- Circle-contains-polygon checks will return a false positive if all of the polygon's vertices lie within the circle but the points are oriented clockwise.
- The edges of an Envelope follow the latitude and longitude lines instead of the shortest path on a globe.
- Envelope intersection queries with LineStrings, MultiLineStrings, Polygons and MultiPolygons implicitly convert the Envelope into a Polygon, which changes the curvature of the edges and causes inaccurate results in a few edge cases.
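The lon-before-lat ordering above is the opposite of the lat/lon convention many callers expect; a one-line illustration (coordinates illustrative):

point := []float64{2.2945, 48.8584} // longitude first, then latitude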


@@ -396,8 +396,21 @@ func (pd *pointDistance) QueryTokens(s *S2SpatialAnalyzerPlugin) []string {
// can be used later while filtering the doc values.
func NewGeometryCollection(coordinates [][][][][]float64,
typs []string) (index.GeoJSON, []byte, error) {
shapes := make([]*geojson.GeoShape, len(coordinates))
for i := range coordinates {
shapes[i] = &geojson.GeoShape{
Coordinates: coordinates[i],
Type: typs[i],
}
}
return geojson.NewGeometryCollection(coordinates, typs)
return geojson.NewGeometryCollection(shapes)
}
func NewGeometryCollectionFromShapes(shapes []*geojson.GeoShape) (
index.GeoJSON, []byte, error) {
return geojson.NewGeometryCollection(shapes)
}
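NewGeometryCollection now simply wraps each coordinates/type pair in a geojson.GeoShape and defers to the shape-based helper. A hedged sketch of calling the new helper directly (shape contents are illustrative; note the point's four-level coordinate nesting shown elsewhere in this diff):

shapes := []*geojson.GeoShape{
	{Type: geo.PointType, Coordinates: [][][][]float64{{{{2.2945, 48.8584}}}}},
	{Type: geo.CircleType, Center: []float64{2.2945, 48.8584}, Radius: "1km"},
}
gc, encoded, err := geo.NewGeometryCollectionFromShapes(shapes)
if err != nil {
	// handle the error
}
_ = gc      // index.GeoJSON usable for filtering
_ = encoded // encoded doc-value bytes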
// NewGeoCircleShape instantiate a circle shape and


@@ -20,6 +20,7 @@ import (
"strings"
"github.com/blevesearch/bleve/v2/util"
"github.com/blevesearch/geo/geojson"
)
// ExtractGeoPoint takes an arbitrary interface{} and tries its best to
@@ -298,10 +299,15 @@ func ParseGeoShapeField(thing interface{}) (interface{}, string, error) {
return coordValue, strings.ToLower(shape), nil
}
func extractGeoShape(thing interface{}) ([][][][]float64, string, bool) {
func extractGeoShape(thing interface{}) (*geojson.GeoShape, bool) {
coordValue, typ, err := ParseGeoShapeField(thing)
if err != nil {
return nil, "", false
return nil, false
}
if typ == CircleType {
return ExtractCircle(thing)
}
return ExtractGeoShapeCoordinates(coordValue, typ)
@@ -309,13 +315,12 @@ func extractGeoShape(thing interface{}) ([][][][]float64, string, bool) {
// ExtractGeometryCollection takes an interface{} and tries its best to
// interpret all the member geojson shapes within it.
func ExtractGeometryCollection(thing interface{}) ([][][][][]float64, []string, bool) {
func ExtractGeometryCollection(thing interface{}) ([]*geojson.GeoShape, bool) {
thingVal := reflect.ValueOf(thing)
if !thingVal.IsValid() {
return nil, nil, false
return nil, false
}
var rv [][][][][]float64
var types []string
var rv []*geojson.GeoShape
var f bool
if thingVal.Kind() == reflect.Map {
@@ -331,70 +336,74 @@ func ExtractGeometryCollection(thing interface{}) ([][][][][]float64, []string,
items := reflect.ValueOf(collection)
for j := 0; j < items.Len(); j++ {
coords, shape, found := extractGeoShape(items.Index(j).Interface())
shape, found := extractGeoShape(items.Index(j).Interface())
if found {
f = found
rv = append(rv, coords)
types = append(types, shape)
rv = append(rv, shape)
}
}
}
}
}
return rv, types, f
return rv, f
}
// ExtractCircle takes an interface{} and tries its best to
// interpret the center point coordinates and the radius for a
// given circle shape.
func ExtractCircle(thing interface{}) ([]float64, string, bool) {
func ExtractCircle(thing interface{}) (*geojson.GeoShape, bool) {
thingVal := reflect.ValueOf(thing)
if !thingVal.IsValid() {
return nil, "", false
return nil, false
}
rv := &geojson.GeoShape{
Type: CircleType,
Center: make([]float64, 0, 2),
}
var rv []float64
var radiusStr string
if thingVal.Kind() == reflect.Map {
iter := thingVal.MapRange()
for iter.Next() {
if iter.Key().String() == "radius" {
radiusStr = iter.Value().Interface().(string)
rv.Radius = iter.Value().Interface().(string)
continue
}
if iter.Key().String() == "coordinates" {
lng, lat, found := ExtractGeoPoint(iter.Value().Interface())
if !found {
return nil, radiusStr, false
return nil, false
}
rv = append(rv, lng)
rv = append(rv, lat)
rv.Center = append(rv.Center, lng, lat)
}
}
}
return rv, radiusStr, true
return rv, true
}
// ExtractGeoShapeCoordinates takes an interface{} and tries its best to
// interpret the coordinates for any of the given geoshape types, like
// a point, multipoint, linestring, multilinestring, polygon or multipolygon.
func ExtractGeoShapeCoordinates(coordValue interface{},
typ string) ([][][][]float64, string, bool) {
var rv [][][][]float64
typ string) (*geojson.GeoShape, bool) {
rv := &geojson.GeoShape{
Type: typ,
}
if typ == PointType {
point := extractCoordinates(coordValue)
// ignore the contents with invalid entry.
if len(point) < 2 {
return nil, typ, false
return nil, false
}
rv = [][][][]float64{{{point}}}
return rv, typ, true
rv.Coordinates = [][][][]float64{{{point}}}
return rv, true
}
if typ == MultiPointType || typ == LineStringType ||
@@ -403,19 +412,19 @@ func ExtractGeoShapeCoordinates(coordValue interface{},
// ignore the contents with invalid entry.
if len(coords) == 0 {
return nil, typ, false
return nil, false
}
if typ == EnvelopeType && len(coords) != 2 {
return nil, typ, false
return nil, false
}
if typ == LineStringType && len(coords) < 2 {
return nil, typ, false
return nil, false
}
rv = [][][][]float64{{coords}}
return rv, typ, true
rv.Coordinates = [][][][]float64{{coords}}
return rv, true
}
if typ == PolygonType || typ == MultiLineStringType {
@@ -423,33 +432,34 @@ func ExtractGeoShapeCoordinates(coordValue interface{},
// ignore the contents with invalid entry.
if len(coords) == 0 {
return nil, typ, false
return nil, false
}
if typ == PolygonType && len(coords[0]) < 3 ||
typ == MultiLineStringType && len(coords[0]) < 2 {
return nil, typ, false
return nil, false
}
rv = [][][][]float64{coords}
return rv, typ, true
rv.Coordinates = [][][][]float64{coords}
return rv, true
}
if typ == MultiPolygonType {
rv = extract4DCoordinates(coordValue)
coords := extract4DCoordinates(coordValue)
// ignore the contents with invalid entry.
if len(rv) == 0 || len(rv[0]) == 0 {
return nil, typ, false
if len(coords) == 0 || len(coords[0]) == 0 {
return nil, false
}
if len(rv[0][0]) < 3 {
return nil, typ, false
if len(coords[0][0]) < 3 {
return nil, false
}
return rv, typ, true
rv.Coordinates = coords
return rv, true
}
return rv, typ, false
return rv, false
}
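With this change the extraction helpers return a single *geojson.GeoShape instead of parallel coordinate/type/radius values. A small sketch of the circle path, assuming a document shaped like the map this function iterates (keys "coordinates" and "radius"; values illustrative):

doc := map[string]interface{}{
	"type":        "circle",
	"coordinates": []interface{}{2.2945, 48.8584}, // lon, lat
	"radius":      "100m",
}
if shape, ok := geo.ExtractCircle(doc); ok {
	fmt.Println(shape.Type, shape.Center, shape.Radius)
	// prints: circle [2.2945 48.8584] 100m
}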


@@ -71,3 +71,7 @@ var EventKindPreMergeCheck = EventKind(9)
// EventKindIndexStart is fired when Index() is invoked which
// creates a new Document object from an interface using the index mapping.
var EventKindIndexStart = EventKind(10)
// EventKindPurgerCheck is fired before the purge code is invoked; the
// callback's return value decides whether the purge executes. Intended
// for unit-test purposes.
var EventKindPurgerCheck = EventKind(11)
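The persister (see its diff below) fires this event before calling removeOldData, so a registered callback can veto a purge cycle. A hedged sketch, assuming scorch's RegistryEventCallbacks map with the func(Event) bool callback signature and the "eventCallbackName" config key; the callback name here is hypothetical:

func init() {
	scorch.RegistryEventCallbacks["skipPurge"] = func(e scorch.Event) bool {
		if e.Kind == scorch.EventKindPurgerCheck {
			return false // veto: removeOldData is skipped this iteration
		}
		return true
	}
}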


@@ -81,6 +81,10 @@ OUTER:
// Retry instead of blocking/waiting here since a long wait
// can result in more segments introduced i.e. s.root will
// be updated.
// decrement the ref count since it's no longer needed in this
// iteration
_ = ourSnapshot.DecRef()
continue OUTER
}
@@ -488,7 +492,11 @@ func closeNewMergedSegments(segs []segment.Segment) error {
return nil
}
func (s *Scorch) mergeSegmentBasesParallel(snapshot *IndexSnapshot, flushableObjs []*flushable) (*IndexSnapshot, []uint64, error) {
// mergeAndPersistInMemorySegments takes an IndexSnapshot and a list of in-memory segments,
// which are merged and persisted to disk concurrently. These are then introduced as
// the new root snapshot in one-shot.
func (s *Scorch) mergeAndPersistInMemorySegments(snapshot *IndexSnapshot,
flushableObjs []*flushable) (*IndexSnapshot, []uint64, error) {
atomic.AddUint64(&s.stats.TotMemMergeBeg, 1)
memMergeZapStartTime := time.Now()
@@ -507,7 +515,8 @@ func (s *Scorch) mergeSegmentBasesParallel(snapshot *IndexSnapshot, flushableObj
var em sync.Mutex
var errs []error
// deploy the workers to merge and flush the batches of segments parallely
// deploy the workers to merge and flush the batches of segments concurrently
// and create a new file segment
for i := 0; i < numFlushes; i++ {
wg.Add(1)
go func(segsBatch []segment.Segment, dropsBatch []*roaring.Bitmap, id int) {
@@ -527,6 +536,11 @@ func (s *Scorch) mergeSegmentBasesParallel(snapshot *IndexSnapshot, flushableObj
atomic.AddUint64(&s.stats.TotMemMergeErr, 1)
return
}
// to prevent accidental cleanup of this newly created file, mark it
// as ineligible for removal. this will be flipped back when the bolt
// is updated - which is valid, since the snapshot updated in bolt is
// cleaned up only if it's zero ref'd (see MB-66163 for more details)
s.markIneligibleForRemoval(filename)
newMergedSegmentIDs[id] = newSegmentID
newDocIDsSet[id] = newDocIDs
newMergedSegments[id], err = s.segPlugin.Open(path)
@@ -567,6 +581,8 @@ func (s *Scorch) mergeSegmentBasesParallel(snapshot *IndexSnapshot, flushableObj
atomic.StoreUint64(&s.stats.MaxMemMergeZapTime, memMergeZapTime)
}
// update the segmentMerge task with the newly merged + flushed segments which
// are to be introduced atomically.
sm := &segmentMerge{
id: newMergedSegmentIDs,
new: newMergedSegments,
@@ -575,6 +591,10 @@ func (s *Scorch) mergeSegmentBasesParallel(snapshot *IndexSnapshot, flushableObj
newCount: newMergedCount,
}
// create a history map which maps the old in-memory segments to the specific
// persister worker (also the specific file segment it's going to be part of)
// which flushed it out. This map will be used on the introducer side to out-ref
// the in-memory segments and also track the new tombstones if present.
for i, flushable := range flushableObjs {
for j, idx := range flushable.sbIdxs {
ss := snapshot.segment[idx]


@@ -137,8 +137,8 @@ func (o *OptimizeVR) Finish() error {
}
func (s *IndexSnapshotVectorReader) VectorOptimize(ctx context.Context,
octx index.VectorOptimizableContext) (index.VectorOptimizableContext, error) {
octx index.VectorOptimizableContext,
) (index.VectorOptimizableContext, error) {
if s.snapshot.parent.segPlugin.Version() < VectorSearchSupportedSegmentVersion {
return nil, fmt.Errorf("vector search not supported for this index, "+
"index's segment version %v, supported segment version for vector search %v",
@@ -146,8 +146,9 @@ func (s *IndexSnapshotVectorReader) VectorOptimize(ctx context.Context,
}
if octx == nil {
octx = &OptimizeVR{snapshot: s.snapshot,
vrs: make(map[string][]*IndexSnapshotVectorReader),
octx = &OptimizeVR{
snapshot: s.snapshot,
vrs: make(map[string][]*IndexSnapshotVectorReader),
}
}


@@ -228,7 +228,9 @@ OUTER:
case s.introducerNotifier <- w:
}
s.removeOldData() // might as well cleanup while waiting
if ok := s.fireEvent(EventKindPurgerCheck, 0); ok {
s.removeOldData() // might as well cleanup while waiting
}
atomic.AddUint64(&s.stats.TotPersistLoopWait, 1)
@@ -296,7 +298,9 @@ func (s *Scorch) pausePersisterForMergerCatchUp(lastPersistedEpoch uint64,
// 1. Too many older snapshots awaiting the clean up.
// 2. The merger could be lagging behind on merging the disk files.
if numFilesOnDisk > uint64(po.PersisterNapUnderNumFiles) {
s.removeOldData()
if ok := s.fireEvent(EventKindPurgerCheck, 0); ok {
s.removeOldData()
}
numFilesOnDisk, _, _ = s.diskFileStats(nil)
}
@@ -481,8 +485,9 @@ func (s *Scorch) persistSnapshotMaybeMerge(snapshot *IndexSnapshot, po *persiste
return false, nil
}
// drains out (after merging in memory) the segments in the flushSet parallely
newSnapshot, newSegmentIDs, err := s.mergeSegmentBasesParallel(snapshot, flushSet)
// the newSnapshot at this point would contain the newly created file segments
// and updated with the root.
newSnapshot, newSegmentIDs, err := s.mergeAndPersistInMemorySegments(snapshot, flushSet)
if err != nil {
return false, err
}
@@ -529,7 +534,7 @@ func (s *Scorch) persistSnapshotMaybeMerge(snapshot *IndexSnapshot, po *persiste
}
}
// append to the equiv the new segment
// append to the equiv the newly merged segments
for _, segment := range newSnapshot.segment {
if _, ok := newMergedSegmentIDs[segment.id]; ok {
equiv.segment = append(equiv.segment, &SegmentSnapshot{
@@ -538,7 +543,6 @@ func (s *Scorch) persistSnapshotMaybeMerge(snapshot *IndexSnapshot, po *persiste
deleted: nil, // nil since merging handled deletions
stats: nil,
})
break
}
}
@@ -842,7 +846,7 @@ var (
)
func (s *Scorch) loadFromBolt() error {
return s.rootBolt.View(func(tx *bolt.Tx) error {
err := s.rootBolt.View(func(tx *bolt.Tx) error {
snapshots := tx.Bucket(boltSnapshotsBucket)
if snapshots == nil {
return nil
@@ -892,6 +896,16 @@ func (s *Scorch) loadFromBolt() error {
}
return nil
})
if err != nil {
return err
}
persistedSnapshots, err := s.rootBoltSnapshotMetaData()
if err != nil {
return err
}
s.checkPoints = persistedSnapshots
return nil
}
// LoadSnapshot loads the segment with the specified epoch
@@ -1113,7 +1127,10 @@ func getProtectedSnapshots(rollbackSamplingInterval time.Duration,
numSnapshotsToKeep int,
persistedSnapshots []*snapshotMetaData,
) map[uint64]time.Time {
lastPoint, protectedEpochs := getTimeSeriesSnapshots(numSnapshotsToKeep,
// keep numSnapshotsToKeep - 1 worth of time series snapshots, because we always
// must preserve the very latest snapshot in bolt as well to avoid accidental
// deletes of bolt entries and cleanups by the purger code.
lastPoint, protectedEpochs := getTimeSeriesSnapshots(numSnapshotsToKeep-1,
rollbackSamplingInterval, persistedSnapshots)
if len(protectedEpochs) < numSnapshotsToKeep {
numSnapshotsNeeded := numSnapshotsToKeep - len(protectedEpochs)
@@ -1276,7 +1293,7 @@ func (s *Scorch) removeOldZapFiles() error {
// duration. This results in all of them being purged from the boltDB
// and the next iteration of the removeOldData() would end up protecting
// latest contiguous snapshot which is a poor pattern in the rollback checkpoints.
// Hence we try to retain atleast retentionFactor portion worth of old snapshots
// Hence we try to retain at most a retentionFactor portion of the old snapshots
// in such a scenario using the following function
func getBoundaryCheckPoint(retentionFactor float64,
checkPoints []*snapshotMetaData, timeStamp time.Time,
@@ -1284,11 +1301,13 @@ func getBoundaryCheckPoint(retentionFactor float64,
if checkPoints != nil {
boundary := checkPoints[int(math.Floor(float64(len(checkPoints))*
retentionFactor))]
if timeStamp.Sub(boundary.timeStamp) < 0 {
// too few checkPoints would be left.
if timeStamp.Sub(boundary.timeStamp) > 0 {
// return the extended boundary which will dictate the older snapshots
// to be retained
return boundary.timeStamp
}
}
return timeStamp
}
@@ -1300,7 +1319,10 @@ type snapshotMetaData struct {
func (s *Scorch) rootBoltSnapshotMetaData() ([]*snapshotMetaData, error) {
var rv []*snapshotMetaData
currTime := time.Now()
expirationDuration := time.Duration(s.numSnapshotsToKeep) * s.rollbackSamplingInterval
// including the very latest snapshot there should be n snapshots, so the
// very last one would be tc - (n-1) * d
// e.g., for n = 3 the checkpoints preserved should be tc, tc - d, tc - 2d
expirationDuration := time.Duration(s.numSnapshotsToKeep-1) * s.rollbackSamplingInterval
err := s.rootBolt.View(func(tx *bolt.Tx) error {
snapshots := tx.Bucket(boltSnapshotsBucket)
@@ -1309,6 +1331,7 @@ func (s *Scorch) rootBoltSnapshotMetaData() ([]*snapshotMetaData, error) {
}
sc := snapshots.Cursor()
var found bool
// traversal order - latest -> oldest epoch
for sk, _ := sc.Last(); sk != nil; sk, _ = sc.Prev() {
_, snapshotEpoch, err := decodeUvarintAscending(sk)
if err != nil {
@@ -1358,7 +1381,6 @@ func (s *Scorch) rootBoltSnapshotMetaData() ([]*snapshotMetaData, error) {
err = nil
}
}
}
return nil
})


@@ -81,6 +81,9 @@ type IndexSnapshot struct {
m2 sync.Mutex // Protects the fields that follow.
fieldTFRs map[string][]*IndexSnapshotTermFieldReader // keyed by field, recycled TFR's
m3 sync.RWMutex // bm25 metrics specific - not to interfere with TFR creation
fieldCardinality map[string]int
}
func (i *IndexSnapshot) Segments() []*SegmentSnapshot {
@@ -202,6 +205,33 @@ func (is *IndexSnapshot) newIndexSnapshotFieldDict(field string,
return rv, nil
}
func (is *IndexSnapshot) FieldCardinality(field string) (rv int, err error) {
is.m3.RLock()
rv, ok := is.fieldCardinality[field]
is.m3.RUnlock()
if ok {
return rv, nil
}
is.m3.Lock()
defer is.m3.Unlock()
if is.fieldCardinality == nil {
is.fieldCardinality = make(map[string]int)
}
// check again to avoid redundant fieldDict creation
if rv, ok := is.fieldCardinality[field]; ok {
return rv, nil
}
fd, err := is.FieldDict(field)
if err != nil {
return rv, err
}
rv = fd.Cardinality()
is.fieldCardinality[field] = rv
return rv, nil
}
func (is *IndexSnapshot) FieldDict(field string) (index.FieldDict, error) {
return is.newIndexSnapshotFieldDict(field, func(is segment.TermDictionary) segment.DictionaryIterator {
return is.AutomatonIterator(nil, nil, nil)
@@ -301,9 +331,10 @@ func (is *IndexSnapshot) fieldDictRegexp(field string,
func (is *IndexSnapshot) getLevAutomaton(term string,
fuzziness uint8,
) (vellum.Automaton, error) {
if fuzziness == 1 {
switch fuzziness {
case 1:
return lb1.BuildDfa(term, fuzziness)
} else if fuzziness == 2 {
case 2:
return lb2.BuildDfa(term, fuzziness)
}
return nil, fmt.Errorf("fuzziness exceeds the max limit")
@@ -1001,32 +1032,22 @@ func (is *IndexSnapshot) CloseCopyReader() error {
}
func (is *IndexSnapshot) ThesaurusTermReader(ctx context.Context, thesaurusName string, term []byte) (index.ThesaurusTermReader, error) {
rv := &IndexSnapshotThesaurusTermReader{}
rv.name = thesaurusName
rv.snapshot = is
if rv.postings == nil {
rv.postings = make([]segment.SynonymsList, len(is.segment))
}
if rv.iterators == nil {
rv.iterators = make([]segment.SynonymsIterator, len(is.segment))
}
rv.segmentOffset = 0
if rv.thesauri == nil {
rv.thesauri = make([]segment.Thesaurus, len(is.segment))
for i, s := range is.segment {
if synSeg, ok := s.segment.(segment.ThesaurusSegment); ok {
thes, err := synSeg.Thesaurus(thesaurusName)
if err != nil {
return nil, err
}
rv.thesauri[i] = thes
}
}
rv := &IndexSnapshotThesaurusTermReader{
name: thesaurusName,
snapshot: is,
postings: make([]segment.SynonymsList, len(is.segment)),
iterators: make([]segment.SynonymsIterator, len(is.segment)),
thesauri: make([]segment.Thesaurus, len(is.segment)),
segmentOffset: 0,
}
for i, s := range is.segment {
if _, ok := s.segment.(segment.ThesaurusSegment); ok {
if synSeg, ok := s.segment.(segment.ThesaurusSegment); ok {
thes, err := synSeg.Thesaurus(thesaurusName)
if err != nil {
return nil, err
}
rv.thesauri[i] = thes
pl, err := rv.thesauri[i].SynonymsList(term, s.deleted, rv.postings[i])
if err != nil {
return nil, err


@@ -42,11 +42,15 @@ func (i *IndexSnapshotThesaurusTermReader) Size() int {
len(i.name) + size.SizeOfString
for _, postings := range i.postings {
sizeInBytes += postings.Size()
if postings != nil {
sizeInBytes += postings.Size()
}
}
for _, iterator := range i.iterators {
sizeInBytes += iterator.Size()
if iterator != nil {
sizeInBytes += iterator.Size()
}
}
return sizeInBytes
@@ -64,8 +68,8 @@ func (i *IndexSnapshotThesaurusTermReader) Next() (string, error) {
synTerm := next.Term()
return synTerm, nil
}
i.segmentOffset++
}
i.segmentOffset++
}
return "", nil
}


@@ -20,7 +20,7 @@ import (
"github.com/blevesearch/bleve/v2/document"
index "github.com/blevesearch/bleve_index_api"
"github.com/blevesearch/upsidedown_store_api"
store "github.com/blevesearch/upsidedown_store_api"
)
var reflectStaticSizeIndexReader int


@@ -634,7 +634,7 @@ func preSearchRequired(ctx context.Context, req *SearchRequest, m mapping.IndexM
func preSearch(ctx context.Context, req *SearchRequest, flags *preSearchFlags, indexes ...Index) (*SearchResult, error) {
// create a dummy request with a match none query
// since we only care about the preSearchData in PreSearch
var dummyQuery = req.Query
dummyQuery := req.Query
if !flags.bm25 && !flags.synonyms {
// create a dummy request with a match none query
// since we only care about the preSearchData in PreSearch
@@ -734,7 +734,8 @@ func constructBM25PreSearchData(rv map[string]map[string]interface{}, sr *Search
}
func constructPreSearchData(req *SearchRequest, flags *preSearchFlags,
preSearchResult *SearchResult, indexes []Index) (map[string]map[string]interface{}, error) {
preSearchResult *SearchResult, indexes []Index,
) (map[string]map[string]interface{}, error) {
if flags == nil || preSearchResult == nil {
return nil, fmt.Errorf("invalid input, flags: %v, preSearchResult: %v", flags, preSearchResult)
}
@@ -762,7 +763,7 @@ func preSearchDataSearch(ctx context.Context, req *SearchRequest, flags *preSear
asyncResults := make(chan *asyncSearchResult, len(indexes))
// run search on each index in separate go routine
var waitGroup sync.WaitGroup
var searchChildIndex = func(in Index, childReq *SearchRequest) {
searchChildIndex := func(in Index, childReq *SearchRequest) {
rv := asyncSearchResult{Name: in.Name()}
rv.Result, rv.Err = in.SearchInContext(ctx, childReq)
asyncResults <- &rv
@@ -827,8 +828,12 @@ func preSearchDataSearch(ctx context.Context, req *SearchRequest, flags *preSear
for indexName, indexErr := range indexErrors {
sr.Status.Errors[indexName] = indexErr
sr.Status.Total++
sr.Status.Failed++
}
// At this point, all errors have been recorded—either from the preSearch phase
// (via status.Merge) or from individual index search failures (indexErrors).
// Since partial results are not allowed, mark the entire request as failed.
sr.Status.Successful = 0
sr.Status.Failed = sr.Status.Total
} else {
prp.finalize(sr)
}
@@ -910,7 +915,6 @@ func hitsInCurrentPage(req *SearchRequest, hits []*search.DocumentMatch) []*sear
// MultiSearch executes a SearchRequest across multiple Index objects,
// then merges the results. The indexes must honor any ctx deadline.
func MultiSearch(ctx context.Context, req *SearchRequest, preSearchData map[string]map[string]interface{}, indexes ...Index) (*SearchResult, error) {
searchStart := time.Now()
asyncResults := make(chan *asyncSearchResult, len(indexes))
@@ -925,7 +929,7 @@ func MultiSearch(ctx context.Context, req *SearchRequest, preSearchData map[stri
// run search on each index in separate go routine
var waitGroup sync.WaitGroup
var searchChildIndex = func(in Index, childReq *SearchRequest) {
searchChildIndex := func(in Index, childReq *SearchRequest) {
rv := asyncSearchResult{Name: in.Name()}
rv.Result, rv.Err = in.SearchInContext(ctx, childReq)
asyncResults <- &rv


@@ -59,11 +59,15 @@ const storePath = "store"
var mappingInternalKey = []byte("_mapping")
const SearchQueryStartCallbackKey = "_search_query_start_callback_key"
const SearchQueryEndCallbackKey = "_search_query_end_callback_key"
const (
SearchQueryStartCallbackKey search.ContextKey = "_search_query_start_callback_key"
SearchQueryEndCallbackKey search.ContextKey = "_search_query_end_callback_key"
)
type SearchQueryStartCallbackFn func(size uint64) error
type SearchQueryEndCallbackFn func(size uint64) error
type (
SearchQueryStartCallbackFn func(size uint64) error
SearchQueryEndCallbackFn func(size uint64) error
)
func indexStorePath(path string) string {
return path + string(os.PathSeparator) + storePath
@@ -412,10 +416,12 @@ func (i *indexImpl) Search(req *SearchRequest) (sr *SearchResult, err error) {
return i.SearchInContext(context.Background(), req)
}
var documentMatchEmptySize int
var searchContextEmptySize int
var facetResultEmptySize int
var documentEmptySize int
var (
documentMatchEmptySize int
searchContextEmptySize int
facetResultEmptySize int
documentEmptySize int
)
func init() {
var dm search.DocumentMatch
@@ -435,8 +441,8 @@ func init() {
// needed to execute a search request.
func memNeededForSearch(req *SearchRequest,
searcher search.Searcher,
topnCollector *collector.TopNCollector) uint64 {
topnCollector *collector.TopNCollector,
) uint64 {
backingSize := req.Size + req.From + 1
if req.Size+req.From > collector.PreAllocSizeSkipCap {
backingSize = collector.PreAllocSizeSkipCap + 1
@@ -509,11 +515,12 @@ func (i *indexImpl) preSearch(ctx context.Context, req *SearchRequest, reader in
return nil, err
}
for field := range fs {
dict, err := reader.FieldDict(field)
if err != nil {
return nil, err
if bm25Reader, ok := reader.(index.BM25Reader); ok {
fieldCardinality[field], err = bm25Reader.FieldCardinality(field)
if err != nil {
return nil, err
}
}
fieldCardinality[field] = dict.Cardinality()
}
}
}
@@ -560,6 +567,16 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr
if err != nil {
return nil, err
}
// increment the search count here itself,
// since the presearch may already satisfy
// the search request
atomic.AddUint64(&i.stats.searches, 1)
// increment the search time stat here as well,
// since presearch is part of the overall search
// operation and should be included in the search
// time stat
searchDuration := time.Since(searchStart)
atomic.AddUint64(&i.stats.searchTime, uint64(searchDuration))
return preSearchResult, nil
}
@@ -584,7 +601,7 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr
var fts search.FieldTermSynonymMap
var skipSynonymCollector bool
var bm25Data *search.BM25Stats
var bm25Stats *search.BM25Stats
var ok bool
if req.PreSearchData != nil {
for k, v := range req.PreSearchData {
@@ -607,9 +624,9 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr
}
case search.BM25PreSearchDataKey:
if v != nil {
bm25Data, ok = v.(*search.BM25Stats)
bm25Stats, ok = v.(*search.BM25Stats)
if !ok {
return nil, fmt.Errorf("bm25 preSearchData must be of type map[string]interface{}")
return nil, fmt.Errorf("bm25 preSearchData must be of type *search.BM25Stats")
}
}
}
@@ -651,10 +668,10 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr
ctx = context.WithValue(ctx, search.GetScoringModelCallbackKey,
search.GetScoringModelCallbackFn(scoringModelCallback))
// set the bm25 presearch data (stats important for consistent scoring) in
// set the bm25Stats (stats important for consistent scoring) in
// the context object
if bm25Data != nil {
ctx = context.WithValue(ctx, search.BM25PreSearchDataKey, bm25Data)
if bm25Stats != nil {
ctx = context.WithValue(ctx, search.BM25StatsKey, bm25Stats)
}
// This callback and variable handles the tracking of bytes read
@@ -667,8 +684,7 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr
totalSearchCost += bytesRead
}
ctx = context.WithValue(ctx, search.SearchIOStatsCallbackKey,
search.SearchIOStatsCallbackFunc(sendBytesRead))
ctx = context.WithValue(ctx, search.SearchIOStatsCallbackKey, search.SearchIOStatsCallbackFunc(sendBytesRead))
var bufPool *s2.GeoBufferPool
getBufferPool := func() *s2.GeoBufferPool {
@@ -679,8 +695,7 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr
return bufPool
}
ctx = context.WithValue(ctx, search.GeoBufferPoolCallbackKey,
search.GeoBufferPoolCallbackFunc(getBufferPool))
ctx = context.WithValue(ctx, search.GeoBufferPoolCallbackKey, search.GeoBufferPoolCallbackFunc(getBufferPool))
searcher, err := req.Query.Searcher(ctx, indexReader, i.m, search.SearcherOptions{
Explain: req.Explain,
@@ -806,7 +821,13 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr
totalSearchCost += storedFieldsCost
search.RecordSearchCost(ctx, search.AddM, storedFieldsCost)
atomic.AddUint64(&i.stats.searches, 1)
if req.PreSearchData == nil {
// increment the search count only if this is not a second-phase search
// (e.g., for Hybrid Search), since the first-phase search already increments it
atomic.AddUint64(&i.stats.searches, 1)
}
// increment the search time stat, as the first-phase search is part of
// the overall operation; adding second-phase time later keeps it accurate
searchDuration := time.Since(searchStart)
atomic.AddUint64(&i.stats.searchTime, uint64(searchDuration))
@@ -847,7 +868,8 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr
func LoadAndHighlightFields(hit *search.DocumentMatch, req *SearchRequest,
indexName string, r index.IndexReader,
highlighter highlight.Highlighter) (error, uint64) {
highlighter highlight.Highlighter,
) (error, uint64) {
var totalStoredFieldsBytes uint64
if len(req.Fields) > 0 || highlighter != nil {
doc, err := r.Document(hit.ID)
@@ -1238,7 +1260,8 @@ func (i *indexImpl) CopyTo(d index.Directory) (err error) {
}
func (f FileSystemDirectory) GetWriter(filePath string) (io.WriteCloser,
error) {
error,
) {
dir, file := filepath.Split(filePath)
if dir != "" {
err := os.MkdirAll(filepath.Join(string(f), dir), os.ModePerm)
@@ -1248,7 +1271,7 @@ func (f FileSystemDirectory) GetWriter(filePath string) (io.WriteCloser,
}
return os.OpenFile(filepath.Join(string(f), dir, file),
os.O_RDWR|os.O_CREATE, 0600)
os.O_RDWR|os.O_CREATE, 0o600)
}
func (i *indexImpl) FireIndexEvent() {


@@ -52,7 +52,8 @@ type DocumentMapping struct {
}
func (dm *DocumentMapping) Validate(cache *registry.Cache,
parentName string, fieldAliasCtx map[string]*FieldMapping) error {
parentName string, fieldAliasCtx map[string]*FieldMapping,
) error {
var err error
if dm.DefaultAnalyzer != "" {
_, err := cache.AnalyzerNamed(dm.DefaultAnalyzer)
@@ -183,7 +184,8 @@ func (dm *DocumentMapping) fieldDescribedByPath(path string) *FieldMapping {
// document or for an explicitly mapped field; the closest most specific
// document mapping could be one that matches part of the provided path.
func (dm *DocumentMapping) documentMappingForPathElements(pathElements []string) (
*DocumentMapping, *DocumentMapping) {
*DocumentMapping, *DocumentMapping,
) {
var pathElementsCopy []string
if len(pathElements) == 0 {
pathElementsCopy = []string{""}
@@ -217,7 +219,8 @@ OUTER:
// document or for an explicitly mapped field; the closest most specific
// document mapping could be one that matches part of the provided path.
func (dm *DocumentMapping) documentMappingForPath(path string) (
*DocumentMapping, *DocumentMapping) {
*DocumentMapping, *DocumentMapping,
) {
pathElements := decodePath(path)
return dm.documentMappingForPathElements(pathElements)
}
@@ -457,7 +460,6 @@ func (dm *DocumentMapping) walkDocument(data interface{}, path []string, indexes
case reflect.Bool:
dm.processProperty(val.Bool(), path, indexes, context)
}
}
func (dm *DocumentMapping) processProperty(property interface{}, path []string, indexes []uint64, context *walkContext) {
@@ -483,13 +485,14 @@ func (dm *DocumentMapping) processProperty(property interface{}, path []string,
if subDocMapping != nil {
// index by explicit mapping
for _, fieldMapping := range subDocMapping.Fields {
if fieldMapping.Type == "geoshape" {
switch fieldMapping.Type {
case "geoshape":
fieldMapping.processGeoShape(property, pathString, path, indexes, context)
} else if fieldMapping.Type == "geopoint" {
case "geopoint":
fieldMapping.processGeoPoint(property, pathString, path, indexes, context)
} else if fieldMapping.Type == "vector_base64" {
case "vector_base64":
fieldMapping.processVectorBase64(property, pathString, path, indexes, context)
} else {
default:
fieldMapping.processString(propertyValueString, pathString, path, indexes, context)
}
}
@@ -568,9 +571,10 @@ func (dm *DocumentMapping) processProperty(property interface{}, path []string,
default:
if subDocMapping != nil {
for _, fieldMapping := range subDocMapping.Fields {
if fieldMapping.Type == "geopoint" {
switch fieldMapping.Type {
case "geopoint":
fieldMapping.processGeoPoint(property, pathString, path, indexes, context)
} else if fieldMapping.Type == "geoshape" {
case "geoshape":
fieldMapping.processGeoShape(property, pathString, path, indexes, context)
}
}


@@ -26,6 +26,7 @@ import (
"github.com/blevesearch/bleve/v2/geo"
"github.com/blevesearch/bleve/v2/util"
index "github.com/blevesearch/bleve_index_api"
"github.com/blevesearch/geo/geojson"
)
// control the default behavior for dynamic fields (those not explicitly mapped)
@@ -231,7 +232,9 @@ func (fm *FieldMapping) Options() index.FieldIndexingOptions {
func (fm *FieldMapping) processString(propertyValueString string, pathString string, path []string, indexes []uint64, context *walkContext) {
fieldName := getFieldName(pathString, path, fm)
options := fm.Options()
if fm.Type == "text" {
switch fm.Type {
case "text":
analyzer := fm.analyzerForField(path, context)
field := document.NewTextFieldCustom(fieldName, indexes, []byte(propertyValueString), options, analyzer)
context.doc.AddField(field)
@@ -239,7 +242,7 @@ func (fm *FieldMapping) processString(propertyValueString string, pathString str
if !fm.IncludeInAll {
context.excludedFromAll = append(context.excludedFromAll, fieldName)
}
} else if fm.Type == "datetime" {
case "datetime":
dateTimeFormat := context.im.DefaultDateTimeParser
if fm.DateFormat != "" {
dateTimeFormat = fm.DateFormat
@@ -251,7 +254,7 @@ func (fm *FieldMapping) processString(propertyValueString string, pathString str
fm.processTime(parsedDateTime, layout, pathString, path, indexes, context)
}
}
} else if fm.Type == "IP" {
case "IP":
ip := net.ParseIP(propertyValueString)
if ip != nil {
fm.processIP(ip, pathString, path, indexes, context)
@@ -328,32 +331,20 @@ func (fm *FieldMapping) processIP(ip net.IP, pathString string, path []string, i
}
func (fm *FieldMapping) processGeoShape(propertyMightBeGeoShape interface{},
pathString string, path []string, indexes []uint64, context *walkContext) {
pathString string, path []string, indexes []uint64, context *walkContext,
) {
coordValue, shape, err := geo.ParseGeoShapeField(propertyMightBeGeoShape)
if err != nil {
return
}
if shape == geo.CircleType {
center, radius, found := geo.ExtractCircle(propertyMightBeGeoShape)
if shape == geo.GeometryCollectionType {
geoShapes, found := geo.ExtractGeometryCollection(propertyMightBeGeoShape)
if found {
fieldName := getFieldName(pathString, path, fm)
options := fm.Options()
field := document.NewGeoCircleFieldWithIndexingOptions(fieldName,
indexes, center, radius, options)
context.doc.AddField(field)
if !fm.IncludeInAll {
context.excludedFromAll = append(context.excludedFromAll, fieldName)
}
}
} else if shape == geo.GeometryCollectionType {
coordinates, shapes, found := geo.ExtractGeometryCollection(propertyMightBeGeoShape)
if found {
fieldName := getFieldName(pathString, path, fm)
options := fm.Options()
field := document.NewGeometryCollectionFieldWithIndexingOptions(fieldName,
indexes, coordinates, shapes, options)
field := document.NewGeometryCollectionFieldFromShapesWithIndexingOptions(fieldName,
indexes, geoShapes, options)
context.doc.AddField(field)
if !fm.IncludeInAll {
@@ -361,12 +352,20 @@ func (fm *FieldMapping) processGeoShape(propertyMightBeGeoShape interface{},
}
}
} else {
coordinates, shape, found := geo.ExtractGeoShapeCoordinates(coordValue, shape)
var geoShape *geojson.GeoShape
var found bool
if shape == geo.CircleType {
geoShape, found = geo.ExtractCircle(propertyMightBeGeoShape)
} else {
geoShape, found = geo.ExtractGeoShapeCoordinates(coordValue, shape)
}
if found {
fieldName := getFieldName(pathString, path, fm)
options := fm.Options()
field := document.NewGeoShapeFieldWithIndexingOptions(fieldName,
indexes, coordinates, shape, options)
field := document.NewGeoShapeFieldFromShapeWithIndexingOptions(fieldName,
indexes, geoShape, options)
context.doc.AddField(field)
if !fm.IncludeInAll {
@@ -401,7 +400,6 @@ func getFieldName(pathString string, path []string, fieldMapping *FieldMapping)
// UnmarshalJSON offers custom unmarshaling with optional strict validation
func (fm *FieldMapping) UnmarshalJSON(data []byte) error {
var tmp map[string]json.RawMessage
err := util.UnmarshalJSON(data, &tmp)
if err != nil {


@@ -35,8 +35,7 @@ type KNNQuery struct {
BoostVal *Boost `json:"boost,omitempty"`
// see KNNRequest.Params for description
Params json.RawMessage `json:"params"`
FilterQuery Query `json:"filter,omitempty"`
Params json.RawMessage `json:"params"`
// elegibleSelector is used to filter out documents that are
// eligible for the KNN search from a pre-filter query.
elegibleSelector index.EligibleDocumentSelector


@@ -86,17 +86,18 @@ func bm25ScoreMetrics(ctx context.Context, field string,
var fieldCardinality int
var err error
bm25Stats, ok := ctx.Value(search.BM25PreSearchDataKey).(*search.BM25Stats)
bm25Stats, ok := ctx.Value(search.BM25StatsKey).(*search.BM25Stats)
if !ok {
count, err = indexReader.DocCount()
if err != nil {
return 0, 0, err
}
dict, err := indexReader.FieldDict(field)
if err != nil {
return 0, 0, err
if bm25Reader, ok := indexReader.(index.BM25Reader); ok {
fieldCardinality, err = bm25Reader.FieldCardinality(field)
if err != nil {
return 0, 0, err
}
}
fieldCardinality = dict.Cardinality()
} else {
count = uint64(bm25Stats.DocCount)
fieldCardinality, ok = bm25Stats.FieldCardinality[field]
@@ -121,9 +122,9 @@ func newTermSearcherFromReader(ctx context.Context, indexReader index.IndexReade
// as a fallback case we track certain stats for tf-idf scoring
if ctx != nil {
if similaritModelCallback, ok := ctx.Value(search.
if similarityModelCallback, ok := ctx.Value(search.
GetScoringModelCallbackKey).(search.GetScoringModelCallbackFn); ok {
similarityModel = similaritModelCallback()
similarityModel = similarityModelCallback()
}
}
switch similarityModel {


@@ -154,15 +154,18 @@ func ParseSearchSortString(input string) SearchSort {
} else if strings.HasPrefix(input, "+") {
input = input[1:]
}
if input == "_id" {
switch input {
case "_id":
return &SortDocID{
Desc: descending,
}
} else if input == "_score" {
case "_score":
return &SortScore{
Desc: descending,
}
}
return &SortField{
Field: input,
Desc: descending,
@@ -426,7 +429,9 @@ func (s *SortField) filterTermsByMode(terms [][]byte) string {
// prefix coded numbers with shift of 0
func (s *SortField) filterTermsByType(terms [][]byte) [][]byte {
stype := s.Type
if stype == SortFieldAuto {
switch stype {
case SortFieldAuto:
allTermsPrefixCoded := true
termsWithShiftZero := s.tmp[:0]
for _, term := range terms {
@@ -442,7 +447,7 @@ func (s *SortField) filterTermsByType(terms [][]byte) [][]byte {
terms = termsWithShiftZero
s.tmp = termsWithShiftZero[:0]
}
} else if stype == SortFieldAsNumber || stype == SortFieldAsDate {
case SortFieldAsNumber, SortFieldAsDate:
termsWithShiftZero := s.tmp[:0]
for _, term := range terms {
valid, shift := numeric.ValidPrefixCodedTermBytes(term)
@@ -453,6 +458,7 @@ func (s *SortField) filterTermsByType(terms [][]byte) [][]byte {
terms = termsWithShiftZero
s.tmp = termsWithShiftZero[:0]
}
return terms
}


@@ -74,8 +74,6 @@ func MergeFieldTermLocations(dest []FieldTermLocation, matches []*DocumentMatch)
return dest
}
const SearchIOStatsCallbackKey = "_search_io_stats_callback_key"
type SearchIOStatsCallbackFunc func(uint64)
// Implementation of SearchIncrementalCostCallbackFn should handle the following messages
@@ -87,8 +85,11 @@ type SearchIOStatsCallbackFunc func(uint64)
// handled safely by the implementation.
type SearchIncrementalCostCallbackFn func(SearchIncrementalCostCallbackMsg,
SearchQueryType, uint64)
type SearchIncrementalCostCallbackMsg uint
type SearchQueryType uint
type (
SearchIncrementalCostCallbackMsg uint
SearchQueryType uint
)
const (
Term = SearchQueryType(1 << iota)
@@ -103,13 +104,59 @@ const (
DoneM
)
const SearchIncrementalCostKey = "_search_incremental_cost_key"
const QueryTypeKey = "_query_type_key"
const FuzzyMatchPhraseKey = "_fuzzy_match_phrase_key"
const IncludeScoreBreakdownKey = "_include_score_breakdown_key"
// ContextKey is used to identify the context key in the context.Context
type ContextKey string
func (c ContextKey) String() string {
return string(c)
}
const (
SearchIncrementalCostKey ContextKey = "_search_incremental_cost_key"
QueryTypeKey ContextKey = "_query_type_key"
FuzzyMatchPhraseKey ContextKey = "_fuzzy_match_phrase_key"
IncludeScoreBreakdownKey ContextKey = "_include_score_breakdown_key"
// PreSearchKey indicates whether to perform a preliminary search to gather necessary
// information which would be used in the actual search down the line.
PreSearchKey ContextKey = "_presearch_key"
// GetScoringModelCallbackKey is used to help the underlying searcher identify
// which scoring mechanism to use based on index mapping.
GetScoringModelCallbackKey ContextKey = "_get_scoring_model"
// SearchIOStatsCallbackKey is used to help the underlying searcher report
// the number of bytes read (IO cost) while serving a search.
SearchIOStatsCallbackKey ContextKey = "_search_io_stats_callback_key"
// GeoBufferPoolCallbackKey is used to help the underlying searcher obtain
// the buffer pool used while processing geo shapes.
GeoBufferPoolCallbackKey ContextKey = "_geo_buffer_pool_callback_key"
// SearchTypeKey is used to identify type of the search being performed.
//
// for consistent scoring in cases an index is partitioned/sharded (using an
// index alias), GlobalScoring helps in aggregating the necessary stats across
// all the child bleve indexes (shards/partitions) first before the actual search
// is performed, such that the scoring involved using these stats would be at a
// global level.
SearchTypeKey ContextKey = "_search_type_key"
// The following keys are used to invoke the callbacks at the start and end stages
// of optimizing the disjunction/conjunction searcher creation.
SearcherStartCallbackKey ContextKey = "_searcher_start_callback_key"
SearcherEndCallbackKey ContextKey = "_searcher_end_callback_key"
// FieldTermSynonymMapKey is used to store and transport the synonym definitions data
// to the actual search phase which would use the synonyms to perform the search.
FieldTermSynonymMapKey ContextKey = "_field_term_synonym_map_key"
// BM25StatsKey is used to store and transport the BM25 Data
// to the actual search phase which would use it to perform the search.
BM25StatsKey ContextKey = "_bm25_stats_key"
)
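Typing the keys as ContextKey instead of bare strings keeps context.WithValue collision-safe across packages and quiets linters that flag untyped string keys. Callers attach values with the typed constants, mirroring the index_impl change above; a short sketch with a placeholder callback:

var bytesRead uint64
ctx := context.WithValue(context.Background(),
	search.SearchIOStatsCallbackKey,
	search.SearchIOStatsCallbackFunc(func(n uint64) { bytesRead += n }))
_ = ctx // pass down to the searchers, which report IO cost via the callback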
func RecordSearchCost(ctx context.Context,
msg SearchIncrementalCostCallbackMsg, bytes uint64) {
msg SearchIncrementalCostCallbackMsg, bytes uint64,
) {
if ctx != nil {
queryType, ok := ctx.Value(QueryTypeKey).(SearchQueryType)
if !ok {
@@ -125,52 +172,30 @@ func RecordSearchCost(ctx context.Context,
}
}
const GeoBufferPoolCallbackKey = "_geo_buffer_pool_callback_key"
// Assigning the size of the largest buffer in the pool to 24KB and
// the smallest buffer to 24 bytes. The pools are used to read a
// sequence of vertices which are always 24 bytes each.
const MaxGeoBufPoolSize = 24 * 1024
const MinGeoBufPoolSize = 24
const (
MaxGeoBufPoolSize = 24 * 1024
MinGeoBufPoolSize = 24
)
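The tiered-pool idea described above can be sketched as follows; this is a hand-rolled illustration of the technique, not the s2.GeoBufferPool implementation:

package main

import "fmt"

// bufferPool hands out byte slices in power-of-two tiers between a minimum
// and maximum size, so repeated reads of 24-byte vertex runs can reuse
// allocations instead of churning the garbage collector.
type bufferPool struct {
	min, max int
	tiers    map[int][][]byte
}

func newBufferPool(min, max int) *bufferPool {
	return &bufferPool{min: min, max: max, tiers: make(map[int][][]byte)}
}

// get returns a buffer of at least n bytes, reusing a pooled one when possible.
func (p *bufferPool) get(n int) []byte {
	size := p.min
	for size < n && size < p.max {
		size *= 2
	}
	if size < n {
		size = n // requests beyond the largest tier fall back to exact allocation
	}
	if bufs := p.tiers[size]; len(bufs) > 0 {
		b := bufs[len(bufs)-1]
		p.tiers[size] = bufs[:len(bufs)-1]
		return b[:n]
	}
	return make([]byte, n, size)
}

// put returns a buffer to its tier for reuse.
func (p *bufferPool) put(b []byte) {
	p.tiers[cap(b)] = append(p.tiers[cap(b)], b[:0])
}

func main() {
	pool := newBufferPool(24, 24*1024)
	buf := pool.get(24 * 10) // room for ten 24-byte vertices
	fmt.Println(len(buf), cap(buf))
	pool.put(buf)
}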
type GeoBufferPoolCallbackFunc func() *s2.GeoBufferPool
// PreSearchKey indicates whether to perform a preliminary search to gather necessary
// information which would be used in the actual search down the line.
const PreSearchKey = "_presearch_key"
// The *PreSearchDataKey constants are used to store the data gathered during
// the presearch phase, which is then used in the actual search phase.
const KnnPreSearchDataKey = "_knn_pre_search_data_key"
const SynonymPreSearchDataKey = "_synonym_pre_search_data_key"
const BM25PreSearchDataKey = "_bm25_pre_search_data_key"
// SearchTypeKey is used to identify the type of search being performed.
//
// For consistent scoring when an index is partitioned/sharded (using an
// index alias), GlobalScoring aggregates the necessary stats across all
// the child bleve indexes (shards/partitions) before the actual search is
// performed, so that scoring based on these stats happens at a global
// level.
const SearchTypeKey = "_search_type_key"
// The following keys are used to invoke the callbacks at the start and end stages
// of optimizing the disjunction/conjunction searcher creation.
const SearcherStartCallbackKey = "_searcher_start_callback_key"
const SearcherEndCallbackKey = "_searcher_end_callback_key"
// FieldTermSynonymMapKey is used to store and transport the synonym definitions data
// to the actual search phase which would use the synonyms to perform the search.
const FieldTermSynonymMapKey = "_field_term_synonym_map_key"
const (
KnnPreSearchDataKey = "_knn_pre_search_data_key"
SynonymPreSearchDataKey = "_synonym_pre_search_data_key"
BM25PreSearchDataKey = "_bm25_pre_search_data_key"
)
const GlobalScoring = "_global_scoring"
// GetScoringModelCallbackKey is used to help the underlying searcher identify
// which scoring mechanism to use based on index mapping.
const GetScoringModelCallbackKey = "_get_scoring_model"
type SearcherStartCallbackFn func(size uint64) error
type SearcherEndCallbackFn func(size uint64) error
type (
SearcherStartCallbackFn func(size uint64) error
SearcherEndCallbackFn func(size uint64) error
)
type GetScoringModelCallbackFn func() string
@@ -199,8 +224,10 @@ func (f FieldTermSynonymMap) MergeWith(fts FieldTermSynonymMap) {
// the default values are as per Elasticsearch's implementation
// - https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-similarity.html#bm25
// - https://www.elastic.co/blog/practical-bm25-part-3-considerations-for-picking-b-and-k1-in-elasticsearch
var BM25_k1 float64 = 1.2
var BM25_b float64 = 0.75
var (
BM25_k1 float64 = 1.2
BM25_b float64 = 0.75
)
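For orientation, these two knobs plug into the standard BM25 term weight; a small self-contained sketch of that formula (the tf/idf inputs are illustrative, and this is not bleve's scorer):

package main

import (
	"fmt"
	"math"
)

// bm25TermScore computes the classic BM25 weight of one term in one document:
// idf * tf*(k1+1) / (tf + k1*(1 - b + b*docLen/avgDocLen)).
// k1 dampens term-frequency saturation; b controls document-length
// normalization — exactly the two package-level defaults above.
func bm25TermScore(tf, docLen, avgDocLen, docCount, docsWithTerm float64) float64 {
	idf := math.Log(1 + (docCount-docsWithTerm+0.5)/(docsWithTerm+0.5))
	k1, b := 1.2, 0.75
	norm := tf + k1*(1-b+b*docLen/avgDocLen)
	return idf * tf * (k1 + 1) / norm
}

func main() {
	// A term occurring 3 times in a short document, in a 1000-doc corpus
	// where 50 documents contain the term.
	fmt.Printf("%.4f\n", bm25TermScore(3, 80, 120, 1000, 50))
}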
type BM25Stats struct {
DocCount float64 `json:"doc_count"`


@@ -203,6 +203,9 @@ func (r *SearchRequest) UnmarshalJSON(input []byte) error {
r.KNN[i].FilterQuery = nil
} else {
r.KNN[i].FilterQuery, err = query.ParseQuery(knnReq.FilterQuery)
if err != nil {
return err
}
}
}
r.KNNOperator = temp.KNNOperator
@@ -306,6 +309,17 @@ func validateKNN(req *SearchRequest) error {
if q.K > BleveMaxK {
return fmt.Errorf("k must be less than %d", BleveMaxK)
}
// since the DefaultField is not applicable for knn,
// the field must be specified.
if q.Field == "" {
return fmt.Errorf("knn query field must be non-empty")
}
if vfq, ok := q.FilterQuery.(query.ValidatableQuery); ok {
err := vfq.Validate()
if err != nil {
return fmt.Errorf("knn filter query is invalid: %v", err)
}
}
}
switch req.KNNOperator {
case knnOperatorAnd, knnOperatorOr, "":


@@ -96,6 +96,11 @@ type IndexReader interface {
Close() error
}
type BM25Reader interface {
IndexReader
FieldCardinality(field string) (int, error)
}
// CopyReader is an extended index reader for backup or online copy operations, replacing the regular index reader.
type CopyReader interface {
IndexReader


@@ -19,18 +19,32 @@ import (
"strings"
index "github.com/blevesearch/bleve_index_api"
"github.com/blevesearch/geo/s1"
"github.com/blevesearch/geo/s2"
"github.com/golang/geo/s1"
)
// ------------------------------------------------------------------------
// creates a shape index with all of the given polygons
// and queries it with vertex model closed which considers
// polygon edges and vertices to be part of the polygon.
func polygonsContainsPoint(s2pgns []*s2.Polygon,
point *s2.Point) bool {
idx := s2.NewShapeIndex()
for _, s2pgn := range s2pgns {
idx.Add(s2pgn)
}
return s2.NewContainsPointQuery(idx, s2.VertexModelClosed).Contains(*point)
}
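The same ShapeIndex-plus-VertexModelClosed technique can be exercised standalone; a sketch assuming the golang/geo-compatible s2 API exposed by this fork:

package main

import (
	"fmt"

	"github.com/blevesearch/geo/s2"
)

func main() {
	// A small square polygon; with VertexModelClosed its vertices and
	// edges count as inside, which plain Polygon.ContainsPoint does not do.
	pts := []s2.Point{
		s2.PointFromLatLng(s2.LatLngFromDegrees(0, 0)),
		s2.PointFromLatLng(s2.LatLngFromDegrees(0, 1)),
		s2.PointFromLatLng(s2.LatLngFromDegrees(1, 1)),
		s2.PointFromLatLng(s2.LatLngFromDegrees(1, 0)),
	}
	pgn := s2.PolygonFromLoops([]*s2.Loop{s2.LoopFromPoints(pts)})

	idx := s2.NewShapeIndex()
	idx.Add(pgn)
	q := s2.NewContainsPointQuery(idx, s2.VertexModelClosed)

	corner := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 0))
	fmt.Println(q.Contains(corner)) // true: vertices are "in" under the closed model
}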
// project the point to all of the linestrings and check if
// any of the projections are equal to the point.
func polylineIntersectsPoint(pls []*s2.Polyline,
point *s2.Point) bool {
s2cell := s2.CellFromPoint(*point)
for _, pl := range pls {
if pl.IntersectsCell(s2cell) {
closest, _ := pl.Project(*point)
if closest.ApproxEqual(*point) {
return true
}
}
@@ -38,33 +52,36 @@ func polylineIntersectsPoint(pls []*s2.Polyline,
return false
}
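The projection trick above can likewise be tried directly; a sketch against the same s2 API:

package main

import (
	"fmt"

	"github.com/blevesearch/geo/s2"
)

func main() {
	// A two-segment polyline along the equator.
	pl := s2.PolylineFromLatLngs([]s2.LatLng{
		s2.LatLngFromDegrees(0, 0),
		s2.LatLngFromDegrees(0, 1),
		s2.LatLngFromDegrees(0, 2),
	})

	// Project a point onto the polyline; if the projection lands (almost)
	// exactly on the query point, the point lies on the line.
	on := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 0.5))
	closest, _ := pl.Project(on)
	fmt.Println(closest.ApproxEqual(on)) // true: the point is on the polyline
}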
// check if any of the polyline vertices lie inside or
// on the boundary of any of the polygons. Then check if
// any of the polylines intersect with any of the edges of
// the polygons
func polylineIntersectsPolygons(pls []*s2.Polyline,
s2pgns []*s2.Polygon) bool {
// Early exit if the polygon contains any of the line's vertices.
idx := s2.NewShapeIndex()
for _, pgn := range s2pgns {
idx.Add(pgn)
}
containsQuery := s2.NewContainsPointQuery(idx, s2.VertexModelClosed)
for _, pl := range pls {
for i := 0; i < pl.NumEdges(); i++ {
edge := pl.Edge(i)
for _, s2pgn := range s2pgns {
if s2pgn.IntersectsCell(s2.CellFromPoint(edge.V0)) ||
s2pgn.IntersectsCell(s2.CellFromPoint(edge.V1)) {
return true
}
for _, point := range *pl {
if containsQuery.Contains(point) {
return true
}
}
}
for _, pl := range pls {
for _, s2pgn := range s2pgns {
for i := 0; i < pl.NumEdges(); i++ {
for i := 0; i < s2pgn.NumEdges(); i++ {
edgeB := s2pgn.Edge(i)
latLng1 := s2.LatLngFromPoint(edgeB.V0)
latLng2 := s2.LatLngFromPoint(edgeB.V1)
pl2 := s2.PolylineFromLatLngs([]s2.LatLng{latLng1, latLng2})
if pl.Intersects(pl2) {
return true
}
}
}
@@ -73,6 +90,20 @@ func polylineIntersectsPolygons(pls []*s2.Polyline,
return false
}
// check if the point is contained within the polygon.
// Polygon.ContainsPoint considers vertices to be outside,
// so we create a shape index and query it instead.
// s2.VertexModelClosed does not consider points on the edges,
// so behaviour there is arbitrary.
func polygonIntersectsPoint(s2pgns []*s2.Polygon,
point *s2.Point) bool {
idx := s2.NewShapeIndex()
for _, pgn := range s2pgns {
idx.Add(pgn)
}
return s2.NewContainsPointQuery(idx, s2.VertexModelClosed).Contains(*point)
}
func geometryCollectionIntersectsShape(gc *GeometryCollection,
shapeIn index.GeoJSON) bool {
for _, shape := range gc.Members() {
@@ -140,31 +171,8 @@ func rectangleIntersectsWithPolygons(s2rect *s2.Rect,
func rectangleIntersectsWithLineStrings(s2rect *s2.Rect,
polylines []*s2.Polyline) bool {
// Early exit path if the envelope contains any of the linestring's vertices.
for _, pl := range polylines {
for i := 0; i < pl.NumEdges(); i++ {
edge := pl.Edge(i)
if s2rect.IntersectsCell(s2.CellFromPoint(edge.V0)) ||
s2rect.IntersectsCell(s2.CellFromPoint(edge.V1)) {
return true
}
}
}
for _, pl := range polylines {
for i := 0; i < pl.NumEdges(); i++ {
for j := 0; j < 4; j++ {
pl2 := s2.PolylineFromLatLngs([]s2.LatLng{s2rect.Vertex(j),
s2rect.Vertex((j + 1) % 4)})
if pl.Intersects(pl2) {
return true
}
}
}
}
return false
s2pgnFromRect := s2PolygonFromS2Rectangle(s2rect)
return polylineIntersectsPolygons(polylines, []*s2.Polygon{s2pgnFromRect})
}
func s2PolygonFromCoordinates(coordinates [][][]float64) *s2.Polygon {
@@ -245,20 +253,6 @@ func s2Cap(vertices []float64, radiusInMeter float64) *s2.Cap {
return &cap
}
func max(a, b float64) float64 {
if a >= b {
return a
}
return b
}
func min(a, b float64) float64 {
if a >= b {
return b
}
return a
}
func StripCoveringTerms(terms []string) []string {
rv := make([]string, 0, len(terms))
for _, term := range terms {


@@ -125,16 +125,14 @@ func (p *Point) Marshal() ([]byte, error) {
func (p *Point) Intersects(other index.GeoJSON) (bool, error) {
p.init()
s2cell := s2.CellFromPoint(*p.s2point)
return checkCellIntersectsShape(&s2cell, p, other)
return checkPointIntersectsShape(p.s2point, p, other)
}
func (p *Point) Contains(other index.GeoJSON) (bool, error) {
p.init()
s2cell := s2.CellFromPoint(*p.s2point)
return checkCellContainsShape([]*s2.Cell{&s2cell}, other)
return checkPointContainsShape([]*s2.Point{p.s2point}, other)
}
func (p *Point) Coordinates() []float64 {
@@ -205,8 +203,7 @@ func (p *MultiPoint) Intersects(other index.GeoJSON) (bool, error) {
p.init()
for _, s2point := range p.s2points {
cell := s2.CellFromPoint(*s2point)
rv, err := checkCellIntersectsShape(&cell, p, other)
rv, err := checkPointIntersectsShape(s2point, p, other)
if rv && err == nil {
return rv, nil
}
@@ -217,14 +214,13 @@ func (p *MultiPoint) Intersects(other index.GeoJSON) (bool, error) {
func (p *MultiPoint) Contains(other index.GeoJSON) (bool, error) {
p.init()
s2cells := make([]*s2.Cell, 0, len(p.s2points))
for _, s2point := range p.s2points {
cell := s2.CellFromPoint(*s2point)
s2cells = append(s2cells, &cell)
rv, err := checkPointContainsShape(p.s2points, other)
if rv && err == nil {
return rv, nil
}
return checkCellContainsShape(s2cells, other)
return false, nil
}
func (p *MultiPoint) Coordinates() [][]float64 {
@@ -302,6 +298,7 @@ func (ls *LineString) Intersects(other index.GeoJSON) (bool, error) {
}
func (ls *LineString) Contains(other index.GeoJSON) (bool, error) {
ls.init()
return checkLineStringsContainsShape([]*s2.Polyline{ls.pl}, other)
}
@@ -370,6 +367,7 @@ func (p *MultiLineString) Intersects(other index.GeoJSON) (bool, error) {
}
func (p *MultiLineString) Contains(other index.GeoJSON) (bool, error) {
p.init()
return checkLineStringsContainsShape(p.pls, other)
}
@@ -737,6 +735,22 @@ func (gc *GeometryCollection) UnmarshalJSON(data []byte) error {
}
pgn.init()
gc.Shapes = append(gc.Shapes, &pgn)
case CircleType:
var cir Circle
err := jsoniter.Unmarshal(shape, &cir)
if err != nil {
return err
}
cir.init()
gc.Shapes = append(gc.Shapes, &cir)
case EnvelopeType:
var env Envelope
err := jsoniter.Unmarshal(shape, &env)
if err != nil {
return err
}
env.init()
gc.Shapes = append(gc.Shapes, &env)
}
}
@@ -760,11 +774,15 @@ func NewGeoCircle(points []float64,
if err != nil {
return nil
}
return &Circle{Typ: CircleType,
rv := &Circle{
Typ: CircleType,
Vertices: points,
Radius: radius,
radiusInMeters: r}
radiusInMeters: r,
}
rv.init()
return rv
}
func (c *Circle) Type() string {
@@ -838,7 +856,10 @@ type Envelope struct {
}
func NewGeoEnvelope(points [][]float64) index.GeoJSON {
return &Envelope{Vertices: points, Typ: EnvelopeType}
rv := &Envelope{Vertices: points, Typ: EnvelopeType}
rv.init()
return rv
}
func (e *Envelope) Type() string {
@@ -884,15 +905,13 @@ func (e *Envelope) Contains(other index.GeoJSON) (bool, error) {
//--------------------------------------------------------
// checkCellIntersectsShape checks for intersection between
// the s2cell and the shape in the document.
func checkCellIntersectsShape(cell *s2.Cell, shapeIn,
other index.GeoJSON) (bool, error) {
// checkPointIntersectsShape checks for intersection between
// the point and the shape in the document.
func checkPointIntersectsShape(point *s2.Point, shapeIn, other index.GeoJSON) (bool, error) {
// check if the other shape is a point.
if p2, ok := other.(*Point); ok {
s2cell := s2.CellFromPoint(*p2.s2point)
if cell.IntersectsCell(s2cell) {
// check if the points are equal
if point.ApproxEqual(*p2.s2point) {
return true, nil
}
@@ -901,11 +920,9 @@ func checkCellIntersectsShape(cell *s2.Cell, shapeIn,
// check if the other shape is a multipoint.
if p2, ok := other.(*MultiPoint); ok {
// check the intersection for any point in the array.
for _, point := range p2.s2points {
s2cell := s2.CellFromPoint(*point)
if cell.IntersectsCell(s2cell) {
// check if any of the points are equal
for _, p := range p2.s2points {
if point.ApproxEqual(*p) {
return true, nil
}
}
@@ -915,8 +932,8 @@ func checkCellIntersectsShape(cell *s2.Cell, shapeIn,
// check if the other shape is a polygon.
if p2, ok := other.(*Polygon); ok {
if p2.s2pgn.IntersectsCell(*cell) {
// check if the point is contained within the polygon.
if polygonsContainsPoint([]*s2.Polygon{p2.s2pgn}, point) {
return true, nil
}
@@ -925,12 +942,9 @@ func checkCellIntersectsShape(cell *s2.Cell, shapeIn,
// check if the other shape is a multipolygon.
if p2, ok := other.(*MultiPolygon); ok {
// check the intersection for any polygon in the collection.
for _, s2pgn := range p2.s2pgns {
if s2pgn.IntersectsCell(*cell) {
return true, nil
}
// check if the point is contained within any of the polygons
if polygonsContainsPoint(p2.s2pgns, point) {
return true, nil
}
return false, nil
@@ -938,13 +952,10 @@ func checkCellIntersectsShape(cell *s2.Cell, shapeIn,
// check if the other shape is a linestring.
if p2, ok := other.(*LineString); ok {
for i := 0; i < p2.pl.NumEdges(); i++ {
edge := p2.pl.Edge(i)
start := s2.CellFromPoint(edge.V0)
end := s2.CellFromPoint(edge.V1)
if cell.IntersectsCell(start) || cell.IntersectsCell(end) {
return true, nil
}
// project the point to the linestring and check if
// the projection is equal to the point.
if polylineIntersectsPoint([]*s2.Polyline{p2.pl}, point) {
return true, nil
}
return false, nil
@@ -953,15 +964,8 @@ func checkCellIntersectsShape(cell *s2.Cell, shapeIn,
// check if the other shape is a multilinestring.
if p2, ok := other.(*MultiLineString); ok {
// check the intersection for any linestring in the array.
for _, pl := range p2.pls {
for i := 0; i < pl.NumEdges(); i++ {
edge := pl.Edge(i)
start := s2.CellFromPoint(edge.V0)
end := s2.CellFromPoint(edge.V1)
if cell.IntersectsCell(start) || cell.IntersectsCell(end) {
return true, nil
}
}
if polylineIntersectsPoint(p2.pls, point) {
return true, nil
}
return false, nil
@@ -979,8 +983,10 @@ func checkCellIntersectsShape(cell *s2.Cell, shapeIn,
// check if the other shape is a circle.
if c, ok := other.(*Circle); ok {
if c.s2cap.IntersectsCell(*cell) {
// check if the point is contained within the circle
// by calculating the distance between the point and the
// center of the circle.
if c.s2cap.ContainsPoint(*point) {
return true, nil
}
@@ -989,8 +995,9 @@ func checkCellIntersectsShape(cell *s2.Cell, shapeIn,
// check if the other shape is an envelope.
if e, ok := other.(*Envelope); ok {
if e.r.IntersectsCell(*cell) {
// check if the point is contained by the envelope
// by checking if the point is within its bounds
if e.r.ContainsPoint(*point) {
return true, nil
}
@@ -1001,15 +1008,14 @@ func checkCellIntersectsShape(cell *s2.Cell, shapeIn,
" found in document", other.Type())
}
// checkCellContainsShape checks whether the given shape
// in the document is contained within the s2cell.
func checkCellContainsShape(cells []*s2.Cell,
// checkPointContainsShape checks whether the given shape
// in the document is approximately contained by the point.
func checkPointContainsShape(points []*s2.Point,
other index.GeoJSON) (bool, error) {
// check if the other shape is a point.
if p2, ok := other.(*Point); ok {
for _, cell := range cells {
if cell.ContainsPoint(*p2.s2point) {
for _, point := range points {
if point.ApproxEqual(*p2.s2point) {
return true, nil
}
}
@@ -1018,12 +1024,12 @@ func checkCellContainsShape(cells []*s2.Cell,
}
// check if the other shape is a multipoint; if so, containment is
// checked for every point in the multipoint against every given cell.
// checked for every point in the multipoint against every given point.
if p2, ok := other.(*MultiPoint); ok {
// check the containment for every point in the collection.
lookup := make(map[int]struct{})
for _, cell := range cells {
for pos, point := range p2.s2points {
for _, qpoint := range points {
for pos, dpoint := range p2.s2points {
if _, done := lookup[pos]; done {
continue
}
@@ -1032,7 +1038,7 @@ func checkCellContainsShape(cells []*s2.Cell,
return true, nil
}
if cell.ContainsPoint(*point) {
if qpoint.ApproxEqual(*dpoint) {
lookup[pos] = struct{}{}
}
}
@@ -1065,7 +1071,6 @@ func checkLineStringsIntersectsShape(pls []*s2.Polyline, shapeIn,
if p2, ok := other.(*MultiPoint); ok {
// check the intersection for any point in the collection.
for _, point := range p2.s2points {
if polylineIntersectsPoint(pls, point) {
return true, nil
}
@@ -1134,7 +1139,9 @@ func checkLineStringsIntersectsShape(pls []*s2.Polyline, shapeIn,
for i := 0; i < pl.NumEdges(); i++ {
edge := pl.Edge(i)
distance := s2.DistanceFromSegment(centre, edge.V0, edge.V1)
return distance <= c.s2cap.Radius(), nil
if distance <= c.s2cap.Radius() {
return true, nil
}
}
}
@@ -1156,6 +1163,27 @@ func checkLineStringsIntersectsShape(pls []*s2.Polyline, shapeIn,
// points and multipoints for the linestring vertices.
func checkLineStringsContainsShape(pls []*s2.Polyline,
other index.GeoJSON) (bool, error) {
// check if the other shape is a point.
if p2, ok := other.(*Point); ok {
if polylineIntersectsPoint(pls, p2.s2point) {
return true, nil
}
return false, nil
}
// check if the other shape is a multipoint.
if p2, ok := other.(*MultiPoint); ok {
// check the containment for every point in the collection.
for _, point := range p2.s2points {
if !polylineIntersectsPoint(pls, point) {
return false, nil
}
}
return true, nil
}
return false, nil
}
@@ -1167,9 +1195,7 @@ func checkPolygonIntersectsShape(s2pgn *s2.Polygon, shapeIn,
other index.GeoJSON) (bool, error) {
// check if the other shape is a point.
if p2, ok := other.(*Point); ok {
s2cell := s2.CellFromPoint(*p2.s2point)
if s2pgn.IntersectsCell(s2cell) {
if polygonIntersectsPoint([]*s2.Polygon{s2pgn}, p2.s2point) {
return true, nil
}
@@ -1178,10 +1204,8 @@ func checkPolygonIntersectsShape(s2pgn *s2.Polygon, shapeIn,
// check if the other shape is a multipoint.
if p2, ok := other.(*MultiPoint); ok {
for _, s2point := range p2.s2points {
s2cell := s2.CellFromPoint(*s2point)
if s2pgn.IntersectsCell(s2cell) {
if polygonIntersectsPoint([]*s2.Polygon{s2pgn}, s2point) {
return true, nil
}
}
@@ -1191,7 +1215,6 @@ func checkPolygonIntersectsShape(s2pgn *s2.Polygon, shapeIn,
// check if the other shape is a polygon.
if p2, ok := other.(*Polygon); ok {
if s2pgn.Intersects(p2.s2pgn) {
return true, nil
}
@@ -1203,7 +1226,6 @@ func checkPolygonIntersectsShape(s2pgn *s2.Polygon, shapeIn,
if p2, ok := other.(*MultiPolygon); ok {
// check the intersection for any polygon in the collection.
for _, s2pgn1 := range p2.s2pgns {
if s2pgn.Intersects(s2pgn1) {
return true, nil
}
@@ -1214,7 +1236,6 @@ func checkPolygonIntersectsShape(s2pgn *s2.Polygon, shapeIn,
// check if the other shape is a linestring.
if ls, ok := other.(*LineString); ok {
if polylineIntersectsPolygons([]*s2.Polyline{ls.pl},
[]*s2.Polygon{s2pgn}) {
return true, nil
@@ -1225,7 +1246,6 @@ func checkPolygonIntersectsShape(s2pgn *s2.Polygon, shapeIn,
// check if the other shape is a multilinestring.
if mls, ok := other.(*MultiLineString); ok {
if polylineIntersectsPolygons(mls.pls, []*s2.Polygon{s2pgn}) {
return true, nil
}
@@ -1256,7 +1276,6 @@ func checkPolygonIntersectsShape(s2pgn *s2.Polygon, shapeIn,
// check if the other shape is a envelope.
if e, ok := other.(*Envelope); ok {
s2pgnInDoc := s2PolygonFromS2Rectangle(e.r)
if s2pgn.Intersects(s2pgnInDoc) {
return true, nil
@@ -1274,9 +1293,8 @@ func checkMultiPolygonContainsShape(s2pgns []*s2.Polygon,
shapeIn, other index.GeoJSON) (bool, error) {
// check if the other shape is a point.
if p2, ok := other.(*Point); ok {
for _, s2pgn := range s2pgns {
if s2pgn.ContainsPoint(*p2.s2point) {
if polygonIntersectsPoint([]*s2.Polygon{s2pgn}, p2.s2point) {
return true, nil
}
}
@@ -1287,34 +1305,22 @@ func checkMultiPolygonContainsShape(s2pgns []*s2.Polygon,
// check if the other shape is a multipoint.
if p2, ok := other.(*MultiPoint); ok {
// check the containment for every point in the collection.
pointsWithIn := make(map[int]struct{})
nextPoint:
for pointIndex, point := range p2.s2points {
idx := s2.NewShapeIndex()
for _, s2pgn := range s2pgns {
idx.Add(s2pgn)
}
for _, s2pgn := range s2pgns {
if s2pgn.ContainsPoint(*point) {
pointsWithIn[pointIndex] = struct{}{}
continue nextPoint
} else {
// double check for containment with the vertices.
for _, loop := range s2pgn.Loops() {
for i := 0; i < loop.NumVertices(); i++ {
if point.ApproxEqual(loop.Vertex(i)) {
pointsWithIn[pointIndex] = struct{}{}
continue nextPoint
}
}
}
}
for _, point := range p2.s2points {
if !s2.NewContainsPointQuery(idx, s2.VertexModelClosed).Contains(*point) {
return false, nil
}
}
return len(p2.s2points) == len(pointsWithIn), nil
return true, nil
}
// check if the other shape is a polygon.
if p2, ok := other.(*Polygon); ok {
for _, s2pgn := range s2pgns {
if s2pgn.Contains(p2.s2pgn) {
return true, nil
@@ -1328,7 +1334,6 @@ func checkMultiPolygonContainsShape(s2pgns []*s2.Polygon,
if p2, ok := other.(*MultiPolygon); ok {
// check the intersection for every polygon in the collection.
polygonsWithIn := make(map[int]struct{})
nextPolygon:
for pgnIndex, pgn := range p2.s2pgns {
for _, s2pgn := range s2pgns {
@@ -1344,7 +1349,6 @@ func checkMultiPolygonContainsShape(s2pgns []*s2.Polygon,
// check if the other shape is a linestring.
if ls, ok := other.(*LineString); ok {
if polygonsContainsLineStrings(s2pgns,
[]*s2.Polyline{ls.pl}) {
return true, nil
@@ -1385,7 +1389,6 @@ func checkMultiPolygonContainsShape(s2pgns []*s2.Polygon,
radius := c.s2cap.Radius()
for _, s2pgn := range s2pgns {
if s2pgn.ContainsPoint(cp) {
projected := s2pgn.ProjectToBoundary(&cp)
distance := projected.Distance(cp)
@@ -1423,9 +1426,7 @@ func checkCircleIntersectsShape(s2cap *s2.Cap, shapeIn,
other index.GeoJSON) (bool, error) {
// check if the other shape is a point.
if p2, ok := other.(*Point); ok {
s2cell := s2.CellFromPoint(*p2.s2point)
if s2cap.IntersectsCell(s2cell) {
if s2cap.ContainsPoint(*p2.s2point) {
return true, nil
}
@@ -1436,9 +1437,7 @@ func checkCircleIntersectsShape(s2cap *s2.Cap, shapeIn,
if p2, ok := other.(*MultiPoint); ok {
// check the intersection for any point in the collection.
for _, point := range p2.s2points {
s2cell := s2.CellFromPoint(*point)
if s2cap.IntersectsCell(s2cell) {
if s2cap.ContainsPoint(*point) {
return true, nil
}
}
@@ -1461,7 +1460,9 @@ func checkCircleIntersectsShape(s2cap *s2.Cap, shapeIn,
centerPoint := s2cap.Center()
projected := s2pgn.Project(&centerPoint)
distance := projected.Distance(centerPoint)
return distance <= s2cap.Radius(), nil
if distance <= s2cap.Radius() {
return true, nil
}
}
return false, nil
@@ -1506,7 +1507,6 @@ func checkCircleIntersectsShape(s2cap *s2.Cap, shapeIn,
// check if the other shape is a envelope.
if e, ok := other.(*Envelope); ok {
if e.r.ContainsPoint(s2cap.Center()) {
return true, nil
}
@@ -1533,7 +1533,6 @@ func checkCircleContainsShape(s2cap *s2.Cap,
shapeIn, other index.GeoJSON) (bool, error) {
// check if the other shape is a point.
if p2, ok := other.(*Point); ok {
if s2cap.ContainsPoint(*p2.s2point) {
return true, nil
}
@@ -1620,7 +1619,6 @@ func checkCircleContainsShape(s2cap *s2.Cap,
// check if the other shape is a circle.
if c, ok := other.(*Circle); ok {
if s2cap.Contains(*c.s2cap) {
return true, nil
}
@@ -1630,7 +1628,6 @@ func checkCircleContainsShape(s2cap *s2.Cap,
// check if the other shape is a envelope.
if e, ok := other.(*Envelope); ok {
for i := 0; i < 4; i++ {
if !s2cap.ContainsPoint(
s2.PointFromLatLng(e.r.Vertex(i))) {
@@ -1653,9 +1650,7 @@ func checkEnvelopeIntersectsShape(s2rect *s2.Rect, shapeIn,
other index.GeoJSON) (bool, error) {
// check if the other shape is a point.
if p2, ok := other.(*Point); ok {
s2cell := s2.CellFromPoint(*p2.s2point)
if s2rect.IntersectsCell(s2cell) {
if s2rect.ContainsPoint(*p2.s2point) {
return true, nil
}
@@ -1666,9 +1661,7 @@ func checkEnvelopeIntersectsShape(s2rect *s2.Rect, shapeIn,
if p2, ok := other.(*MultiPoint); ok {
// check the intersection for any point in the collection.
for _, point := range p2.s2points {
s2cell := s2.CellFromPoint(*point)
if s2rect.IntersectsCell(s2cell) {
if s2rect.ContainsPoint(*point) {
return true, nil
}
}
@@ -1678,7 +1671,6 @@ func checkEnvelopeIntersectsShape(s2rect *s2.Rect, shapeIn,
// check if the other shape is a polygon.
if pgn, ok := other.(*Polygon); ok {
if rectangleIntersectsWithPolygons(s2rect,
[]*s2.Polygon{pgn.s2pgn}) {
return true, nil
@@ -1699,7 +1691,6 @@ func checkEnvelopeIntersectsShape(s2rect *s2.Rect, shapeIn,
// check if the other shape is a linestring.
if ls, ok := other.(*LineString); ok {
if rectangleIntersectsWithLineStrings(s2rect,
[]*s2.Polyline{ls.pl}) {
return true, nil
@@ -1710,7 +1701,6 @@ func checkEnvelopeIntersectsShape(s2rect *s2.Rect, shapeIn,
// check if the other shape is a multilinestring.
if mls, ok := other.(*MultiLineString); ok {
if rectangleIntersectsWithLineStrings(s2rect, mls.pls) {
return true, nil
}
@@ -1738,7 +1728,6 @@ func checkEnvelopeIntersectsShape(s2rect *s2.Rect, shapeIn,
// check if the other shape is a envelope.
if e, ok := other.(*Envelope); ok {
if s2rect.Intersects(*e.r) {
return true, nil
}
@@ -1756,9 +1745,7 @@ func checkEnvelopeContainsShape(s2rect *s2.Rect, shapeIn,
other index.GeoJSON) (bool, error) {
// check if the other shape is a point.
if p2, ok := other.(*Point); ok {
s2LatLng := s2.LatLngFromPoint(*p2.s2point)
if s2rect.ContainsLatLng(s2LatLng) {
if s2rect.ContainsPoint(*p2.s2point) {
return true, nil
}
@@ -1769,9 +1756,7 @@ func checkEnvelopeContainsShape(s2rect *s2.Rect, shapeIn,
if p2, ok := other.(*MultiPoint); ok {
// check the intersection for any point in the collection.
for _, point := range p2.s2points {
s2LatLng := s2.LatLngFromPoint(*point)
if !s2rect.ContainsLatLng(s2LatLng) {
if !s2rect.ContainsPoint(*point) {
return false, nil
}
}
@@ -1781,17 +1766,14 @@ func checkEnvelopeContainsShape(s2rect *s2.Rect, shapeIn,
// check if the other shape is a polygon.
if p2, ok := other.(*Polygon); ok {
s2pgnRect := s2PolygonFromS2Rectangle(s2rect)
return s2pgnRect.Contains(p2.s2pgn), nil
return s2rect.Contains(p2.s2pgn.RectBound()), nil
}
// check if the other shape is a multipolygon.
if p2, ok := other.(*MultiPolygon); ok {
s2pgnRect := s2PolygonFromS2Rectangle(s2rect)
// check the containment for every polygon in the collection.
for _, s2pgn := range p2.s2pgns {
if !s2pgnRect.Contains(s2pgn) {
if !s2rect.Contains(s2pgn.RectBound()) {
return false, nil
}
}
@@ -1801,26 +1783,14 @@ func checkEnvelopeContainsShape(s2rect *s2.Rect, shapeIn,
// check if the other shape is a linestring.
if p2, ok := other.(*LineString); ok {
for i := 0; i < p2.pl.NumEdges(); i++ {
edge := p2.pl.Edge(i)
if !s2rect.ContainsPoint(edge.V0) ||
!s2rect.ContainsPoint(edge.V1) {
return false, nil
}
}
return true, nil
return s2rect.Contains(p2.pl.RectBound()), nil
}
// check if the other shape is a multilinestring.
if p2, ok := other.(*MultiLineString); ok {
for _, pl := range p2.pls {
for i := 0; i < pl.NumEdges(); i++ {
edge := pl.Edge(i)
if !s2rect.ContainsPoint(edge.V0) ||
!s2rect.ContainsPoint(edge.V1) {
return false, nil
}
if !s2rect.Contains(pl.RectBound()) {
return false, nil
}
}
return true, nil
@@ -1838,7 +1808,6 @@ func checkEnvelopeContainsShape(s2rect *s2.Rect, shapeIn,
// check if the other shape is a circle.
if c, ok := other.(*Circle); ok {
if s2rect.Contains(c.s2cap.RectBound()) {
return true, nil
}
@@ -1848,7 +1817,6 @@ func checkEnvelopeContainsShape(s2rect *s2.Rect, shapeIn,
// check if the other shape is a envelope.
if e, ok := other.(*Envelope); ok {
if s2rect.Contains(*e.r) {
return true, nil
}


@@ -27,9 +27,24 @@ import (
var jsoniter = jsoniterator.ConfigCompatibleWithStandardLibrary
type GeoShape struct {
// Type of the shape
Type string
// Coordinates of the shape
// Used for all shapes except Circles
Coordinates [][][][]float64
// Radius of the circle
Radius string
// Center of the circle
Center []float64
}
// FilterGeoShapesOnRelation extracts the shapes in the document, apply
// the `relation` filter and confirms whether the shape in the document
// satisfies the given relation.
func FilterGeoShapesOnRelation(shape index.GeoJSON, targetShapeBytes []byte,
relation string, reader **bytes.Reader, bufPool *s2.GeoBufferPool) (bool, error) {
@@ -394,26 +409,41 @@ var GlueBytes = []byte("##")
// NewGeometryCollection instantiates a geometrycollection
// and prefixes the byte contents with certain glue bytes that
// can be used later while filtering the doc values.
func NewGeometryCollection(coordinates [][][][][]float64,
typs []string) (index.GeoJSON, []byte, error) {
if typs == nil {
return nil, nil, fmt.Errorf("nil type information")
}
if len(typs) < len(coordinates) {
return nil, nil, fmt.Errorf("missing type information for some shapes")
}
shapes := make([]index.GeoJSON, 0, len(coordinates))
for i, vertices := range coordinates {
s, _, err := NewGeoJsonShape(vertices, typs[i])
if err != nil {
continue
func NewGeometryCollection(shapes []*GeoShape) (
index.GeoJSON, []byte, error) {
for _, shape := range shapes {
if shape == nil {
return nil, nil, fmt.Errorf("nil shape")
}
if shape.Type == CircleType && shape.Radius == "" && shape.Center == nil {
return nil, nil, fmt.Errorf("missing radius or center information for some circles")
}
if shape.Type != CircleType && shape.Coordinates == nil {
return nil, nil, fmt.Errorf("missing coordinates for some shapes")
}
}
childShapes := make([]index.GeoJSON, 0, len(shapes))
for _, shape := range shapes {
if shape.Type == CircleType {
circle, _, err := NewGeoCircleShape(shape.Center, shape.Radius)
if err != nil {
continue
}
childShapes = append(childShapes, circle)
} else {
geoShape, _, err := NewGeoJsonShape(shape.Coordinates, shape.Type)
if err != nil {
continue
}
childShapes = append(childShapes, geoShape)
}
shapes = append(shapes, s)
}
var gc GeometryCollection
gc.Typ = GeometryCollectionType
gc.Shapes = shapes
gc.Shapes = childShapes
vbytes, err := gc.Marshal()
if err != nil {
return nil, nil, err

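A hedged usage sketch of the revised constructor above; the import path, the geojson package name, and the PolygonType constant are assumptions based on the vendored tree:

package main

import (
	"fmt"

	// Import path assumed from the vendored tree above.
	geojson "github.com/blevesearch/geo/geojson"
)

func main() {
	// Build a collection mixing a polygon and a circle. Under the old
	// signature this required parallel coordinate/type slices; circles
	// could not be represented at all, since they carry a radius and
	// center rather than coordinates.
	shapes := []*geojson.GeoShape{
		{
			Type: geojson.PolygonType, // assumed sibling of CircleType
			Coordinates: [][][][]float64{{{
				{0, 0}, {1, 0}, {1, 1}, {0, 1}, {0, 0},
			}}},
		},
		{
			Type:   geojson.CircleType,
			Center: []float64{2.2945, 48.8584}, // lon, lat
			Radius: "100m",
		},
	}
	gc, _, err := geojson.NewGeometryCollection(shapes)
	if err != nil {
		fmt.Println("invalid collection:", err)
		return
	}
	fmt.Println(gc.Type())
}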

@@ -165,7 +165,8 @@ func (i Interval) ApproxEqual(other Interval) bool {
// DirectedHausdorffDistance returns the Hausdorff distance to the given interval. For two
// intervals x and y, this distance is defined as
//
// h(x, y) = max_{p in x} min_{q in y} d(p, q).
func (i Interval) DirectedHausdorffDistance(other Interval) float64 {
if i.IsEmpty() {
return 0


@@ -18,7 +18,7 @@ import (
"fmt"
"math"
"github.com/golang/geo/r1"
"github.com/blevesearch/geo/r1"
)
// Point represents a point in ℝ².


@@ -20,10 +20,18 @@ import (
)
const (
// prec is the number of bits of precision to use for the Float values.
// To keep things simple, we use the maximum allowable precision on big
// values. This allows us to handle all values we expect in the s2 library.
prec = big.MaxPrec
// MaxPrec is the number of bits of precision to use for the Float values.
// To keep things simple, we match the limit used in the C++ library.
// This allows us to handle all values we expect in the s2 library.
MaxPrec = 64 << 20 // see C++'s util/math/exactfloat/exactfloat.h
// MaxExp is the maximum exponent supported. If a value has an exponent larger than
// this, it is replaced by infinity (with the appropriate sign).
MaxExp = 200 * 1000 * 1000 // About 10**(60 million)
// MinExp is the minimum exponent supported. If a value has an exponent less than
// this, it is replaced by zero (with the appropriate sign).
MinExp = -MaxExp // About 10**(-60 million)
)
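As a quick illustration of what these precision settings buy, using only the standard library's math/big (the values are illustrative):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// At 53 bits (float64 precision), this sum rounds away the tiny term;
	// at a few hundred bits it is representable exactly. ExactFloat-style
	// arithmetic simply sets the precision high enough (up to MaxPrec)
	// that no rounding occurs for the values s2 cares about.
	small := new(big.Float).SetPrec(300).Quo(
		big.NewFloat(1), new(big.Float).SetMantExp(big.NewFloat(1), 100))
	sum53 := new(big.Float).SetPrec(53).Add(big.NewFloat(1), small)
	sum300 := new(big.Float).SetPrec(300).Add(big.NewFloat(1), small)

	fmt.Println(sum53.Cmp(big.NewFloat(1)) == 0)  // true: 1 + 2^-100 rounds to 1
	fmt.Println(sum300.Cmp(big.NewFloat(1)) == 1) // true: the sum exceeds 1 exactly
}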
// define some commonly referenced values.
@@ -37,28 +45,28 @@ var (
// are integer multiples of integer powers of 2.
func precStr(s string) *big.Float {
// Explicitly ignoring the bool return for this usage.
f, _ := new(big.Float).SetPrec(prec).SetString(s)
f, _ := new(big.Float).SetPrec(MaxPrec).SetString(s)
return f
}
func precInt(i int64) *big.Float {
return new(big.Float).SetPrec(prec).SetInt64(i)
return new(big.Float).SetPrec(MaxPrec).SetInt64(i)
}
func precFloat(f float64) *big.Float {
return new(big.Float).SetPrec(prec).SetFloat64(f)
return new(big.Float).SetPrec(MaxPrec).SetFloat64(f)
}
func precAdd(a, b *big.Float) *big.Float {
return new(big.Float).SetPrec(prec).Add(a, b)
return new(big.Float).SetPrec(MaxPrec).Add(a, b)
}
func precSub(a, b *big.Float) *big.Float {
return new(big.Float).SetPrec(prec).Sub(a, b)
return new(big.Float).SetPrec(MaxPrec).Sub(a, b)
}
func precMul(a, b *big.Float) *big.Float {
return new(big.Float).SetPrec(prec).Mul(a, b)
return new(big.Float).SetPrec(MaxPrec).Mul(a, b)
}
// PreciseVector represents a point in ℝ³ using high-precision values.
@@ -196,3 +204,8 @@ func (v PreciseVector) SmallestComponent() Axis {
}
return ZAxis
}
// IsZero efficiently reports whether this vector is exactly zero.
func (v PreciseVector) IsZero() bool {
return v.X.Sign() == 0 && v.Y.Sign() == 0 && v.Z.Sign() == 0
}


@@ -18,7 +18,7 @@ import (
"fmt"
"math"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/s1"
)
// Vector represents a point in ℝ³.
@@ -68,14 +68,16 @@ func (v Vector) Sub(ov Vector) Vector { return Vector{v.X - ov.X, v.Y - ov.Y, v.
func (v Vector) Mul(m float64) Vector { return Vector{m * v.X, m * v.Y, m * v.Z} }
// Dot returns the standard dot product of v and ov.
func (v Vector) Dot(ov Vector) float64 { return v.X*ov.X + v.Y*ov.Y + v.Z*ov.Z }
func (v Vector) Dot(ov Vector) float64 {
return float64(v.X*ov.X) + float64(v.Y*ov.Y) + float64(v.Z*ov.Z)
}
// Cross returns the standard cross product of v and ov.
func (v Vector) Cross(ov Vector) Vector {
return Vector{
v.Y*ov.Z - v.Z*ov.Y,
v.Z*ov.X - v.X*ov.Z,
v.X*ov.Y - v.Y*ov.X,
float64(v.Y*ov.Z) - float64(v.Z*ov.Y),
float64(v.Z*ov.X) - float64(v.X*ov.Z),
float64(v.X*ov.Y) - float64(v.Y*ov.X),
}
}
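The float64(...) conversions above look redundant, but per the Go spec an explicit floating-point conversion forces the intermediate product to be rounded to float64, which prevents the compiler from fusing the multiply and add into a single FMA instruction on architectures that have one; this keeps Dot and Cross bit-for-bit reproducible across platforms. A minimal illustration of the rule (values arbitrary):

package main

import "fmt"

func main() {
	x, y, z := 1.0/3.0, 3.0, -1.0

	// On architectures with a fused multiply-add (e.g. arm64), the compiler
	// may evaluate x*y + z in one fused step, keeping extra precision in
	// the intermediate product.
	fused := x*y + z

	// The explicit conversion forces x*y to be rounded to float64 first,
	// so this expression is computed the same way on every architecture.
	rounded := float64(x*y) + z

	fmt.Println(fused, rounded) // may differ in the last bit, per platform
}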
@@ -100,13 +102,13 @@ const (
// Ortho returns a unit vector that is orthogonal to v.
// Ortho(-v) = -Ortho(v) for all v.
func (v Vector) Ortho() Vector {
ov := Vector{0.012, 0.0053, 0.00457}
ov := Vector{}
switch v.LargestComponent() {
case XAxis:
ov.Z = 1
case YAxis:
ov.X = 1
default:
case ZAxis:
ov.Y = 1
}
return v.Cross(ov).Normalize()
@@ -146,9 +148,9 @@ func (v Vector) SmallestComponent() Axis {
// Cmp compares v and ov lexicographically and returns:
//
// -1 if v < ov
// 0 if v == ov
// +1 if v > ov
//
// This method is based on C++'s std::lexicographical_compare. Two entities
// are compared element by element with the given operator. The first mismatch


@@ -26,26 +26,26 @@ import (
//
// The following conversions between degrees and radians are exact:
//
//	Degree*180 == Radian*math.Pi
//	Degree*(180/n) == Radian*(math.Pi/n) for n == 0..8
//
// These identities hold when the arguments are scaled up or down by any power
// of 2. Some similar identities are also true, for example,
//
//	Degree*60 == Radian*(math.Pi/3)
//
// But be aware that this type of identity does not hold in general. For example,
//
//	Degree*3 != Radian*(math.Pi/60)
//
// Similarly, the conversion to radians means that (Angle(x)*Degree).Degrees()
// does not always equal x. For example,
//
//	(Angle(45*n)*Degree).Degrees() == 45*n for n == 0..8
//
// but
//
//	(60*Degree).Degrees() != 60
//
// When testing for equality, you should allow for numerical errors (ApproxEqual)
// or convert to discrete E5/E6/E7 values first.


@@ -29,7 +29,7 @@ import (
// There are several different ways to measure this error, including the
// representational error (i.e., how accurately ChordAngle can represent
// angles near π radians), the conversion error (i.e., how much precision is
// lost when an Angle is converted to an ChordAngle), and the measurement
// lost when an Angle is converted to a ChordAngle), and the measurement
// error (i.e., how accurate the ChordAngle(a, b) constructor is when the
// points A and B are separated by angles close to π radians). All of these
// errors differ by a small constant factor.
@@ -39,43 +39,43 @@ import (
// radians, i.e. A and B are within "x" radians of being antipodal. The
// corresponding chord length is
//
//	r = 2 * sin((π - x) / 2) = 2 * cos(x / 2)
//
// For values of x not close to π the relative error in the squared chord
// length is at most 4.5 * dblEpsilon (see MaxPointError below).
// The relative error in "r" is thus at most 2.25 * dblEpsilon ~= 5e-16. To
// convert this error into an equivalent angle, we have
//
//	|dr / dx| = sin(x / 2)
//
// and therefore
//
//	|dx| = dr / sin(x / 2)
//	     = 5e-16 * (2 * cos(x / 2)) / sin(x / 2)
//	     = 1e-15 / tan(x / 2)
//
// The maximum error is attained when
//
//	x  = |dx|
//	   = 1e-15 / tan(x / 2)
//	  ~= 1e-15 / (x / 2)
//	  ~= sqrt(2e-15)
//
// In summary, the measurement error for an angle (π - x) is at most
//
//	dx = min(1e-15 / tan(x / 2), sqrt(2e-15))
//	   (~= min(2e-15 / x, sqrt(2e-15)) when x is small)
//
// On the Earth's surface (assuming a radius of 6371km), this corresponds to
// the following worst-case measurement errors:
//
//	Accuracy:         Unless antipodal to within:
//	---------         ---------------------------
//	6.4 nanometers    10,000 km (90 degrees)
//	1 micrometer      81.2 kilometers
//	1 millimeter      81.2 meters
//	1 centimeter      8.12 meters
//	28.5 centimeters  28.5 centimeters
//
// The representational and conversion errors referred to earlier are somewhat
// smaller than this. For example, maximum distance between adjacent
@@ -84,11 +84,11 @@ import (
// r^2 = 4 * (1 - dblEpsilon / 2). Thus r = 2 * (1 - dblEpsilon / 4) and
// the angle between these two representable values is
//
//	x = 2 * acos(r / 2)
//	  = 2 * acos(1 - dblEpsilon / 4)
//	 ~= 2 * asin(sqrt(dblEpsilon / 2)
//	 ~= sqrt(2 * dblEpsilon)
//	 ~= 2.1e-8
//
// which is 13.5 cm on the Earth's surface.
//
@@ -97,11 +97,11 @@ import (
// r^2 = (4 * (1 - dblEpsilon / 4)), thus r = 2 * (1 - dblEpsilon / 8) and
// the worst case rounding error is
//
//	x = 2 * acos(r / 2)
//	  = 2 * acos(1 - dblEpsilon / 8)
//	 ~= 2 * asin(sqrt(dblEpsilon / 4)
//	 ~= sqrt(dblEpsilon)
//	 ~= 1.5e-8
//
// which is 9.5 cm on the Earth's surface.
type ChordAngle float64
@@ -148,8 +148,9 @@ func ChordAngleFromSquaredLength(length2 float64) ChordAngle {
// Expanded returns a new ChordAngle that has been adjusted by the given error
// bound (which can be positive or negative). Error should be the value
// returned by either MaxPointError or MaxAngleError. For example:
//
//	a := ChordAngleFromPoints(x, y)
//	a1 := a.Expanded(a.MaxPointError())
func (c ChordAngle) Expanded(e float64) ChordAngle {
// If the angle is special, don't change it. Otherwise clamp it to the valid range.
if c.isSpecial() {
@@ -163,7 +164,7 @@ func (c ChordAngle) Angle() Angle {
if c < 0 {
return -1 * Radian
}
if c.isInf() {
if c.IsInfinity() {
return InfAngle()
}
return Angle(2 * math.Asin(0.5*math.Sqrt(float64(c))))
@@ -176,14 +177,14 @@ func InfChordAngle() ChordAngle {
return ChordAngle(math.Inf(1))
}
// isInf reports whether this ChordAngle is infinite.
func (c ChordAngle) isInf() bool {
// IsInfinity reports whether this ChordAngle is infinite.
func (c ChordAngle) IsInfinity() bool {
return math.IsInf(float64(c), 1)
}
// isSpecial reports whether this ChordAngle is one of the special cases.
func (c ChordAngle) isSpecial() bool {
return c < 0 || c.isInf()
return c < 0 || c.IsInfinity()
}
// isValid reports whether this ChordAngle is valid or not.
@@ -195,9 +196,10 @@ func (c ChordAngle) isValid() bool {
// This can be used to convert a "<" comparison to a "<=" comparison.
//
// Note the following special cases:
//
//	NegativeChordAngle.Successor == 0
//	StraightChordAngle.Successor == InfChordAngle
//	InfChordAngle.Successor == InfChordAngle
func (c ChordAngle) Successor() ChordAngle {
if c >= maxLength2 {
return InfChordAngle()
@@ -211,9 +213,10 @@ func (c ChordAngle) Successor() ChordAngle {
// Predecessor returns the largest representable ChordAngle less than this one.
//
// Note the following special cases:
//
//	InfChordAngle.Predecessor == StraightChordAngle
//	ChordAngle(0).Predecessor == NegativeChordAngle
//	NegativeChordAngle.Predecessor == NegativeChordAngle
func (c ChordAngle) Predecessor() ChordAngle {
if c <= 0 {
return NegativeChordAngle


@@ -35,8 +35,9 @@ import (
// of normal intervals are in the range (-π, π]. We normalize the latter to
// the former in IntervalFromEndpoints. However, we take advantage of the point
// -π to construct two special intervals:
//
//	The full interval is [-π, π]
//	The empty interval is [π, -π].
//
// Treat the exported fields as read-only.
type Interval struct {
@@ -414,7 +415,9 @@ func (i Interval) ComplementCenter() float64 {
// DirectedHausdorffDistance returns the Hausdorff distance to the given interval.
// For two intervals i and y, this distance is defined by
//
// h(i, y) = max_{p in i} min_{q in y} d(p, q),
//
// where d(.,.) is measured along S1.
func (i Interval) DirectedHausdorffDistance(y Interval) Angle {
if y.ContainsInterval(i) {

vendor/github.com/blevesearch/geo/s2/builder.go (generated, vendored, new file, 35 lines)

@@ -0,0 +1,35 @@
// Copyright 2023 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
const (
// maxEdgeDeviationRatio is set so that MaxEdgeDeviation will be large enough
// compared to snapRadius such that edge splitting is rare.
//
// Using spherical trigonometry, if the endpoints of an edge of length L
// move by at most a distance R, the center of the edge moves by at most
// asin(sin(R) / cos(L / 2)). Thus the (MaxEdgeDeviation / SnapRadius)
// ratio increases with both the snap radius R and the edge length L.
//
// We arbitrarily limit the edge deviation to be at most 10% more than the
// snap radius. With the maximum allowed snap radius of 70 degrees, this
// means that edges up to 30.6 degrees long are never split. For smaller
// snap radii, edges up to 49 degrees long are never split. (Edges of any
// length are not split unless their endpoints move far enough so that the
// actual edge deviation exceeds the limit; in practice, splitting is rare
// even with long edges.) Note that it is always possible to split edges
// when MaxEdgeDeviation is exceeded.
maxEdgeDeviationRatio = 1.1
)

vendor/github.com/blevesearch/geo/s2/builder_snapper.go (generated, vendored, new file, 505 lines)

@@ -0,0 +1,505 @@
// Copyright 2023 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"math"
"github.com/blevesearch/geo/s1"
)
// A Snapper restricts the locations of the output vertices. For
// example, there are predefined snap functions that require vertices to be
// located at CellID centers or at E5/E6/E7 coordinates. The Snapper
// can also specify a minimum spacing between vertices (i.e. the snap radius).
//
// A Snapper defines the following methods:
//
// 1. The SnapPoint method, which snaps a point P to a nearby point (the
// candidate snap site). Any point may be returned, including P
// itself (the identity snap function).
//
// 2. SnapRadius, the maximum distance that vertices can move when
// snapped. The snapRadius must be at least as large as the maximum
// distance between P and SnapPoint(P) for any point P.
//
// Note that the maximum distance that edge interiors can move when
// snapped is slightly larger than "snapRadius", and is reported by
// the Builder options maxEdgeDeviation (see there for details).
//
// 3. MinVertexSeparation, the guaranteed minimum distance between
// vertices in the output. This is generally a fraction of
// snapRadius where the fraction depends on the snap function.
//
// 4. A MinEdgeVertexSeparation, the guaranteed minimum distance
// between edges and non-incident vertices in the output. This is
// generally a fraction of snapRadius where the fraction depends on
// the snap function.
//
// It is important to note that SnapPoint does not define the actual
// mapping from input vertices to output vertices, since the points it
// returns (the candidate snap sites) are further filtered to ensure that
// they are separated by at least the snap radius. For example, if you
// specify E7 coordinates (2cm resolution) and a snap radius of 10m, then a
// subset of points returned by SnapPoint will be chosen (the snap sites),
// and each input vertex will be mapped to the closest site. Therefore you
// cannot assume that P is necessarily snapped to SnapPoint(P).
//
// Builder makes the following guarantees (within a small error margin):
//
// 1. Every vertex is at a location returned by SnapPoint.
//
// 2. Vertices are within snapRadius of the corresponding input vertex.
//
// 3. Edges are within maxEdgeDeviation of the corresponding input edge
// (a distance slightly larger than snapRadius).
//
// 4. Vertices are separated by at least minVertexSeparation
// (a fraction of snapRadius that depends on the snap function).
//
// 5. Edges and non-incident vertices are separated by at least
// minEdgeVertexSeparation (a fraction of snapRadius).
//
// 6. Vertex and edge locations do not change unless one of the conditions
// above is not already met (idempotency / stability).
//
// 7. The topology of the input geometry is preserved (up to the creation
// of degeneracies). This means that there exists a continuous
// deformation from the input to the output such that no vertex
// crosses an edge.
type Snapper interface {
// SnapRadius reports the maximum distance that vertices can move when
// snapped. The snap radius can be any value between 0 and maxSnapRadius.
//
// If the snap radius is zero, then vertices are snapped together only if
// they are identical. Edges will not be snapped to any vertices other
// than their endpoints, even if there are vertices whose distance to the
// edge is zero, unless split_crossing_edges() is true (see below).
SnapRadius() s1.Angle
// TODO(rsned): Add SetSnapRadius method to allow users to update value.
// MinVertexSeparation returns the guaranteed minimum distance between
// vertices in the output. This is generally some fraction of SnapRadius.
MinVertexSeparation() s1.Angle
// MinEdgeVertexSeparation returns the guaranteed minimum spacing between
// edges and non-incident vertices in the output. This is generally some
// fraction of SnapRadius.
MinEdgeVertexSeparation() s1.Angle
// SnapPoint returns a candidate snap site for the given point. The final vertex
// locations are a subset of the snap sites returned by this function
// (spaced at least MinVertexSeparation apart).
//
// The only requirement is that SnapPoint(x) must return a point whose
// distance from x is no greater than SnapRadius.
SnapPoint(point Point) Point
}
// Ensure the various snapping function types satisfy the interface.
var (
_ Snapper = IdentitySnapper{}
_ Snapper = CellIDSnapper{}
_ Snapper = IntLatLngSnapper{}
)
// maxSnapRadius defines the maximum supported snap radius (equivalent to about 7800km).
// This value can't be larger than 85.7 degrees without changing the code
// related to minEdgeLengthToSplitChordAngle, and increasing it to 90 degrees
// or more would most likely require significant changes to the algorithm.
const maxSnapRadius = 70 * s1.Degree
// IdentitySnapper is a Snapper that snaps every vertex to itself.
// It should be used when vertices do not need to be snapped to a discrete set
// of locations (such as E7 lat/lngs), or when maximum accuracy is desired.
//
// If the snapRadius is zero, then all input vertices are preserved
// exactly. Otherwise, Builder merges nearby vertices to ensure that no
// vertex pair is closer than snapRadius. Furthermore, vertices are
// separated from non-incident edges by at least MinEdgeVertexSeparation,
// equal to (0.5 * snapRadius). For example, if the snapRadius is 1km, then
// vertices will be separated from non-incident edges by at least 500m.
type IdentitySnapper struct {
snapRadius s1.Angle
}
// NewIdentitySnapper returns an IdentitySnapper with the given snap radius.
func NewIdentitySnapper(snapRadius s1.Angle) IdentitySnapper {
return IdentitySnapper{
snapRadius: snapRadius,
}
}
// SnapRadius returns this type's snapping radius.
func (sf IdentitySnapper) SnapRadius() s1.Angle {
return sf.snapRadius
}
// MinVertexSeparation returns the minimum vertex separation for this snap type.
func (sf IdentitySnapper) MinVertexSeparation() s1.Angle {
// Since the identity snapper does not move the input point, output
// vertices are separated by the full snapRadius.
return sf.snapRadius
}
// MinEdgeVertexSeparation returns the minimum edge vertex separation.
// For the identity snap function, edges are separated from all non-incident
// vertices by at least 0.5 * snapRadius.
func (sf IdentitySnapper) MinEdgeVertexSeparation() s1.Angle {
// In the worst case configuration, the edge-vertex separation is half of the
// vertex separation.
return 0.5 * sf.snapRadius
}
// SnapPoint snaps the given point to the appropriate level for this type.
func (sf IdentitySnapper) SnapPoint(point Point) Point {
return point
}
// CellIDSnapper is a type that snaps vertices to CellID centers. This can
// be useful if you want to encode your geometry compactly for example. You can
// snap to the centers of cells at any level.
//
// Every snap level has a corresponding minimum snap radius, which is simply
// the maximum distance that a vertex can move when snapped. It is
// approximately equal to half of the maximum diagonal length for cells at the
// chosen level. You can also set the snap radius to a larger value; for
// example, you could snap to the centers of leaf cells (1cm resolution) but
// set the snapRadius to 10m. This would result in significant extra
// simplification, without moving vertices unnecessarily (i.e., vertices that
// are at least 10m away from all other vertices will move by less than 1cm).
type CellIDSnapper struct {
level int
snapRadius s1.Angle
}
// TODO(rsned): Add SetLevel method to allow changes to the type.
// NewCellIDSnapper returns a snap function with the default level set.
func NewCellIDSnapper() CellIDSnapper {
return CellIDSnapper{
level: MaxLevel,
}
}
// CellIDSnapperForLevel returns a snap function at the given level.
func CellIDSnapperForLevel(level int) CellIDSnapper {
sf := CellIDSnapper{
level: level,
}
sf.snapRadius = sf.minSnapRadiusForLevel(level)
return sf
}
// SnapRadius reports the maximum distance that vertices can move when snapped.
// This requires that SnapRadius <= maxSnapRadius
func (sf CellIDSnapper) SnapRadius() s1.Angle {
return sf.snapRadius
}
// minSnapRadiusForLevel returns the minimum allowable snap radius for the given level
// (approximately equal to half of the maximum cell diagonal length).
func (sf CellIDSnapper) minSnapRadiusForLevel(level int) s1.Angle {
// snapRadius needs to be an upper bound on the true distance that a
// point can move when snapped, taking into account numerical errors.
//
// The maximum error when converting from a Point to a CellID is
// MaxDiagMetric.Deriv * dblEpsilon. The maximum error when converting a
// CellID center back to a Point is 1.5 * dblEpsilon. These add up to
// just slightly less than 4 * dblEpsilon.
return s1.Angle(0.5*MaxDiagMetric.Value(level) + 4*dblEpsilon)
}
// levelForMaxSnapRadius reports the minimum Cell level (i.e., largest Cells) such
// that vertices will not move by more than snapRadius. This can be useful
// when choosing an appropriate level to snap to. The return value is
// always a valid level (out of range values are silently clamped).
//
// If you want to choose the snap level based on a distance, and then use
// the minimum possible snap radius for the chosen level, do this:
//
//	sf := CellIDSnapperForLevel(f.levelForMaxSnapRadius(distance))
//
// TODO(rsned): pop this method out to standalone.
func (sf CellIDSnapper) levelForMaxSnapRadius(snapRadius s1.Angle) int {
// When choosing a level, we need to account for the error bound of
// 4 * dblEpsilon that is added by MinSnapRadiusForLevel.
return MaxDiagMetric.MinLevel(2 * (snapRadius.Radians() - 4*dblEpsilon))
}
// MinVertexSeparation reports the minimum separation between vertices depending
// on level and snapRadius. It can vary between 0.5 * snapRadius and snapRadius.
func (sf CellIDSnapper) MinVertexSeparation() s1.Angle {
// We have three different bounds for the minimum vertex separation: one is
// a constant bound, one is proportional to snapRadius, and one is equal to
// snapRadius minus a constant. These bounds give the best results for
// small, medium, and large snap radii respectively. We return the maximum
// of the three bounds.
//
// 1. Constant bound: Vertices are always separated by at least
// MinEdgeMetric.Value(level), the minimum edge length for the chosen
// snap level.
//
// 2. Proportional bound: It can be shown that in the plane, the worst-case
// configuration has a vertex separation of 2 / sqrt(13) * snapRadius.
// This is verified in the unit test, except that on the sphere the ratio
// is slightly smaller at cell level 2 (0.54849 vs. 0.55470). We reduce
// that value a bit more below to be conservative.
//
// 3. Best asymptotic bound: This bound is derived by observing we
// only select a new site when it is at least snapRadius away from all
// existing sites, and the site can move by at most
// 0.5 * MaxDiagMetric.Value(level) when snapped.
minEdge := s1.Angle(MinEdgeMetric.Value(sf.level))
maxDiag := s1.Angle(MaxDiagMetric.Value(sf.level))
return maxAngle(minEdge,
// per 2 above, a little less than 2 / sqrt(13)
maxAngle(0.548*sf.snapRadius,
sf.snapRadius-0.5*maxDiag))
}
// MinEdgeVertexSeparation returns the guaranteed minimum spacing between
// edges and non-incident vertices in the output depending on level and snapRadius.
// It can be as low as 0.219 * snapRadius, but is typically 0.5 * snapRadius
// or more.
func (sf CellIDSnapper) MinEdgeVertexSeparation() s1.Angle {
// Similar to MinVertexSeparation, in this case we have four bounds: a
// constant bound that holds only at the minimum snap radius, a constant
// bound that holds for any snap radius, a bound that is proportional to
// snapRadius, and a bound that is equal to snapRadius minus a constant.
//
// 1. Constant bounds:
// (a) At the minimum snap radius for a given level, it can be shown
// that vertices are separated from edges by at least 0.5 *
// MinDiagMetric.Value(level) in the plane. The unit test verifies this,
// except that on the sphere the worst case is slightly better:
// 0.5652980068 * MinDiagMetric.Value(level).
//
// (b) Otherwise, for arbitrary snap radii the worst-case configuration
// in the plane has an edge-vertex separation of sqrt(3/19) *
// MinDiagMetric.Value(level), where sqrt(3/19) is about 0.3973597071.
// The unit test verifies that the bound is slightly better on the sphere:
// 0.3973595687 * MinDiagMetric.Value(level).
//
// 2. Proportional bound: In the plane, the worst-case configuration has an
// edge-vertex separation of 2 * sqrt(3/247) * snapRadius, which is
// about 0.2204155075. The unit test verifies this, except that on the
// sphere the bound is slightly worse for certain large Cells: the
// minimum ratio occurs at cell level 6, and is about 0.2196666953.
//
// 3. Best asymptotic bound: If snapRadius is large compared to the
// minimum snap radius, then the best bound is achieved by 3 sites on a
// circular arc of radius snapRadius, spaced MinVertexSeparation
// apart. An input edge passing just to one side of the center of the
// circle intersects the Voronoi regions of the two end sites but not the
// Voronoi region of the center site, and gives an edge separation of
// (MinVertexSeparation ** 2) / (2 * snapRadius). This bound
// approaches 0.5 * snapRadius for large snap radii, i.e. the minimum
// edge-vertex separation approaches half of the minimum vertex
// separation as the snap radius becomes large compared to the cell size.
minDiag := s1.Angle(MinDiagMetric.Value(sf.level))
if sf.snapRadius == sf.minSnapRadiusForLevel(sf.level) {
// This bound only holds when the minimum snap radius is being used.
return 0.565 * minDiag // 0.500 in the plane
}
// Otherwise, these bounds hold for any snapRadius.
vertexSep := sf.MinVertexSeparation()
return maxAngle(0.397*minDiag, // sqrt(3/19) in the plane
maxAngle(0.219*sf.snapRadius, // 2*sqrt(3/247) in the plane
0.5*(vertexSep/sf.snapRadius)*vertexSep))
}
// SnapPoint returns a candidate snap site for the given point.
func (sf CellIDSnapper) SnapPoint(point Point) Point {
return CellFromPoint(point).id.Parent(sf.level).Point()
}
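// A minimal usage sketch for the cell-based snapper (level 10 and the
// coordinates are illustrative; CellIDSnapperForLevel is the constructor
// referenced in the comments above):
//
//	sf := CellIDSnapperForLevel(10)
//	p := PointFromLatLng(LatLngFromDegrees(51.5, -0.12))
//	snapped := sf.SnapPoint(p)
//	// snapped is the center of the level-10 cell containing p.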
const (
// The minimum exponent supported for snapping.
minIntSnappingExponent = 0
// The maximum exponent supported for snapping.
maxIntSnappingExponent = 10
)
// IntLatLngSnapper is a Snapper that snaps vertices to LatLngs in
// E5, E6, or E7 coordinates. These coordinates are expressed in degrees
// multiplied by a power of 10 and then rounded to the nearest integer. For
// example, in E6 coordinates the point (23.12345651, -45.65432149) would
// become (23123457, -45654321).
//
// The main argument of the Snapper is the exponent for the power of 10
// that coordinates should be multiplied by before rounding. For example,
// NewIntLatLngSnapper(7) is a function that snaps to E7 coordinates. The
// exponent can range from 0 to 10.
//
// Each exponent has a corresponding minimum snap radius, which is simply the
// maximum distance that a vertex can move when snapped. It is approximately
// equal to 1/sqrt(2) times the nominal point spacing; for example, for
// snapping to E7 the minimum snap radius is (1e-7 / sqrt(2)) degrees.
// You can also set the snap radius to any value larger than this; this can
// result in significant extra simplification (similar to using a larger
// exponent) but does not move vertices unnecessarily.
type IntLatLngSnapper struct {
exponent int
snapRadius s1.Angle
from, to s1.Angle
}
// NewIntLatLngSnapper returns a Snapper with the specified exponent.
func NewIntLatLngSnapper(exponent int) IntLatLngSnapper {
// Precompute the scale factors needed for snapping. Note that these
// calculations need to exactly match the ones in s1.Angle to ensure
// that the same Points are generated.
power := s1.Angle(math.Pow10(exponent))
sf := IntLatLngSnapper{
exponent: exponent,
from: power,
to: 1 / power,
}
sf.snapRadius = sf.minSnapRadiusForExponent(exponent)
return sf
}
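// A small sketch of the state after construction, assuming only what the
// comments above state: the snap radius starts at the minimum for the
// chosen exponent, roughly (1e-7 / sqrt(2)) degrees for E7.
//
//	sn := NewIntLatLngSnapper(7)
//	r := sn.SnapRadius()
//	// r ≈ s1.Degree * 1e-7 / math.Sqrt2, plus a tiny error term.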
// TODO(rsned): Add SetExponent() method.
// SnapRadius reports the snap radius to be used. The snap radius
// must be at least the minimum value for the current exponent, but larger
// values can also be used (e.g., to simplify the geometry).
//
// This requires snapRadius >= minSnapRadiusForExponent(sf.exponent)
// and snapRadius <= maxSnapRadius
func (sf IntLatLngSnapper) SnapRadius() s1.Angle {
return sf.snapRadius
}
// minSnapRadiusForExponent returns the minimum allowable snap radius for the given
// exponent (approximately equal to (10**(-exponent) / sqrt(2)) degrees).
//
// TODO(rsned): Pop this method out so it can be used by other callers.
func (sf IntLatLngSnapper) minSnapRadiusForExponent(exponent int) s1.Angle {
// snapRadius needs to be an upper bound on the true distance that a
// point can move when snapped, taking into account numerical errors.
//
// The maximum errors in latitude and longitude can be bounded as
// follows (as absolute errors in terms of dblEpsilon):
//
// Latitude Longitude
// Convert to LatLng: 1.000 1.000
// Convert to degrees: 1.032 2.063
// Scale by 10**exp: 0.786 1.571
// Round to integer: 0.5 * s1.Degrees(sf.to)
// Scale by 10**(-exp): 1.375 2.749
// Convert to radians: 1.252 1.503
// ------------------------------------------------------------
// Total (except for rounding) 5.445 8.886
//
// The maximum error when converting the LatLng back to a Point is
//
// sqrt(2) * (maximum error in latitude or longitude) + 1.5 * dblEpsilon
//
// which works out to (9 * sqrt(2) + 1.5) * dblEpsilon radians. Finally
// we need to consider the effect of rounding to integer coordinates
// (much larger than the errors above), which can change the position by
// up to (sqrt(2) * 0.5 * sf.to) radians.
power := math.Pow10(exponent)
return (s1.Degree*s1.Angle((1/math.Sqrt2)/power) +
s1.Angle((9*math.Sqrt2+1.5)*dblEpsilon))
}
// exponentForMaxSnapRadius returns the minimum exponent such that vertices will
// not move by more than snapRadius. This can be useful when choosing an appropriate
// exponent for snapping. The return value is always a valid exponent (out of
// range values are silently clamped).
//
// TODO(rsned): Pop this method out so it can be used by other callers.
func (sf IntLatLngSnapper) exponentForMaxSnapRadius(snapRadius s1.Angle) int {
// When choosing an exponent, we need to account for the error bound of
// (9 * sqrt(2) + 1.5) * dblEpsilon added by minSnapRadiusForExponent.
snapRadius -= (9*math.Sqrt2 + 1.5) * dblEpsilon
snapRadius = maxAngle(snapRadius, 1e-30)
exponent := math.Log10((1 / math.Sqrt2) / snapRadius.Degrees())
// There can be small errors in the calculation above, so to ensure that
// this function is the inverse of minSnapRadiusForExponent we subtract a
// small error tolerance.
return maxInt(minIntSnappingExponent,
minInt(maxIntSnappingExponent, int(math.Ceil(exponent-2*dblEpsilon))))
}
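// An in-package sketch of choosing an exponent from a movement budget
// (exponentForMaxSnapRadius is unexported; the budget is illustrative):
//
//	var sn IntLatLngSnapper
//	budget := s1.Degree * 1e-5
//	exp := sn.exponentForMaxSnapRadius(budget) // 5 for this budget
//	// minSnapRadiusForExponent(exp) ≈ (1e-5 / sqrt(2)) degrees <= budget.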
// MinVertexSeparation reports the minimum separation between vertices depending on
// exponent and snapRadius. It can vary between 0.471 * snapRadius and snapRadius.
func (sf IntLatLngSnapper) MinVertexSeparation() s1.Angle {
// We have two bounds for the minimum vertex separation: one is proportional
// to snapRadius, and one is equal to snapRadius minus a constant. These
// bounds give the best results for small and large snap radii respectively.
// We return the maximum of the two bounds.
//
// 1. Proportional bound: It can be shown that in the plane, the worst-case
// configuration has a vertex separation of (sqrt(2) / 3) * snapRadius.
// This is verified in the unit test, except that on the sphere the ratio
// is slightly smaller (0.471337 vs. 0.471404). We reduce that value a
// bit more below to be conservative.
//
// 2. Best asymptotic bound: This bound is derived by observing that we
// only select a new site when it is at least snapRadius away from all
// existing sites, and snapping a vertex can move it by up to
// ((1 / sqrt(2)) * sf.to) degrees.
return maxAngle(0.471*sf.snapRadius, // sqrt(2)/3 in the plane
sf.snapRadius-s1.Degree*s1.Angle(1/math.Sqrt2)*sf.to)
}
// MinEdgeVertexSeparation reports the minimum separation between edges
// and non-incident vertices in the output depending on the level and
// snapRadius. It can be as low as 0.222 * snapRadius, but is typically
// 0.39 * snapRadius or more.
func (sf IntLatLngSnapper) MinEdgeVertexSeparation() s1.Angle {
// Similar to MinVertexSeparation, in this case we have three bounds:
// one is a constant bound, one is proportional to snapRadius, and one
// approaches 0.5 * snapRadius asymptotically.
//
// 1. Constant bound: In the plane, the worst-case configuration has an
// edge-vertex separation of ((1 / sqrt(13)) * sf.to) degrees.
// The unit test verifies this, except that on the sphere the ratio is
// slightly lower when small exponents such as E1 are used
// (0.2772589 vs 0.2773501).
//
// 2. Proportional bound: In the plane, the worst-case configuration has an
// edge-vertex separation of (2 / 9) * snapRadius (0.222222222222). The
// unit test verifies this, except that on the sphere the bound can be
// slightly worse with large exponents (e.g., E9) due to small numerical
// errors (0.222222126756717).
//
// 3. Best asymptotic bound: If snapRadius is large compared to the
// minimum snap radius, then the best bound is achieved by 3 sites on a
// circular arc of radius snapRadius, spaced MinVertexSeparation
// apart (see CellIDSnapper.MinEdgeVertexSeparation). This
// bound approaches 0.5 * snapRadius as the snap radius becomes large
// relative to the grid spacing.
vertexSep := sf.MinVertexSeparation()
return maxAngle(0.277*s1.Degree*sf.to, // 1/sqrt(13) in the plane
maxAngle(0.222*sf.snapRadius, // 2/9 in the plane
0.5*(vertexSep/sf.snapRadius)*vertexSep))
}
// SnapPoint returns a candidate snap site for the given point.
func (sf IntLatLngSnapper) SnapPoint(point Point) Point {
// TODO(rsned): C++ DCHECKs that the exponent is in the valid range. Decide
// what to do here when it is not.
input := LatLngFromPoint(point)
lat := s1.Angle(roundAngle(input.Lat * sf.from))
lng := s1.Angle(roundAngle(input.Lng * sf.from))
return PointFromLatLng(LatLng{lat * sf.to, lng * sf.to})
}
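// Tying it together, a sketch of the E6 example from the type comment on
// IntLatLngSnapper (the coordinates are taken from that comment):
//
//	sn := NewIntLatLngSnapper(6)
//	p := PointFromLatLng(LatLngFromDegrees(23.12345651, -45.65432149))
//	ll := LatLngFromPoint(sn.SnapPoint(p))
//	// ll is approximately (23.123457°, -45.654321°).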

View File

@@ -19,8 +19,8 @@ import (
"io"
"math"
"github.com/golang/geo/r1"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/r1"
"github.com/blevesearch/geo/s1"
)
var (
@@ -52,10 +52,10 @@ var (
// radius (r), the maximum chord length from the cap's center (d), and the
// radius of cap's base (a).
//
// h = 1 - cos(r)
// = 2 * sin^2(r/2)
// d^2 = 2 * h
// = a^2 + h^2
// h = 1 - cos(r)
// = 2 * sin^2(r/2)
// d^2 = 2 * h
// = a^2 + h^2
//
// The zero value of Cap is an invalid cap. Use EmptyCap to get a valid empty cap.
type Cap struct {
@@ -109,7 +109,7 @@ func FullCap() Cap {
// IsValid reports whether the Cap is considered valid.
func (c Cap) IsValid() bool {
return c.center.Vector.IsUnit() && c.radius <= s1.StraightChordAngle
return c.center.IsUnit() && c.radius <= s1.StraightChordAngle
}
// IsEmpty reports whether the cap is empty, i.e. it contains no points.
@@ -391,7 +391,7 @@ func (c Cap) intersects(cell Cell, vertices [4]Point) bool {
sin2Angle := c.radius.Sin2()
for k := 0; k < 4; k++ {
edge := cell.Edge(k).Vector
dot := c.center.Vector.Dot(edge)
dot := c.center.Dot(edge)
if dot > 0 {
// The center is in the interior half-space defined by the edge. We do not need
// to consider these edges, since if the cap intersects this edge then it also

View File

@@ -18,10 +18,30 @@ import (
"io"
"math"
"github.com/golang/geo/r1"
"github.com/golang/geo/r2"
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/r1"
"github.com/blevesearch/geo/r2"
"github.com/blevesearch/geo/r3"
"github.com/blevesearch/geo/s1"
)
// CellBoundary represents canonical identifiers for the boundaries of the cell.
// It's promised that both Vertex() and BoundUV().Vertices() return vertices in this
// order.
//
// That is, for a given boundary k, the edge defining the boundary is:
//
// {Vertex(k), Vertex(k+1)}
//
// The boundaries are defined in UV coordinates. The orientation may be
// rotated relative to other face cells, but is consistent within a face
// (i.e. a cell's left edge is its left-ward neighbor's right edge).
type CellBoundary int
const (
CellBoundaryBottomEdge CellBoundary = iota
CellBoundaryRightEdge
CellBoundaryTopEdge
CellBoundaryLeftEdge
)
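// A minimal sketch of the ordering promise above: boundary k is the edge
// {Vertex(k), Vertex(k+1 mod 4)} (the face cell is an arbitrary choice):
//
//	c := CellFromCellID(CellIDFromFace(0))
//	for k := CellBoundaryBottomEdge; k <= CellBoundaryLeftEdge; k++ {
//		a, b := c.Vertex(int(k)), c.Vertex((int(k)+1)%4)
//		_, _ = a, b // the edge defining boundary k
//	}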
// Cell is an S2 region object that represents a cell. Unlike CellIDs,
@@ -79,7 +99,7 @@ func (c Cell) ID() CellID {
// IsLeaf returns whether this Cell is a leaf or not.
func (c Cell) IsLeaf() bool {
return c.level == maxLevel
return c.level == MaxLevel
}
// SizeIJ returns the edge length of this cell in (i,j)-space.
@@ -92,24 +112,47 @@ func (c Cell) SizeST() float64 {
return c.id.sizeST(int(c.level))
}
// Vertex returns the k-th vertex of the cell (k = 0,1,2,3) in CCW order
// Vertex returns the normalized k-th vertex of the cell (k = 0,1,2,3) in CCW order
// (lower left, lower right, upper right, upper left in the UV plane).
func (c Cell) Vertex(k int) Point {
return Point{faceUVToXYZ(int(c.face), c.uv.Vertices()[k].X, c.uv.Vertices()[k].Y).Normalize()}
return Point{c.VertexRaw(k).Normalize()}
}
// Edge returns the inward-facing normal of the great circle passing through
// the CCW ordered edge from vertex k to vertex k+1 (mod 4) (for k = 0,1,2,3).
// VertexRaw returns the unnormalized k-th vertex of the cell (k = 0,1,2,3) in CCW order
// (lower left, lower right, upper right, upper left in the UV plane).
func (c Cell) VertexRaw(k int) Point {
return Point{faceUVToXYZ(int(c.face), c.uv.Vertices()[k].X, c.uv.Vertices()[k].Y)}
}
// Edge returns the normalized inward-facing normal of the great circle passing through
// the CCW ordered edge from vertex k to vertex k+1 (mod 4) (for k = 0,1,2,3)
func (c Cell) Edge(k int) Point {
return Point{c.EdgeRaw(k).Normalize()}
}
// EdgeRaw returns the inward-facing normal of the great circle passing through
// the CCW ordered edge from vertex k to vertex k+1 (mod 4) (for k = 0,1,2,3).
//
// The normals returned by EdgeRaw are not necessarily unit length, but their
// length is bounded by Sqrt(2) since the worst case is two components of magnitude 1.
//
// The vertices returned by Vertex are not guaranteed to actually be on the
// boundary of the cell exactly. Instead, they're the nearest representable
// point to the corner.
//
// Cell edge normals returned by EdgeRaw, however, are computed exactly and
// can be used with exact predicates to determine spatial relationships to the
// cell exactly.
func (c Cell) EdgeRaw(k int) Point {
switch k {
case 0:
return Point{vNorm(int(c.face), c.uv.Y.Lo).Normalize()} // Bottom
return Point{vNorm(int(c.face), c.uv.Y.Lo)} // Bottom
case 1:
return Point{uNorm(int(c.face), c.uv.X.Hi).Normalize()} // Right
return Point{uNorm(int(c.face), c.uv.X.Hi)} // Right
case 2:
return Point{vNorm(int(c.face), c.uv.Y.Hi).Mul(-1.0).Normalize()} // Top
return Point{vNorm(int(c.face), c.uv.Y.Hi).Mul(-1.0)} // Top
default:
return Point{uNorm(int(c.face), c.uv.X.Lo).Mul(-1.0).Normalize()} // Left
return Point{uNorm(int(c.face), c.uv.X.Lo).Mul(-1.0)} // Left
}
}
@@ -118,6 +161,44 @@ func (c Cell) BoundUV() r2.Rect {
return c.uv
}
// UVCoordOfEdge returns either U or V for the given edge, whichever is constant along it.
//
// E.g. boundaries 0 and 2 are constant in the V axis so we return those
// coordinates, but boundaries 1 and 3 are constant in the U axis, so we
// return those coordinates.
//
// For convenience, the argument is reduced modulo 4 to the range [0..3].
func (c Cell) UVCoordOfEdge(k int) float64 {
k %= 4
if k%2 == 0 {
return c.BoundUV().Vertices()[k].Y
}
return c.BoundUV().Vertices()[k].X
}
// IJCoordOfEdge returns either I or J for the given edge, whichever is constant along it.
//
// E.g. boundaries 0 and 2 are constant in the J axis so we return those
// coordinates, but boundaries 1 and 3 are constant in the I axis, so we
// return those coordinates.
//
// The returned value is not clamped to MaxSize-1 as in stToIJ, so
// that cell edges at the maximum extent of a face are properly returned as
// MaxSize.
//
// For convenience, the argument is reduced modulo 4 to the range [0..3].
func (c Cell) IJCoordOfEdge(k int) int {
// We can just convert UV->ST->IJ for this because the IJ coordinates only
// have 30 bits of resolution in each axis. The error in the conversion
// will be a couple of epsilon, which is << 2^-30, so if we use a proper
// round-to-nearest operation, we'll always round to the correct IJ value.
//
// Intel CPUs that support SSE4.1 have the ROUNDSD instruction, and ARM CPUs
// with VFP have the VCVT instruction, both of which can implement correct
// rounding efficiently regardless of the current FPU rounding mode.
return int(math.Round(MaxSize * uvToST(c.UVCoordOfEdge(k))))
}
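// A sketch of both edge-coordinate helpers on a full face cell, whose UV
// bound is [-1,1]x[-1,1] (so the constant coordinate of each boundary is
// ±1, and the IJ coordinate is 0 or MaxSize, never clamped to MaxSize-1):
//
//	c := CellFromCellID(CellIDFromFace(0))
//	u0 := c.UVCoordOfEdge(0) // -1 (bottom edge, constant in V)
//	i2 := c.IJCoordOfEdge(2) // MaxSize (top edge at the face limit)
//	_, _ = u0, i2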
// Center returns the direction vector corresponding to the center in
// (s,t)-space of the given cell. This is the point at which the cell is
// divided into four subcells; it is not necessarily the centroid of the
@@ -337,17 +418,17 @@ func (c Cell) RectBound() Rect {
var bound Rect
switch c.face {
case 0:
bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{-math.Pi / 4, math.Pi / 4}}
bound = Rect{r1.Interval{Lo: -math.Pi / 4, Hi: math.Pi / 4}, s1.Interval{Lo: -math.Pi / 4, Hi: math.Pi / 4}}
case 1:
bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{math.Pi / 4, 3 * math.Pi / 4}}
bound = Rect{r1.Interval{Lo: -math.Pi / 4, Hi: math.Pi / 4}, s1.Interval{Lo: math.Pi / 4, Hi: 3 * math.Pi / 4}}
case 2:
bound = Rect{r1.Interval{poleMinLat, math.Pi / 2}, s1.FullInterval()}
bound = Rect{r1.Interval{Lo: poleMinLat, Hi: math.Pi / 2}, s1.FullInterval()}
case 3:
bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{3 * math.Pi / 4, -3 * math.Pi / 4}}
bound = Rect{r1.Interval{Lo: -math.Pi / 4, Hi: math.Pi / 4}, s1.Interval{Lo: 3 * math.Pi / 4, Hi: -3 * math.Pi / 4}}
case 4:
bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{-3 * math.Pi / 4, -math.Pi / 4}}
bound = Rect{r1.Interval{Lo: -math.Pi / 4, Hi: math.Pi / 4}, s1.Interval{Lo: -3 * math.Pi / 4, Hi: -math.Pi / 4}}
default:
bound = Rect{r1.Interval{-math.Pi / 2, -poleMinLat}, s1.FullInterval()}
bound = Rect{r1.Interval{Lo: -math.Pi / 2, Hi: -poleMinLat}, s1.FullInterval()}
}
// Finally, we expand the bound to account for the error when a point P is
@@ -379,6 +460,18 @@ func (c Cell) CapBound() Cap {
// If you want every point to be contained by exactly one Cell,
// you will need to convert the Cell to a Loop.
func (c Cell) ContainsPoint(p Point) bool {
// We can't just call XYZtoFaceUV, because for points that lie on the
// boundary between two faces (i.e. u or v is +1/-1) we need to return
// true for both adjacent cells.
//
// We can get away with not checking whether the face of the point matches the
// face of the cell here because, for the 4 faces adjacent to c.face, p will be
// projected outside the range of ([-1,1]x[-1,1]) and thus can't intersect the
// cell bounds (except on the face boundary which we want).
//
// For the face opposite c.face, the sign of the UV coordinates of P will be
// flipped so it will automatically fall outside the cell boundary as no cells
// cross the origin.
var uv r2.Point
var ok bool
if uv.X, uv.Y, ok = faceXYZToUV(int(c.face), p); !ok {
@@ -444,8 +537,8 @@ func (c Cell) uEdgeIsClosest(p Point, vHi bool) bool {
}
// These are the normals to the planes that are perpendicular to the edge
// and pass through one of its two endpoints.
dir0 := r3.Vector{v*v + 1, -u0 * v, -u0}
dir1 := r3.Vector{v*v + 1, -u1 * v, -u1}
dir0 := r3.Vector{X: v*v + 1, Y: -u0 * v, Z: -u0}
dir1 := r3.Vector{X: v*v + 1, Y: -u1 * v, Z: -u1}
return p.Dot(dir0) > 0 && p.Dot(dir1) < 0
}
@@ -458,8 +551,8 @@ func (c Cell) vEdgeIsClosest(p Point, uHi bool) bool {
if uHi {
u = c.uv.X.Hi
}
dir0 := r3.Vector{-u * v0, u*u + 1, -v0}
dir1 := r3.Vector{-u * v1, u*u + 1, -v1}
dir0 := r3.Vector{X: -u * v0, Y: u*u + 1, Z: -v0}
dir1 := r3.Vector{X: -u * v1, Y: u*u + 1, Z: -v1}
return p.Dot(dir0) > 0 && p.Dot(dir1) < 0
}
@@ -569,7 +662,7 @@ func (c Cell) MaxDistance(target Point) s1.ChordAngle {
// Otherwise, find the minimum distance dMin to the antipodal point and the
// maximum distance will be pi - dMin.
return s1.StraightChordAngle - c.BoundaryDistance(Point{target.Mul(-1)})
return s1.StraightChordAngle - c.Distance(Point{target.Mul(-1)})
}
// BoundaryDistance reports the distance from the cell boundary to the given point.
@@ -670,7 +763,7 @@ func (c Cell) MaxDistanceToCell(target Cell) s1.ChordAngle {
// Need to check the antipodal target for intersection with the cell. If it
// intersects, the distance is the straight ChordAngle.
// antipodalUV is the transpose of the original UV, interpreted within the opposite face.
antipodalUV := r2.Rect{target.uv.Y, target.uv.X}
antipodalUV := r2.Rect{X: target.uv.Y, Y: target.uv.X}
if int(c.face) == oppositeFace(int(target.face)) && c.uv.Intersects(antipodalUV) {
return s1.StraightChordAngle
}

View File

@@ -365,7 +365,7 @@ func (c *CellIndexContentsIterator) StartUnion(r *CellIndexRangeIterator) {
// There is also a helper method that adds all elements of CellUnion with the
// same label:
//
// index.AddCellUnion(cellUnion, label)
// index.AddCellUnion(cellUnion, label)
//
// Note that the index is not dynamic; the contents of the index cannot be
// changed once it has been built. Adding more after calling Build results in
@@ -375,7 +375,7 @@ func (c *CellIndexContentsIterator) StartUnion(r *CellIndexRangeIterator) {
// is to use a built-in method such as IntersectingLabels (which returns
// the labels of all cells that intersect a given target CellUnion):
//
// labels := index.IntersectingLabels(targetUnion);
// labels := index.IntersectingLabels(targetUnion);
//
// Alternatively, you can use a ClosestCellQuery which computes the cell(s)
// that are closest to a given target geometry.
@@ -466,12 +466,12 @@ func (c *CellIndex) Build() {
// We also create two special deltas to ensure that a RangeNode is emitted at
// the beginning and end of the CellID range.
deltas = append(deltas, delta{
startID: CellIDFromFace(0).ChildBeginAtLevel(maxLevel),
startID: CellIDFromFace(0).ChildBeginAtLevel(MaxLevel),
cellID: CellID(0),
label: -1,
})
deltas = append(deltas, delta{
startID: CellIDFromFace(5).ChildEndAtLevel(maxLevel),
startID: CellIDFromFace(5).ChildEndAtLevel(MaxLevel),
cellID: CellID(0),
label: -1,
})

View File

@@ -23,10 +23,10 @@ import (
"strconv"
"strings"
"github.com/golang/geo/r1"
"github.com/golang/geo/r2"
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/r1"
"github.com/blevesearch/geo/r2"
"github.com/blevesearch/geo/r3"
"github.com/blevesearch/geo/s1"
)
// CellID uniquely identifies a cell in the S2 cell decomposition.
@@ -39,15 +39,15 @@ import (
// Sequentially increasing cell IDs follow a continuous space-filling curve
// over the entire sphere. They have the following properties:
//
// - The ID of a cell at level k consists of a 3-bit face number followed
// by k bit pairs that recursively select one of the four children of
// each cell. The next bit is always 1, and all other bits are 0.
// Therefore, the level of a cell is determined by the position of its
// lowest-numbered bit that is turned on (for a cell at level k, this
// position is 2 * (maxLevel - k)).
// - The ID of a cell at level k consists of a 3-bit face number followed
// by k bit pairs that recursively select one of the four children of
// each cell. The next bit is always 1, and all other bits are 0.
// Therefore, the level of a cell is determined by the position of its
// lowest-numbered bit that is turned on (for a cell at level k, this
// position is 2 * (MaxLevel - k)).
//
// - The ID of a parent cell is at the midpoint of the range of IDs spanned
// by its children (or by its descendants at any level).
// - The ID of a parent cell is at the midpoint of the range of IDs spanned
// by its children (or by its descendants at any level).
//
// Leaf cells are often used to represent points on the unit sphere, and
// this type provides methods for converting directly between these two
@@ -73,38 +73,40 @@ func (c cellIDs) Len() int { return len(c) }
func (c cellIDs) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c cellIDs) Less(i, j int) bool { return c[i] < c[j] }
// TODO(dsymonds): Some of these constants should probably be exported.
const (
faceBits = 3
numFaces = 6
// FaceBits is the number of bits used to encode the face number.
FaceBits = 3
// NumFaces is the number of faces.
NumFaces = 6
// This is the number of levels needed to specify a leaf cell.
maxLevel = 30
// MaxLevel is the number of levels needed to specify a leaf cell.
MaxLevel = 30
// The extra position bit (61 rather than 60) lets us encode each cell as its
// Hilbert curve position at the cell center (which is halfway along the
// portion of the Hilbert curve that fills that cell).
posBits = 2*maxLevel + 1
// PosBits is the total number of position bits. The extra bit (61 rather
// than 60) lets us encode each cell as its Hilbert curve position at the
// cell center (which is halfway along the portion of the Hilbert curve that
// fills that cell).
PosBits = 2*MaxLevel + 1
// The maximum index of a valid leaf cell plus one. The range of valid leaf
// cell indices is [0..maxSize-1].
maxSize = 1 << maxLevel
// MaxSize is the maximum index of a valid leaf cell plus one. The range of
// valid leaf cell indices is [0..MaxSize-1].
MaxSize = 1 << MaxLevel
wrapOffset = uint64(numFaces) << posBits
wrapOffset = uint64(NumFaces) << PosBits
)
// CellIDFromFacePosLevel returns a cell given its face in the range
// [0,5], the 61-bit Hilbert curve position pos within that face, and
// the level in the range [0,maxLevel]. The position in the cell ID
// the level in the range [0,MaxLevel]. The position in the cell ID
// will be truncated to correspond to the Hilbert curve position at
// the center of the returned cell.
func CellIDFromFacePosLevel(face int, pos uint64, level int) CellID {
return CellID(uint64(face)<<posBits + pos | 1).Parent(level)
return CellID(uint64(face)<<PosBits + pos | 1).Parent(level)
}
// CellIDFromFace returns the cell corresponding to a given S2 cube face.
func CellIDFromFace(face int) CellID {
return CellID((uint64(face) << posBits) + lsbForLevel(0))
return CellID((uint64(face) << PosBits) + lsbForLevel(0))
}
// CellIDFromLatLng returns the leaf cell containing ll.
@@ -140,18 +142,18 @@ func (ci CellID) ToToken() string {
// IsValid reports whether ci represents a valid cell.
func (ci CellID) IsValid() bool {
return ci.Face() < numFaces && (ci.lsb()&0x1555555555555555 != 0)
return ci.Face() < NumFaces && (ci.lsb()&0x1555555555555555 != 0)
}
// Face returns the cube face for this cell ID, in the range [0,5].
func (ci CellID) Face() int { return int(uint64(ci) >> posBits) }
func (ci CellID) Face() int { return int(uint64(ci) >> PosBits) }
// Pos returns the position along the Hilbert curve of this cell ID, in the range [0,2^posBits-1].
func (ci CellID) Pos() uint64 { return uint64(ci) & (^uint64(0) >> faceBits) }
// Pos returns the position along the Hilbert curve of this cell ID, in the range [0,2^PosBits-1].
func (ci CellID) Pos() uint64 { return uint64(ci) & (^uint64(0) >> FaceBits) }
// Level returns the subdivision level of this cell ID, in the range [0, maxLevel].
// Level returns the subdivision level of this cell ID, in the range [0, MaxLevel].
func (ci CellID) Level() int {
return maxLevel - findLSBSetNonZero64(uint64(ci))>>1
return MaxLevel - findLSBSetNonZero64(uint64(ci))>>1
}
// IsLeaf returns whether this cell ID is at the deepest level;
@@ -164,11 +166,11 @@ func (ci CellID) IsLeaf() bool { return uint64(ci)&1 != 0 }
// ChildPosition(1) returns the position of this cell's level-1
// ancestor within its top-level face cell.
func (ci CellID) ChildPosition(level int) int {
return int(uint64(ci)>>uint64(2*(maxLevel-level)+1)) & 3
return int(uint64(ci)>>uint64(2*(MaxLevel-level)+1)) & 3
}
// lsbForLevel returns the lowest-numbered bit that is on for cells at the given level.
func lsbForLevel(level int) uint64 { return 1 << uint64(2*(maxLevel-level)) }
func lsbForLevel(level int) uint64 { return 1 << uint64(2*(MaxLevel-level)) }
// Parent returns the cell at the given level, which must be no greater than the current level.
func (ci CellID) Parent(level int) CellID {
@@ -201,8 +203,9 @@ func (ci CellID) Children() [4]CellID {
return ch
}
// sizeIJ reports the edge length of cells at the given level in (i,j)-space.
func sizeIJ(level int) int {
return 1 << uint(maxLevel-level)
return 1 << uint(MaxLevel-level)
}
// EdgeNeighbors returns the four cells that are adjacent across the cell's four edges.
@@ -220,9 +223,9 @@ func (ci CellID) EdgeNeighbors() [4]CellID {
}
}
// VertexNeighbors returns the neighboring cellIDs with vertex closest to this cell at the given level.
// (Normally there are four neighbors, but the closest vertex may only have three neighbors if it is one of
// the 8 cube vertices.)
// VertexNeighbors returns the cellIDs of the neighbors of the closest vertex to
// this cell at the given level. Normally there are four neighbors, but the closest
// vertex may only have three neighbors if it is one of the 8 cube vertices.
func (ci CellID) VertexNeighbors(level int) []CellID {
halfSize := sizeIJ(level + 1)
size := halfSize << 1
@@ -232,14 +235,14 @@ func (ci CellID) VertexNeighbors(level int) []CellID {
var ioffset, joffset int
if i&halfSize != 0 {
ioffset = size
isame = (i + size) < maxSize
isame = (i + size) < MaxSize
} else {
ioffset = -size
isame = (i - size) >= 0
}
if j&halfSize != 0 {
joffset = size
jsame = (j + size) < maxSize
jsame = (j + size) < MaxSize
} else {
joffset = -size
jsame = (j - size) >= 0
@@ -265,8 +268,13 @@ func (ci CellID) VertexNeighbors(level int) []CellID {
// same neighbor may be returned more than once. There could be up to eight
// neighbors including the diagonal ones that share the vertex.
//
// This requires level >= ci.Level().
// Returns nil if level < ci.Level() (cells would not be neighboring) or
// level > MaxLevel (no such cells exist).
func (ci CellID) AllNeighbors(level int) []CellID {
if level < ci.Level() || level > MaxLevel {
return nil
}
var neighbors []CellID
face, i, j, _ := ci.faceIJOrientation()
@@ -287,21 +295,21 @@ func (ci CellID) AllNeighbors(level int) []CellID {
if k < 0 {
sameFace = (j+k >= 0)
} else if k >= size {
sameFace = (j+k < maxSize)
sameFace = (j+k < MaxSize)
} else {
sameFace = true
// Top and bottom neighbors.
neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+k, j-nbrSize,
j-size >= 0).Parent(level))
neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+k, j+size,
j+size < maxSize).Parent(level))
j+size < MaxSize).Parent(level))
}
// Left, right, and diagonal neighbors.
neighbors = append(neighbors, cellIDFromFaceIJSame(face, i-nbrSize, j+k,
sameFace && i-size >= 0).Parent(level))
neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+size, j+k,
sameFace && i+size < maxSize).Parent(level))
sameFace && i+size < MaxSize).Parent(level))
if k >= size {
break
@@ -341,10 +349,10 @@ func (ci CellID) String() string {
return b.String()
}
// cellIDFromString returns a CellID from a string in the form "1/3210".
func cellIDFromString(s string) CellID {
// CellIDFromString returns a CellID from a string in the form "1/3210".
func CellIDFromString(s string) CellID {
level := len(s) - 2
if level < 0 || level > maxLevel {
if level < 0 || level > MaxLevel {
return CellID(0)
}
face := int(s[0] - '0')
@@ -353,8 +361,9 @@ func cellIDFromString(s string) CellID {
}
id := CellIDFromFace(face)
for i := 2; i < len(s); i++ {
childPos := s[i] - '0'
if childPos < 0 || childPos > 3 {
var childPos = s[i] - '0'
// Bytes are non-negative.
if childPos > 3 {
return CellID(0)
}
id = id.Children()[childPos]
@@ -373,9 +382,9 @@ func (ci CellID) LatLng() LatLng { return LatLngFromPoint(Point{ci.rawPoint()})
// ChildBegin returns the first child in a traversal of the children of this cell, in Hilbert curve order.
//
// for ci := c.ChildBegin(); ci != c.ChildEnd(); ci = ci.Next() {
// ...
// }
// for ci := c.ChildBegin(); ci != c.ChildEnd(); ci = ci.Next() {
// ...
// }
func (ci CellID) ChildBegin() CellID {
ol := ci.lsb()
return CellID(uint64(ci) - ol + ol>>2)
@@ -445,7 +454,7 @@ func (ci CellID) AdvanceWrap(steps int64) CellID {
// We clamp the number of steps if necessary to ensure that we do not
// advance past the End() or before the Begin() of this level.
shift := uint(2*(maxLevel-ci.Level()) + 1)
shift := uint(2*(MaxLevel-ci.Level()) + 1)
if steps < 0 {
if min := -int64(uint64(ci) >> shift); steps < min {
wrap := int64(wrapOffset >> shift)
@@ -501,14 +510,14 @@ func (ci *CellID) decode(d *decoder) {
// for this cell's level.
// The return value is always non-negative.
func (ci CellID) distanceFromBegin() int64 {
return int64(ci >> uint64(2*(maxLevel-ci.Level())+1))
return int64(ci >> uint64(2*(MaxLevel-ci.Level())+1))
}
// rawPoint returns an unnormalized r3 vector from the origin through the center
// of the s2 cell on the sphere.
func (ci CellID) rawPoint() r3.Vector {
face, si, ti := ci.faceSiTi()
return faceUVToXYZ(face, stToUV((0.5/maxSize)*float64(si)), stToUV((0.5/maxSize)*float64(ti)))
return faceUVToXYZ(face, stToUV((0.5/MaxSize)*float64(si)), stToUV((0.5/MaxSize)*float64(ti)))
}
// faceSiTi returns the Face/Si/Ti coordinates of the center of the cell.
@@ -529,7 +538,7 @@ func (ci CellID) faceSiTi() (face int, si, ti uint32) {
func (ci CellID) faceIJOrientation() (f, i, j, orientation int) {
f = ci.Face()
orientation = f & swapMask
nbits := maxLevel - 7*lookupBits // first iteration
nbits := MaxLevel - 7*lookupBits // first iteration
// Each iteration maps 8 bits of the Hilbert curve position into
// 4 bits of "i" and "j". The lookup table transforms a key of the
@@ -550,9 +559,9 @@ func (ci CellID) faceIJOrientation() (f, i, j, orientation int) {
// The position of a non-leaf cell at level "n" consists of a prefix of
// 2*n bits that identifies the cell, followed by a suffix of
// 2*(maxLevel-n)+1 bits of the form 10*. If n==maxLevel, the suffix is
// 2*(MaxLevel-n)+1 bits of the form 10*. If n==MaxLevel, the suffix is
// just "1" and has no effect. Otherwise, it consists of "10", followed
// by (maxLevel-n-1) repetitions of "00", followed by "0". The "10" has
// by (MaxLevel-n-1) repetitions of "00", followed by "0". The "10" has
// no effect, while each occurrence of "00" has the effect of reversing
// the swapMask bit.
if ci.lsb()&0x1111111111111110 != 0 {
@@ -566,7 +575,7 @@ func (ci CellID) faceIJOrientation() (f, i, j, orientation int) {
func cellIDFromFaceIJ(f, i, j int) CellID {
// Note that this value gets shifted one bit to the left at the end
// of the function.
n := uint64(f) << (posBits - 1)
n := uint64(f) << (PosBits - 1)
// Alternating faces have opposite Hilbert curve orientations; this
// is necessary in order for all faces to have a right-handed
// coordinate system.
@@ -591,8 +600,8 @@ func cellIDFromFaceIJWrap(f, i, j int) CellID {
// Convert i and j to the coordinates of a leaf cell just beyond the
// boundary of this face. This prevents 32-bit overflow in the case
// of finding the neighbors of a face cell.
i = clampInt(i, -1, maxSize)
j = clampInt(j, -1, maxSize)
i = clampInt(i, -1, MaxSize)
j = clampInt(j, -1, MaxSize)
// We want to wrap these coordinates onto the appropriate adjacent face.
// The easiest way to do this is to convert the (i,j) coordinates to (x,y,z)
@@ -607,10 +616,10 @@ func cellIDFromFaceIJWrap(f, i, j int) CellID {
// [-1,1]x[-1,1] face rectangle, since otherwise the reprojection step
// (which divides by the new z coordinate) might change the other
// coordinates enough so that we end up in the wrong leaf cell.
const scale = 1.0 / maxSize
const scale = 1.0 / MaxSize
limit := math.Nextafter(1, 2)
u := math.Max(-limit, math.Min(limit, scale*float64((i<<1)+1-maxSize)))
v := math.Max(-limit, math.Min(limit, scale*float64((j<<1)+1-maxSize)))
u := math.Max(-limit, math.Min(limit, scale*float64((i<<1)+1-MaxSize)))
v := math.Max(-limit, math.Min(limit, scale*float64((j<<1)+1-MaxSize)))
// Find the leaf cell coordinates on the adjacent face, and convert
// them to a cell id at the appropriate level.
@@ -625,30 +634,17 @@ func cellIDFromFaceIJSame(f, i, j int, sameFace bool) CellID {
return cellIDFromFaceIJWrap(f, i, j)
}
// ijToSTMin converts the i- or j-index of a leaf cell to the minimum corresponding
// s- or t-value contained by that cell. The argument must be in the range
// [0..2**30], i.e. up to one position beyond the normal range of valid leaf
// cell indices.
func ijToSTMin(i int) float64 {
return float64(i) / float64(maxSize)
}
// stToIJ converts value in ST coordinates to a value in IJ coordinates.
func stToIJ(s float64) int {
return clampInt(int(math.Floor(maxSize*s)), 0, maxSize-1)
}
// cellIDFromPoint returns a leaf cell containing point p. Usually there is
// exactly one such cell, but for points along the edge of a cell, any
// adjacent cell may be (deterministically) chosen. This is because
// s2.CellIDs are considered to be closed sets. The returned cell will
// always contain the given point, i.e.
//
// CellFromPoint(p).ContainsPoint(p)
// CellFromPoint(p).ContainsPoint(p)
//
// is always true.
func cellIDFromPoint(p Point) CellID {
f, u, v := xyzToFaceUV(r3.Vector{p.X, p.Y, p.Z})
f, u, v := xyzToFaceUV(r3.Vector{X: p.X, Y: p.Y, Z: p.Z})
i := stToIJ(uvToST(u))
j := stToIJ(uvToST(v))
return cellIDFromFaceIJ(f, i, j)
@@ -768,7 +764,7 @@ func (ci CellID) Advance(steps int64) CellID {
// We clamp the number of steps if necessary to ensure that we do not
// advance past the End() or before the Begin() of this level. Note that
// minSteps and maxSteps always fit in a signed 64-bit integer.
stepShift := uint(2*(maxLevel-ci.Level()) + 1)
stepShift := uint(2*(MaxLevel-ci.Level()) + 1)
if steps < 0 {
minSteps := -int64(uint64(ci) >> stepShift)
if steps < minSteps {
@@ -786,7 +782,7 @@ func (ci CellID) Advance(steps int64) CellID {
// centerST return the center of the CellID in (s,t)-space.
func (ci CellID) centerST() r2.Point {
_, si, ti := ci.faceSiTi()
return r2.Point{siTiToST(si), siTiToST(ti)}
return r2.Point{X: siTiToST(si), Y: siTiToST(ti)}
}
// sizeST returns the edge length of this CellID in (s,t)-space at the given level.
@@ -797,7 +793,7 @@ func (ci CellID) sizeST(level int) float64 {
// boundST returns the bound of this CellID in (s,t)-space.
func (ci CellID) boundST() r2.Rect {
s := ci.sizeST(ci.Level())
return r2.RectFromCenterSize(ci.centerST(), r2.Point{s, s})
return r2.RectFromCenterSize(ci.centerST(), r2.Point{X: s, Y: s})
}
// centerUV returns the center of this CellID in (u,v)-space. Note that
@@ -806,7 +802,7 @@ func (ci CellID) boundST() r2.Rect {
// the (u,v) rectangle covered by the cell.
func (ci CellID) centerUV() r2.Point {
_, si, ti := ci.faceSiTi()
return r2.Point{stToUV(siTiToST(si)), stToUV(siTiToST(ti))}
return r2.Point{X: stToUV(siTiToST(si)), Y: stToUV(siTiToST(ti))}
}
// boundUV returns the bound of this CellID in (u,v)-space.
@@ -835,23 +831,23 @@ func expandEndpoint(u, maxV, sinDist float64) float64 {
// of the boundary.
//
// Distances are measured *on the sphere*, not in (u,v)-space. For example,
// you can use this method to expand the (u,v)-bound of an CellID so that
// you can use this method to expand the (u,v)-bound of a CellID so that
// it contains all points within 5km of the original cell. You can then
// test whether a point lies within the expanded bounds like this:
//
// if u, v, ok := faceXYZtoUV(face, point); ok && bound.ContainsPoint(r2.Point{u,v}) { ... }
// if u, v, ok := faceXYZtoUV(face, point); ok && bound.ContainsPoint(r2.Point{u,v}) { ... }
//
// Limitations:
//
// - Because the rectangle is drawn on one of the six cube-face planes
// (i.e., {x,y,z} = +/-1), it can cover at most one hemisphere. This
// limits the maximum amount that a rectangle can be expanded. For
// example, CellID bounds can be expanded safely by at most 45 degrees
// (about 5000 km on the Earth's surface).
// - Because the rectangle is drawn on one of the six cube-face planes
// (i.e., {x,y,z} = +/-1), it can cover at most one hemisphere. This
// limits the maximum amount that a rectangle can be expanded. For
// example, CellID bounds can be expanded safely by at most 45 degrees
// (about 5000 km on the Earth's surface).
//
// - The implementation is not exact for negative distances. The resulting
// rectangle will exclude all points within the given distance of the
// boundary but may be slightly smaller than necessary.
// - The implementation is not exact for negative distances. The resulting
// rectangle will exclude all points within the given distance of the
// boundary but may be slightly smaller than necessary.
func expandedByDistanceUV(uv r2.Rect, distance s1.Angle) r2.Rect {
// Expand each of the four sides of the rectangle just enough to include all
// points within the given distance of that side. (The rectangle may be
@@ -860,10 +856,10 @@ func expandedByDistanceUV(uv r2.Rect, distance s1.Angle) r2.Rect {
maxV := math.Max(math.Abs(uv.Y.Lo), math.Abs(uv.Y.Hi))
sinDist := math.Sin(float64(distance))
return r2.Rect{
X: r1.Interval{expandEndpoint(uv.X.Lo, maxV, -sinDist),
expandEndpoint(uv.X.Hi, maxV, sinDist)},
Y: r1.Interval{expandEndpoint(uv.Y.Lo, maxU, -sinDist),
expandEndpoint(uv.Y.Hi, maxU, sinDist)}}
X: r1.Interval{Lo: expandEndpoint(uv.X.Lo, maxV, -sinDist),
Hi: expandEndpoint(uv.X.Hi, maxV, sinDist)},
Y: r1.Interval{Lo: expandEndpoint(uv.Y.Lo, maxU, -sinDist),
Hi: expandEndpoint(uv.Y.Hi, maxU, sinDist)}}
}
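// An in-package sketch of expanding a cell's UV bound by a distance on the
// sphere (expandedByDistanceUV and boundUV are unexported; the 5 km figure
// and the 6371.01 km mean Earth radius are assumptions for illustration):
//
//	ci := CellIDFromFace(2).ChildBeginAtLevel(5)
//	dist := s1.Angle(5.0 / 6371.01) // ~5 km as an angle in radians
//	expanded := expandedByDistanceUV(ci.boundUV(), dist)
//	_ = expanded // contains all points within ~5 km of the original cell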
// MaxTile returns the largest cell with the same RangeMin such that
@@ -872,7 +868,7 @@ func expandedByDistanceUV(uv r2.Rect, distance s1.Angle) r2.Rect {
// a given range (a tiling). This example shows how to generate a tiling
// for a semi-open range of leaf cells [start, limit):
//
// for id := start.MaxTile(limit); id != limit; id = id.Next().MaxTile(limit)) { ... }
// for id := start.MaxTile(limit); id != limit; id = id.Next().MaxTile(limit) { ... }
//
// Note that in general the cells in the tiling will be of different sizes;
// they gradually get larger (near the middle of the range) and then

View File

@@ -19,7 +19,7 @@ import (
"io"
"sort"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/s1"
)
// A CellUnion is a collection of CellIDs.
@@ -277,9 +277,9 @@ func (cu *CellUnion) Denormalize(minLevel, levelMod int) {
newLevel = minLevel
}
if levelMod > 1 {
newLevel += (maxLevel - (newLevel - minLevel)) % levelMod
if newLevel > maxLevel {
newLevel = maxLevel
newLevel += (MaxLevel - (newLevel - minLevel)) % levelMod
if newLevel > MaxLevel {
newLevel = MaxLevel
}
}
if newLevel == level {
@@ -361,7 +361,7 @@ func (cu *CellUnion) CellUnionBound() []CellID {
func (cu *CellUnion) LeafCellsCovered() int64 {
var numLeaves int64
for _, c := range *cu {
numLeaves += 1 << uint64((maxLevel-int64(c.Level()))<<1)
numLeaves += 1 << uint64((MaxLevel-int64(c.Level()))<<1)
}
return numLeaves
}
@@ -489,7 +489,7 @@ func (cu *CellUnion) ExpandAtLevel(level int) {
// the number of cells in the output can be up to 4 * (1 + 2 ** maxLevelDiff) times
// larger than the number of cells in the input.
func (cu *CellUnion) ExpandByRadius(minRadius s1.Angle, maxLevelDiff int) {
minLevel := maxLevel
minLevel := MaxLevel
for _, cid := range *cu {
minLevel = minInt(minLevel, cid.Level())
}
@@ -520,7 +520,7 @@ func (cu CellUnion) Equal(o CellUnion) bool {
// AverageArea returns the average area of this CellUnion.
// This is accurate to within a factor of 1.7.
func (cu *CellUnion) AverageArea() float64 {
return AvgAreaMetric.Value(maxLevel) * float64(cu.LeafCellsCovered())
return AvgAreaMetric.Value(MaxLevel) * float64(cu.LeafCellsCovered())
}
// ApproxArea returns the approximate area of this CellUnion. This method is accurate

View File

@@ -17,7 +17,7 @@ package s2
import (
"math"
"github.com/golang/geo/r3"
"github.com/blevesearch/geo/r3"
)
// There are several notions of the "centroid" of a triangle. First, there
@@ -89,12 +89,12 @@ func TrueCentroid(a, b, c Point) Point {
// This code still isn't as numerically stable as it could be.
// The biggest potential improvement is to compute B-A and C-A more
// accurately so that (B-A)x(C-A) is always inside triangle ABC.
x := r3.Vector{a.X, b.X - a.X, c.X - a.X}
y := r3.Vector{a.Y, b.Y - a.Y, c.Y - a.Y}
z := r3.Vector{a.Z, b.Z - a.Z, c.Z - a.Z}
r := r3.Vector{ra, rb - ra, rc - ra}
x := r3.Vector{X: a.X, Y: b.X - a.X, Z: c.X - a.X}
y := r3.Vector{X: a.Y, Y: b.Y - a.Y, Z: c.Y - a.Y}
z := r3.Vector{X: a.Z, Y: b.Z - a.Z, Z: c.Z - a.Z}
r := r3.Vector{X: ra, Y: rb - ra, Z: rc - ra}
return Point{r3.Vector{y.Cross(z).Dot(r), z.Cross(x).Dot(r), x.Cross(y).Dot(r)}.Mul(0.5)}
return Point{r3.Vector{X: y.Cross(z).Dot(r), Y: z.Cross(x).Dot(r), Z: x.Cross(y).Dot(r)}.Mul(0.5)}
}
// EdgeTrueCentroid returns the true centroid of the spherical geodesic edge AB

View File

@@ -45,16 +45,16 @@ func (q *ContainsVertexQuery) AddEdge(v Point, direction int) {
// not contained, and 0 if the incident edges consisted of matched sibling pairs.
func (q *ContainsVertexQuery) ContainsVertex() int {
// Find the unmatched edge that is immediately clockwise from Ortho(P).
referenceDir := Point{q.target.Ortho()}
refDir := q.target.referenceDir()
bestPoint := referenceDir
bestPoint := refDir
bestDir := 0
for k, v := range q.edgeMap {
if v == 0 {
continue // This is a "matched" edge.
}
if OrderedCCW(referenceDir, bestPoint, k, q.target) {
if OrderedCCW(refDir, bestPoint, k, q.target) {
bestPoint = k
bestDir = v
}

View File

@@ -17,7 +17,7 @@ package s2
import (
"sort"
"github.com/golang/geo/r3"
"github.com/blevesearch/geo/r3"
)
// ConvexHullQuery builds the convex hull of any collection of points,
@@ -31,16 +31,16 @@ import (
//
// Containment of input geometry is defined as follows:
//
// - Each input loop and polygon is contained by the convex hull exactly
// (i.e., according to Polygon's Contains(Polygon)).
// - Each input loop and polygon is contained by the convex hull exactly
// (i.e., according to Polygon's Contains(Polygon)).
//
// - Each input point is either contained by the convex hull or is a vertex
// of the convex hull. (Recall that S2Loops do not necessarily contain their
// vertices.)
// - Each input point is either contained by the convex hull or is a vertex
// of the convex hull. (Recall that S2Loops do not necessarily contain their
// vertices.)
//
// - For each input polyline, the convex hull contains all of its vertices
// according to the rule for points above. (The definition of convexity
// then ensures that the convex hull also contains the polyline edges.)
// - For each input polyline, the convex hull contains all of its vertices
// according to the rule for points above. (The definition of convexity
// then ensures that the convex hull also contains the polyline edges.)
//
// To use this type, call the various Add... methods to add your input geometry, and
// then call ConvexHull. Note that ConvexHull does *not* reset the

View File

@@ -17,7 +17,7 @@ package s2
import (
"sort"
"github.com/golang/geo/r2"
"github.com/blevesearch/geo/r2"
)
// CrossingEdgeQuery is used to find the Edge IDs of Shapes that are crossed by
@@ -277,16 +277,20 @@ func (c *CrossingEdgeQuery) getCellsForEdge(a, b Point) {
// 3. edgeRoot does not intersect any index cells. In this case there
// is nothing to do.
relation := c.iter.LocateCellID(edgeRoot)
if relation == Indexed {
// edgeRoot is an index cell or is contained by an index cell (case 1).
switch relation {
case Indexed:
// edgeRoot is an index cell or is contained by an
// index cell (case 1).
c.cells = append(c.cells, c.iter.IndexCell())
} else if relation == Subdivided {
// edgeRoot is subdivided into one or more index cells (case 2). We
// find the cells intersected by AB using recursive subdivision.
case Subdivided:
// edgeRoot is subdivided into one or more index cells
// (case 2). We find the cells intersected by AB using
// recursive subdivision.
if !edgeRoot.isFace() {
pcell = PaddedCellFromCellID(edgeRoot, 0)
}
c.computeCellsIntersected(pcell, edgeBound)
case Disjoint:
}
}
}
@@ -356,7 +360,7 @@ func (c *CrossingEdgeQuery) clipVAxis(edgeBound r2.Rect, center float64, i int,
}
}
// splitUBound returns the bound for two children as a result of spliting the
// splitUBound returns the bound for two children as a result of splitting the
// current edge at the given value U.
func (c *CrossingEdgeQuery) splitUBound(edgeBound r2.Rect, u float64) [2]r2.Rect {
v := edgeBound.Y.ClampPoint(interpolateFloat64(u, c.a.X, c.b.X, c.a.Y, c.b.Y))
@@ -369,7 +373,7 @@ func (c *CrossingEdgeQuery) splitUBound(edgeBound r2.Rect, u float64) [2]r2.Rect
return splitBound(edgeBound, 0, diag, u, v)
}
// splitVBound returns the bound for two children as a result of spliting the
// splitVBound returns the bound for two children as a result of splitting the
// current edge into two child edges at the given value V.
func (c *CrossingEdgeQuery) splitVBound(edgeBound r2.Rect, v float64) [2]r2.Rect {
u := edgeBound.X.ClampPoint(interpolateFloat64(v, c.a.Y, c.b.Y, c.a.X, c.b.X))
@@ -380,7 +384,7 @@ func (c *CrossingEdgeQuery) splitVBound(edgeBound r2.Rect, v float64) [2]r2.Rect
return splitBound(edgeBound, diag, 0, u, v)
}
// splitBound returns the bounds for the two childrenn as a result of spliting
// splitBound returns the bounds for the two children as a result of splitting
// the current edge into two child edges at the given point (u,v). uEnd and vEnd
// indicate which bound endpoints of the first child will be updated.
func splitBound(edgeBound r2.Rect, uEnd, vEnd int, u, v float64) [2]r2.Rect {

View File

@@ -15,7 +15,7 @@
package s2
import (
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/s1"
)
// The distance interface represents a set of common methods used by algorithms

View File

@@ -20,7 +20,7 @@ r2 (operates on ℝ²) and r3 (operates on ℝ³).
This package provides types and functions for the S2 cell hierarchy and coordinate systems.
The S2 cell hierarchy is a hierarchical decomposition of the surface of a unit sphere (S²)
into ``cells''; it is highly efficient, scales from continental size to under 1 cm²
into cells; it is highly efficient, scales from continental size to under 1 cm²
and preserves spatial locality (nearby cells have close IDs).
More information including an in-depth introduction to S2 can be found on the

View File

@@ -27,9 +27,9 @@ package s2
import (
"math"
"github.com/golang/geo/r1"
"github.com/golang/geo/r2"
"github.com/golang/geo/r3"
"github.com/blevesearch/geo/r1"
"github.com/blevesearch/geo/r2"
"github.com/blevesearch/geo/r3"
)
const (
@@ -76,6 +76,11 @@ const (
// This method guarantees that the clipped vertices lie within the [-1,1]x[-1,1]
// cube face rectangle and are within faceClipErrorUVDist of the line AB, but
// the results may differ from those produced by FaceSegments.
//
// Returns false if AB does not intersect the given face.
//
// The test for face intersection is exact, so if this function returns false
// then the edge definitively does not intersect the face.
func ClipToFace(a, b Point, face int) (aUV, bUV r2.Point, intersects bool) {
return ClipToPaddedFace(a, b, face, 0.0)
}
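// A minimal usage sketch (the endpoints are illustrative; both project
// onto face 0, so the edge trivially intersects it):
//
//	a := PointFromCoords(1, 0.1, 0.2)
//	b := PointFromCoords(1, -0.3, 0.4)
//	if aUV, bUV, ok := ClipToFace(a, b, 0); ok {
//		_, _ = aUV, bUV // clipped endpoints in UV coordinates on face 0
//	}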
@@ -89,7 +94,7 @@ func ClipToPaddedFace(a, b Point, f int, padding float64) (aUV, bUV r2.Point, in
if face(a.Vector) == f && face(b.Vector) == f {
au, av := validFaceXYZToUV(f, a.Vector)
bu, bv := validFaceXYZToUV(f, b.Vector)
return r2.Point{au, av}, r2.Point{bu, bv}, true
return r2.Point{X: au, Y: av}, r2.Point{X: bu, Y: bv}, true
}
// Convert everything into the (u,v,w) coordinates of the given face. Note
@@ -204,7 +209,7 @@ func (p pointUVW) intersectsFace() bool {
}
// intersectsOppositeEdges reports whether a directed line L intersects two
// opposite edges of a cube face F. This includs the case where L passes
// opposite edges of a cube face F. This includes the case where L passes
// exactly through a corner vertex of F. The directed line L is defined
// by its normal N in the (u,v,w) coordinates of F.
func (p pointUVW) intersectsOppositeEdges() bool {
@@ -283,14 +288,14 @@ func (p pointUVW) exitPoint(a axis) r2.Point {
if p.Y > 0 {
u = 1.0
}
return r2.Point{u, (-u*p.X - p.Z) / p.Y}
return r2.Point{X: u, Y: (-u*p.X - p.Z) / p.Y}
}
v := -1.0
if p.X < 0 {
v = 1.0
}
return r2.Point{(-v*p.Y - p.Z) / p.X, v}
return r2.Point{X: (-v*p.Y - p.Z) / p.X, Y: v}
}
// clipDestination returns a score which is used to indicate if the clipped edge AB
@@ -310,7 +315,7 @@ func clipDestination(a, b, scaledN, aTan, bTan pointUVW, scaleUV float64) (r2.Po
// Optimization: if B is within the safe region of the face, use it.
maxSafeUVCoord := 1 - faceClipErrorUVCoord
if b.Z > 0 {
uv = r2.Point{b.X / b.Z, b.Y / b.Z}
uv = r2.Point{X: b.X / b.Z, Y: b.Y / b.Z}
if math.Max(math.Abs(uv.X), math.Abs(uv.Y)) <= maxSafeUVCoord {
return uv, 0
}
@@ -319,7 +324,7 @@ func clipDestination(a, b, scaledN, aTan, bTan pointUVW, scaleUV float64) (r2.Po
// Otherwise find the point B' where the line AB exits the face.
uv = scaledN.exitPoint(scaledN.exitAxis()).Mul(scaleUV)
p := pointUVW(Point{r3.Vector{uv.X, uv.Y, 1.0}})
p := pointUVW(Point{r3.Vector{X: uv.X, Y: uv.Y, Z: 1.0}})
// Determine if the exit point B' is contained within the segment. We do this
// by computing the dot products with two inward-facing tangent vectors at A
@@ -351,7 +356,7 @@ func clipDestination(a, b, scaledN, aTan, bTan pointUVW, scaleUV float64) (r2.Po
if b.Z <= 0 {
score = 3 // B cannot be projected onto this face.
} else {
uv = r2.Point{b.X / b.Z, b.Y / b.Z}
uv = r2.Point{X: b.X / b.Z, Y: b.Y / b.Z}
}
}
@@ -475,18 +480,28 @@ func clipEdgeBound(a, b r2.Point, clip, bound r2.Rect) (r2.Rect, bool) {
}
b1y, b1x, up2 := clipBoundAxis(a.Y, b.Y, b0y, a.X, b.X, b0x, negSlope, clip.Y)
if !up2 {
return r2.Rect{b0x, b0y}, false
return r2.Rect{X: b0x, Y: b0y}, false
}
return r2.Rect{X: b1x, Y: b1y}, true
}
// interpolateFloat64 returns a value with the same combination of a1 and b1 as the
// given value x is of a and b. This function makes the following guarantees:
// - If x == a, then x1 = a1 (exactly).
// - If x == b, then x1 = b1 (exactly).
// - If a <= x <= b, then a1 <= x1 <= b1 (even if a1 == b1).
// - If x == a, then x1 = a1 (exactly).
// - If x == b, then x1 = b1 (exactly).
// - If a <= x <= b and a1 <= b1, then a1 <= x1 <= b1 (even if a1 == b1).
// - More generally, if x is between a and b, then x1 is between a1 and b1.
//
// This requires a != b.
//
// When a <= x <= b or b <= x <= a we can prove the error bound on the resulting
// value is 2.25*dblEpsilon. The error for extrapolating an x value outside of
// a and b can be much worse. See the gappa proof at the end of the file.
func interpolateFloat64(x, a, b, a1, b1 float64) float64 {
// If A == B == X all we can return is the single point.
if a == b {
return a1
}
// To get results that are accurate near both A and B, we interpolate
// starting from the closer of the two points.
if math.Abs(a-x) <= math.Abs(b-x) {
@@ -549,7 +564,7 @@ func FaceSegments(a, b Point) []FaceSegment {
// Complete the current segment by finding the point where AB
// exits the current face.
z := faceXYZtoUVW(face, ab)
n := pointUVW{z.Vector}
n := pointUVW(z)
exitAxis := n.exitAxis()
segment.b = n.exitPoint(exitAxis)
@@ -562,7 +577,7 @@ func FaceSegments(a, b Point) []FaceSegment {
face = nextFace(face, segment.b, exitAxis, n, bFace)
exitUvw := faceXYZtoUVW(face, Point{exitXyz})
segment.face = face
segment.a = r2.Point{exitUvw.X, exitUvw.Y}
segment.a = r2.Point{X: exitUvw.X, Y: exitUvw.Y}
}
// Finish the last segment.
segment.b = bSaved
@@ -590,7 +605,7 @@ func moveOriginToValidFace(face int, a, ab Point, aUV r2.Point) (int, r2.Point)
// Otherwise check whether the normal AB even intersects this face.
z := faceXYZtoUVW(face, ab)
n := pointUVW{z.Vector}
n := pointUVW(z)
if n.intersectsFace() {
// Check whether the point where the line AB exits this face is on the
// wrong side of A (by more than the acceptable error tolerance).

View File

@@ -34,7 +34,6 @@ import (
// }
// return count
// }
//
type EdgeCrosser struct {
a Point
b Point
@@ -70,10 +69,10 @@ func NewEdgeCrosser(a, b Point) *EdgeCrosser {
//
// Properties of CrossingSign:
//
// (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
// (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
// (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
// (3) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
// (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
// (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
// (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
// (3) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
//
// Note that if you want to check an edge against a chain of other edges,
// it is slightly more efficient to use the single-argument version
@@ -155,6 +154,8 @@ func (e *EdgeCrosser) EdgeOrVertexChainCrossing(d Point) bool {
return false
case Cross:
return true
case MaybeCross:
// fall through
}
return VertexCrossing(e.a, e.b, c, d)
}
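A short usage sketch of EdgeCrosser against a chain of edges, assuming the upstream NewChainEdgeCrosser/ChainCrossingSign API and arbitrary test coordinates:

package main

import (
	"fmt"

	"github.com/blevesearch/geo/s2"
)

func main() {
	a := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 0))
	b := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 10))
	chain := []s2.Point{
		s2.PointFromLatLng(s2.LatLngFromDegrees(-5, 5)),
		s2.PointFromLatLng(s2.LatLngFromDegrees(5, 5)),
		s2.PointFromLatLng(s2.LatLngFromDegrees(5, 20)),
	}

	// Seed the crosser with the first chain vertex, then feed one vertex
	// per edge; this reuses the expensive per-edge setup for AB.
	crosser := s2.NewChainEdgeCrosser(a, b, chain[0])
	for _, v := range chain[1:] {
		fmt.Println(crosser.ChainCrossingSign(v))
	}
}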

View File

@@ -18,8 +18,8 @@ import (
"fmt"
"math"
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/r3"
"github.com/blevesearch/geo/s1"
)
const (
@@ -74,10 +74,10 @@ func (c Crossing) String() string {
//
// Properties of CrossingSign:
//
// (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
// (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
// (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
// (3) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
// (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
// (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
// (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
// (3) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
//
// This method implements an exact, consistent perturbation model such
// that no three points are ever considered to be collinear. This means
@@ -107,11 +107,11 @@ func CrossingSign(a, b, c, d Point) Crossing {
//
// Useful properties of VertexCrossing (VC):
//
// (1) VC(a,a,c,d) == VC(a,b,c,c) == false
// (2) VC(a,b,a,b) == VC(a,b,b,a) == true
// (3) VC(a,b,c,d) == VC(a,b,d,c) == VC(b,a,c,d) == VC(b,a,d,c)
// (3) If exactly one of a,b equals one of c,d, then exactly one of
// VC(a,b,c,d) and VC(c,d,a,b) is true
// (1) VC(a,a,c,d) == VC(a,b,c,c) == false
// (2) VC(a,b,a,b) == VC(a,b,b,a) == true
// (3) VC(a,b,c,d) == VC(a,b,d,c) == VC(b,a,c,d) == VC(b,a,d,c)
// (3) If exactly one of a,b equals one of c,d, then exactly one of
// VC(a,b,c,d) and VC(c,d,a,b) is true
//
// It is an error to call this method with 4 distinct vertices.
func VertexCrossing(a, b, c, d Point) bool {
@@ -129,13 +129,13 @@ func VertexCrossing(a, b, c, d Point) bool {
// Optimization: if AB=CD or AB=DC, we can avoid most of the calculations.
switch {
case a == c:
return (b == d) || OrderedCCW(Point{a.Ortho()}, d, b, a)
return (b == d) || OrderedCCW(a.referenceDir(), d, b, a)
case b == d:
return OrderedCCW(Point{b.Ortho()}, c, a, b)
return OrderedCCW(b.referenceDir(), c, a, b)
case a == d:
return (b == c) || OrderedCCW(Point{a.Ortho()}, c, b, a)
return (b == c) || OrderedCCW(a.referenceDir(), c, b, a)
case b == c:
return OrderedCCW(Point{b.Ortho()}, d, a, b)
return OrderedCCW(b.referenceDir(), d, a, b)
}
return false
@@ -152,9 +152,10 @@ func EdgeOrVertexCrossing(a, b, c, d Point) bool {
return false
case Cross:
return true
default:
return VertexCrossing(a, b, c, d)
case MaybeCross:
// Fall through to the final return.
}
return VertexCrossing(a, b, c, d)
}
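A small sketch of the free-function forms, with arbitrarily chosen points (the first pair of edges crosses in its interior; the second pair shares vertex a, so the vertex-crossing rule decides):

package main

import (
	"fmt"

	"github.com/blevesearch/geo/s2"
)

func main() {
	a := s2.PointFromCoords(1, 0, 0)
	b := s2.PointFromCoords(0, 1, 0)
	c := s2.PointFromCoords(1, 1, -1)
	d := s2.PointFromCoords(1, 1, 1)

	// CrossingSign reports Cross for a proper interior crossing; with a
	// shared vertex, EdgeOrVertexCrossing resolves MaybeCross via
	// VertexCrossing.
	fmt.Println(s2.CrossingSign(a, b, c, d))
	fmt.Println(s2.EdgeOrVertexCrossing(a, b, a, d))
}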
// Intersection returns the intersection point of two edges AB and CD that cross
@@ -162,8 +163,8 @@ func EdgeOrVertexCrossing(a, b, c, d Point) bool {
//
// Useful properties of Intersection:
//
// (1) Intersection(b,a,c,d) == Intersection(a,b,d,c) == Intersection(a,b,c,d)
// (2) Intersection(c,d,a,b) == Intersection(a,b,c,d)
// (1) Intersection(b,a,c,d) == Intersection(a,b,d,c) == Intersection(a,b,c,d)
// (2) Intersection(c,d,a,b) == Intersection(a,b,c,d)
//
// The returned intersection point X is guaranteed to be very close to the
// true intersection point of AB and CD, even if the edges intersect at a
@@ -185,7 +186,7 @@ func Intersection(a0, a1, b0, b1 Point) Point {
// error.
//
// - intersectionExact computes the intersection point using precision
// arithmetic and converts the final result back to an Point.
// arithmetic and converts the final result back to a Point.
pt, ok := intersectionStable(a0, a1, b0, b1)
if !ok {
pt = intersectionExact(a0, a1, b0, b1)
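A hedged sketch of Intersection on two edges that cross near (0, 0); coordinates are arbitrary:

package main

import (
	"fmt"

	"github.com/blevesearch/geo/s2"
)

func main() {
	a0 := s2.PointFromLatLng(s2.LatLngFromDegrees(-1, 0))
	a1 := s2.PointFromLatLng(s2.LatLngFromDegrees(1, 0))
	b0 := s2.PointFromLatLng(s2.LatLngFromDegrees(0, -1))
	b1 := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 1))

	// The result is within intersectionError of the true crossing point.
	x := s2.Intersection(a0, a1, b0, b1)
	fmt.Println(s2.LatLngFromPoint(x)) // ≈ [0.0000000, 0.0000000]
}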
@@ -257,10 +258,11 @@ func projection(x, aNorm r3.Vector, aNormLen float64, a0, a1 Point) (proj, bound
// normalized in double precision.
//
// For reference, the bounds that went into this calculation are:
// ||N'-N|| <= ((1 + 2 * sqrt(3))||N|| + 32 * sqrt(3) * dblError) * epsilon
// |(A.B)'-(A.B)| <= (1.5 * (A.B) + 1.5 * ||A|| * ||B||) * epsilon
// ||(X-Y)'-(X-Y)|| <= ||X-Y|| * epsilon
bound = (((3.5+2*math.Sqrt(3))*aNormLen+32*math.Sqrt(3)*dblError)*dist + 1.5*math.Abs(proj)) * epsilon
// ||N'-N|| <= ((1 + 2 * sqrt(3))||N|| + 32 * sqrt(3) * dblError) * tErr
// |(A.B)'-(A.B)| <= (1.5 * (A.B) + 1.5 * ||A|| * ||B||) * tErr
// ||(X-Y)'-(X-Y)|| <= ||X-Y|| * tErr
tErr := roundingEpsilon(x.X)
bound = (((3.5+2*math.Sqrt(3))*aNormLen+32*math.Sqrt(3)*dblError)*dist + 1.5*math.Abs(proj)) * tErr
return proj, bound
}
@@ -334,14 +336,25 @@ func intersectionStableSorted(a0, a1, b0, b1 Point) (Point, bool) {
}
x := b1.Mul(b0Dist).Sub(b0.Mul(b1Dist))
tErr := roundingEpsilon(x.X)
err := bLen*math.Abs(b0Dist*b1Error-b1Dist*b0Error)/
(distSum-errorSum) + 2*distSum*epsilon
(distSum-errorSum) + 2*distSum*tErr
// Finally we normalize the result, compute the corresponding error, and
// check whether the total error is acceptable.
// TODO(rsned): C++ checks Norm2 > some small amount to prevent precision loss.
// xLen2 := x.Norm2()
// if xLen2 < math.SmallestNonzeroFloat64 {
// // If x.Norm2() is less than the minimum normalized value of T, xLen might
// // lose precision and the result might fail to satisfy IsUnitLength().
// // TODO(rsned): Implement RobustNormalize().
// return pt, false
// }
xLen := x.Norm()
maxError := intersectionError
if err > (float64(maxError)-epsilon)*xLen {
if err > (float64(maxError)-tErr)*xLen {
return pt, false
}
@@ -354,7 +367,7 @@ func intersectionStableSorted(a0, a1, b0, b1 Point) (Point, bool) {
// is not guaranteed to have the correct sign (i.e., the return value may need
// to be negated).
func intersectionExact(a0, a1, b0, b1 Point) Point {
// Since we are using presice arithmetic, we don't need to worry about
// Since we are using precise arithmetic, we don't need to worry about
// numerical stability.
a0P := r3.PreciseVectorFromVector(a0.Vector)
a1P := r3.PreciseVectorFromVector(a1.Vector)
@@ -374,7 +387,7 @@ func intersectionExact(a0, a1, b0, b1 Point) Point {
// "crossing" because of simulation of simplicity. Out of the four
// endpoints, exactly two lie in the interior of the other edge. Of
// those two we return the one that is lexicographically smallest.
x = r3.Vector{10, 10, 10} // Greater than any valid S2Point
x = r3.Vector{X: 10, Y: 10, Z: 10} // Greater than any valid S2Point
aNorm := Point{aNormP.Vector()}
bNorm := Point{bNormP.Vector()}
@@ -394,3 +407,47 @@ func intersectionExact(a0, a1, b0, b1 Point) Point {
return Point{x}
}
// AngleContainsVertex reports if the angle ABC contains its vertex B.
// Containment is defined such that if several polygons tile the region around
// a vertex, then exactly one of those polygons contains that vertex.
// Returns false for degenerate angles of the form ABA.
//
// Note that this method is not sufficient to determine vertex containment in
// polygons with duplicate vertices (such as the polygon ABCADE). Use
// ContainsVertexQuery for such polygons. AngleContainsVertex(a, b, c)
// is equivalent to using ContainsVertexQuery as follows:
//
// ContainsVertexQuery query(b);
// query.AddEdge(a, -1); // incoming
// query.AddEdge(c, 1); // outgoing
// return query.ContainsVertex() > 0;
//
// Useful properties of AngleContainsVertex:
//
// (1) AngleContainsVertex(a,b,a) == false
// (2) AngleContainsVertex(a,b,c) == !AngleContainsVertex(c,b,a) unless a == c
// (3) Given vertices v_1 ... v_k ordered cyclically CCW around vertex b,
// AngleContainsVertex(v_{i+1}, b, v_i) is true for exactly one value of i.
//
// REQUIRES: a != b && b != c
func AngleContainsVertex(a, b, c Point) bool {
// A loop with consecutive vertices A, B, C contains vertex B if and only if
// the fixed vector R = referenceDir(B) is contained by the wedge ABC. The
// wedge is closed at A and open at C, i.e. the point B is inside the loop
// if A = R but not if C = R.
//
// Note that the test below is written so as to get correct results when the
// angle ABC is degenerate. If A = C or C = R it returns false, and
// otherwise if A = R it returns true.
return !OrderedCCW(b.referenceDir(), c, a, b)
}
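A minimal sketch exercising property (2) above with arbitrary points:

package main

import (
	"fmt"

	"github.com/blevesearch/geo/s2"
)

func main() {
	b := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 0))
	a := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 10))
	c := s2.PointFromLatLng(s2.LatLngFromDegrees(10, 0))

	// Reversing the wedge flips the result whenever a != c.
	fmt.Println(s2.AngleContainsVertex(a, b, c))
	fmt.Println(s2.AngleContainsVertex(c, b, a))
}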
// TODO(roberts): Differences from C++
// func RobustCrossProd(a, b Point) Point
// func symbolicCrossProd(a, b Point) Point
// func exactCrossProd(a, b Point) Point
// func SignedVertexCrossing(a, b, c, d Point) int
// func isNormalizable(p Point) bool
// func ensureNormalizable(p Point) Point
// func normalizableFromPrecise(p r3.PreciseVector) Point

View File

@@ -20,7 +20,7 @@ package s2
import (
"math"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/s1"
)
// DistanceFromSegment returns the distance of point X from line segment AB.
@@ -96,7 +96,7 @@ func UpdateMinInteriorDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAn
func Project(x, a, b Point) Point {
aXb := a.PointCross(b)
// Find the closest point to X along the great circle through AB.
p := x.Sub(aXb.Mul(x.Dot(aXb.Vector) / aXb.Vector.Norm2()))
p := x.Sub(aXb.Mul(x.Dot(aXb.Vector) / aXb.Norm2()))
// If this point is on the edge AB, then it's the closest point.
if Sign(aXb, a, Point{p}) && Sign(Point{p}, b, aXb) {
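A short sketch of DistanceFromSegment and Project together (coordinates are arbitrary):

package main

import (
	"fmt"

	"github.com/blevesearch/geo/s2"
)

func main() {
	a := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 0))
	b := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 10))
	x := s2.PointFromLatLng(s2.LatLngFromDegrees(1, 5))

	// Distance from X to the segment AB, and the closest point on AB.
	fmt.Println(s2.DistanceFromSegment(x, a, b).Degrees()) // ≈ 1
	fmt.Println(s2.LatLngFromPoint(s2.Project(x, a, b)))
}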
@@ -146,7 +146,7 @@ func InterpolateAtDistance(ax s1.Angle, a, b Point) Point {
// result is always perpendicular to A, even if A=B or A=-B, but it is not
// necessarily unit length. (We effectively normalize it below.)
normal := a.PointCross(b)
tangent := normal.Vector.Cross(a.Vector)
tangent := normal.Cross(a.Vector)
// Now compute the appropriate linear combination of A and "tangent". With
// infinite precision the result would always be unit length, but we
@@ -162,6 +162,14 @@ func InterpolateAtDistance(ax s1.Angle, a, b Point) Point {
// input points are normalized to within the bounds guaranteed by r3.Vector's
// Normalize. The error can be added or subtracted from an s1.ChordAngle
// using its Expanded method.
//
// Note that accuracy goes down as the distance approaches 0 degrees or 180
// degrees (for different reasons). Near 0 degrees the error is acceptable
// for all practical purposes (about 1.2e-15 radians ~= 8 nanometers). For
// exactly antipodal points the maximum error is quite high (0.5 meters), but
// this error drops rapidly as the points move away from antipodality
// (approximately 1 millimeter for points that are 50 meters from antipodal,
// and 1 micrometer for points that are 50km from antipodal).
func minUpdateDistanceMaxError(dist s1.ChordAngle) float64 {
// There are two cases for the maximum error in UpdateMinDistance(),
// depending on whether the closest point is interior to the edge.
@@ -196,9 +204,9 @@ func minUpdateInteriorDistanceMaxError(dist s1.ChordAngle) float64 {
// perpendicular and parallel to a plane containing the edge respectively.
b := math.Min(1.0, 0.5*float64(dist))
a := math.Sqrt(b * (2 - b))
return ((2.5+2*math.Sqrt(3)+8.5*a)*a +
(2+2*math.Sqrt(3)/3+6.5*(1-b))*b +
(23+16/math.Sqrt(3))*dblEpsilon) * dblEpsilon
return ((2.5+2*sqrt3+8.5*a)*a +
(2+2*sqrt3/3+6.5*(1-b))*b +
(23+16/sqrt3)*dblEpsilon) * dblEpsilon
}
// updateMinDistance computes the distance from a point X to a line segment AB,
@@ -328,7 +336,6 @@ func updateEdgePairMinDistance(a0, a1, b0, b1 Point, minDist s1.ChordAngle) (s1.
return 0, false
}
if CrossingSign(a0, a1, b0, b1) == Cross {
minDist = 0
return 0, true
}
@@ -382,7 +389,7 @@ func EdgePairClosestPoints(a0, a1, b0, b1 Point) (Point, Point) {
var minDist s1.ChordAngle
var ok bool
minDist, ok = updateMinDistance(a0, b0, b1, minDist, true)
minDist, _ = updateMinDistance(a0, b0, b1, minDist, true)
closestVertex := 0
if minDist, ok = UpdateMinDistance(a1, b0, b1, minDist); ok {
closestVertex = 1
@@ -390,7 +397,7 @@ func EdgePairClosestPoints(a0, a1, b0, b1 Point) (Point, Point) {
if minDist, ok = UpdateMinDistance(b0, a0, a1, minDist); ok {
closestVertex = 2
}
if minDist, ok = UpdateMinDistance(b1, a0, a1, minDist); ok {
if _, ok = UpdateMinDistance(b1, a0, a1, minDist); ok {
closestVertex = 3
}
switch closestVertex {
@@ -406,3 +413,68 @@ func EdgePairClosestPoints(a0, a1, b0, b1 Point) (Point, Point) {
panic("illegal case reached")
}
}
// PointOnLine returns the point at distance "r" from A along the line AB.
//
// Note that the line AB has a well-defined direction even when A and B are
// antipodal or nearly so. If A == B then an arbitrary direction is chosen.
func PointOnLine(a, b Point, r s1.Angle) Point {
// Use RobustCrossProd() to compute the tangent vector at A towards B. This
// technique is robust even when A and B are antipodal or nearly so.
dir := Point{a.PointCross(b).Cross(a.Vector).Normalize()}
return PointOnRay(a, dir, r)
}
// PointToLeft returns a Point to the left of the edge from `a` to `b` which
// is distance 'r' away from `a` orthogonal to the specified edge.
//
// c (result)
// |
// |
// a --------> b
func PointToLeft(a, b Point, r s1.Angle) Point {
return PointOnRay(a, Point{a.PointCross(b).Normalize()}, r)
}
// PointToRight returns a Point to the right of the edge from `a` to `b` which
// is distance 'r' away from `a` orthogonal to the specified edge.
//
// a --------> b
// |
// |
// c (result)
func PointToRight(a, b Point, r s1.Angle) Point {
return PointOnRay(a, Point{b.PointCross(a).Normalize()}, r)
}
// PointOnRay returns the point at distance "r" along the ray with the given
// origin and direction. "dir" is required to be perpendicular to "origin" (since
// this is how directions on the sphere are represented).
//
// This function is similar to PointOnLine() except that (1) the first
// two arguments are required to be perpendicular and (2) it is much faster.
// It can be used as an alternative to repeatedly calling PointOnLine() by
// computing "dir" as
//
// dir = a.PointCross(b).Cross(a.Vector).Normalize();
//
// REQUIRES: "origin" and "dir" are perpendicular to within the tolerance
// of the calculation above.
//
// "origin" and "dir" should also be normalized.
func PointOnRay(origin, dir Point, r s1.Angle) Point {
// Mathematically the result should already be unit length, but we normalize
// it anyway to ensure that the error is within acceptable bounds.
// (Otherwise errors can build up when the result of one interpolation is
// fed into another interpolation.)
//
// Note that it is much cheaper to compute the sine and cosine of an
// s1.ChordAngle than an s1.Angle.
return Point{origin.Mul(math.Cos(float64(r))).Add(dir.Mul(math.Sin(float64(r)))).Normalize()}
}
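A minimal sketch of these new interpolation helpers (the angle and coordinates are illustrative):

package main

import (
	"fmt"

	"github.com/blevesearch/geo/s1"
	"github.com/blevesearch/geo/s2"
)

func main() {
	a := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 0))
	b := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 90))

	// Walk 10 degrees from A toward B, and offset 1 degree to either side.
	fmt.Println(s2.LatLngFromPoint(s2.PointOnLine(a, b, 10*s1.Degree)))
	fmt.Println(s2.LatLngFromPoint(s2.PointToLeft(a, b, s1.Degree)))
	fmt.Println(s2.LatLngFromPoint(s2.PointToRight(a, b, s1.Degree)))
}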
// TODO(rsned): Differences from C++
// IsEdgeBNearEdgeA
// PointOnLineError
// PointOnRayError

View File

@@ -17,23 +17,23 @@ package s2
import (
"sort"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/s1"
)
// EdgeQueryOptions holds the options for controlling how EdgeQuery operates.
//
// Options can be chained together builder-style:
//
// opts = NewClosestEdgeQueryOptions().
// MaxResults(1).
// DistanceLimit(s1.ChordAngleFromAngle(3 * s1.Degree)).
// MaxError(s1.ChordAngleFromAngle(0.001 * s1.Degree))
// query = NewClosestEdgeQuery(index, opts)
// opts = NewClosestEdgeQueryOptions().
// MaxResults(1).
// DistanceLimit(s1.ChordAngleFromAngle(3 * s1.Degree)).
// MaxError(s1.ChordAngleFromAngle(0.001 * s1.Degree))
// query = NewClosestEdgeQuery(index, opts)
//
// or set individually:
// or set individually:
//
// opts = NewClosestEdgeQueryOptions()
// opts.IncludeInteriors(true)
// opts = NewClosestEdgeQueryOptions()
// opts.IncludeInteriors(true)
//
// or just inline:
//
@@ -102,11 +102,11 @@ func NewFurthestEdgeQueryOptions() *EdgeQueryOptions {
// EdgeQueryResult represents an edge that meets the target criteria for the
// query. Note the following special cases:
//
// - ShapeID >= 0 && EdgeID < 0 represents the interior of a shape.
// Such results may be returned when the option IncludeInteriors is true.
// - ShapeID >= 0 && EdgeID < 0 represents the interior of a shape.
// Such results may be returned when the option IncludeInteriors is true.
//
// - ShapeID < 0 && EdgeID < 0 is returned to indicate that no edge
// satisfies the requested query options.
// - ShapeID < 0 && EdgeID < 0 is returned to indicate that no edge
// satisfies the requested query options.
type EdgeQueryResult struct {
distance distance
shapeID int32
@@ -162,9 +162,9 @@ func (e EdgeQueryResult) Less(other EdgeQueryResult) bool {
//
// By using the appropriate options, this type can answer questions such as:
//
// - Find the minimum distance between two geometries A and B.
// - Find all edges of geometry A that are within a distance D of geometry B.
// - Find the k edges of geometry A that are closest to a given point P.
// - Find the minimum distance between two geometries A and B.
// - Find all edges of geometry A that are within a distance D of geometry B.
// - Find the k edges of geometry A that are closest to a given point P.
//
// You can also specify whether polygons should include their interiors (i.e.,
// if a point is contained by a polygon, should the distance be zero or should
@@ -214,7 +214,7 @@ type EdgeQuery struct {
// testedEdges tracks the set of shape and edges that have already been tested.
testedEdges map[ShapeEdgeID]uint32
// For the optimized algorihm we precompute the top-level CellIDs that
// For the optimized algorithm we precompute the top-level CellIDs that
// will be added to the priority queue. There can be at most 6 of these
// cells. Essentially this is just a covering of the indexed edges, except
// that we also store pointers to the corresponding ShapeIndexCells to
@@ -322,7 +322,6 @@ func (e *EdgeQuery) Distance(target distanceTarget) s1.ChordAngle {
// If you wish to check if the distance is less than or equal to the limit, use:
//
// query.IsDistanceLess(target, limit.Successor())
//
func (e *EdgeQuery) IsDistanceLess(target distanceTarget, limit s1.ChordAngle) bool {
opts := e.opts
opts = opts.MaxResults(1).
@@ -339,7 +338,6 @@ func (e *EdgeQuery) IsDistanceLess(target distanceTarget, limit s1.ChordAngle) b
// If you wish to check if the distance is less than or equal to the limit, use:
//
// query.IsDistanceGreater(target, limit.Predecessor())
//
func (e *EdgeQuery) IsDistanceGreater(target distanceTarget, limit s1.ChordAngle) bool {
return e.IsDistanceLess(target, limit)
}
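A hedged end-to-end sketch of a closest-edge query over a one-polyline index, following the builder-style options shown above (the shape, target, and limit are arbitrary assumptions):

package main

import (
	"fmt"

	"github.com/blevesearch/geo/s1"
	"github.com/blevesearch/geo/s2"
)

func main() {
	index := s2.NewShapeIndex()
	index.Add(s2.PolylineFromLatLngs([]s2.LatLng{
		s2.LatLngFromDegrees(0, 0),
		s2.LatLngFromDegrees(0, 10),
	}))

	// Builder-style options, as in the doc comment above.
	opts := s2.NewClosestEdgeQueryOptions().
		MaxResults(1).
		DistanceLimit(s1.ChordAngleFromAngle(5 * s1.Degree))
	query := s2.NewClosestEdgeQuery(index, opts)

	target := s2.NewMinDistanceToPointTarget(
		s2.PointFromLatLng(s2.LatLngFromDegrees(1, 5)))
	fmt.Println(query.Distance(target).Angle().Degrees()) // ≈ 1 degree
}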
@@ -500,28 +498,28 @@ func (e *EdgeQuery) addResult(r EdgeQueryResult) {
// is used for the results.
}
func (e *EdgeQuery) maybeAddResult(shape Shape, edgeID int32) {
if _, ok := e.testedEdges[ShapeEdgeID{e.index.idForShape(shape), edgeID}]; e.avoidDuplicates && !ok {
func (e *EdgeQuery) maybeAddResult(shape Shape, shapeID, edgeID int32) {
if _, ok := e.testedEdges[ShapeEdgeID{shapeID, edgeID}]; e.avoidDuplicates && !ok {
return
}
edge := shape.Edge(int(edgeID))
dist := e.distanceLimit
if dist, ok := e.target.updateDistanceToEdge(edge, dist); ok {
e.addResult(EdgeQueryResult{dist, e.index.idForShape(shape), edgeID})
e.addResult(EdgeQueryResult{dist, shapeID, edgeID})
}
}
func (e *EdgeQuery) findEdgesBruteForce() {
// Range over all shapes in the index. Does order matter here? if so
// switch to for i = 0 .. n?
for _, shape := range e.index.shapes {
for shapeID, shape := range e.index.shapes {
// TODO(roberts): can this happen if we are only ranging over current entries?
if shape == nil {
continue
}
for edgeID := int32(0); edgeID < int32(shape.NumEdges()); edgeID++ {
e.maybeAddResult(shape, edgeID)
e.maybeAddResult(shape, shapeID, edgeID)
}
}
}
@@ -622,7 +620,7 @@ func (e *EdgeQuery) initQueue() {
} else {
// Compute a covering of the search disc and intersect it with the
// precomputed index covering.
coverer := &RegionCoverer{MaxCells: 4, LevelMod: 1, MaxLevel: maxLevel}
coverer := &RegionCoverer{MaxCells: 4, LevelMod: 1, MaxLevel: MaxLevel}
radius := cb.Radius() + e.distanceLimit.chordAngleBound().Angle()
searchCB := CapFromCenterAngle(cb.Center(), radius)
@@ -751,7 +749,7 @@ func (e *EdgeQuery) processEdges(entry *queryQueueEntry) {
for _, clipped := range entry.indexCell.shapes {
shape := e.index.Shape(clipped.shapeID)
for j := 0; j < clipped.numEdges(); j++ {
e.maybeAddResult(shape, int32(clipped.edges[j]))
e.maybeAddResult(shape, clipped.shapeID, int32(clipped.edges[j]))
}
}
}

View File

@@ -15,8 +15,8 @@
package s2
import (
"github.com/golang/geo/r2"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/r2"
"github.com/blevesearch/geo/s1"
)
// Tessellation is implemented by subdividing the edge until the estimated
@@ -180,10 +180,10 @@ const (
// of edges in a given 2D projection such that the maximum distance between the
// geodesic edge and the chain of projected edges is at most the requested tolerance.
//
// Method | Input | Output
// ------------|------------------------|-----------------------
// Projected | S2 geodesics | Planar projected edges
// Unprojected | Planar projected edges | S2 geodesics
// Method | Input | Output
// ------------|------------------------|-----------------------
// Projected | S2 geodesics | Planar projected edges
// Unprojected | Planar projected edges | S2 geodesics
type EdgeTessellator struct {
projection Projection

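A minimal sketch of the Projected direction, assuming the upstream plate carrée projection constructor and an arbitrary tolerance:

package main

import (
	"fmt"

	"github.com/blevesearch/geo/r2"
	"github.com/blevesearch/geo/s1"
	"github.com/blevesearch/geo/s2"
)

func main() {
	// Subdivide one geodesic edge until the projected chain is within
	// 1e-6 radians of the true edge.
	proj := s2.NewPlateCarreeProjection(180)
	tess := s2.NewProjectedEdgeTessellator(proj, 1e-6*s1.Radian)

	a := s2.PointFromLatLng(s2.LatLngFromDegrees(10, -60))
	b := s2.PointFromLatLng(s2.LatLngFromDegrees(60, 80))
	var verts []r2.Point
	verts = tess.AppendProjected(a, b, verts)
	fmt.Println(len(verts), "projected vertices")
}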
View File

@@ -15,6 +15,7 @@
package s2
import (
"bufio"
"encoding/binary"
"io"
"math"
@@ -124,23 +125,11 @@ type byteReader interface {
io.ByteReader
}
// byteReaderAdapter embellishes an io.Reader with a ReadByte method,
// so that it implements the io.ByteReader interface.
type byteReaderAdapter struct {
io.Reader
}
func (b byteReaderAdapter) ReadByte() (byte, error) {
buf := []byte{0}
_, err := io.ReadFull(b, buf)
return buf[0], err
}
func asByteReader(r io.Reader) byteReader {
if br, ok := r.(byteReader); ok {
return br
}
return byteReaderAdapter{r}
return bufio.NewReader(r)
}
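A small sketch of why the hand-rolled adapter is no longer needed: bufio.Reader already satisfies io.ByteReader, and adds buffering as a bonus:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

func main() {
	var r io.Reader = bytes.NewReader([]byte{0x2a})

	// *bufio.Reader provides ReadByte, so it can stand in wherever an
	// io.ByteReader is required.
	var br io.ByteReader = bufio.NewReader(r)
	b, _ := br.ReadByte()
	fmt.Println(b) // 42
}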
type decoder struct {

View File

@@ -18,8 +18,8 @@ import (
"fmt"
"math"
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/r3"
"github.com/blevesearch/geo/s1"
)
const (
@@ -79,14 +79,14 @@ func longitude(p Point) s1.Angle {
return s1.Angle(math.Atan2(p.Y, p.X)) * s1.Radian
}
// PointFromLatLng returns an Point for the given LatLng.
// PointFromLatLng returns a Point for the given LatLng.
// The maximum error in the result is 1.5 * dblEpsilon. (This does not
// include the error of converting degrees, E5, E6, or E7 into radians.)
func PointFromLatLng(ll LatLng) Point {
phi := ll.Lat.Radians()
theta := ll.Lng.Radians()
cosphi := math.Cos(phi)
return Point{r3.Vector{math.Cos(theta) * cosphi, math.Sin(theta) * cosphi, math.Sin(phi)}}
return Point{r3.Vector{X: math.Cos(theta) * cosphi, Y: math.Sin(theta) * cosphi, Z: math.Sin(phi)}}
}
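A one-line sketch of the round trip; the equator/prime-meridian point maps to the x-axis:

package main

import (
	"fmt"

	"github.com/blevesearch/geo/s2"
)

func main() {
	p := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 0))
	fmt.Println(p)                     // ≈ (1, 0, 0)
	fmt.Println(s2.LatLngFromPoint(p)) // back to [0.0000000, 0.0000000]
}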
// LatLngFromPoint returns a LatLng for a given Point.

89
vendor/github.com/blevesearch/geo/s2/lax_loop.go generated vendored Normal file
View File

@@ -0,0 +1,89 @@
// Copyright 2023 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
// Shape interface enforcement
var _ Shape = (*LaxLoop)(nil)
// LaxLoop represents a closed loop of edges surrounding an interior
// region. It is similar to Loop except that this class allows
// duplicate vertices and edges. Loops may have any number of vertices,
// including 0, 1, or 2. (A one-vertex loop defines a degenerate edge
// consisting of a single point.)
//
// Note that LaxLoop is faster to initialize and more compact than
// Loop, but does not support the same operations as Loop.
type LaxLoop struct {
numVertices int
vertices []Point
}
// LaxLoopFromPoints creates a LaxLoop from the given points.
func LaxLoopFromPoints(vertices []Point) *LaxLoop {
l := &LaxLoop{
numVertices: len(vertices),
vertices: make([]Point, len(vertices)),
}
copy(l.vertices, vertices)
return l
}
// LaxLoopFromLoop creates a LaxLoop from the given Loop, copying its points.
func LaxLoopFromLoop(loop *Loop) *LaxLoop {
if loop.IsFull() {
panic("FullLoops are not yet supported")
}
if loop.IsEmpty() {
return &LaxLoop{}
}
l := &LaxLoop{
numVertices: len(loop.vertices),
vertices: make([]Point, len(loop.vertices)),
}
copy(l.vertices, loop.vertices)
return l
}
func (l *LaxLoop) vertex(i int) Point { return l.vertices[i] }
func (l *LaxLoop) NumEdges() int { return l.numVertices }
func (l *LaxLoop) Edge(e int) Edge {
e1 := e + 1
if e1 == l.numVertices {
e1 = 0
}
return Edge{l.vertices[e], l.vertices[e1]}
}
func (l *LaxLoop) Dimension() int { return 2 }
func (l *LaxLoop) ReferencePoint() ReferencePoint { return referencePointForShape(l) }
func (l *LaxLoop) NumChains() int { return minInt(1, l.numVertices) }
func (l *LaxLoop) Chain(i int) Chain { return Chain{0, l.numVertices} }
func (l *LaxLoop) ChainEdge(i, j int) Edge {
var k int
// k stays 0 (wrapping to the first vertex) only for the last edge.
if j+1 != l.numVertices {
k = j + 1
}
return Edge{l.vertices[j], l.vertices[k]}
}
func (l *LaxLoop) ChainPosition(e int) ChainPosition { return ChainPosition{0, e} }
func (l *LaxLoop) IsEmpty() bool { return defaultShapeIsEmpty(l) }
func (l *LaxLoop) IsFull() bool { return defaultShapeIsFull(l) }
func (l *LaxLoop) typeTag() typeTag { return typeTagNone }
func (l *LaxLoop) privateInterface() {}
// TODO(roberts): Remaining to be ported from C++:
// LaxClosedPolyline
// VertexIDLaxLoop
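A minimal construction sketch (vertices are arbitrary):

package main

import (
	"fmt"

	"github.com/blevesearch/geo/s2"
)

func main() {
	// A triangle; LaxLoop also accepts 0, 1, or 2 vertices, and duplicates.
	loop := s2.LaxLoopFromPoints([]s2.Point{
		s2.PointFromLatLng(s2.LatLngFromDegrees(0, 0)),
		s2.PointFromLatLng(s2.LatLngFromDegrees(0, 10)),
		s2.PointFromLatLng(s2.LatLngFromDegrees(10, 10)),
	})
	fmt.Println(loop.NumEdges(), loop.Dimension()) // 3 2
}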

224
vendor/github.com/blevesearch/geo/s2/lax_polygon.go generated vendored Normal file
View File

@@ -0,0 +1,224 @@
// Copyright 2023 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
// Shape interface enforcement
var _ Shape = (*LaxPolygon)(nil)
// LaxPolygon represents a region defined by a collection of zero or more
// closed loops. The interior is the region to the left of all loops. This
// is similar to Polygon except that this class supports polygons
// with degeneracies. Degeneracies are of two types: degenerate edges (from a
// vertex to itself) and sibling edge pairs (consisting of two oppositely
// oriented edges). Degeneracies can represent either "shells" or "holes"
// depending on the loop they are contained by. For example, a degenerate
// edge or sibling pair contained by a "shell" would be interpreted as a
// degenerate hole. Such edges form part of the boundary of the polygon.
//
// Loops with fewer than three vertices are interpreted as follows:
// - A loop with two vertices defines two edges (in opposite directions).
// - A loop with one vertex defines a single degenerate edge.
// - A loop with no vertices is interpreted as the "full loop" containing
//
// all points on the sphere. If this loop is present, then all other loops
// must form degeneracies (i.e., degenerate edges or sibling pairs). For
// example, two loops {} and {X} would be interpreted as the full polygon
// with a degenerate single-point hole at X.
//
// LaxPolygon does not have any error checking, and it is perfectly fine to
// create LaxPolygon objects that do not meet the requirements below (e.g., in
// order to analyze or fix those problems). However, LaxPolygons must satisfy
// some additional conditions in order to perform certain operations:
//
// - In order to be valid for point containment tests, the polygon must
//
// satisfy the "interior is on the left" rule. This means that there must
// not be any crossing edges, and if there are duplicate edges then all but
// at most one of them must belong to a sibling pair (i.e., the number of
// edges in opposite directions must differ by at most one).
//
// - To be valid for polygon operations (BoundaryOperation), degenerate
//
// edges and sibling pairs cannot coincide with any other edges. For
// example, the following situations are not allowed:
//
// {AA, AA} // degenerate edge coincides with another edge
// {AA, AB} // degenerate edge coincides with another edge
// {AB, BA, AB} // sibling pair coincides with another edge
//
// Note that LaxPolygon is much faster to initialize and is more compact than
// Polygon, but unlike Polygon it does not have any built-in operations.
// Instead you should use ShapeIndex based operations such as BoundaryOperation,
// ClosestEdgeQuery, etc.
type LaxPolygon struct {
numLoops int
vertices []Point
numVerts int
cumulativeVertices []int
// TODO(roberts): C++ adds a prevLoop int field that claims to boost
// chain position lookups by 1.5-4.5x. Benchmark to see if this
// is useful here.
}
// LaxPolygonFromPolygon creates a LaxPolygon from the given Polygon.
func LaxPolygonFromPolygon(p *Polygon) *LaxPolygon {
spans := make([][]Point, len(p.loops))
for i, loop := range p.loops {
if loop.IsFull() {
spans[i] = []Point{} // Empty span.
} else {
spans[i] = make([]Point, len(loop.vertices))
copy(spans[i], loop.vertices)
}
}
return LaxPolygonFromPoints(spans)
}
// LaxPolygonFromPoints creates a LaxPolygon from the given points.
func LaxPolygonFromPoints(loops [][]Point) *LaxPolygon {
p := &LaxPolygon{}
p.numLoops = len(loops)
switch p.numLoops {
case 0:
p.numVerts = 0
p.vertices = nil
case 1:
p.numVerts = len(loops[0])
p.vertices = make([]Point, p.numVerts)
copy(p.vertices, loops[0])
default:
p.cumulativeVertices = make([]int, p.numLoops+1)
numVertices := 0
for i, loop := range loops {
p.cumulativeVertices[i] = numVertices
numVertices += len(loop)
}
p.cumulativeVertices[p.numLoops] = numVertices
for _, points := range loops {
p.vertices = append(p.vertices, points...)
}
}
return p
}
// numVertices reports the total number of vertices in all loops.
func (p *LaxPolygon) numVertices() int {
if p.numLoops <= 1 {
return p.numVerts
}
return p.cumulativeVertices[p.numLoops]
}
// numLoopVertices reports the total number of vertices in the given loop.
func (p *LaxPolygon) numLoopVertices(i int) int {
if p.numLoops == 1 {
return p.numVerts
}
return p.cumulativeVertices[i+1] - p.cumulativeVertices[i]
}
// loopVertex returns the vertex from loop i at index j.
//
// This requires:
//
// 0 <= i < len(loops)
// 0 <= j < len(loop[i].vertices)
func (p *LaxPolygon) loopVertex(i, j int) Point {
if p.numLoops == 1 {
return p.vertices[j]
}
return p.vertices[p.cumulativeVertices[i]+j]
}
func (p *LaxPolygon) NumEdges() int { return p.numVertices() }
func (p *LaxPolygon) Edge(e int) Edge {
e1 := e + 1
if p.numLoops == 1 {
// wrap the end vertex if this is the last edge.
if e1 == p.numVerts {
e1 = 0
}
return Edge{p.vertices[e], p.vertices[e1]}
}
// TODO(roberts): If this turns out to be performance critical in tests
// incorporate the maxLinearSearchLoops like in C++.
// Check if e1 would cross a loop boundary in the set of all vertices.
nextLoop := 0
for p.cumulativeVertices[nextLoop] <= e {
nextLoop++
}
// If so, wrap around to the first vertex of the loop.
if e1 == p.cumulativeVertices[nextLoop] {
e1 = p.cumulativeVertices[nextLoop-1]
}
return Edge{p.vertices[e], p.vertices[e1]}
}
func (p *LaxPolygon) Dimension() int { return 2 }
func (p *LaxPolygon) typeTag() typeTag { return typeTagLaxPolygon }
func (p *LaxPolygon) privateInterface() {}
func (p *LaxPolygon) IsEmpty() bool { return defaultShapeIsEmpty(p) }
func (p *LaxPolygon) IsFull() bool { return defaultShapeIsFull(p) }
func (p *LaxPolygon) ReferencePoint() ReferencePoint { return referencePointForShape(p) }
func (p *LaxPolygon) NumChains() int { return p.numLoops }
func (p *LaxPolygon) Chain(i int) Chain {
if p.numLoops == 1 {
return Chain{0, p.numVertices()}
}
start := p.cumulativeVertices[i]
return Chain{start, p.cumulativeVertices[i+1] - start}
}
func (p *LaxPolygon) ChainEdge(i, j int) Edge {
n := p.numLoopVertices(i)
k := 0
if j+1 != n {
k = j + 1
}
if p.numLoops == 1 {
return Edge{p.vertices[j], p.vertices[k]}
}
base := p.cumulativeVertices[i]
return Edge{p.vertices[base+j], p.vertices[base+k]}
}
func (p *LaxPolygon) ChainPosition(e int) ChainPosition {
if p.numLoops == 1 {
return ChainPosition{0, e}
}
// TODO(roberts): If this turns out to be performance critical in tests
// incorporate the maxLinearSearchLoops like in C++.
// Find the index of the first vertex of the loop following this one.
nextLoop := 1
for p.cumulativeVertices[nextLoop] <= e {
nextLoop++
}
// The chain ID is the loop index; the offset is relative to that loop's
// first vertex.
return ChainPosition{nextLoop - 1, e - p.cumulativeVertices[nextLoop-1]}
}
// TODO(roberts): Remaining to port from C++:
// EncodedLaxPolygon
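A hedged sketch of a shell plus a degenerate single-point hole, per the loop-interpretation rules above (coordinates are arbitrary):

package main

import (
	"fmt"

	"github.com/blevesearch/geo/s2"
)

func main() {
	shell := []s2.Point{
		s2.PointFromLatLng(s2.LatLngFromDegrees(0, 0)),
		s2.PointFromLatLng(s2.LatLngFromDegrees(0, 10)),
		s2.PointFromLatLng(s2.LatLngFromDegrees(10, 10)),
		s2.PointFromLatLng(s2.LatLngFromDegrees(10, 0)),
	}
	// A one-vertex loop is a single degenerate edge on the boundary.
	hole := []s2.Point{s2.PointFromLatLng(s2.LatLngFromDegrees(5, 5))}

	p := s2.LaxPolygonFromPoints([][]s2.Point{shell, hole})
	fmt.Println(p.NumChains(), p.NumEdges()) // 2 5
}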

58
vendor/github.com/blevesearch/geo/s2/lax_polyline.go generated vendored Normal file
View File

@@ -0,0 +1,58 @@
// Copyright 2023 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
const laxPolylineTypeTag = 4
// LaxPolyline represents a polyline. It is similar to Polyline except
// that adjacent vertices are allowed to be identical or antipodal, and
// the representation is slightly more compact.
//
// Polylines may have any number of vertices, but note that polylines with
// fewer than 2 vertices do not define any edges. (To create a polyline
// consisting of a single degenerate edge, either repeat the same vertex twice
// or use LaxClosedPolyline.)
type LaxPolyline struct {
vertices []Point
}
// LaxPolylineFromPoints constructs a LaxPolyline from the given points.
func LaxPolylineFromPoints(vertices []Point) *LaxPolyline {
return &LaxPolyline{
vertices: append([]Point(nil), vertices...),
}
}
// LaxPolylineFromPolyline converts the given Polyline into a LaxPolyline.
func LaxPolylineFromPolyline(p Polyline) *LaxPolyline {
return LaxPolylineFromPoints(p)
}
func (l *LaxPolyline) NumEdges() int { return maxInt(0, len(l.vertices)-1) }
func (l *LaxPolyline) Edge(e int) Edge { return Edge{l.vertices[e], l.vertices[e+1]} }
func (l *LaxPolyline) ReferencePoint() ReferencePoint { return OriginReferencePoint(false) }
func (l *LaxPolyline) NumChains() int { return minInt(1, l.NumEdges()) }
func (l *LaxPolyline) Chain(i int) Chain { return Chain{0, l.NumEdges()} }
func (l *LaxPolyline) ChainEdge(i, j int) Edge { return Edge{l.vertices[j], l.vertices[j+1]} }
func (l *LaxPolyline) ChainPosition(e int) ChainPosition { return ChainPosition{0, e} }
func (l *LaxPolyline) Dimension() int { return 1 }
func (l *LaxPolyline) IsEmpty() bool { return defaultShapeIsEmpty(l) }
func (l *LaxPolyline) IsFull() bool { return defaultShapeIsFull(l) }
func (l *LaxPolyline) typeTag() typeTag { return typeTagLaxPolyline }
func (l *LaxPolyline) privateInterface() {}
// TODO(roberts):
// Add Encode/Decode support
// Add EncodedLaxPolyline type
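A minimal sketch showing that adjacent duplicate vertices are accepted, unlike Polyline:

package main

import (
	"fmt"

	"github.com/blevesearch/geo/s2"
)

func main() {
	l := s2.LaxPolylineFromPoints([]s2.Point{
		s2.PointFromLatLng(s2.LatLngFromDegrees(0, 0)),
		s2.PointFromLatLng(s2.LatLngFromDegrees(0, 0)), // duplicate is fine
		s2.PointFromLatLng(s2.LatLngFromDegrees(0, 10)),
	})
	fmt.Println(l.NumEdges(), l.Dimension()) // 2 1
}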

View File

@@ -55,7 +55,9 @@ func newIDSetLexicon() *idSetLexicon {
// The primary difference between this and sequenceLexicon are:
// 1. Empty and singleton sets are represented implicitly; they use no space.
// 2. Sets are represented rather than sequences; the ordering of values is
// not important and duplicates are removed.
//
// not important and duplicates are removed.
//
// 3. The values must be 32-bit non-negative integers only.
func (l *idSetLexicon) add(ids ...int32) int32 {
// Empty sets have a special ID chosen not to conflict with other IDs.
@@ -127,9 +129,7 @@ func (l *sequenceLexicon) add(ids []int32) int32 {
if id, ok := l.idSet[hashSet(ids)]; ok {
return id
}
for _, v := range ids {
l.values = append(l.values, v)
}
l.values = append(l.values, ids...)
l.begins = append(l.begins, uint32(len(l.values)))
id := int32(len(l.begins)) - 2
@@ -149,7 +149,7 @@ func (l *sequenceLexicon) size() int {
return len(l.begins) - 1
}
// hash returns a hash of this sequence of int32s.
// hashSet returns a hash of this sequence of int32s.
func hashSet(s []int32) uint32 {
// TODO(roberts): We just need a way to nicely hash all the values down to
// a 32-bit value. To ensure no unnecessary dependencies we use the core

View File

@@ -21,9 +21,9 @@ import (
"math"
"reflect"
"github.com/golang/geo/r1"
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/r1"
"github.com/blevesearch/geo/r3"
"github.com/blevesearch/geo/s1"
)
// Loop represents a simple spherical polygon. It consists of a sequence
@@ -139,25 +139,31 @@ func (l *Loop) initOriginAndBound() {
// the vertex is in the southern hemisphere or not.
l.originInside = l.vertices[0].Z < 0
} else {
// Point containment testing is done by counting edge crossings starting
// at a fixed point on the sphere (OriginPoint). We need to know whether
// the reference point (OriginPoint) is inside or outside the loop before
// we can construct the ShapeIndex. We do this by first guessing that
// it is outside, and then seeing whether we get the correct containment
// result for vertex 1. If the result is incorrect, the origin must be
// inside the loop.
// The brute force point containment algorithm works by counting edge
// crossings starting at a fixed reference point (chosen as OriginPoint()
// for historical reasons). Loop initialization would be more efficient
// if we used a loop vertex such as vertex(0) as the reference point
// instead, however making this change would be a lot of work because
// originInside is currently part of the Encode() format.
//
// A loop with consecutive vertices A,B,C contains vertex B if and only if
// the fixed vector R = B.Ortho is contained by the wedge ABC. The
// wedge is closed at A and open at C, i.e. the point B is inside the loop
// if A = R but not if C = R. This convention is required for compatibility
// with VertexCrossing. (Note that we can't use OriginPoint
// as the fixed vector because of the possibility that B == OriginPoint.)
// In any case, we initialize originInside by first guessing that it is
// outside, and then seeing whether we get the correct containment result
// for vertex 1. If the result is incorrect, the origin must be inside
// the loop instead. Note that the Loop is not necessarily valid and so
// we need to check the requirements of AngleContainsVertex first.
v1Inside := l.vertices[0] != l.vertices[1] &&
l.vertices[2] != l.vertices[1] &&
AngleContainsVertex(l.vertices[0], l.vertices[1], l.vertices[2])
// initialize before calling ContainsPoint
l.originInside = false
v1Inside := OrderedCCW(Point{l.vertices[1].Ortho()}, l.vertices[0], l.vertices[2], l.vertices[1])
// Note that ContainsPoint only does a bounds check once initIndex
// has been called, so it doesn't matter that bound is undefined here.
if v1Inside != l.ContainsPoint(l.vertices[1]) {
l.originInside = true
}
}
// We *must* call initBound before initializing the index, because
@@ -199,15 +205,15 @@ func (l *Loop) initBound() {
}
b := bounder.RectBound()
if l.ContainsPoint(Point{r3.Vector{0, 0, 1}}) {
b = Rect{r1.Interval{b.Lat.Lo, math.Pi / 2}, s1.FullInterval()}
if l.ContainsPoint(Point{r3.Vector{X: 0, Y: 0, Z: 1}}) {
b = Rect{r1.Interval{Lo: b.Lat.Lo, Hi: math.Pi / 2}, s1.FullInterval()}
}
// If a loop contains the south pole, then either it wraps entirely
// around the sphere (full longitude range), or it also contains the
// north pole in which case b.Lng.IsFull() due to the test above.
// Either way, we only need to do the south pole containment test if
// b.Lng.IsFull().
if b.Lng.IsFull() && l.ContainsPoint(Point{r3.Vector{0, 0, -1}}) {
if b.Lng.IsFull() && l.ContainsPoint(Point{r3.Vector{X: 0, Y: 0, Z: -1}}) {
b.Lat.Lo = -math.Pi / 2
}
l.bound = b
@@ -418,12 +424,12 @@ func (l *Loop) BoundaryEqual(o *Loop) bool {
// -1 if it excludes the boundary of the other, and 0 if the boundaries of the two
// loops cross. Shared edges are handled as follows:
//
// If XY is a shared edge, define Reversed(XY) to be true if XY
// appears in opposite directions in both loops.
// Then this loop contains XY if and only if Reversed(XY) == the other loop is a hole.
// (Intuitively, this checks whether this loop contains a vanishingly small region
// extending from the boundary of the other toward the interior of the polygon to
// which the other belongs.)
// If XY is a shared edge, define Reversed(XY) to be true if XY
// appears in opposite directions in both loops.
// Then this loop contains XY if and only if Reversed(XY) == the other loop is a hole.
// (Intuitively, this checks whether this loop contains a vanishingly small region
// extending from the boundary of the other toward the interior of the polygon to
// which the other belongs.)
//
// This function is used for testing containment and intersection of
// multi-loop polygons. Note that this method is not symmetric, since the
@@ -984,21 +990,23 @@ func (l *Loop) ContainsNested(other *Loop) bool {
// surface integral" means:
//
// (1) f(A,B,C) must be the integral of f if ABC is counterclockwise,
// and the integral of -f if ABC is clockwise.
//
// and the integral of -f if ABC is clockwise.
//
// (2) The result of this function is *either* the integral of f over the
// loop interior, or the integral of (-f) over the loop exterior.
//
// loop interior, or the integral of (-f) over the loop exterior.
//
// Note that there are at least two common situations where it easy to work
// around property (2) above:
//
// - If the integral of f over the entire sphere is zero, then it doesn't
// matter which case is returned because they are always equal.
// - If the integral of f over the entire sphere is zero, then it doesn't
// matter which case is returned because they are always equal.
//
// - If f is non-negative, then it is easy to detect when the integral over
// the loop exterior has been returned, and the integral over the loop
// interior can be obtained by adding the integral of f over the entire
// unit sphere (a constant) to the result.
// - If f is non-negative, then it is easy to detect when the integral over
// the loop exterior has been returned, and the integral over the loop
// interior can be obtained by adding the integral of f over the entire
// unit sphere (a constant) to the result.
//
// Any changes to this method may need corresponding changes to surfaceIntegralPoint as well.
func (l *Loop) surfaceIntegralFloat64(f func(a, b, c Point) float64) float64 {
@@ -1585,7 +1593,7 @@ func (l *loopCrosser) cellCrossesAnySubcell(aClipped *clippedShape, bID CellID)
// correct index cells more efficiently.
bRoot := PaddedCellFromCellID(bID, 0)
for _, aj := range aClipped.edges {
// Use an CrossingEdgeQuery starting at bRoot to find the index cells
// Use a CrossingEdgeQuery starting at bRoot to find the index cells
// of B that might contain crossing edges.
l.bCells = l.bQuery.getCells(l.a.Vertex(aj), l.a.Vertex(aj+1), bRoot)
if len(l.bCells) == 0 {
@@ -1616,11 +1624,11 @@ func (l *loopCrosser) hasCrossing(ai, bi *rangeIterator) bool {
l.bCells = nil
for {
if n := bi.it.IndexCell().shapes[0].numEdges(); n > 0 {
if n := bi.clipped().numEdges(); n > 0 {
totalEdges += n
if totalEdges >= edgeQueryMinEdges {
// There are too many edges to test them directly, so use CrossingEdgeQuery.
if l.cellCrossesAnySubcell(ai.it.IndexCell().shapes[0], ai.cellID()) {
if l.cellCrossesAnySubcell(ai.clipped(), ai.cellID()) {
return true
}
bi.seekBeyond(ai)
@@ -1636,7 +1644,7 @@ func (l *loopCrosser) hasCrossing(ai, bi *rangeIterator) bool {
// Test all the edge crossings directly.
for _, c := range l.bCells {
if l.cellCrossesCell(ai.it.IndexCell().shapes[0], c.shapes[0]) {
if l.cellCrossesCell(ai.clipped(), c.shapes[0]) {
return true
}
}
@@ -1644,12 +1652,12 @@ func (l *loopCrosser) hasCrossing(ai, bi *rangeIterator) bool {
return false
}
// containsCenterMatches reports if the clippedShapes containsCenter boolean corresponds
// to the crossing target type given. (This is to work around C++ allowing false == 0,
// true == 1 type implicit conversions and comparisons)
func containsCenterMatches(a *clippedShape, target crossingTarget) bool {
return (!a.containsCenter && target == crossingTargetDontCross) ||
(a.containsCenter && target == crossingTargetCross)
// containsCenterMatches reports if the clippedShapes containsCenter boolean
// corresponds to the crossing target type given. (This is to work around C++
// allowing false == 0, true == 1 type implicit conversions and comparisons)
func containsCenterMatches(containsCenter bool, target crossingTarget) bool {
return (!containsCenter && target == crossingTargetDontCross) ||
(containsCenter && target == crossingTargetCross)
}
// hasCrossingRelation reports whether given two iterators positioned such that
@@ -1658,7 +1666,8 @@ func containsCenterMatches(a *clippedShape, target crossingTarget) bool {
// is an edge crossing, a wedge crossing, or a point P that matches both relations
// crossing targets. This function advances both iterators past ai.cellID.
func (l *loopCrosser) hasCrossingRelation(ai, bi *rangeIterator) bool {
aClipped := ai.it.IndexCell().shapes[0]
// ABSL_DCHECK(ai->id().contains(bi->id()));
aClipped := ai.clipped()
if aClipped.numEdges() != 0 {
// The current cell of A has at least one edge, so check for crossings.
if l.hasCrossing(ai, bi) {
@@ -1668,8 +1677,9 @@ func (l *loopCrosser) hasCrossingRelation(ai, bi *rangeIterator) bool {
return false
}
if !containsCenterMatches(aClipped, l.aCrossingTarget) {
// The crossing target for A is not satisfied, so we skip over these cells of B.
if !containsCenterMatches(ai.containsCenter(), l.aCrossingTarget) {
// The crossing target for A is not satisfied, so we skip over
// these cells of B.
bi.seekBeyond(ai)
ai.next()
return false
@@ -1679,8 +1689,7 @@ func (l *loopCrosser) hasCrossingRelation(ai, bi *rangeIterator) bool {
// worth iterating through the cells of B to see whether any cell
// centers also satisfy the crossing target for B.
for bi.cellID() <= ai.rangeMax {
bClipped := bi.it.IndexCell().shapes[0]
if containsCenterMatches(bClipped, l.bCrossingTarget) {
if containsCenterMatches(bi.containsCenter(), l.bCrossingTarget) {
return true
}
bi.next()
@@ -1733,16 +1742,16 @@ func hasCrossingRelation(a, b *Loop, relation loopRelation) bool {
return true
}
} else {
// The A and B cells are the same. Since the two cells
// have the same center point P, check whether P satisfies
// the crossing targets.
aClipped := ai.it.IndexCell().shapes[0]
bClipped := bi.it.IndexCell().shapes[0]
if containsCenterMatches(aClipped, ab.aCrossingTarget) &&
containsCenterMatches(bClipped, ab.bCrossingTarget) {
// The A and B cells are the same. Since the two
// cells have the same center point P, check
// whether P satisfies the crossing targets.
if containsCenterMatches(ai.containsCenter(), ab.aCrossingTarget) &&
containsCenterMatches(bi.containsCenter(), ab.bCrossingTarget) {
return true
}
// Otherwise test all the edge crossings directly.
aClipped := ai.clipped()
bClipped := bi.clipped()
if aClipped.numEdges() > 0 && bClipped.numEdges() > 0 && ab.cellCrossesCell(aClipped, bClipped) {
return true
}
@@ -1788,10 +1797,12 @@ func (i *intersectsRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool {
// so we return crossingTargetDontCare for both crossing targets.
//
// Aside: A possible early exit condition could be based on the following.
// If A contains a point of both B and ~B, then A intersects Boundary(B).
// If ~A contains a point of both B and ~B, then ~A intersects Boundary(B).
// So if the intersections of {A, ~A} with {B, ~B} are all non-empty,
// the return value is 0, i.e., Boundary(A) intersects Boundary(B).
//
// If A contains a point of both B and ~B, then A intersects Boundary(B).
// If ~A contains a point of both B and ~B, then ~A intersects Boundary(B).
// So if the intersections of {A, ~A} with {B, ~B} are all non-empty,
// the return value is 0, i.e., Boundary(A) intersects Boundary(B).
//
// Unfortunately it isn't worth detecting this situation because by the
// time we have seen a point in all four intersection regions, we are also
// guaranteed to have seen at least one pair of crossing edges.

View File

@@ -17,7 +17,7 @@ package s2
import (
"fmt"
"github.com/golang/geo/r3"
"github.com/blevesearch/geo/r3"
)
// matrix3x3 represents a traditional 3x3 matrix of floating point values.
@@ -27,12 +27,12 @@ type matrix3x3 [3][3]float64
// col returns the given column as a Point.
func (m *matrix3x3) col(col int) Point {
return Point{r3.Vector{m[0][col], m[1][col], m[2][col]}}
return Point{r3.Vector{X: m[0][col], Y: m[1][col], Z: m[2][col]}}
}
// row returns the given row as a Point.
func (m *matrix3x3) row(row int) Point {
return Point{r3.Vector{m[row][0], m[row][1], m[row][2]}}
return Point{r3.Vector{X: m[row][0], Y: m[row][1], Z: m[row][2]}}
}
// setCol sets the specified column to the value in the given Point.
@@ -66,9 +66,9 @@ func (m *matrix3x3) scale(f float64) *matrix3x3 {
// resulting 1x3 matrix into a Point.
func (m *matrix3x3) mul(p Point) Point {
return Point{r3.Vector{
m[0][0]*p.X + m[0][1]*p.Y + m[0][2]*p.Z,
m[1][0]*p.X + m[1][1]*p.Y + m[1][2]*p.Z,
m[2][0]*p.X + m[2][1]*p.Y + m[2][2]*p.Z,
X: float64(m[0][0]*p.X) + float64(m[0][1]*p.Y) + float64(m[0][2]*p.Z),
Y: float64(m[1][0]*p.X) + float64(m[1][1]*p.Y) + float64(m[1][2]*p.Z),
Z: float64(m[2][0]*p.X) + float64(m[2][1]*p.Y) + float64(m[2][2]*p.Z),
}}
}
@@ -77,8 +77,9 @@ func (m *matrix3x3) det() float64 {
// | a b c |
// det | d e f | = aei + bfg + cdh - ceg - bdi - afh
// | g h i |
return m[0][0]*m[1][1]*m[2][2] + m[0][1]*m[1][2]*m[2][0] + m[0][2]*m[1][0]*m[2][1] -
m[0][2]*m[1][1]*m[2][0] - m[0][1]*m[1][0]*m[2][2] - m[0][0]*m[1][2]*m[2][1]
return float64(m[0][0]*m[1][1]*m[2][2]) + float64(m[0][1]*m[1][2]*m[2][0]) +
float64(m[0][2]*m[1][0]*m[2][1]) - float64(m[0][2]*m[1][1]*m[2][0]) -
float64(m[0][1]*m[1][0]*m[2][2]) - float64(m[0][0]*m[1][2]*m[2][1])
}
// transpose reflects the matrix along its diagonal and returns the result.
@@ -107,7 +108,7 @@ func getFrame(p Point) matrix3x3 {
// while p itself is an orthonormal frame for the normal space at p.
m := matrix3x3{}
m.setCol(2, p)
m.setCol(1, Point{p.Ortho()})
m.setCol(1, Ortho(p))
m.setCol(0, Point{m.col(1).Cross(p.Vector)})
return m
}

View File

@@ -17,7 +17,7 @@ package s2
import (
"math"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/s1"
)
// maxDistance implements distance as the supplementary distance (Pi - x) to find
@@ -211,10 +211,8 @@ func NewMaxDistanceToShapeIndexTarget(index *ShapeIndex) *MaxDistanceToShapeInde
// capBound returns a Cap that bounds the antipode of the target. This
// is the set of points whose maxDistance to the target is maxDistance.zero()
func (m *MaxDistanceToShapeIndexTarget) capBound() Cap {
// TODO(roberts): Depends on ShapeIndexRegion
// c := makeShapeIndexRegion(m.index).CapBound()
// return CapFromCenterRadius(Point{c.Center.Mul(-1)}, c.Radius())
panic("not implemented yet")
c := m.index.Region().CapBound()
return CapFromCenterAngle(Point{c.Center().Mul(-1)}, c.Radius())
}
func (m *MaxDistanceToShapeIndexTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
@@ -302,5 +300,4 @@ func (m *MaxDistanceToShapeIndexTarget) setUseBruteForce(b bool) { m.query.opts.
// TODO(roberts): Remaining methods
//
// func (m *MaxDistanceToShapeIndexTarget) capBound() Cap {
// CellUnionTarget

View File

@@ -34,6 +34,9 @@ type Metric struct {
// Defined metrics.
// Of the projection methods defined in C++, Go only supports the quadratic projection.
// See
// https://github.com/google/s2geometry/blob/58de4ea1e2f8a294e0c072c602c22232fd1433ad/src/s2/s2coords.h#L238
// for more details.
// Each cell is bounded by four planes passing through its four edges and
// the center of the sphere. These metrics relate to the angle between each
@@ -108,7 +111,7 @@ func (m Metric) Value(level int) float64 {
}
// MinLevel returns the minimum level such that the metric is at most
// the given value, or maxLevel (30) if there is no such level.
// the given value, or MaxLevel (30) if there is no such level.
//
// For example, MinLevel(0.1) returns the minimum level such that all cell diagonal
// lengths are 0.1 or smaller. The returned value is always a valid level.
@@ -116,12 +119,12 @@ func (m Metric) Value(level int) float64 {
// In C++, this is called GetLevelForMaxValue.
func (m Metric) MinLevel(val float64) int {
if val < 0 {
return maxLevel
return MaxLevel
}
level := -(math.Ilogb(val/m.Deriv) >> uint(m.Dim-1))
if level > maxLevel {
level = maxLevel
if level > MaxLevel {
level = MaxLevel
}
if level < 0 {
level = 0
@@ -138,12 +141,12 @@ func (m Metric) MinLevel(val float64) int {
// In C++, this is called GetLevelForMinValue.
func (m Metric) MaxLevel(val float64) int {
if val <= 0 {
return maxLevel
return MaxLevel
}
level := math.Ilogb(m.Deriv/val) >> uint(m.Dim-1)
if level > maxLevel {
level = maxLevel
if level > MaxLevel {
level = MaxLevel
}
if level < 0 {
level = 0
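A minimal sketch of MinLevel/MaxLevel on a concrete metric, assuming the package's exported AvgEdgeMetric (threshold values are arbitrary):

package main

import (
	"fmt"

	"github.com/blevesearch/geo/s2"
)

func main() {
	// Smallest level at which every cell's average edge is <= 0.1 radians,
	// and largest level at which it is still >= 0.001 radians.
	fmt.Println(s2.AvgEdgeMetric.MinLevel(0.1))
	fmt.Println(s2.AvgEdgeMetric.MaxLevel(0.001))
	fmt.Println(s2.AvgEdgeMetric.Value(10)) // metric value at level 10
}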

View File

@@ -17,7 +17,7 @@ package s2
import (
"math"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/s1"
)
// minDistance implements distance interface to find closest distance types.
@@ -268,10 +268,8 @@ func NewMinDistanceToShapeIndexTarget(index *ShapeIndex) *MinDistanceToShapeInde
}
func (m *MinDistanceToShapeIndexTarget) capBound() Cap {
// TODO(roberts): Depends on ShapeIndexRegion existing.
// c := makeS2ShapeIndexRegion(m.index).CapBound()
// return CapFromCenterRadius(Point{c.Center.Mul(-1)}, c.Radius())
panic("not implemented yet")
c := m.index.Region().CapBound()
return CapFromCenterAngle(Point{c.Center().Mul(-1)}, c.Radius())
}
func (m *MinDistanceToShapeIndexTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
@@ -358,5 +356,4 @@ func (m *MinDistanceToShapeIndexTarget) setUseBruteForce(b bool) { m.query.opts.
// TODO(roberts): Remaining methods
//
// func (m *MinDistanceToShapeIndexTarget) capBound() Cap {
// CellUnionTarget

View File

@@ -15,7 +15,8 @@
package s2
// nthDerivativeCoder provides Nth Derivative Coding.
// (In signal processing disciplines, this is known as N-th Delta Coding.)
//
// (In signal processing disciplines, this is known as N-th Delta Coding.)
//
// Good for varint coding integer sequences with polynomial trends.
//
@@ -23,28 +24,32 @@ package s2
// derivative. Overflow in integer addition and subtraction makes this a
// lossless transform.
//
// constant linear quadratic
// trend trend trend
// / \ / \ / \_
// constant linear quadratic
// trend trend trend
// / \ / \ / \_
//
// input |0 0 0 0 1 2 3 4 9 16 25 36
// 0th derivative(identity) |0 0 0 0 1 2 3 4 9 16 25 36
// 1st derivative(delta coding) | 0 0 0 1 1 1 1 5 7 9 11
// 2nd derivative(linear prediction) | 0 0 1 0 0 0 4 2 2 2
// -------------------------------------
// 0 1 2 3 4 5 6 7 8 9 10 11
// n in sequence
//
// -------------------------------------
// 0 1 2 3 4 5 6 7 8 9 10 11
// n in sequence
//
// Higher-order codings can break even or be detrimental on other sequences.
//
// random oscillating
// / \ / \_
// random oscillating
// / \ / \_
//
// input |5 9 6 1 8 8 2 -2 4 -4 6 -6
// 0th derivative(identity) |5 9 6 1 8 8 2 -2 4 -4 6 -6
// 1st derivative(delta coding) | 4 -3 -5 7 0 -6 -4 6 -8 10 -12
// 2nd derivative(linear prediction) | -7 -2 12 -7 -6 2 10 -14 18 -22
// ---------------------------------------
// 0 1 2 3 4 5 6 7 8 9 10 11
// n in sequence
//
// ---------------------------------------
// 0 1 2 3 4 5 6 7 8 9 10 11
// n in sequence
//
// Note that the nth derivative isn't available until sequence item n. Earlier
// values are coded at lower order. For the above table, read 5 4 -7 -2 12 ...

View File

@@ -15,8 +15,8 @@
package s2
import (
"github.com/golang/geo/r1"
"github.com/golang/geo/r2"
"github.com/blevesearch/geo/r1"
"github.com/blevesearch/geo/r2"
)
// PaddedCell represents a Cell whose (u,v)-range has been expanded on
@@ -44,8 +44,8 @@ func PaddedCellFromCellID(id CellID, padding float64) *PaddedCell {
// Fast path for constructing a top-level face (the most common case).
if id.isFace() {
limit := padding + 1
p.bound = r2.Rect{r1.Interval{-limit, limit}, r1.Interval{-limit, limit}}
p.middle = r2.Rect{r1.Interval{-padding, padding}, r1.Interval{-padding, padding}}
p.bound = r2.Rect{X: r1.Interval{Lo: -limit, Hi: limit}, Y: r1.Interval{Lo: -limit, Hi: limit}}
p.middle = r2.Rect{X: r1.Interval{Lo: -padding, Hi: padding}, Y: r1.Interval{Lo: -padding, Hi: padding}}
p.orientation = id.Face() & 1
return p
}
@@ -131,8 +131,8 @@ func (p *PaddedCell) Middle() r2.Rect {
u := stToUV(siTiToST(uint32(2*p.iLo + ijSize)))
v := stToUV(siTiToST(uint32(2*p.jLo + ijSize)))
p.middle = r2.Rect{
r1.Interval{u - p.padding, u + p.padding},
r1.Interval{v - p.padding, v + p.padding},
X: r1.Interval{Lo: u - p.padding, Hi: u + p.padding},
Y: r1.Interval{Lo: v - p.padding, Hi: v + p.padding},
}
}
return p.middle
@@ -240,10 +240,10 @@ func (p *PaddedCell) ShrinkToFit(rect r2.Rect) CellID {
// Compute the highest bit position where the two i- or j-endpoints differ,
// and then choose the cell level that includes both of these endpoints. So
// if both pairs of endpoints are equal we choose maxLevel; if they differ
// only at bit 0, we choose (maxLevel - 1), and so on.
// if both pairs of endpoints are equal we choose MaxLevel; if they differ
// only at bit 0, we choose (MaxLevel - 1), and so on.
levelMSB := uint64(((iXor | jXor) << 1) + 1)
level := maxLevel - findMSBSetNonZero64(levelMSB)
level := MaxLevel - findMSBSetNonZero64(levelMSB)
if level <= p.level {
return p.id
}
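The level computation above can be checked in isolation; the sketch below substitutes math/bits for the package-internal findMSBSetNonZero64, and levelFromXor is a hypothetical name used here for illustration only.

    package main

    import (
        "fmt"
        "math/bits"
    )

    const maxLevel = 30 // S2's maximum cell level

    // levelFromXor mirrors the ShrinkToFit computation: the ((x << 1) + 1)
    // trick guarantees a nonzero argument, so equal endpoints (xor == 0)
    // yield MaxLevel and a difference at bit 0 yields MaxLevel - 1.
    func levelFromXor(iXor, jXor uint64) int {
        levelMSB := ((iXor | jXor) << 1) + 1
        return maxLevel - (63 - bits.LeadingZeros64(levelMSB))
    }

    func main() {
        fmt.Println(levelFromXor(0, 0)) // 30
        fmt.Println(levelFromXor(1, 0)) // 29
    }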

View File

@@ -20,8 +20,8 @@ import (
"math"
"sort"
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/r3"
"github.com/blevesearch/geo/s1"
)
// Point represents a point on the unit sphere as a normalized 3D vector.
@@ -53,7 +53,7 @@ func PointFromCoords(x, y, z float64) Point {
if x == 0 && y == 0 && z == 0 {
return OriginPoint()
}
return Point{r3.Vector{x, y, z}.Normalize()}
return Point{r3.Vector{X: x, Y: y, Z: z}.Normalize()}
}
// OriginPoint returns a unique "origin" on the sphere for operations that need a fixed
@@ -65,7 +65,7 @@ func PointFromCoords(x, y, z float64) Point {
// north and south poles). It should also not be on the boundary of any
// low-level S2Cell for the same reason.
func OriginPoint() Point {
return Point{r3.Vector{-0.0099994664350250197, 0.0025924542609324121, 0.99994664350250195}}
return Point{r3.Vector{X: -0.0099994664350250197, Y: 0.0025924542609324121, Z: 0.99994664350250195}}
}
// PointCross returns a Point that is orthogonal to both p and op. This is similar to
@@ -75,10 +75,10 @@ func OriginPoint() Point {
//
// It satisfies the following properties (f == PointCross):
//
// (1) f(p, op) != 0 for all p, op
// (2) f(op,p) == -f(p,op) unless p == op or p == -op
// (3) f(-p,op) == -f(p,op) unless p == op or p == -op
// (4) f(p,-op) == -f(p,op) unless p == op or p == -op
// (1) f(p, op) != 0 for all p, op
// (2) f(op,p) == -f(p,op) unless p == op or p == -op
// (3) f(-p,op) == -f(p,op) unless p == op or p == -op
// (4) f(p,-op) == -f(p,op) unless p == op or p == -op
func (p Point) PointCross(op Point) Point {
// NOTE(dnadasi): In the C++ API the equivalent method here was known as "RobustCrossProd",
// but PointCross more accurately describes how this method is used.
@@ -102,11 +102,11 @@ func (p Point) PointCross(op Point) Point {
// contained in the range of angles (inclusive) that starts at A and extends
// CCW to C. Properties:
//
// (1) If OrderedCCW(a,b,c,o) && OrderedCCW(b,a,c,o), then a == b
// (2) If OrderedCCW(a,b,c,o) && OrderedCCW(a,c,b,o), then b == c
// (3) If OrderedCCW(a,b,c,o) && OrderedCCW(c,b,a,o), then a == b == c
// (4) If a == b or b == c, then OrderedCCW(a,b,c,o) is true
// (5) Otherwise if a == c, then OrderedCCW(a,b,c,o) is false
// (1) If OrderedCCW(a,b,c,o) && OrderedCCW(b,a,c,o), then a == b
// (2) If OrderedCCW(a,b,c,o) && OrderedCCW(a,c,b,o), then b == c
// (3) If OrderedCCW(a,b,c,o) && OrderedCCW(c,b,a,o), then a == b == c
// (4) If a == b or b == c, then OrderedCCW(a,b,c,o) is true
// (5) Otherwise if a == c, then OrderedCCW(a,b,c,o) is false
func OrderedCCW(a, b, c, o Point) bool {
sum := 0
if RobustSign(b, o, a) != Clockwise {
@@ -123,17 +123,17 @@ func OrderedCCW(a, b, c, o Point) bool {
// Distance returns the angle between two points.
func (p Point) Distance(b Point) s1.Angle {
return p.Vector.Angle(b.Vector)
return p.Angle(b.Vector)
}
// ApproxEqual reports whether the two points are similar enough to be equal.
func (p Point) ApproxEqual(other Point) bool {
return p.approxEqual(other, s1.Angle(epsilon))
return p.approxEqual(other, s1.Angle(1e-15))
}
// approxEqual reports whether the two points are within the given epsilon.
func (p Point) approxEqual(other Point, eps s1.Angle) bool {
return p.Vector.Angle(other.Vector) <= eps
return p.Angle(other.Vector) <= eps
}
// ChordAngleBetweenPoints constructs a ChordAngle corresponding to the distance
@@ -164,7 +164,7 @@ func regularPointsForFrame(frame matrix3x3, radius s1.Angle, numVertices int) []
for i := 0; i < numVertices; i++ {
angle := float64(i) * radianStep
p := Point{r3.Vector{r * math.Cos(angle), r * math.Sin(angle), z}}
p := Point{r3.Vector{X: r * math.Cos(angle), Y: r * math.Sin(angle), Z: z}}
vertices = append(vertices, Point{fromFrame(frame, p).Normalize()})
}
@@ -240,6 +240,34 @@ func (p *Point) decode(d *decoder) {
p.Z = d.readFloat64()
}
// Ortho returns a unit-length vector that is orthogonal to "a". Satisfies
// Ortho(-a) = -Ortho(a) for all a.
//
// Note that Vector3 also defines an "Ortho" method, but this one is
// preferred for use in S2 code because it explicitly tries to avoid result
// coordinates that are zero. (This is a performance optimization that
// reduces the amount of time spent in functions that handle degeneracies.)
func Ortho(a Point) Point {
temp := r3.Vector{X: 0.012, Y: 0.0053, Z: 0.00457}
switch a.LargestComponent() {
case r3.XAxis:
temp.Z = 1
case r3.YAxis:
temp.X = 1
case r3.ZAxis:
temp.Y = 1
}
return Point{a.Cross(temp).Normalize()}
}
// referenceDir returns a unit-length vector to use as the reference direction for
// deciding whether a polygon with semi-open boundaries contains the given vertex "a"
// (see ContainsVertexQuery). The result is unit length and is guaranteed
// to be different from the given point "a".
func (p Point) referenceDir() Point {
return Ortho(p)
}
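Assuming Ortho is exported exactly as added in this hunk, a quick property check could look like:

    package main

    import (
        "fmt"

        "github.com/blevesearch/geo/s2"
    )

    func main() {
        p := s2.PointFromCoords(1, 2, 3) // normalized by construction
        q := s2.Ortho(p)
        // q is unit length and orthogonal to p, so this prints a dot
        // product near 0 and a norm of 1.
        fmt.Println(p.Dot(q.Vector), q.Norm())
    }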
// Rotate the given point about the given axis by the given angle. p and
// axis must be unit length; angle has no restrictions (e.g., it can be
// positive, negative, greater than 360 degrees, etc).
@@ -256,3 +284,81 @@ func Rotate(p, axis Point, angle s1.Angle) Point {
// to ensure that numerical errors don't accumulate.
return Point{dx.Mul(math.Cos(angle.Radians())).Add(dy.Mul(math.Sin(angle.Radians()))).Add(center).Normalize()}
}
// stableAngle reports the angle between two vectors with better precision when
// the two are nearly parallel.
//
// The .Angle() member function uses atan(|AxB|, A.B) to compute the angle
// between A and B, which can lose about half its precision when A and B are
// nearly (anti-)parallel.
//
// Kahan provides a much more stable form:
//
// 2*atan2(| A*|B| - |A|*B |, | A*|B| + |A|*B |)
//
// Since Points are unit magnitude by construction we can simplify further:
//
// 2*atan2(|A-B|,|A+B|)
//
// This likely can't replace Vector's Angle since it requires four magnitude
// calculations, each of which takes 5 operations + a square root, plus 6
// operations to find the sum and difference of the vectors, for a total of 26 +
// 4 square roots. Vector's Angle requires 19 + 1 square root.
//
// Since we always have unit vectors, we can elide two of those magnitude
// calculations for a total of 16 + 2 square roots which is competitive with
// Vector's Angle performance.
//
// Reference: Kahan, W. (2006, Jan 11). "How Futile are Mindless Assessments of
// Roundoff in Floating-Point Computation?" (p. 47).
// https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf
//
// The 2 points must be normalized.
func (p Point) stableAngle(o Point) s1.Angle {
return s1.Angle(2 * math.Atan2(p.Sub(o.Vector).Norm(), p.Add(o.Vector).Norm()))
}
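Since stableAngle is unexported, the sketch below restates the Kahan form standalone; kahanAngle is a hypothetical helper, not package API, and assumes unit-length inputs as the comment requires.

    package main

    import (
        "fmt"
        "math"

        "github.com/blevesearch/geo/r3"
    )

    // kahanAngle is the 2*atan2(|A-B|, |A+B|) form described above.
    func kahanAngle(a, b r3.Vector) float64 {
        return 2 * math.Atan2(a.Sub(b).Norm(), a.Add(b).Norm())
    }

    func main() {
        a := r3.Vector{X: 1, Y: 0, Z: 0}
        b := r3.Vector{X: 1, Y: 1e-9, Z: 0}.Normalize()
        // Both forms agree here, but the Kahan form retains precision as
        // the vectors become nearly (anti-)parallel.
        fmt.Println(kahanAngle(a, b), a.Angle(b).Radians())
    }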
// IsNormalizable reports if the given Point's magnitude is large enough such that the
// angle to another vector of the same magnitude can be measured using Angle()
// without loss of precision due to floating-point underflow. (This requirement
// is also sufficient to ensure that Normalize() can be called without risk of
// precision loss.)
func (p Point) IsNormalizable() bool {
// Let ab = RobustCrossProd(a, b) and cd = RobustCrossProd(c, d). In order for
// ab.Angle(cd) to not lose precision, the squared magnitudes of ab and cd
// must each be at least 2**-484. This ensures that the sum of the squared
// magnitudes of ab.CrossProd(cd) and ab.DotProd(cd) is at least 2**-968,
// which ensures that any denormalized terms in these two calculations do
// not affect the accuracy of the result (since all denormalized numbers are
// smaller than 2**-1022, which is less than dblError * 2**-968).
//
// The fastest way to ensure this is to test whether the largest component of
// the result has a magnitude of at least 2**-242.
return maxFloat64(math.Abs(p.X), math.Abs(p.Y), math.Abs(p.Z)) >= math.Ldexp(1, -242)
}
// EnsureNormalizable scales a vector as necessary to ensure that the result can
// be normalized without loss of precision due to floating-point underflow.
//
// This requires p != (0, 0, 0)
func (p Point) EnsureNormalizable() Point {
// TODO(rsned): Zero vector isn't normalizable, and we don't have DCHECK in Go.
// What is the appropriate return value in this case? Is it {NaN, NaN, NaN}?
if p == (Point{r3.Vector{X: 0, Y: 0, Z: 0}}) {
return p
}
if !p.IsNormalizable() {
// We can't just scale by a fixed factor because the smallest representable
// double is 2**-1074, so if we multiplied by 2**(1074 - 242) then the
// result might be so large that we couldn't square it without overflow.
//
// Note that we must scale by a power of two to avoid rounding errors.
// The code below scales "p" such that the largest component is
// in the range [1, 2).
pMax := maxFloat64(math.Abs(p.X), math.Abs(p.Y), math.Abs(p.Z))
// This avoids signed overflow for any value of Ilogb().
return Point{p.Mul(math.Ldexp(2, -1-math.Ilogb(pMax)))}
}
return p
}
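A sketch of the rescaling behavior, assuming IsNormalizable and EnsureNormalizable are exported as added in this hunk:

    package main

    import (
        "fmt"
        "math"

        "github.com/blevesearch/geo/r3"
        "github.com/blevesearch/geo/s2"
    )

    func main() {
        // 2**-500 is far below the 2**-242 threshold, so Normalize alone
        // would risk underflow; EnsureNormalizable first rescales by a
        // power of two (here 2**500, making the largest component 1).
        tiny := s2.Point{Vector: r3.Vector{X: math.Ldexp(1, -500)}}
        fmt.Println(tiny.IsNormalizable())                      // false
        fmt.Println(tiny.EnsureNormalizable().IsNormalizable()) // true
    }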

View File

@@ -17,7 +17,7 @@ package s2
import (
"math"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/s1"
)
// PointArea returns the area of triangle ABC. This method combines two different
@@ -60,25 +60,26 @@ func PointArea(a, b, c Point) float64 {
// k3 is about 0.1. Since the best case error using Girard's formula
// is about 1e-15, this means that we shouldn't even consider it unless
// s >= 3e-4 or so.
sa := float64(b.Angle(c.Vector))
sb := float64(c.Angle(a.Vector))
sc := float64(a.Angle(b.Vector))
sa := b.stableAngle(c)
sb := c.stableAngle(a)
sc := a.stableAngle(b)
s := 0.5 * (sa + sb + sc)
if s >= 3e-4 {
// Consider whether Girard's formula might be more accurate.
dmin := s - math.Max(sa, math.Max(sb, sc))
dmin := s - maxAngle(sa, sb, sc)
if dmin < 1e-2*s*s*s*s*s {
// This triangle is skinny enough to use Girard's formula.
area := GirardArea(a, b, c)
if dmin < s*0.1*area {
if dmin < s*0.1*s1.Angle(area+5e-15) {
return area
}
}
}
// Use l'Huilier's formula.
return 4 * math.Atan(math.Sqrt(math.Max(0.0, math.Tan(0.5*s)*math.Tan(0.5*(s-sa))*
math.Tan(0.5*(s-sb))*math.Tan(0.5*(s-sc)))))
return 4 * math.Atan(math.Sqrt(math.Max(0.0,
math.Tan(float64(0.5*s))*math.Tan(0.5*float64(s-sa))*
math.Tan(0.5*float64(s-sb))*math.Tan(0.5*float64(s-sc)))))
}
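PointArea is long-standing exported API; a worked example on the octant triangle, whose area must be one eighth of the sphere's 4*pi:

    package main

    import (
        "fmt"

        "github.com/blevesearch/geo/s2"
    )

    func main() {
        a := s2.PointFromCoords(1, 0, 0)
        b := s2.PointFromCoords(0, 1, 0)
        c := s2.PointFromCoords(0, 0, 1)
        // A maximally "fat" triangle: l'Huilier's formula handles it
        // without needing the Girard fallback discussed above.
        fmt.Println(s2.PointArea(a, b, c)) // ~1.5707963 (pi/2)
    }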
// GirardArea returns the area of the triangle computed using Girard's formula.

View File

@@ -18,7 +18,7 @@ import (
"errors"
"fmt"
"github.com/golang/geo/r3"
"github.com/blevesearch/geo/r3"
)
// maxEncodedVertices is the maximum number of vertices, in a row, to be encoded or decoded.
@@ -108,17 +108,27 @@ func encodeFirstPointFixedLength(e *encoder, pi, qi uint32, level int, piCoder,
// compresses it into a stream using the following method:
// - decompose the points into (face, si, ti) tuples.
// - run-length encode the faces, combining face number and count into a
// varint32. See the faceRun struct.
//
// varint32. See the faceRun struct.
//
// - right shift the (si, ti) to remove the part that's constant for all cells
// of level-k. The result is called the (pi, qi) space.
//
// of level-k. The result is called the (pi, qi) space.
//
// - 2nd derivative encode the pi and qi sequences (linear prediction)
// - zig-zag encode all derivative values but the first, which cannot be
// negative
//
// negative
//
// - interleave the zig-zag encoded values
// - encode the first interleaved value in a fixed length encoding
// (varint would make this value larger)
//
// (varint would make this value larger)
//
// - encode the remaining interleaved values as varint64s, as the
// derivative encoding should make the values small.
//
// derivative encoding should make the values small.
//
// In addition, provides a lossless method to compress a sequence of points even
// if some points are not the center of level-k cells. These points are stored
// exactly, using 3 double precision values, after the above encoded string,
@@ -145,8 +155,8 @@ type faceRun struct {
func decodeFaceRun(d *decoder) faceRun {
faceAndCount := d.readUvarint()
ret := faceRun{
face: int(faceAndCount % numFaces),
count: int(faceAndCount / numFaces),
face: int(faceAndCount % NumFaces),
count: int(faceAndCount / NumFaces),
}
if ret.count <= 0 && d.err == nil {
d.err = errors.New("non-positive count for face run")
@@ -167,12 +177,12 @@ func decodeFaces(numVertices int, d *decoder) []faceRun {
return frs
}
// encodeFaceRun encodes each faceRun as a varint64 with value numFaces * count + face.
// encodeFaceRun encodes each faceRun as a varint64 with value NumFaces * count + face.
func encodeFaceRun(e *encoder, fr faceRun) {
// It isn't necessary to encode the number of faces left for the last run,
// but since this would only help if there were more than 21 faces, it will
// be a small overall savings, much smaller than the bound encoding.
coded := numFaces*uint64(fr.count) + uint64(fr.face)
coded := NumFaces*uint64(fr.count) + uint64(fr.face)
e.writeUvarint(coded)
}
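The run-length packing is plain modular arithmetic; a standalone sketch (the local numFaces mirrors the package's NumFaces == 6):

    package main

    import "fmt"

    const numFaces = 6 // cube face count, exported as NumFaces in this diff

    func main() {
        // encodeFaceRun packs (face, count) into a single varint payload as
        // NumFaces*count + face; decodeFaceRun inverts it with mod and div.
        face, count := uint64(4), uint64(7)
        coded := numFaces*count + face
        fmt.Println(coded%numFaces, coded/numFaces) // 4 7
    }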
@@ -290,7 +300,7 @@ func stToPiQi(s float64, level uint) uint32 {
return uint32(s * float64(int(1)<<level))
}
// siTiToPiQi returns the value transformed into the PiQi coordinate space.
// siTitoPiQi returns the value transformed into the PiQi coordinate space.
// encodeFirstPointFixedLength encodes the return value using level bits,
// so we clamp si to the range [0, 2**level - 1] before trying to encode
// it. This is okay because if si == maxSiTi, then it is not a cell center
@@ -302,7 +312,7 @@ func siTitoPiQi(siTi uint32, level int) uint32 {
s = max
}
return uint32(s >> (maxLevel + 1 - uint(level)))
return uint32(s >> (MaxLevel + 1 - uint(level)))
}
// piQiToST returns the value transformed to ST space.

View File

@@ -38,16 +38,16 @@ import (
//
// Polygons have the following restrictions:
//
// - Loops may not cross, i.e. the boundary of a loop may not intersect
// both the interior and exterior of any other loop.
// - Loops may not cross, i.e. the boundary of a loop may not intersect
// both the interior and exterior of any other loop.
//
// - Loops may not share edges, i.e. if a loop contains an edge AB, then
// no other loop may contain AB or BA.
// - Loops may not share edges, i.e. if a loop contains an edge AB, then
// no other loop may contain AB or BA.
//
// - Loops may share vertices, however no vertex may appear twice in a
// single loop (see Loop).
// - Loops may share vertices, however no vertex may appear twice in a
// single loop (see Loop).
//
// - No loop may be empty. The full loop may appear only in the full polygon.
// - No loop may be empty. The full loop may appear only in the full polygon.
type Polygon struct {
loops []*Loop
@@ -191,7 +191,7 @@ func (p *Polygon) Invert() {
// Inverting any one loop will invert the polygon. The best loop to invert
// is the one whose area is largest, since this yields the smallest area
// after inversion. The loop with the largest area is always at depth 0.
// The descendents of this loop all have their depth reduced by 1, while the
// The descendants of this loop all have their depth reduced by 1, while the
// former siblings of this loop all have their depth increased by 1.
// The empty and full polygons are handled specially.
@@ -450,7 +450,7 @@ func (p *Polygon) Validate() error {
for i, l := range p.loops {
// Check for loop errors that don't require building a ShapeIndex.
if err := l.findValidationErrorNoIndex(); err != nil {
return fmt.Errorf("loop %d: %v", i, err)
return fmt.Errorf("loop %d: %w", i, err)
}
// Check that no loop is empty, and that the full loop only appears in the
// full polygon.
@@ -1017,6 +1017,27 @@ func (p *Polygon) Area() float64 {
return area
}
// Centroid returns the true centroid of the polygon multiplied by the area of
// the polygon. The result is not unit length, so you may want to normalize it.
// Also note that in general, the centroid may not be contained by the polygon.
//
// We prescale by the polygon area for two reasons: (1) it is cheaper to
// compute this way, and (2) it makes it easier to compute the centroid of
// more complicated shapes (by splitting them into disjoint regions and
// adding their centroids).
func (p *Polygon) Centroid() Point {
u := Point{}.Vector
for _, loop := range p.loops {
v := loop.Centroid().Vector
if loop.Sign() < 0 {
u = u.Sub(v)
} else {
u = u.Add(v)
}
}
return Point{u}
}
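Assuming Centroid is exported as added here, a usage sketch; note the result is area-scaled, so it needs normalizing before use as a point on the sphere.

    package main

    import (
        "fmt"

        "github.com/blevesearch/geo/s2"
    )

    func main() {
        loop := s2.LoopFromPoints([]s2.Point{
            s2.PointFromCoords(1, 0, 0),
            s2.PointFromCoords(0, 1, 0),
            s2.PointFromCoords(0, 0, 1),
        })
        poly := s2.PolygonFromLoops([]*s2.Loop{loop})
        // Centroid is prescaled by the polygon's area; normalize to get a
        // unit-length representative point.
        c := poly.Centroid()
        fmt.Println(s2.Point{Vector: c.Normalize()})
    }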
// Encode encodes the Polygon
func (p *Polygon) Encode(w io.Writer) error {
e := &encoder{w: w}
@@ -1027,7 +1048,7 @@ func (p *Polygon) Encode(w io.Writer) error {
// encode only supports lossless encoding and not compressed format.
func (p *Polygon) encode(e *encoder) {
if p.numVertices == 0 {
p.encodeCompressed(e, maxLevel, nil)
p.encodeCompressed(e, MaxLevel, nil)
return
}
@@ -1040,7 +1061,7 @@ func (p *Polygon) encode(e *encoder) {
// Computes a histogram of the cell levels at which the vertices are snapped.
// (histogram[0] is the number of unsnapped vertices, histogram[i] the number
// of vertices snapped at level i-1).
histogram := make([]int, maxLevel+2)
histogram := make([]int, MaxLevel+2)
for _, v := range vs {
histogram[v.level+1]++
}
@@ -1170,7 +1191,7 @@ func (p *Polygon) decode(d *decoder) {
func (p *Polygon) decodeCompressed(d *decoder) {
snapLevel := int(d.readUint8())
if snapLevel > maxLevel {
if snapLevel > MaxLevel {
d.err = fmt.Errorf("snaplevel too big: %d", snapLevel)
return
}

View File

@@ -19,7 +19,7 @@ import (
"io"
"math"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/s1"
)
// Polyline represents a sequence of zero or more vertices connected by
@@ -90,7 +90,7 @@ func (p *Polyline) Equal(b *Polyline) bool {
// ApproxEqual reports whether two polylines have the same number of vertices,
// and corresponding vertex pairs are separated by no more the standard margin.
func (p *Polyline) ApproxEqual(o *Polyline) bool {
return p.approxEqual(o, s1.Angle(epsilon))
return p.approxEqual(o, s1.Angle(1e-15))
}
// approxEqual reports whether two polylines are equal within the given margin.
@@ -318,19 +318,19 @@ func findEndVertex(p Polyline, tolerance s1.Angle, index int) int {
//
// Some useful properties of the algorithm:
//
// - It runs in linear time.
// - It runs in linear time.
//
// - The output always represents a valid polyline. In particular, adjacent
// output vertices are never identical or antipodal.
// - The output always represents a valid polyline. In particular, adjacent
// output vertices are never identical or antipodal.
//
// - The method is not optimal, but it tends to produce 2-3% fewer
// vertices than the Douglas-Peucker algorithm with the same tolerance.
// - The method is not optimal, but it tends to produce 2-3% fewer
// vertices than the Douglas-Peucker algorithm with the same tolerance.
//
// - The output is parametrically equivalent to the original polyline to
// within the given tolerance. For example, if a polyline backtracks on
// itself and then proceeds onwards, the backtracking will be preserved
// (to within the given tolerance). This is different than the
// Douglas-Peucker algorithm which only guarantees geometric equivalence.
// - The output is parametrically equivalent to the original polyline to
// within the given tolerance. For example, if a polyline backtracks on
// itself and then proceeds onwards, the backtracking will be preserved
// (to within the given tolerance). This is different than the
// Douglas-Peucker algorithm which only guarantees geometric equivalence.
func (p *Polyline) SubsampleVertices(tolerance s1.Angle) []int {
var result []int
@@ -559,7 +559,7 @@ func (p *Polyline) Interpolate(fraction float64) (Point, int) {
// Uninterpolate is the inverse operation of Interpolate. Given a point on the
// polyline, it returns the ratio of the distance to the point from the
// beginning of the polyline over the length of the polyline. The return
// value is always betwen 0 and 1 inclusive.
// value is always between 0 and 1 inclusive.
//
// The polyline should not be empty. If it has fewer than 2 vertices, the
// return value is zero.
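A round-trip sketch of the Interpolate/Uninterpolate pair, using existing exported API:

    package main

    import (
        "fmt"

        "github.com/blevesearch/geo/s2"
    )

    func main() {
        line := s2.PolylineFromLatLngs([]s2.LatLng{
            s2.LatLngFromDegrees(0, 0),
            s2.LatLngFromDegrees(0, 10),
            s2.LatLngFromDegrees(0, 20),
        })
        // Uninterpolate inverts Interpolate: feeding the returned point and
        // next-vertex index back in recovers the original fraction.
        pt, next := line.Interpolate(0.25)
        fmt.Println(line.Uninterpolate(pt, next)) // ~0.25
    }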

View File

@@ -0,0 +1,508 @@
// Copyright 2023 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package s2
import (
"bytes"
"fmt"
"math"
)
// This library provides code to compute vertex alignments between Polylines.
//
// A vertex "alignment" or "warp" between two polylines is a matching between
// pairs of their vertices. Users can imagine pairing each vertex from
// Polyline `a` with at least one other vertex in Polyline `b`. The "cost"
// of an arbitrary alignment is defined as the summed value of the squared
// chordal distance between each pair of points in the warp path. An "optimal
// alignment" for a pair of polylines is defined as the alignment with least
// cost. Note: optimal alignments are not necessarily unique. The standard way
// of computing an optimal alignment between two sequences is the use of the
// `Dynamic Timewarp` algorithm.
//
// We provide three methods for computing (via Dynamic Timewarp) the optimal
// alignment between two Polylines. These methods are performance-sensitive,
// and have been reasonably optimized for space- and time- usage. On modern
// hardware, it is possible to compute exact alignments between 4096x4096
// element polylines in ~70ms, and approximate alignments much more quickly.
//
// The results of an alignment operation are captured in a VertexAlignment
// object. In particular, a VertexAlignment keeps track of the total cost of
// alignment, as well as the warp path (a sequence of pairs of indices into each
// polyline whose vertices are linked together in the optimal alignment).
//
// For a worked example, consider the polylines
//
// a = [(1, 0), (5, 0), (6, 0), (9, 0)] and
// b = [(2, 0), (7, 0), (8, 0)].
//
// The "cost matrix" between these two polylines (using chordal
// distance, .Norm(), as our distance function) looks like this:
//
// (2, 0) (7, 0) (8, 0)
// (1, 0) 1 6 7
// (5, 0) 3 2 3
// (6, 0) 4 1 2
// (9, 0) 7 2 1
//
// The Dynamic Timewarp DP table for this cost matrix has cells defined by
//
// table[i][j] = cost(i,j) + min(table[i-1][j-1], table[i][j-1], table[i-1][j])
//
// (2, 0) (7, 0) (8, 0)
// (1, 0) 1 7 14
// (5, 0) 4 3 7
// (6, 0) 8 4 6
// (9, 0) 15 6 5
//
// Starting at the bottom right corner of the DP table, we can work our way
// backwards to the upper left corner to recover the reverse of the warp path:
// (3, 2) -> (2, 1) -> (1, 1) -> (0, 0). The resulting VertexAlignment has
// alignmentCost = 7 and warpPath = {(0, 0), (1, 1), (2, 1), (3, 2)}.
//
// We also provide methods for performing alignment of multiple sequences. These
// methods return a single, representative polyline from a non-empty collection
// of polylines, for various definitions of "representative."
//
// GetMedoidPolyline() returns a new polyline (point-for-point-equal to some
// existing polyline from the collection) that minimizes the summed vertex
// alignment cost to all other polylines in the collection.
//
// GetConsensusPolyline() returns a new polyline (unlikely to be present in the
// input collection) that represents a "weighted consensus" polyline. This
// polyline is constructed iteratively using the Dynamic Timewarp Barycenter
// Averaging algorithm of F. Petitjean, A. Ketterlin, and P. Gancarski, which
// can be found here:
// https://pdfs.semanticscholar.org/a596/8ca9488199291ffe5473643142862293d69d.pdf
// A columnStride is a [start, end) range of columns in a search window.
// It enables us to lazily fill up our costTable structures by providing bounds
// checked access for reads. We also use them to keep track of structured,
// sparse window matrices by tracking start and end columns for each row.
type columnStride struct {
start int
end int
}
// InRange reports if the given index is in range of this stride.
func (c columnStride) InRange(index int) bool {
return c.start <= index && index < c.end
}
// allColumnStride returns a columnStride where InRange evaluates to `true` for all
// non-negative inputs less than math.MaxInt.
func allColumnStride() columnStride {
return columnStride{-1, math.MaxInt}
}
// A Window is a sparse binary matrix with specific structural constraints
// on allowed element configurations. It is used in this library to represent
// "search windows" for windowed dynamic timewarping.
//
// Valid Windows require the following structural conditions to hold:
// 1. All rows must consist of a single contiguous stride of `true` values.
// 2. All strides are greater than zero length (i.e. no empty rows).
// 3. The index of the first `true` column in a row must be at least as
// large as the index of the first `true` column in the previous row.
// 4. The index of the last `true` column in a row must be at least as large
// as the index of the last `true` column in the previous row.
// 5. strides[0].start = 0 (the first cell is always filled).
// 6. strides[n_rows-1].end = n_cols (the last cell is filled).
//
// Example valid strided_masks (* = filled, . = unfilled)
//
// 0 1 2 3 4 5
// 0 * * * . . .
// 1 . * * * . .
// 2 . * * * . .
// 3 . . * * * *
// 4 . . * * * *
//
// 0 1 2 3 4 5
// 0 * * * * . .
// 1 . * * * * .
// 2 . . * * * .
// 3 . . . . * *
// 4 . . . . . *
//
// 0 1 2 3 4 5
// 0 * * . . . .
// 1 . * . . . .
// 2 . . * * * .
// 3 . . . . . *
// 4 . . . . . *
//
// Example invalid strided_masks:
//
// 0 1 2 3 4 5
//
// 0 * * * . * * <-- more than one continuous run
// 1 . * * * . .
// 2 . * * * . .
// 3 . . * * * *
// 4 . . * * * *
//
// 0 1 2 3 4 5
//
// 0 * * * . . .
// 1 . * * * . .
// 2 . * * * . .
// 3 * * * * * * <-- start index not monotonically increasing
// 4 . . * * * *
//
// 0 1 2 3 4 5
//
// 0 * * * . . .
// 1 . * * * * .
// 2 . * * * . . <-- end index not monotonically increasing
// 3 . . * * * *
// 4 . . * * * *
//
// 0 1 2 3 4 5
//
// 0 . * . . . . <-- does not fill upper left corner
// 1 . * . . . .
// 2 . * . . . .
// 3 . * * * . .
// 4 . . * * * *
type window struct {
rows int
cols int
strides []columnStride
}
// windowFromStrides creates a window from the given columnStrides.
func windowFromStrides(strides []columnStride) *window {
return &window{
rows: len(strides),
cols: strides[len(strides)-1].end,
strides: strides,
}
}
// TODO(rsned): Add windowFromWarpPath
// isValid reports if this window's data represents a valid window.
//
// Valid Windows require the following structural conditions to hold:
// 1. All rows must consist of a single contiguous stride of `true` values.
// 2. All strides are greater than zero length (i.e. no empty rows).
// 3. The index of the first `true` column in a row must be at least as
// large as the index of the first `true` column in the previous row.
// 4. The index of the last `true` column in a row must be at least as large
// as the index of the last `true` column in the previous row.
// 5. strides[0].start = 0 (the first cell is always filled).
// 6. strides[n_rows-1].end = n_cols (the last cell is filled).
func (w *window) isValid() bool {
if w.rows <= 0 || w.cols <= 0 || len(w.strides) == 0 ||
w.strides[0].start != 0 || w.strides[len(w.strides)-1].end != w.cols {
return false
}
var prev = columnStride{-1, -1}
for _, curr := range w.strides {
if curr.end <= curr.start || curr.start < prev.start ||
curr.end < prev.end {
return false
}
prev = curr
}
return true
}
func (w *window) columnStride(row int) columnStride {
return w.strides[row]
}
func (w *window) checkedColumnStride(row int) columnStride {
if row < 0 {
return allColumnStride()
}
return w.strides[row]
}
// upsample returns a new, larger window that is an upscaled version of this window.
//
// Used by ApproximateAlignment window expansion step.
func (w *window) upsample(newRows, newCols int) *window {
// TODO(rsned): What to do if the upsample is actually a downsample.
// C++ has this as a debug CHECK.
rowScale := float64(newRows) / float64(w.rows)
colScale := float64(newCols) / float64(w.cols)
newStrides := make([]columnStride, newRows)
var fromStride columnStride
for row := 0; row < newRows; row++ {
fromStride = w.strides[int((float64(row)+0.5)/rowScale)]
newStrides[row] = columnStride{
start: int(colScale*float64(fromStride.start) + 0.5),
end: int(colScale*float64(fromStride.end) + 0.5),
}
}
return windowFromStrides(newStrides)
}
// dilate returns a new, equal-size window by dilating this window with a square
// structuring element with half-length `radius`. Radius = 1 corresponds to
// a 3x3 square morphological dilation.
//
// Used by ApproximateAlignment window expansion step.
func (w *window) dilate(radius int) *window {
// This code takes advantage of the fact that the dilation window is square to
// ensure that we can compute the stride for each output row in constant time.
// TODO (mrdmnd): a potential optimization might be to combine this method and
// the Upsample method into a single "Expand" method. For the sake of
// testing, I haven't done that here, but I think it would be fairly
// straightforward to do so. This method generally isn't very expensive so it
// feels unnecessary to combine them.
newStrides := make([]columnStride, w.rows)
for row := 0; row < w.rows; row++ {
prevRow := maxInt(0, row-radius)
nextRow := minInt(row+radius, w.rows-1)
newStrides[row] = columnStride{
start: maxInt(0, w.strides[prevRow].start-radius),
end: minInt(w.strides[nextRow].end+radius, w.cols),
}
}
return windowFromStrides(newStrides)
}
// debugString returns a string representation of this window.
func (w *window) debugString() string {
var buf bytes.Buffer
for _, row := range w.strides {
for col := 0; col < w.cols; col++ {
if row.InRange(col) {
buf.WriteString(" *")
} else {
buf.WriteString(" .")
}
}
buf.WriteString("\n")
}
return buf.String()
}
// halfResolution reduces the number of vertices of polyline p by selecting every other
// vertex for inclusion in a new polyline. Specifically, we take even-index
// vertices [0, 2, 4,...]. For an even-length polyline, the last vertex is not
// selected. For an odd-length polyline, the last vertex is selected.
// Constructs and returns a new Polyline in linear time.
func halfResolution(p *Polyline) *Polyline {
var p2 Polyline
for i := 0; i < len(*p); i += 2 {
p2 = append(p2, (*p)[i])
}
return &p2
}
// warpPath represents a pairing between vertex
// a.vertex(i) and vertex b.vertex(j) in the optimal alignment.
// The warpPath is defined in forward order, such that the result of
// aligning polylines `a` and `b` is always a warpPath with warpPath[0] = {0,0}
// and warp_path[n] = {len(a) - 1, len(b)- 1}
//
// Note that this DOES NOT define an alignment from a point sequence to an
// edge sequence. That functionality may come at a later date.
type warpPath []warpPair
type warpPair struct{ a, b int }
type vertexAlignment struct {
// alignmentCost represents the sum of the squared chordal distances
// between each pair of vertices in the warp path. Specifically,
// cost = sum_{(i, j) \in path} (a.vertex(i) - b.vertex(j)).Norm();
// This means that the units of alignment_cost are distance. This is
// an optimization to avoid the (expensive) atan computation of the true
// spherical angular distance between the points. All we need to compute
// vertex alignment is a metric that satisfies the triangle inequality, and
// chordal distance works as well as spherical s1.Angle distance for
// this purpose.
alignmentCost float64
warpPath warpPath
}
type costTable [][]float64
func newCostTable(rows, cols int) costTable {
c := make([][]float64, rows)
for i := 0; i < rows; i++ {
c[i] = make([]float64, cols)
}
return c
}
func (c costTable) String() string {
var buf bytes.Buffer
for i, row := range c {
buf.WriteString(fmt.Sprintf("%2d: [", i))
for _, col := range row {
buf.WriteString(fmt.Sprintf("%0.3f, ", col))
}
buf.WriteString("]\n")
}
return buf.String()
}
func (c costTable) boundsCheckedTableCost(row, col int, stride columnStride) float64 {
if row < 0 && col < 0 {
return 0.0
} else if row < 0 || col < 0 || !stride.InRange(col) {
return math.MaxFloat64
} else {
return c[row][col]
}
}
func (c costTable) cost() float64 {
r := len(c) - 1
return c[r][len(c[r])-1]
}
// ExactVertexAlignmentCost takes two non-empty polylines as input, and
// returns the *cost* of their optimal alignment. A standard, traditional
// dynamic timewarp algorithm can output both a warp path and a cost, but
// requires quadratic space to reconstruct the path by walking back through the
// Dynamic Programming cost table. If all you really need is the warp cost (i.e.
// you're inducing a similarity metric between Polylines, or something
// equivalent), you can overwrite the DP table and use linear space -
// O(max(A,B)). This method provides that space-efficiency optimization.
func ExactVertexAlignmentCost(a, b *Polyline) float64 {
aN := len(*a)
bN := len(*b)
cost := make([]float64, bN)
for i := 0; i < bN; i++ {
cost[i] = math.MaxFloat64
}
leftDiagMinCost := 0.0
for row := 0; row < aN; row++ {
for col := 0; col < bN; col++ {
upCost := cost[col]
cost[col] = math.Min(leftDiagMinCost, upCost) +
(*a)[row].Sub((*b)[col].Vector).Norm()
leftDiagMinCost = math.Min(cost[col], upCost)
}
leftDiagMinCost = math.MaxFloat64
}
return cost[len(cost)-1]
}
// ExactVertexAlignment takes two non-empty polylines as input, and returns
// the VertexAlignment corresponding to the optimal alignment between them. This
// method is quadratic O(A*B) in both space and time complexity.
func ExactVertexAlignment(a, b *Polyline) *vertexAlignment {
aN := len(*a)
bN := len(*b)
strides := make([]columnStride, aN)
for i := 0; i < aN; i++ {
strides[i] = columnStride{start: 0, end: bN}
}
w := windowFromStrides(strides)
return dynamicTimewarp(a, b, w)
}
// Perform dynamic timewarping by filling in the DP table on cells that are
// inside our search window. For an exact (all-squares) evaluation, this
// incurs bounds checking overhead - we don't need to ensure that we're inside
// the appropriate cells in the window, because it's guaranteed. Structuring
// the program to reuse code for both the EXACT and WINDOWED cases by
// abstracting EXACT as a window with full-covering strides is done for
// maintainability reasons. One potential optimization here might be to overload
// this function to skip bounds checking when the window is full.
//
// As a note of general interest, the Dynamic Timewarp algorithm as stated here
// prefers shorter warp paths, when two warp paths might be equally costly. This
// is because it favors progressing in the sequences simultaneously due to the
// equal weighting of a diagonal step in the cost table with a horizontal or
// vertical step. This may be counterintuitive, but represents the standard
// implementation of this algorithm. TODO(user) - future implementations could
// allow weights on the lookup costs to mitigate this.
//
// This is the hottest routine in the whole package, please be careful to
// profile any future changes made here.
//
// This method takes time proportional to the number of cells in the window,
// which can range from O(max(a, b)) cells (best) to O(a*b) cells (worst).
func dynamicTimewarp(a, b *Polyline, w *window) *vertexAlignment {
rows := len(*a)
cols := len(*b)
costs := newCostTable(rows, cols)
var curr columnStride
prev := allColumnStride()
for row := 0; row < rows; row++ {
curr = w.columnStride(row)
for col := curr.start; col < curr.end; col++ {
// The total cost up to (row,col) is the minimum of the diagonal, up,
// and left costs, plus the distance between the points at row and col.
// We use the distance between the points, as we are trying to minimize
// the distance between the two polylines.
dCost := costs.boundsCheckedTableCost(row-1, col-1, prev)
uCost := costs.boundsCheckedTableCost(row-1, col-0, prev)
lCost := costs.boundsCheckedTableCost(row-0, col-1, curr)
costs[row][col] = minFloat64(dCost, uCost, lCost) +
(*a)[row].Sub((*b)[col].Vector).Norm()
}
prev = curr
}
// Now we walk back through the cost table and build up the warp path.
// Somewhat surprisingly, it is faster to recover the path this way than it
// is to save the comparisons from the computation we *already did* to get the
// direction we came from. The author speculates that this behavior is
// assignment-cost-related: to persist direction, we have to do extra
// stores/loads of "directional" information, and the extra assignment cost
// this incurs is larger than the cost to simply redo the comparisons.
// It's probably worth revisiting this assumption in the future.
// As it turns out, the following code ends up effectively free.
warpPath := make([]warpPair, 0, maxInt(rows, cols))
row := rows - 1
col := cols - 1
curr = w.checkedColumnStride(row)
prev = w.checkedColumnStride(row - 1)
for row >= 0 && col >= 0 {
warpPath = append(warpPath, warpPair{row, col})
dCost := costs.boundsCheckedTableCost(row-1, col-1, prev)
uCost := costs.boundsCheckedTableCost(row-1, col-0, prev)
lCost := costs.boundsCheckedTableCost(row-0, col-1, curr)
if dCost <= uCost && dCost <= lCost {
row -= 1
col -= 1
curr = w.checkedColumnStride(row)
prev = w.checkedColumnStride(row - 1)
} else if uCost <= lCost {
row -= 1
curr = w.checkedColumnStride(row)
prev = w.checkedColumnStride(row - 1)
} else {
col -= 1
}
}
// TODO(rsned): warpPath.reverse
return &vertexAlignment{alignmentCost: costs.cost(), warpPath: warpPath}
}
// TODO(rsned): Differences from C++
// ApproxVertexAlignment/Cost
// MedoidPolyline / Options
// ConsensusPolyline / Options
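Of the new entry points, ExactVertexAlignment returns the unexported vertexAlignment type, so outside the package the cost variant is the practical one to call. The sketch below echoes the worked example above, but as lat/lng degrees on the sphere, so the resulting chordal cost will not match the schematic cost matrix in the comment.

    package main

    import (
        "fmt"

        "github.com/blevesearch/geo/s2"
    )

    func main() {
        a := s2.PolylineFromLatLngs([]s2.LatLng{
            s2.LatLngFromDegrees(1, 0), s2.LatLngFromDegrees(5, 0),
            s2.LatLngFromDegrees(6, 0), s2.LatLngFromDegrees(9, 0),
        })
        b := s2.PolylineFromLatLngs([]s2.LatLng{
            s2.LatLngFromDegrees(2, 0), s2.LatLngFromDegrees(7, 0),
            s2.LatLngFromDegrees(8, 0),
        })
        // Constant-space DP over rows: returns only the optimal alignment
        // cost, not the warp path.
        fmt.Println(s2.ExactVertexAlignmentCost(a, b))
    }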

View File

@@ -19,8 +19,8 @@ package s2
// implement the methods in various other measures files.
import (
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/r3"
"github.com/blevesearch/geo/s1"
)
// polylineLength returns the length of the given Polyline.

View File

@@ -27,23 +27,25 @@ import (
"math"
"math/big"
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/r3"
"github.com/blevesearch/geo/s1"
)
const (
// If any other machine architectures need to be suppported, these next three
// If any other machine architectures need to be supported, these next
// values will need to be updated.
// epsilon is a small number that represents a reasonable level of noise between two
// values that can be considered to be equal.
epsilon = 1e-15
// dblEpsilon is a smaller number for values that require more precision.
// This is the C++ DBL_EPSILON equivalent.
dblEpsilon = 2.220446049250313e-16
// dblError is the C++ value for S2 rounding_epsilon().
dblError = 1.110223024625156e-16
// sqrt3 is used many times throughout but computed every time,
// so use the OEIS value like package math does for Sqrt2, etc.
// https://oeis.org/A002194
sqrt3 = 1.73205080756887729352744634150587236694280525381038062805580
// maxDeterminantError is the maximum error in computing (AxB).C where all vectors
// are unit length. Using standard inequalities, it can be shown that
//
@@ -73,6 +75,34 @@ const (
detErrorMultiplier = 3.2321 * dblEpsilon
)
// epsilonForDigits reports the epsilon for the given number of digits of mantissa.
// This is essentially 2 ** (-digits).
func epsilonForDigits(digits int) float64 {
// IEEE floats have either 24 (32-bit floating point) or 53
// (64 bit floating point) digits or mantissa.
if digits < 64 {
return 1.0 / float64(uint64(1)<<digits)
}
return epsilonForDigits(digits-63) / float64(1<<63)
}
// roundingEpsilon reports the maximum rounding error for arithmetic operations for
// given type t.
//
// We could simply return 0.5 * epsilon, but that is not always the correct approach
// on all platforms.
func roundingEpsilon(t any) float64 {
switch t.(type) {
case float32:
return epsilonForDigits(24)
case float64:
return epsilonForDigits(53)
default:
// TODO(rsned): If go adds any other size floating point types, revisit this.
panic("unsupported type for rounding epsilon")
}
}
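Both helpers are unexported; the sketch below mirrors epsilonForDigits to show the two values roundingEpsilon dispatches to (an illustrative copy, not package API).

    package main

    import "fmt"

    // epsilonForDigits mirrors the helper above: effectively 2**-digits for
    // an IEEE mantissa of the given width.
    func epsilonForDigits(digits int) float64 {
        if digits < 64 {
            return 1.0 / float64(uint64(1)<<digits)
        }
        return epsilonForDigits(digits-63) / float64(1<<63)
    }

    func main() {
        fmt.Println(epsilonForDigits(24)) // float32 rounding unit, ~5.96e-08
        fmt.Println(epsilonForDigits(53)) // float64 rounding unit, ~1.11e-16 (dblError)
    }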
// Direction is an indication of the ordering of a set of points.
type Direction int
@@ -83,9 +113,21 @@ const (
CounterClockwise Direction = 1
)
// These big.Float methods are copied from r3/precisevector.go
// newBigFloat constructs a new big.Float with maximum precision.
func newBigFloat() *big.Float { return new(big.Float).SetPrec(big.MaxPrec) }
// precSub is a helper to wrap the boilerplate of subtracting two big.Floats.
func precSub(a, b *big.Float) *big.Float {
return new(big.Float).SetPrec(big.MaxPrec).Sub(a, b)
}
// precMul is a helper to wrap the boilerplate of multiplying two big.Floats.
func precMul(a, b *big.Float) *big.Float {
return new(big.Float).SetPrec(big.MaxPrec).Mul(a, b)
}
// Sign returns true if the points A, B, C are strictly counterclockwise,
// and returns false if the points are clockwise or collinear (i.e. if they are all
// contained on some great circle).
@@ -120,15 +162,15 @@ func Sign(a, b, c Point) bool {
//
// RobustSign satisfies the following conditions:
//
// (1) RobustSign(a,b,c) == Indeterminate if and only if a == b, b == c, or c == a
// (2) RobustSign(b,c,a) == RobustSign(a,b,c) for all a,b,c
// (3) RobustSign(c,b,a) == -RobustSign(a,b,c) for all a,b,c
// (1) RobustSign(a,b,c) == Indeterminate if and only if a == b, b == c, or c == a
// (2) RobustSign(b,c,a) == RobustSign(a,b,c) for all a,b,c
// (3) RobustSign(c,b,a) == -RobustSign(a,b,c) for all a,b,c
//
// In other words:
//
// (1) The result is Indeterminate if and only if two points are the same.
// (2) Rotating the order of the arguments does not affect the result.
// (3) Exchanging any two arguments inverts the result.
// (1) The result is Indeterminate if and only if two points are the same.
// (2) Rotating the order of the arguments does not affect the result.
// (3) Exchanging any two arguments inverts the result.
//
// On the other hand, note that it is not true in general that
// RobustSign(-a,b,c) == -RobustSign(a,b,c), or any similar identities
@@ -298,9 +340,9 @@ func exactSign(a, b, c Point, perturb bool) Direction {
// And the points must be distinct, with A < B < C in lexicographic order.
//
// Reference:
// "Simulation of Simplicity" (Edelsbrunner and Muecke, ACM Transactions on
// Graphics, 1990).
//
// "Simulation of Simplicity" (Edelsbrunner and Muecke, ACM Transactions on
// Graphics, 1990).
func symbolicallyPerturbedSign(a, b, c, bCrossC r3.PreciseVector) Direction {
// This method requires that the points are sorted in lexicographically
// increasing order. This is because every possible Point has its own
@@ -484,8 +526,8 @@ func sin2Distance(x, y Point) (sin2, err float64) {
// distances as small as dblError.
n := x.Sub(y.Vector).Cross(x.Add(y.Vector))
sin2 = 0.25 * n.Norm2()
err = ((21+4*math.Sqrt(3))*dblError*sin2 +
32*math.Sqrt(3)*dblError*dblError*math.Sqrt(sin2) +
err = ((21+4*sqrt3)*dblError*sin2 +
32*sqrt3*dblError*dblError*math.Sqrt(sin2) +
768*dblError*dblError*dblError*dblError)
return sin2, err
}
@@ -677,25 +719,400 @@ func exactCompareDistance(x, y r3.PreciseVector, r2 *big.Float) int {
return xySign * cmp.Sign()
}
// SignDotProd reports the exact sign of the dot product between A and B.
//
// REQUIRES: |a|^2 <= 2 and |b|^2 <= 2
func SignDotProd(a, b Point) int {
sign := triageSignDotProd(a, b)
if sign != 0 {
return sign
}
// big.Float.Sign() returns -1/0/+1 already, so the C++
// ExactSignDotProd is not necessary.
return r3.PreciseVectorFromVector(a.Vector).Dot(r3.PreciseVectorFromVector(b.Vector)).Sign()
}
func triageSignDotProd(a, b Point) int {
// The dot product error can be bound as 1.01nu|a||b| assuming nu < .01,
// where u is the rounding unit (epsilon/2). n=3 because we have 3
// components, and we require that our vectors be <= sqrt(2) in length (so
// that we can support the un-normalized edge normals for cells).
//
// So we have 1.01*3*ε/2*2 = 3.03ε, which we'll round up to 3.046875ε
// which is exactly representable.
//
// Reference:
// Error Estimation Of Floating-Point Summation And Dot Product, Rump
// 2011
const maxError = 3.046875 * dblEpsilon
na := a.Dot(b.Vector)
if math.Abs(na) <= maxError {
return 0
}
if na > 0 {
return 1
}
return -1
}
// CircleEdgeIntersectionOrdering reports the relative position of two edges crossing
// a great circle relative to a given point.
//
// Given two edges AB and CD that cross a great circle defined by a normal
// vector M, orders the crossings of AB and CD relative to another great circle
// N representing a zero point.
//
// This predicate can be used in any circumstance where we have an exact normal
// vector to order edge crossings relative to some zero point.
//
// As an example, if we have edges AB and CD that cross boundary 2 of a cell:
//
// B D
// • 2 •
// ┌─\───/─┐
// 3 │ • • │ 1
// A C
//
// We could order them by using the normal of boundary 2 as M, and the normal of
// either boundary 1 or 3 as N. If we use boundary 1 as N, then:
//
// CircleEdgeIntersectionOrdering(A, B, C, D, M, N) == +1
//
// Indicating that CD is closer to boundary 1 than AB is.
//
// But, if we use boundary 3 as N, then:
//
// CircleEdgeIntersectionOrdering(A, B, C, D, M, N) == -1
//
// Indicating that AB is closer to boundary 3 than CD is.
//
// These results are consistent but one needs to bear in mind what boundary is
// being used as the reference.
//
// The edges AB and CD should be specified such that A and C are on the positive
// side of M and B and D are on the negative side, as illustrated above. This
// will make the sign of their cross products with M consistent.
//
// Because we use a dot product to check the distance from N, this predicate can
// only unambiguously order along edges within [0,90] degrees of N (both
// vertices must be in quadrant one of the unit circle).
//
// REQUIRES:
// - A and B are not equal or antipodal.
// - C and D are not equal or antipodal.
// - M and N are not equal or antipodal.
// - AB crosses M (vertices have opposite dot product signs with M)
// - CD crosses M (vertices have opposite dot product signs with M)
// - A and C are on the positive side of M
// - B and D are on the negative side of M
// - Intersection of AB and N is on the positive side of N
// - Intersection of CD and N is on the positive side of N
//
// Returns:
//
// -1 if crossing AB is closer to N than crossing CD
// 0 if the two edges cross at exactly the same position
// +1 if crossing AB is further from N than crossing CD
func CircleEdgeIntersectionOrdering(a, b, c, d, m, n Point) int {
ans := triageIntersectionOrdering(a, b, c, d, m, n)
if ans != 0 {
return ans
}
// We got zero, check for duplicate/reverse duplicate edges before falling
// back to more precision.
if (a == c && b == d) || (a == d && b == c) {
return 0
}
return exactIntersectionOrdering(
r3.PreciseVectorFromVector(a.Vector), r3.PreciseVectorFromVector(b.Vector),
r3.PreciseVectorFromVector(c.Vector), r3.PreciseVectorFromVector(d.Vector),
r3.PreciseVectorFromVector(m.Vector), r3.PreciseVectorFromVector(n.Vector))
}
// triageIntersectionOrdering reports the order of intersections along a great circle
// relative to some reference point using the float64 implementation.
func triageIntersectionOrdering(a, b, c, d, m, n Point) int {
// Given an edge AB, and the normal of a great circle M, the intersection of
// the edge with the great circle is given by the triple product (A×B)×M.
//
// Its distance relative to the reference circle N is then proportional to the
// dot product with N: d0 = ((A×B)×M)•N
//
// Edge CD is similar, we want to compute d1 = ((C×D)×M)•N and compare d0 to
// d1. If they're further than some error from each other, we can rely on the
// comparison, otherwise we fall back to more exact arithmetic.
//
// ((A×B)×M)•N is a quadruple product. We can expand this out using
// Lagrange's formula for a vector triple product and then distribute the dot
// product, which eliminates all the cross products:
//
// d0 = ((A×B)×M)•N
// d0 = ((M•A)B - (M•B)A)•N
// d0 = (M•A)(N•B) - (M•B)(N•A)
//
// Similarly:
//
// d1 = (M•C)(N•D) - (M•D)(N•C)
//
// We can compute this difference with a maximum absolute error of 32ε (see
// the gappa proof at end of the file).
//
// NOTE: If we want to push this error bound down as far as possible, we could
// use the dot product algorithm created by Ogita et al:
//
// Accurate Sum and Dot Product, Ogita, Rump, Oishi 2005.
//
// Along with the 2x2 determinant algorithm by Kahan (which is useful for any
// bilinear form):
//
// Further Analysis of Kahan's Algorithm for the Accurate Computation of
// 2x2 Determinants, Jeannerod, Louvet, and Muller, 2013.
//
// Both algorithms allow us to have bounded relative error, and since we're
// only interested in the sign of this operation, as long as the relative
// error is < 1 we can never get a sign flip, which would make this exact for
// our purposes.
const maxError = 32 * dblEpsilon
mdota := m.Dot(a.Vector)
mdotb := m.Dot(b.Vector)
mdotc := m.Dot(c.Vector)
mdotd := m.Dot(d.Vector)
ndota := n.Dot(a.Vector)
ndotb := n.Dot(b.Vector)
ndotc := n.Dot(c.Vector)
ndotd := n.Dot(d.Vector)
prodab := mdota*ndotb - mdotb*ndota
prodcd := mdotc*ndotd - mdotd*ndotc
if math.Abs(prodab-prodcd) > maxError {
if prodab < prodcd {
return -1
} else {
return +1
}
}
return 0
}
// exactIntersectionOrdering reports the order of intersections along a great circle
// relative to some reference point using the precise implementation.
func exactIntersectionOrdering(a, b, c, d, m, n r3.PreciseVector) int {
mdota := m.Dot(a)
mdotb := m.Dot(b)
mdotc := m.Dot(c)
mdotd := m.Dot(d)
ndota := n.Dot(a)
ndotb := n.Dot(b)
ndotc := n.Dot(c)
ndotd := n.Dot(d)
prodab := precSub(precMul(mdota, ndotb), precMul(mdotb, ndota))
prodcd := precSub(precMul(mdotc, ndotd), precMul(mdotd, ndotc))
return prodab.Cmp(prodcd)
}
// Gappa proof for TriageIntersectionOrdering
//
// # Use IEEE754 double precision, round-to-nearest by default.
// @rnd = float<ieee_64, ne>;
//
// # Five vectors, two forming edges AB, CD and two normals N,M for great
// # circles.
// a0 = rnd(a0_ex);
// a1 = rnd(a1_ex);
// a2 = rnd(a2_ex);
// b0 = rnd(b0_ex);
// b1 = rnd(b1_ex);
// b2 = rnd(b2_ex);
// c0 = rnd(c0_ex);
// c1 = rnd(c1_ex);
// c2 = rnd(c2_ex);
// d0 = rnd(d0_ex);
// d1 = rnd(d1_ex);
// d2 = rnd(d2_ex);
// n0 = rnd(n0_ex);
// n1 = rnd(n1_ex);
// n2 = rnd(n2_ex);
// m0 = rnd(m0_ex);
// m1 = rnd(m1_ex);
// m2 = rnd(m2_ex);
//
// # (AxB)xN = (N*A)B - (N*B)*A -- Lagrange's formula
// # ((AxB)xN)*M = ((N*A)B - (N*B)*A)*M
// # = (N*A)(M*B) - (N*B)(M*A)
//
// ndota_ rnd = n0*a0 + n1*a1 + n2*a2;
// ndotb_ rnd = n0*b0 + n1*b1 + n2*b2;
// mdota_ rnd = m0*a0 + m1*a1 + m2*a2;
// mdotb_ rnd = m0*b0 + m1*b1 + m2*b2;
// prod0_ rnd = ndota_*mdotb_ - ndotb_*mdota_;
//
// ndotc_ rnd = n0*c0 + n1*c1 + n2*c2;
// ndotd_ rnd = n0*d0 + n1*d1 + n2*d2;
// mdotc_ rnd = m0*c0 + m1*c1 + m2*c2;
// mdotd_ rnd = m0*d0 + m1*d1 + m2*d2;
// prod1_ rnd = ndotc_*mdotd_ - ndotd_*mdotc_;
//
// diff_ rnd = prod1_ - prod0_;
//
// # Compute it all again in exact arithmetic.
// ndota = n0*a0 + n1*a1 + n2*a2;
// ndotb = n0*b0 + n1*b1 + n2*b2;
// mdota = m0*a0 + m1*a1 + m2*a2;
// mdotb = m0*b0 + m1*b1 + m2*b2;
// prod0 = ndota*mdotb - ndotb*mdota;
//
// ndotc = n0*c0 + n1*c1 + n2*c2;
// ndotd = n0*d0 + n1*d1 + n2*d2;
// mdotc = m0*c0 + m1*c1 + m2*c2;
// mdotd = m0*d0 + m1*d1 + m2*d2;
// prod1 = ndotc*mdotd - ndotd*mdotc;
//
// diff = prod1 - prod0;
//
// {
// # A,B,C, and D are meant to be normalized S2Point values, so their
// # magnitude will be at most 1. M and N are allowed to be unnormalized cell
// # edge normals, so their magnitude can be up to sqrt(2). In each case the
// # components will be at most one.
// a0 in [-1, 1]
// /\ a1 in [-1, 1]
// /\ a2 in [-1, 1]
// /\ b0 in [-1, 1]
// /\ b1 in [-1, 1]
// /\ b2 in [-1, 1]
// /\ c0 in [-1, 1]
// /\ c1 in [-1, 1]
// /\ c2 in [-1, 1]
// /\ d0 in [-1, 1]
// /\ d1 in [-1, 1]
// /\ d2 in [-1, 1]
//
// /\ n0 in [-1, 1]
// /\ n1 in [-1, 1]
// /\ n2 in [-1, 1]
// /\ m0 in [-1, 1]
// /\ m1 in [-1, 1]
// /\ m2 in [-1, 1]
//
// # We always dot an unnormalized normal against a normalized point so the
// # magnitude of the dot product in each case is bounded by sqrt(2).
// /\ |ndota_| in [0, 1.4142135623730954]
// /\ |ndotb_| in [0, 1.4142135623730954]
// /\ |ndotc_| in [0, 1.4142135623730954]
// /\ |ndotd_| in [0, 1.4142135623730954]
//
// /\ |mdota_| in [0, 1.4142135623730954]
// /\ |mdotb_| in [0, 1.4142135623730954]
// /\ |mdotc_| in [0, 1.4142135623730954]
// /\ |mdotd_| in [0, 1.4142135623730954]
//
// ->
// |diff_ - diff| in ?
// }
//
// > gappa proof.gappa
// Results:
// |diff_ - diff| in [0, 1145679351550454559b-107 {7.06079e-15, 2^(-47.0091)}]
//
// >>> 1145679351550454559*2**-107/2**-52
// 31.79898987322334
// Gappa proof for TriageCircleEdgeIntersectionSign
//
// # Use IEEE754 double precision, round-to-nearest by default.
// @rnd = float<ieee_64, ne>;
//
// # Four vectors, two forming an edge AB and two normals (X,Y) for great
// # circles.
// a0 = rnd(a0_ex);
// a1 = rnd(a1_ex);
// a2 = rnd(a2_ex);
// b0 = rnd(b0_ex);
// b1 = rnd(b1_ex);
// b2 = rnd(b2_ex);
// n0 = rnd(n0_ex);
// n1 = rnd(n1_ex);
// n2 = rnd(n2_ex);
// x0 = rnd(x0_ex);
// x1 = rnd(x1_ex);
// x2 = rnd(x2_ex);
//
// # (AxB)xX = (X*A)B - (X*B)*A -- Lagrange's formula
// # ((AxB)xX)*Y = ((X*A)B - (X*B)*A)*Y
// # = (X*A)(Y*B) - (X*B)(Y*A)
//
// ndota_ rnd = n0*a0 + n1*a1 + n2*a2;
// ndotb_ rnd = n0*b0 + n1*b1 + n2*b2;
// xdota_ rnd = x0*a0 + x1*a1 + x2*a2;
// xdotb_ rnd = x0*b0 + x1*b1 + x2*b2;
// diff_ rnd = ndota_*xdotb_ - ndotb_*xdota_;
//
// # Compute it all again in exact arithmetic.
// ndota = n0*a0 + n1*a1 + n2*a2;
// ndotb = n0*b0 + n1*b1 + n2*b2;
// xdota = x0*a0 + x1*a1 + x2*a2;
// xdotb = x0*b0 + x1*b1 + x2*b2;
// diff = ndota*xdotb - ndotb*xdota;
//
// {
// # A and B are meant to be normalized S2Point values, so their magnitude
// # will be at most 1. X and Y are allowed to be unnormalized cell edge
// # normals, so their magnitude can be up to sqrt(2). In each case the
// # components will be at most one.
// a0 in [-1, 1]
// /\ a1 in [-1, 1]
// /\ a2 in [-1, 1]
// /\ b0 in [-1, 1]
// /\ b1 in [-1, 1]
// /\ b2 in [-1, 1]
// /\ n0 in [-1, 1]
// /\ n1 in [-1, 1]
// /\ n2 in [-1, 1]
// /\ x0 in [-1, 1]
// /\ x1 in [-1, 1]
// /\ x2 in [-1, 1]
//
// # We always dot an unnormalized normal against a normalized point so the
// # magnitude of the dot product in each case is bounded by sqrt(2).
// /\ |ndota_| in [0, 1.4142135623730954]
// /\ |ndotb_| in [0, 1.4142135623730954]
// /\ |xdota_| in [0, 1.4142135623730954]
// /\ |xdotb_| in [0, 1.4142135623730954]
//
// ->
// |diff_ - diff| in ?
// }
//
// > gappa proof.gappa
// Results:
// |diff_ - diff| in [0, 1001564163474598623b-108 {3.08631e-15, 2^(-48.2031)}]
//
// >>> 1001564163474598623*2**-108/2**-52
// 13.89949493661167
// TODO(roberts): Differences from C++
// CompareEdgeDistance
// CompareEdgeDirections
// EdgeCircumcenterSign
// GetVoronoiSiteExclusion
// GetClosestVertex
// TriageCompareLineSin2Distance
// TriageCompareLineCos2Distance
// TriageCompareLineDistance
// TriageCompareEdgeDistance
// ExactCompareLineDistance
// ExactCompareEdgeDistance
// TriageCompareEdgeDirections
// ExactCompareEdgeDirections
// ArePointsAntipodal
// ArePointsLinearlyDependent
// GetCircumcenter
// TriageEdgeCircumcenterSign
// ExactEdgeCircumcenterSign
// UnperturbedSign
// SymbolicEdgeCircumcenterSign
// ExactVoronoiSiteExclusion
//
// getClosestVertex
// triageCompareLineSin2Distance
// triageCompareLineCos2Distance
// triageCompareLineDistance
// triageCompareEdgeDistance
// exactCompareLineDistance
// exactCompareEdgeDistance
// triageCompareEdgeDirections
// exactCompareEdgeDirections
// arePointsAntipodal
// arePointsLinearlyDependent
// getCircumcenter
// triageEdgeCircumcenterSign
// exactEdgeCircumcenterSign
// unperturbedSign
// symbolicEdgeCircumcenterSign
// exactVoronoiSiteExclusion

View File

@@ -17,8 +17,8 @@ package s2
import (
"math"
"github.com/golang/geo/r2"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/r2"
"github.com/blevesearch/geo/s1"
)
// Projection defines an interface for different ways of mapping between s2 and r2 Points.
@@ -134,7 +134,7 @@ func (p *PlateCarreeProjection) Interpolate(f float64, a, b r2.Point) r2.Point {
// WrapDistance reports the coordinate wrapping distance along each axis.
func (p *PlateCarreeProjection) WrapDistance() r2.Point {
return r2.Point{p.xWrap, 0}
return r2.Point{X: p.xWrap, Y: 0}
}
// WrapDestination wraps the points if needed to get the shortest edge.
@@ -191,7 +191,7 @@ func (p *MercatorProjection) FromLatLng(ll LatLng) r2.Point {
// Note that latitudes of +/- 90 degrees yield "y" values of +/- infinity.
sinPhi := math.Sin(float64(ll.Lat))
y := 0.5 * math.Log((1+sinPhi)/(1-sinPhi))
return r2.Point{p.fromRadians * float64(ll.Lng), p.fromRadians * y}
return r2.Point{X: p.fromRadians * float64(ll.Lng), Y: p.fromRadians * y}
}
// ToLatLng returns the LatLng projected from the given R2 Point.
@@ -216,7 +216,7 @@ func (p *MercatorProjection) Interpolate(f float64, a, b r2.Point) r2.Point {
// WrapDistance reports the coordinate wrapping distance along each axis.
func (p *MercatorProjection) WrapDistance() r2.Point {
return r2.Point{p.xWrap, 0}
return r2.Point{X: p.xWrap, Y: 0}
}
// WrapDestination wraps the points if needed to get the shortest edge.
@@ -237,5 +237,5 @@ func wrapDestination(a, b r2.Point, wrapDistance func() r2.Point) r2.Point {
if wrap.Y > 0 && math.Abs(y-a.Y) > 0.5*wrap.Y {
y = a.Y + math.Remainder(y-a.Y, wrap.Y)
}
return r2.Point{x, y}
return r2.Point{X: x, Y: y}
}
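For orientation, a hedged usage sketch of the projections above, assuming the blevesearch fork keeps the upstream golang/geo API. Keyed literals such as r2.Point{X: x, Y: y}, which recur throughout this diff, keep go vet's composite-literal check happy for structs imported from other packages:
package main
import (
	"fmt"
	"github.com/blevesearch/geo/s2"
)
func main() {
	// Plate carrée: longitude/latitude map linearly to x/y.
	proj := s2.NewPlateCarreeProjection(180) // x spans [-180, 180]
	p := proj.FromLatLng(s2.LatLngFromDegrees(45, 90))
	fmt.Println(p.X, p.Y) // 90 45
}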

View File

@@ -80,13 +80,13 @@ func (q queryPQ) Swap(i, j int) {
}
// Push adds the given entry to the queue.
func (q *queryPQ) Push(x interface{}) {
func (q *queryPQ) Push(x any) {
item := x.(*queryQueueEntry)
*q = append(*q, item)
}
// Pop returns the top element of the queue.
func (q *queryPQ) Pop() interface{} {
func (q *queryPQ) Pop() any {
item := (*q)[len(*q)-1]
*q = (*q)[:len(*q)-1]
return item
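The interface{} -> any changes here (and in priorityQueue further below) are purely cosmetic: since Go 1.18, any is a predeclared alias for interface{}, so the two types are identical and satisfy heap.Interface the same way. A minimal demonstration:
package main
import "fmt"
func main() {
	var x any = 42
	var y interface{} = x // legal without conversion: identical types
	fmt.Println(y)
}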

View File

@@ -17,7 +17,7 @@ package s2
import (
"math"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/s1"
)
const maxQueryResults = math.MaxInt32
@@ -130,7 +130,7 @@ func (q *queryOptions) MaxError(x s1.ChordAngle) *queryOptions {
// This must be at least 1.
func (q *queryOptions) MaxResults(x int) *queryOptions {
// TODO(roberts): What should be done if the value is <= 0?
q.maxResults = int(x)
q.maxResults = x
return q
}

View File

@@ -19,9 +19,9 @@ import (
"io"
"math"
"github.com/golang/geo/r1"
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/r1"
"github.com/blevesearch/geo/r3"
"github.com/blevesearch/geo/s1"
)
// Rect represents a closed latitude-longitude rectangle.
@@ -31,7 +31,8 @@ type Rect struct {
}
var (
validRectLatRange = r1.Interval{-math.Pi / 2, math.Pi / 2}
// TODO(rsned): Make these public to match FullLat/FullLng from C++
validRectLatRange = r1.Interval{Lo: -math.Pi / 2, Hi: math.Pi / 2}
validRectLngRange = s1.FullInterval()
)
@@ -44,8 +45,8 @@ func FullRect() Rect { return Rect{validRectLatRange, validRectLngRange} }
// RectFromLatLng constructs a rectangle containing a single point p.
func RectFromLatLng(p LatLng) Rect {
return Rect{
Lat: r1.Interval{p.Lat.Radians(), p.Lat.Radians()},
Lng: s1.Interval{p.Lng.Radians(), p.Lng.Radians()},
Lat: r1.Interval{Lo: p.Lat.Radians(), Hi: p.Lat.Radians()},
Lng: s1.Interval{Lo: p.Lng.Radians(), Hi: p.Lng.Radians()},
}
}
@@ -56,9 +57,10 @@ func RectFromLatLng(p LatLng) Rect {
// 360 degrees or more.
//
// Examples of clamping (in degrees):
// center=(80,170), size=(40,60) -> lat=[60,90], lng=[140,-160]
// center=(10,40), size=(210,400) -> lat=[-90,90], lng=[-180,180]
// center=(-90,180), size=(20,50) -> lat=[-90,-80], lng=[155,-155]
//
// center=(80,170), size=(40,60) -> lat=[60,90], lng=[140,-160]
// center=(10,40), size=(210,400) -> lat=[-90,90], lng=[-180,180]
// center=(-90,180), size=(20,50) -> lat=[-90,-80], lng=[155,-155]
func RectFromCenterSize(center, size LatLng) Rect {
half := LatLng{size.Lat / 2, size.Lng / 2}
return RectFromLatLng(center).expanded(half)
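A hedged example of the clamping described above, using the first case from the comment:
package main
import (
	"fmt"
	"github.com/blevesearch/geo/s2"
)
func main() {
	center := s2.LatLngFromDegrees(80, 170)
	size := s2.LatLngFromDegrees(40, 60)
	r := s2.RectFromCenterSize(center, size)
	fmt.Println(r) // expected per the comment: lat=[60,90], lng=[140,-160]
}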
@@ -239,7 +241,7 @@ func (r Rect) CapBound() Cap {
poleZ = 1
poleAngle = math.Pi/2 - r.Lat.Lo
}
poleCap := CapFromCenterAngle(Point{r3.Vector{0, 0, poleZ}}, s1.Angle(poleAngle)*s1.Radian)
poleCap := CapFromCenterAngle(Point{r3.Vector{X: 0, Y: 0, Z: poleZ}}, s1.Angle(poleAngle)*s1.Radian)
// For bounding rectangles that span 180 degrees or less in longitude, the
// maximum cap size is achieved at one of the rectangle vertices. For
@@ -309,7 +311,7 @@ func intersectsLatEdge(a, b Point, lat s1.Angle, lng s1.Interval) bool {
}
// Extend this to an orthonormal frame (x,y,z) where x is the direction
// where the great circle through AB achieves its maximium latitude.
// where the great circle through AB achieves its maximum latitude.
y := Point{z.PointCross(PointFromCoords(0, 0, 1)).Normalize()}
x := y.Cross(z.Vector)
@@ -458,7 +460,6 @@ func (r *Rect) decode(d *decoder) {
r.Lat.Hi = d.readFloat64()
r.Lng.Lo = d.readFloat64()
r.Lng.Hi = d.readFloat64()
return
}
// DistanceToLatLng returns the minimum distance (measured along the surface of the sphere)
@@ -484,7 +485,8 @@ func (r Rect) DistanceToLatLng(ll LatLng) s1.Angle {
// DirectedHausdorffDistance returns the directed Hausdorff distance (measured along the
// surface of the sphere) to the given Rect. The directed Hausdorff
// distance from rectangle A to rectangle B is given by
// h(A, B) = max_{p in A} min_{q in B} d(p, q).
//
// h(A, B) = max_{p in A} min_{q in B} d(p, q).
func (r Rect) DirectedHausdorffDistance(other Rect) s1.Angle {
if r.IsEmpty() {
return 0 * s1.Radian
@@ -500,7 +502,8 @@ func (r Rect) DirectedHausdorffDistance(other Rect) s1.Angle {
// HausdorffDistance returns the undirected Hausdorff distance (measured along the
// surface of the sphere) to the given Rect.
// The Hausdorff distance between rectangle A and rectangle B is given by
// H(A, B) = max{h(A, B), h(B, A)}.
//
// H(A, B) = max{h(A, B), h(B, A)}.
func (r Rect) HausdorffDistance(other Rect) s1.Angle {
return maxAngle(r.DirectedHausdorffDistance(other),
other.DirectedHausdorffDistance(r))
@@ -585,13 +588,13 @@ func directedHausdorffDistance(lngDiff s1.Angle, a, b r1.Interval) s1.Angle {
// Case B3.
if pLat > s1.Angle(a.Lo) {
intDist, ok := interiorMaxDistance(r1.Interval{a.Lo, math.Min(float64(pLat), a.Hi)}, bLo)
intDist, ok := interiorMaxDistance(r1.Interval{Lo: a.Lo, Hi: math.Min(float64(pLat), a.Hi)}, bLo)
if ok {
maxDistance = maxAngle(maxDistance, intDist)
}
}
if pLat < s1.Angle(a.Hi) {
intDist, ok := interiorMaxDistance(r1.Interval{math.Max(float64(pLat), a.Lo), a.Hi}, bHi)
intDist, ok := interiorMaxDistance(r1.Interval{Lo: math.Max(float64(pLat), a.Lo), Hi: a.Hi}, bHi)
if ok {
maxDistance = maxAngle(maxDistance, intDist)
}
@@ -610,7 +613,7 @@ func interiorMaxDistance(aLat r1.Interval, b Point) (a s1.Angle, ok bool) {
}
// Project b to the y=0 plane. The antipodal of the normalized projection is
// the point at which the maxium distance from b occurs, if it is contained
// the point at which the maximum distance from b occurs, if it is contained
// in aLat.
intersectionPoint := PointFromCoords(-b.X, 0, -b.Z)
if !aLat.InteriorContains(float64(LatLngFromPoint(intersectionPoint).Lat)) {
@@ -632,7 +635,7 @@ func bisectorIntersection(lat r1.Interval, lng s1.Angle) Point {
}
// A vector orthogonal to longitude 0.
orthoLng := Point{r3.Vector{0, -1, 0}}
orthoLng := Point{r3.Vector{X: 0, Y: -1, Z: 0}}
return orthoLng.PointCross(PointFromLatLng(orthoBisector))
}
@@ -703,7 +706,7 @@ func (r Rect) Centroid() Point {
lng := r.Lng.Center()
z := alpha * (z2 + z1) * (z2 - z1) // scaled by the area
return Point{r3.Vector{r0 * math.Cos(lng), r0 * math.Sin(lng), z}}
return Point{r3.Vector{X: r0 * math.Cos(lng), Y: r0 * math.Sin(lng), Z: z}}
}
// BUG: The major differences from the C++ version are:

View File

@@ -17,9 +17,9 @@ package s2
import (
"math"
"github.com/golang/geo/r1"
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/r1"
"github.com/blevesearch/geo/r3"
"github.com/blevesearch/geo/s1"
)
// RectBounder is used to compute a bounding rectangle that contains all edges
@@ -132,7 +132,7 @@ func (r *RectBounder) AddPoint(b Point) {
// and B attains its minimum and maximum latitudes). To test whether AB
// crosses this plane, we compute a vector M perpendicular to this
// plane and then project A and B onto it.
m := n.Cross(r3.Vector{0, 0, 1})
m := n.Cross(r3.Vector{X: 0, Y: 0, Z: 1})
mA := m.Dot(r.a.Vector)
mB := m.Dot(b.Vector)
@@ -216,8 +216,7 @@ func (r *RectBounder) RectBound() Rect {
// More precisely, if L is a loop that does not contain either pole, and S
// is a loop such that L.Contains(S), then
//
// ExpandForSubregions(L.RectBound).Contains(S.RectBound).
//
// ExpandForSubregions(L.RectBound).Contains(S.RectBound).
func ExpandForSubregions(bound Rect) Rect {
// Empty bounds don't need expansion.
if bound.IsEmpty() {

View File

@@ -75,7 +75,7 @@ package s2
import (
"strings"
"github.com/golang/geo/s1"
"github.com/blevesearch/geo/s1"
)
type TermType int

View File

@@ -38,21 +38,21 @@ import (
//
// Note the following:
//
// - MinLevel takes priority over MaxCells, i.e. cells below the given level will
// never be used even if this causes a large number of cells to be returned.
// - MinLevel takes priority over MaxCells, i.e. cells below the given level will
// never be used even if this causes a large number of cells to be returned.
//
// - For any setting of MaxCells, up to 6 cells may be returned if that
// is the minimum number of cells required (e.g. if the region intersects
// all six face cells). Up to 3 cells may be returned even for very tiny
// convex regions if they happen to be located at the intersection of
// three cube faces.
// - For any setting of MaxCells, up to 6 cells may be returned if that
// is the minimum number of cells required (e.g. if the region intersects
// all six face cells). Up to 3 cells may be returned even for very tiny
// convex regions if they happen to be located at the intersection of
// three cube faces.
//
// - For any setting of MaxCells, an arbitrary number of cells may be
// returned if MinLevel is too high for the region being approximated.
// - For any setting of MaxCells, an arbitrary number of cells may be
// returned if MinLevel is too high for the region being approximated.
//
// - If MaxCells is less than 4, the area of the covering may be
// arbitrarily large compared to the area of the original region even if
// the region is convex (e.g. a Cap or Rect).
// - If MaxCells is less than 4, the area of the covering may be
// arbitrarily large compared to the area of the original region even if
// the region is convex (e.g. a Cap or Rect).
//
// The approximation algorithm is not optimal but does a pretty good job in
// practice. The output does not always use the maximum number of cells
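As a sketch of how the parameters above interact (standard golang/geo usage, assumed unchanged in this fork):
package main
import (
	"fmt"
	"github.com/blevesearch/geo/s1"
	"github.com/blevesearch/geo/s2"
)
func main() {
	rc := &s2.RegionCoverer{MaxLevel: 12, MaxCells: 8, LevelMod: 1}
	region := s2.CapFromCenterAngle(
		s2.PointFromLatLng(s2.LatLngFromDegrees(40, -70)), s1.Angle(0.01))
	covering := rc.Covering(region)
	fmt.Println(len(covering)) // at most 8 cells unless the constraints force more
}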
@@ -83,7 +83,7 @@ type RegionCoverer struct {
func NewRegionCoverer() *RegionCoverer {
return &RegionCoverer{
MinLevel: 0,
MaxLevel: maxLevel,
MaxLevel: MaxLevel,
LevelMod: 1,
MaxCells: 8,
}
@@ -91,7 +91,7 @@ func NewRegionCoverer() *RegionCoverer {
type coverer struct {
minLevel int // the minimum cell level to be used.
maxLevel int // the maximum cell level to be used.
MaxLevel int // the maximum cell level to be used.
levelMod int // the LevelMod to be used.
maxCells int // the maximum desired number of cells in the approximation.
region Region
@@ -123,12 +123,12 @@ func (pq priorityQueue) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
}
func (pq *priorityQueue) Push(x interface{}) {
func (pq *priorityQueue) Push(x any) {
item := x.(*candidate)
*pq = append(*pq, item)
}
func (pq *priorityQueue) Pop() interface{} {
func (pq *priorityQueue) Pop() any {
item := (*pq)[len(*pq)-1]
*pq = (*pq)[:len(*pq)-1]
return item
@@ -150,10 +150,10 @@ func (c *coverer) newCandidate(cell Cell) *candidate {
if c.interiorCovering {
if c.region.ContainsCell(cell) {
cand.terminal = true
} else if level+c.levelMod > c.maxLevel {
} else if level+c.levelMod > c.MaxLevel {
return nil
}
} else if level+c.levelMod > c.maxLevel || c.region.ContainsCell(cell) {
} else if level+c.levelMod > c.MaxLevel || c.region.ContainsCell(cell) {
cand.terminal = true
}
}
@@ -268,7 +268,7 @@ func (c *coverer) adjustCellLevels(cells *CellUnion) {
// initialCandidates computes a set of initial candidates that cover the given region.
func (c *coverer) initialCandidates() {
// Optimization: start with a small (usually 4 cell) covering of the region's bounding cap.
temp := &RegionCoverer{MaxLevel: c.maxLevel, LevelMod: 1, MaxCells: minInt(4, c.maxCells)}
temp := &RegionCoverer{MaxLevel: c.MaxLevel, LevelMod: 1, MaxCells: minInt(4, c.maxCells)}
cells := temp.FastCovering(c.region)
c.adjustCellLevels(&cells)
@@ -336,8 +336,8 @@ func (c *coverer) coveringInternal(region Region) {
// newCoverer returns an instance of coverer.
func (rc *RegionCoverer) newCoverer() *coverer {
return &coverer{
minLevel: maxInt(0, minInt(maxLevel, rc.MinLevel)),
maxLevel: maxInt(0, minInt(maxLevel, rc.MaxLevel)),
minLevel: maxInt(0, minInt(MaxLevel, rc.MinLevel)),
MaxLevel: maxInt(0, minInt(MaxLevel, rc.MaxLevel)),
levelMod: maxInt(1, minInt(3, rc.LevelMod)),
maxCells: rc.MaxCells,
}
@@ -346,14 +346,14 @@ func (rc *RegionCoverer) newCoverer() *coverer {
// Covering returns a CellUnion that covers the given region and satisfies the various restrictions.
func (rc *RegionCoverer) Covering(region Region) CellUnion {
covering := rc.CellUnion(region)
covering.Denormalize(maxInt(0, minInt(maxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod)))
covering.Denormalize(maxInt(0, minInt(MaxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod)))
return covering
}
// InteriorCovering returns a CellUnion that is contained within the given region and satisfies the various restrictions.
func (rc *RegionCoverer) InteriorCovering(region Region) CellUnion {
intCovering := rc.InteriorCellUnion(region)
intCovering.Denormalize(maxInt(0, minInt(maxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod)))
intCovering.Denormalize(maxInt(0, minInt(MaxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod)))
return intCovering
}
@@ -404,23 +404,23 @@ func (rc *RegionCoverer) FastCovering(region Region) CellUnion {
// IsCanonical reports whether the given CellUnion represents a valid covering
// that conforms to the current covering parameters. In particular:
//
// - All CellIDs must be valid.
// - All CellIDs must be valid.
//
// - CellIDs must be sorted and non-overlapping.
// - CellIDs must be sorted and non-overlapping.
//
// - CellID levels must satisfy MinLevel, MaxLevel, and LevelMod.
// - CellID levels must satisfy MinLevel, MaxLevel, and LevelMod.
//
// - If the covering has more than MaxCells, there must be no two cells with
// a common ancestor at MinLevel or higher.
// - If the covering has more than MaxCells, there must be no two cells with
// a common ancestor at MinLevel or higher.
//
// - There must be no sequence of cells that could be replaced by an
// ancestor (i.e. with LevelMod == 1, the 4 child cells of a parent).
// - There must be no sequence of cells that could be replaced by an
// ancestor (i.e. with LevelMod == 1, the 4 child cells of a parent).
func (rc *RegionCoverer) IsCanonical(covering CellUnion) bool {
return rc.newCoverer().isCanonical(covering)
}
// normalizeCovering normalizes the "covering" so that it conforms to the
// current covering parameters (maxCells, minLevel, maxLevel, and levelMod).
// current covering parameters (maxCells, minLevel, MaxLevel, and levelMod).
// This method makes no attempt to be optimal. In particular, if
// minLevel > 0 or levelMod > 1 then it may return more than the
// desired number of cells even when this isn't necessary.
@@ -429,10 +429,10 @@ func (rc *RegionCoverer) IsCanonical(covering CellUnion) bool {
// all of the code in this function is skipped.
func (c *coverer) normalizeCovering(covering *CellUnion) {
// If any cells are too small, or don't satisfy levelMod, then replace them with ancestors.
if c.maxLevel < maxLevel || c.levelMod > 1 {
if c.MaxLevel < MaxLevel || c.levelMod > 1 {
for i, ci := range *covering {
level := ci.Level()
newLevel := c.adjustLevel(minInt(level, c.maxLevel))
newLevel := c.adjustLevel(minInt(level, c.MaxLevel))
if newLevel != level {
(*covering)[i] = ci.Parent(newLevel)
}
@@ -500,9 +500,9 @@ func (c *coverer) normalizeCovering(covering *CellUnion) {
// isCanonical reports whether the covering is canonical.
func (c *coverer) isCanonical(covering CellUnion) bool {
trueMax := c.maxLevel
trueMax := c.MaxLevel
if c.levelMod != 1 {
trueMax = c.maxLevel - (c.maxLevel-c.minLevel)%c.levelMod
trueMax = c.MaxLevel - (c.MaxLevel-c.minLevel)%c.levelMod
}
tooManyCells := len(covering) > c.maxCells
sameParentCount := 1

View File

@@ -14,8 +14,19 @@
package s2
// A RegionUnion represents a union of possibly overlapping regions.
// It is convenient for computing a covering of a set of regions.
// A RegionUnion represents a union of possibly overlapping regions. It is
// convenient for computing a covering of a set of regions. However, note
// that currently, using RegionCoverer to compute coverings of RegionUnions
// may produce coverings with considerably fewer than the requested number of
// cells in cases of overlapping or tiling regions. This occurs because the
// current RegionUnion.Contains implementation for Cells only returns
// true if the cell is fully contained by one of the regions. So, cells along
// internal boundaries in the region union will be subdivided by the coverer
// even though this is unnecessary, using up the maxSize cell budget. Then,
// when the coverer normalizes the covering, groups of 4 sibling cells along
// these internal borders will be replaced by parents, resulting in coverings
// that may have significantly fewer than maxSize cells, and so are less
// accurate. This is not a concern for unions of disjoint regions.
type RegionUnion []Region
// CapBound returns a bounding cap for this RegionUnion.
@@ -31,6 +42,9 @@ func (ru RegionUnion) RectBound() Rect {
}
// ContainsCell reports whether the given Cell is contained by this RegionUnion.
//
// The current implementation only returns true if one of the regions in the
// union fully contains the cell.
func (ru RegionUnion) ContainsCell(c Cell) bool {
for _, reg := range ru {
if reg.ContainsCell(c) {

View File

@@ -18,17 +18,17 @@ import (
"sort"
)
// Edge represents a geodesic edge consisting of two vertices. Zero-length edges are
// allowed, and can be used to represent points.
// Edge represents a geodesic edge consisting of two vertices. Zero-length
// edges are allowed, and can be used to represent points.
type Edge struct {
V0, V1 Point
}
// Cmp compares the two edges using the underlying Points Cmp method and returns
//
// -1 if e < other
// 0 if e == other
// +1 if e > other
// -1 if e < other
// 0 if e == other
// +1 if e > other
//
// The two edges are compared by first vertex, and then by the second vertex.
func (e Edge) Cmp(other Edge) int {
@@ -38,6 +38,27 @@ func (e Edge) Cmp(other Edge) int {
return e.V1.Cmp(other.V1.Vector)
}
// TODO(rsned): Add helpers for <=, >=
// Reversed returns a new edge with the vertices reversed.
func (e Edge) Reversed() Edge {
return Edge{V0: e.V1, V1: e.V0}
}
// IsDegenerate reports if the edge is degenerate.
func (e Edge) IsDegenerate() bool { return e.V0 == e.V1 }
// Incoming reports if point equals v1, indicating this edge is arriving.
func (e Edge) Incoming(point Point) bool { return e.V1 == point }
// Outgoing reports if point equals v0, indicating this edge is leaving.
func (e Edge) Outgoing(point Point) bool { return e.V0 == point }
// IncidentOn reports if point is one of the vertices of this edge.
func (e Edge) IncidentOn(point Point) bool {
return e.Incoming(point) || e.Outgoing(point)
}
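A quick sketch of the new Edge helpers in use (method names as added above):
package main
import (
	"fmt"
	"github.com/blevesearch/geo/s2"
)
func main() {
	a := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 0))
	b := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 10))
	e := s2.Edge{V0: a, V1: b}
	fmt.Println(e.Reversed())     // Edge{V0: b, V1: a}
	fmt.Println(e.IsDegenerate()) // false
	fmt.Println(e.IncidentOn(a))  // true: a is the outgoing vertex
}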
// sortEdges sorts the slice of Edges in place.
func sortEdges(e []Edge) {
sort.Sort(edges(e))
@@ -50,6 +71,8 @@ func (e edges) Len() int { return len(e) }
func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e edges) Less(i, j int) bool { return e[i].Cmp(e[j]) == -1 }
// TODO(rsned): Implement the slices.SortFunc interface.
// ShapeEdgeID is a unique identifier for an Edge within an ShapeIndex,
// consisting of a (shapeID, edgeID) pair.
type ShapeEdgeID struct {
@@ -59,9 +82,9 @@ type ShapeEdgeID struct {
// Cmp compares the two ShapeEdgeIDs and returns
//
// -1 if s < other
// 0 if s == other
// +1 if s > other
// -1 if s < other
// 0 if s == other
// +1 if s > other
//
// The two are compared first by shape id and then by edge id.
func (s ShapeEdgeID) Cmp(other ShapeEdgeID) int {
@@ -193,7 +216,7 @@ type Shape interface {
// Chain(i).start + Chain(i).length == NumEdges(), for i == NumChains()-1
Chain(chainID int) Chain
// ChainEdgeReturns the edge at offset "offset" within edge chain "chainID".
// ChainEdge returns the edge at offset "offset" within edge chain "chainID".
// Equivalent to "shape.Edge(shape.Chain(chainID).start + offset)"
// but more efficient.
ChainEdge(chainID, offset int) Edge
@@ -261,3 +284,9 @@ var (
_ Shape = &Polygon{}
_ Shape = &Polyline{}
)
// TODO(rsned): Remaining methods and types from C++
// ChainVertexIterator
// ChainVertexRange
// ChainIterator
// ChainRange

View File

@@ -20,8 +20,8 @@ import (
"sync"
"sync/atomic"
"github.com/golang/geo/r1"
"github.com/golang/geo/r2"
"github.com/blevesearch/geo/r1"
"github.com/blevesearch/geo/r2"
)
// CellRelation describes the possible relationships between a target cell
@@ -129,6 +129,14 @@ func (s *ShapeIndexCell) numEdges() int {
return e
}
// clipped returns the clipped shape at the given index. Shapes are kept sorted in
// increasing order of shape id.
//
// Requires: 0 <= i < len(shapes)
func (s *ShapeIndexCell) clipped(i int) *clippedShape {
return s.shapes[i]
}
// add adds the given clipped shape to this index cell.
func (s *ShapeIndexCell) add(c *clippedShape) {
// C++ uses a set, so it's ordered and unique. We don't currently catch
@@ -170,7 +178,7 @@ func (s *ShapeIndexCell) findByShapeID(shapeID int32) *clippedShape {
type faceEdge struct {
shapeID int32 // The ID of shape that this edge belongs to
edgeID int // Edge ID within that shape
maxLevel int // Not desirable to subdivide this edge beyond this level
MaxLevel int // Not desirable to subdivide this edge beyond this level
hasInterior bool // Belongs to a shape that has a dimension of 2
a, b r2.Point // The edge endpoints, clipped to a given face
edge Edge // The original edge.
@@ -197,10 +205,9 @@ const (
// ShapeIndexIterator is an iterator that provides low-level access to
// the cells of the index. Cells are returned in increasing order of CellID.
//
// for it := index.Iterator(); !it.Done(); it.Next() {
// fmt.Print(it.CellID())
// }
//
// for it := index.Iterator(); !it.Done(); it.Next() {
// fmt.Print(it.CellID())
// }
type ShapeIndexIterator struct {
index *ShapeIndex
position int
@@ -407,7 +414,7 @@ func newTracker() *tracker {
t := &tracker{
isActive: false,
b: trackerOrigin(),
nextCellID: CellIDFromFace(0).ChildBeginAtLevel(maxLevel),
nextCellID: CellIDFromFace(0).ChildBeginAtLevel(MaxLevel),
}
t.drawTo(Point{faceUVToXYZ(0, -1, -1).Normalize()}) // CellID curve start
@@ -571,12 +578,11 @@ const (
//
// Example showing how to build an index of Polylines:
//
// index := NewShapeIndex()
// for _, polyline := range polylines {
// index.Add(polyline);
// }
// // Now you can use a CrossingEdgeQuery or ClosestEdgeQuery here.
//
// index := NewShapeIndex()
// for _, polyline := range polylines {
// index.Add(polyline);
// }
// // Now you can use a CrossingEdgeQuery or ClosestEdgeQuery here.
type ShapeIndex struct {
// shapes is a map of shape ID to shape.
shapes map[int32]Shape
@@ -606,7 +612,7 @@ type ShapeIndex struct {
// the amount of entities added grows.
// - Often the index will never be queried, in which case we can save both
// the time and memory required to build it. Examples:
// + Loops that are created simply to pass to an Polygon. (We don't
// + Loops that are created simply to pass to a Polygon. (We don't
// need the Loop index, because Polygon builds its own index.)
// + Applications that load a database of geometry and then query only
// a small fraction of it.
@@ -661,6 +667,15 @@ func (s *ShapeIndex) End() *ShapeIndexIterator {
return NewShapeIndexIterator(s, IteratorEnd)
}
// Region returns a new ShapeIndexRegion for this ShapeIndex.
func (s *ShapeIndex) Region() *ShapeIndexRegion {
return &ShapeIndexRegion{
index: s,
containsQuery: NewContainsPointQuery(s, VertexModelSemiOpen),
iter: s.Iterator(),
}
}
// Len reports the number of Shapes in this index.
func (s *ShapeIndex) Len() int {
return len(s.shapes)
@@ -873,7 +888,7 @@ func (s *ShapeIndex) addShapeInternal(shapeID int32, allEdges [][]faceEdge, t *t
faceEdge.edgeID = e
faceEdge.edge = edge
faceEdge.maxLevel = maxLevelForEdge(edge)
faceEdge.MaxLevel = maxLevelForEdge(edge)
s.addFaceEdge(faceEdge, allEdges)
}
}
@@ -885,9 +900,9 @@ func (s *ShapeIndex) addFaceEdge(fe faceEdge, allEdges [][]faceEdge) {
// the edge of the face that they don't intersect any (padded) adjacent face.
if aFace == face(fe.edge.V1.Vector) {
x, y := validFaceXYZToUV(aFace, fe.edge.V0.Vector)
fe.a = r2.Point{x, y}
fe.a = r2.Point{X: x, Y: y}
x, y = validFaceXYZToUV(aFace, fe.edge.V1.Vector)
fe.b = r2.Point{x, y}
fe.b = r2.Point{X: x, Y: y}
maxUV := 1 - cellPadding
if math.Abs(fe.a.X) <= maxUV && math.Abs(fe.a.Y) <= maxUV &&
@@ -992,7 +1007,7 @@ func (s *ShapeIndex) skipCellRange(begin, end CellID, t *tracker, disjointFromIn
// given cell. disjointFromIndex is an optimization hint indicating that cellMap
// does not contain any entries that overlap the given cell.
func (s *ShapeIndex) updateEdges(pcell *PaddedCell, edges []*clippedEdge, t *tracker, disjointFromIndex bool) {
// This function is recursive with a maximum recursion depth of 30 (maxLevel).
// This function is recursive with a maximum recursion depth of 30 (MaxLevel).
// Incremental updates are handled as follows. All edges being added or
// removed are combined together in edges, and all shapes with interiors
@@ -1019,16 +1034,19 @@ func (s *ShapeIndex) updateEdges(pcell *PaddedCell, edges []*clippedEdge, t *tra
// the existing cell contents by absorbing the cell.
iter := s.Iterator()
r := iter.LocateCellID(pcell.id)
if r == Disjoint {
switch r {
case Disjoint:
disjointFromIndex = true
} else if r == Indexed {
case Indexed:
// Absorb the index cell by transferring its contents to edges and
// deleting it. We also start tracking the interior of any new shapes.
s.absorbIndexCell(pcell, iter, edges, t)
indexCellAbsorbed = true
disjointFromIndex = true
} else {
// DCHECK_EQ(SUBDIVIDED, r)
case Subdivided:
// TODO(rsned): Figure out the right way to deal with
// this case since we don't DCHECK.
// ABSL_DCHECK_EQ(SUBDIVIDED, r)
}
}
@@ -1140,7 +1158,7 @@ func (s *ShapeIndex) makeIndexCell(p *PaddedCell, edges []*clippedEdge, t *track
// Return false if there are too many such edges.
count := 0
for _, ce := range edges {
if p.Level() < ce.faceEdge.maxLevel {
if p.Level() < ce.faceEdge.MaxLevel {
count++
}
@@ -1331,7 +1349,7 @@ func (s *ShapeIndex) clipVBound(edge *clippedEdge, vEnd int, v float64) *clipped
return s.updateBound(edge, uEnd, u, vEnd, v)
}
// cliupVAxis returns the given edge clipped to within the boundaries of the middle
// clipVAxis returns the given edge clipped to within the boundaries of the middle
// interval along the v-axis, and adds the result to its children.
func (s *ShapeIndex) clipVAxis(edge *clippedEdge, middle r1.Interval) (a, b *clippedEdge) {
if edge.bound.Y.Hi <= middle.Lo {
@@ -1427,7 +1445,7 @@ func (s *ShapeIndex) absorbIndexCell(p *PaddedCell, iter *ShapeIndexIterator, ed
edgeID := clipped.edges[i]
edge.edgeID = edgeID
edge.edge = shape.Edge(edgeID)
edge.maxLevel = maxLevelForEdge(edge.edge)
edge.MaxLevel = maxLevelForEdge(edge.edge)
if edge.hasInterior {
t.testEdge(shapeID, edge.edge)
}
@@ -1459,9 +1477,11 @@ func (s *ShapeIndex) absorbIndexCell(p *PaddedCell, iter *ShapeIndexIterator, ed
}
// Update the edge list and delete this cell from the index.
edges, newEdges = newEdges, edges
// TODO(rsned): Figure out best fix for this. Linters are
// flagging the swap because newEdges is no longer used after
// this.
edges, newEdges = newEdges, edges // nolint
delete(s.cellMap, p.id)
// TODO(roberts): delete from s.Cells
}
// testAllEdges calls the trackers testEdge on all edges from shapes that have interiors.

View File

@@ -0,0 +1,133 @@
// Copyright 2023 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
// ShapeIndexRegion wraps a ShapeIndex and implements the Region interface.
// This allows RegionCoverer to work with ShapeIndexes as well as being
// able to be used by some of the Query types.
type ShapeIndexRegion struct {
index *ShapeIndex
containsQuery *ContainsPointQuery
iter *ShapeIndexIterator
}
// TODO(roberts): Uncomment once implementation is complete.
// Enforce Region interface satisfaction similar to other types that implement Region.
// var _ Region = (*ShapeIndexRegion)(nil)
// CapBound returns a bounding spherical cap for this collection of geometry.
// This is not guaranteed to be exact.
func (s *ShapeIndexRegion) CapBound() Cap {
cu := CellUnion(s.CellUnionBound())
return cu.CapBound()
}
// RectBound returns a bounding rectangle for this collection of geometry.
// The bounds are not guaranteed to be tight.
func (s *ShapeIndexRegion) RectBound() Rect {
cu := CellUnion(s.CellUnionBound())
return cu.RectBound()
}
// CellUnionBound returns the bounding CellUnion for this collection of geometry.
// This method currently returns at most 4 cells, unless the index spans
// multiple faces in which case it may return up to 6 cells.
func (s *ShapeIndexRegion) CellUnionBound() []CellID {
// We find the range of Cells spanned by the index and choose a level such
// that the entire index can be covered with just a few cells. There are
// two cases:
//
// - If the index intersects two or more faces, then for each intersected
// face we add one cell to the covering. Rather than adding the entire
// face, instead we add the smallest Cell that covers the ShapeIndex
// cells within that face.
//
// - If the index intersects only one face, then we first find the smallest
// cell S that contains the index cells (just like the case above).
// cell S that contains the index cells (just like the case above).
// However, rather than using the cell S itself, we repeat this
// process for each of its child cells. In other words, for each
// child cell C we add the smallest Cell C' that covers the index cells
// within C. This extra step is relatively cheap and produces much
// tighter coverings when the ShapeIndex consists of a small region
// near the center of a large Cell.
var cellIDs []CellID
// Find the last CellID in the index.
s.iter.End()
if !s.iter.Prev() {
return cellIDs // Empty index.
}
lastIndexID := s.iter.CellID()
s.iter.Begin()
if s.iter.CellID() != lastIndexID {
// The index has at least two cells. Choose a CellID level such that
// the entire index can be spanned with at most 6 cells (if the index
// spans multiple faces) or 4 cells (if the index spans a single face).
level, ok := s.iter.CellID().CommonAncestorLevel(lastIndexID)
if !ok {
// C++ returns -1 for no common level, ours returns 0. Set
// to -1 so the next ++ puts us at the same place as C++ does.
level = -1
}
level++
// For each cell C at the chosen level, we compute the smallest Cell
// that covers the ShapeIndex cells within C.
lastID := lastIndexID.Parent(level)
for id := s.iter.CellID().Parent(level); id != lastID; id = id.Next() {
// If the cell C does not contain any index cells, then skip it.
if id.RangeMax() < s.iter.CellID() {
continue
}
// Find the range of index cells contained by C and then shrink C so
// that it just covers those cells.
first := s.iter.CellID()
s.iter.seek(id.RangeMax().Next())
s.iter.Prev()
cellIDs = s.coverRange(first, s.iter.CellID(), cellIDs)
s.iter.Next()
}
}
return s.coverRange(s.iter.CellID(), lastIndexID, cellIDs)
}
// coverRange computes the smallest CellID that covers the Cell range (first, last)
// and returns the updated slice.
//
// This requires first and last have a common ancestor.
func (s *ShapeIndexRegion) coverRange(first, last CellID, cellIDs []CellID) []CellID {
// The range consists of a single index cell.
if first == last {
return append(cellIDs, first)
}
// Add the lowest common ancestor of the given range.
level, ok := first.CommonAncestorLevel(last)
if !ok {
return append(cellIDs, CellID(0))
}
return append(cellIDs, first.Parent(level))
}
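coverRange leans on CellID.CommonAncestorLevel; a small illustration of that primitive (hedged; the resulting level depends on the cells chosen):
package main
import (
	"fmt"
	"github.com/blevesearch/geo/s2"
)
func main() {
	first := s2.CellIDFromLatLng(s2.LatLngFromDegrees(40.0, -70.0))
	last := s2.CellIDFromLatLng(s2.LatLngFromDegrees(40.1, -70.1))
	if level, ok := first.CommonAncestorLevel(last); ok {
		// The smallest single cell covering both is the parent at that level.
		fmt.Println(first.Parent(level))
	}
}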
// TODO(roberts): remaining methods
/*
// ContainsCell(target Cell) bool {
// IntersectsCell(target Cell) bool {
// ContainsPoint(p Point) bool {
// contains(id CellID, clipped clippedShape, p Point) bool {
// anyEdgeIntersects(clipped clippedShape, target Cell) bool {
*/

View File

@@ -52,6 +52,8 @@ func newRangeIterator(index *ShapeIndex) *rangeIterator {
func (r *rangeIterator) cellID() CellID { return r.it.CellID() }
func (r *rangeIterator) indexCell() *ShapeIndexCell { return r.it.IndexCell() }
func (r *rangeIterator) clipped() *clippedShape { return r.indexCell().clipped(0) }
func (r *rangeIterator) containsCenter() bool { return r.clipped().containsCenter }
func (r *rangeIterator) next() { r.it.Next(); r.refresh() }
func (r *rangeIterator) done() bool { return r.it.Done() }

View File

@@ -17,9 +17,11 @@ package s2
import (
"math"
"github.com/golang/geo/r3"
"github.com/blevesearch/geo/r3"
)
// TODO(rsned): Rename this to coords.go to match the C++
//
// This file contains documentation of the various coordinate systems used
// throughout the library. Most importantly, S2 defines a framework for
@@ -32,7 +34,7 @@ import (
// transformation is designed to make the cells at each level fairly uniform
// in size.
//
////////////////////////// S2 Cell Decomposition /////////////////////////
// /////////////////////// S2 Cell Decomposition /////////////////////////
//
// The following methods define the cube-to-sphere projection used by
// the Cell decomposition.
@@ -41,7 +43,7 @@ import (
// id, the following coordinate systems are used:
//
// (id)
// An CellID is a 64-bit encoding of a face and a Hilbert curve position
// A CellID is a 64-bit encoding of a face and a Hilbert curve position
// on that face. The Hilbert curve position implicitly encodes both the
// position of a cell and its subdivision level (see s2cellid.go).
//
@@ -88,7 +90,7 @@ import (
// (x, y, z)
// Direction vector (Point). Direction vectors are not necessarily unit
// length, and are often chosen to be points on the biunit cube
// [-1,+1]x[-1,+1]x[-1,+1]. They can be be normalized to obtain the
// [-1,+1]x[-1,+1]x[-1,+1]. They can be normalized to obtain the
// corresponding point on the unit sphere.
//
// (lat, lng)
@@ -144,14 +146,21 @@ import (
// implementations may offer other choices.
const (
// The maximum absolute error in U/V coordinates when converting from XYZ.
//
// The XYZ -> UV conversion is a single division per coordinate, which is
// promised to be at most 0.5*dblEpsilon absolute error for values with
// magnitude less than two.
maxXYZtoUVError = 0.5 * dblEpsilon
// maxSiTi is the maximum value of an si- or ti-coordinate.
// It is one shift more than maxSize. The range of valid (si,ti)
// It is one shift more than MaxSize. The range of valid (si,ti)
// values is [0..maxSiTi].
maxSiTi = maxSize << 1
maxSiTi = MaxSize << 1
)
// siTiToST converts an si- or ti-value to the corresponding s- or t-value.
// Value is capped at 1.0 because there is no DCHECK in Go.
// Value is capped at 1.0 because there is no ABSL_DCHECK in Go.
func siTiToST(si uint32) float64 {
if si > maxSiTi {
return 1.0
@@ -205,6 +214,19 @@ func face(r r3.Vector) int {
return int(f)
}
// ijToSTMin converts the i- or j-index of a leaf cell to the minimum corresponding
// s- or t-value contained by that cell. The argument must be in the range
// [0..2**30], i.e. up to one position beyond the normal range of valid leaf
// cell indices.
func ijToSTMin(i int) float64 {
return float64(i) / float64(MaxSize)
}
// stToIJ converts value in ST coordinates to a value in IJ coordinates.
func stToIJ(s float64) int {
return clampInt(int(math.Floor(MaxSize*s)), 0, MaxSize-1)
}
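These two functions are inverses on valid leaf-cell indices; a hedged standalone check, assuming MaxSize = 2^30 as in upstream s2:
package main
import (
	"fmt"
	"math"
)
const maxSize = 1 << 30 // stand-in for s2.MaxSize
func ijToSTMin(i int) float64 { return float64(i) / float64(maxSize) }
func stToIJ(s float64) int {
	v := int(math.Floor(maxSize * s))
	if v < 0 {
		return 0
	}
	if v > maxSize-1 {
		return maxSize - 1
	}
	return v
}
func main() {
	i := 123456789
	fmt.Println(stToIJ(ijToSTMin(i)) == i) // true: the round trip recovers i
}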
// validFaceXYZToUV given a valid face for the given point r (meaning that
// dot product of r with the face normal is positive), returns
// the corresponding u and v values, which may lie outside the range [-1,1].
@@ -236,17 +258,17 @@ func xyzToFaceUV(r r3.Vector) (f int, u, v float64) {
func faceUVToXYZ(face int, u, v float64) r3.Vector {
switch face {
case 0:
return r3.Vector{1, u, v}
return r3.Vector{X: 1, Y: u, Z: v}
case 1:
return r3.Vector{-u, 1, v}
return r3.Vector{X: -u, Y: 1, Z: v}
case 2:
return r3.Vector{-u, -v, 1}
return r3.Vector{X: -u, Y: -v, Z: 1}
case 3:
return r3.Vector{-1, -v, -u}
return r3.Vector{X: -1, Y: -v, Z: -u}
case 4:
return r3.Vector{v, -1, -u}
return r3.Vector{X: v, Y: -1, Z: -u}
default:
return r3.Vector{v, u, -1}
return r3.Vector{X: v, Y: u, Z: -1}
}
}
@@ -291,17 +313,17 @@ func faceXYZtoUVW(face int, p Point) Point {
// axes for the given face (see faceUVWAxes).
switch face {
case 0:
return Point{r3.Vector{p.Y, p.Z, p.X}}
return Point{r3.Vector{X: p.Y, Y: p.Z, Z: p.X}}
case 1:
return Point{r3.Vector{-p.X, p.Z, p.Y}}
return Point{r3.Vector{X: -p.X, Y: p.Z, Z: p.Y}}
case 2:
return Point{r3.Vector{-p.X, -p.Y, p.Z}}
return Point{r3.Vector{X: -p.X, Y: -p.Y, Z: p.Z}}
case 3:
return Point{r3.Vector{-p.Z, -p.Y, -p.X}}
return Point{r3.Vector{X: -p.Z, Y: -p.Y, Z: -p.X}}
case 4:
return Point{r3.Vector{-p.Z, p.X, -p.Y}}
return Point{r3.Vector{X: -p.Z, Y: p.X, Z: -p.Y}}
default:
return Point{r3.Vector{p.Y, p.X, -p.Z}}
return Point{r3.Vector{X: p.Y, Y: p.X, Z: -p.Z}}
}
}
@@ -322,8 +344,8 @@ func xyzToFaceSiTi(p Point) (face int, si, ti uint32, level int) {
// center. The si,ti values of 0 and maxSiTi need to be handled specially
// because they do not correspond to cell centers at any valid level; they
// are mapped to level -1 by the code at the end.
level = maxLevel - findLSBSetNonZero64(uint64(si|maxSiTi))
if level < 0 || level != maxLevel-findLSBSetNonZero64(uint64(ti|maxSiTi)) {
level = MaxLevel - findLSBSetNonZero64(uint64(si|maxSiTi))
if level < 0 || level != MaxLevel-findLSBSetNonZero64(uint64(ti|maxSiTi)) {
return face, si, ti, -1
}
@@ -346,17 +368,17 @@ func xyzToFaceSiTi(p Point) (face int, si, ti uint32, level int) {
func uNorm(face int, u float64) r3.Vector {
switch face {
case 0:
return r3.Vector{u, -1, 0}
return r3.Vector{X: u, Y: -1, Z: 0}
case 1:
return r3.Vector{1, u, 0}
return r3.Vector{X: 1, Y: u, Z: 0}
case 2:
return r3.Vector{1, 0, u}
return r3.Vector{X: 1, Y: 0, Z: u}
case 3:
return r3.Vector{-u, 0, 1}
return r3.Vector{X: -u, Y: 0, Z: 1}
case 4:
return r3.Vector{0, -u, 1}
return r3.Vector{X: 0, Y: -u, Z: 1}
default:
return r3.Vector{0, -1, -u}
return r3.Vector{X: 0, Y: -1, Z: -u}
}
}
@@ -366,28 +388,28 @@ func uNorm(face int, u float64) r3.Vector {
func vNorm(face int, v float64) r3.Vector {
switch face {
case 0:
return r3.Vector{-v, 0, 1}
return r3.Vector{X: -v, Y: 0, Z: 1}
case 1:
return r3.Vector{0, -v, 1}
return r3.Vector{X: 0, Y: -v, Z: 1}
case 2:
return r3.Vector{0, -1, -v}
return r3.Vector{X: 0, Y: -1, Z: -v}
case 3:
return r3.Vector{v, -1, 0}
return r3.Vector{X: v, Y: -1, Z: 0}
case 4:
return r3.Vector{1, v, 0}
return r3.Vector{X: 1, Y: v, Z: 0}
default:
return r3.Vector{1, 0, v}
return r3.Vector{X: 1, Y: 0, Z: v}
}
}
// faceUVWAxes are the U, V, and W axes for each face.
var faceUVWAxes = [6][3]Point{
{Point{r3.Vector{0, 1, 0}}, Point{r3.Vector{0, 0, 1}}, Point{r3.Vector{1, 0, 0}}},
{Point{r3.Vector{-1, 0, 0}}, Point{r3.Vector{0, 0, 1}}, Point{r3.Vector{0, 1, 0}}},
{Point{r3.Vector{-1, 0, 0}}, Point{r3.Vector{0, -1, 0}}, Point{r3.Vector{0, 0, 1}}},
{Point{r3.Vector{0, 0, -1}}, Point{r3.Vector{0, -1, 0}}, Point{r3.Vector{-1, 0, 0}}},
{Point{r3.Vector{0, 0, -1}}, Point{r3.Vector{1, 0, 0}}, Point{r3.Vector{0, -1, 0}}},
{Point{r3.Vector{0, 1, 0}}, Point{r3.Vector{1, 0, 0}}, Point{r3.Vector{0, 0, -1}}},
{Point{r3.Vector{X: 0, Y: 1, Z: 0}}, Point{r3.Vector{X: 0, Y: 0, Z: 1}}, Point{r3.Vector{X: 1, Y: 0, Z: 0}}},
{Point{r3.Vector{X: -1, Y: 0, Z: 0}}, Point{r3.Vector{X: 0, Y: 0, Z: 1}}, Point{r3.Vector{X: 0, Y: 1, Z: 0}}},
{Point{r3.Vector{X: -1, Y: 0, Z: 0}}, Point{r3.Vector{X: 0, Y: -1, Z: 0}}, Point{r3.Vector{X: 0, Y: 0, Z: 1}}},
{Point{r3.Vector{X: 0, Y: 0, Z: -1}}, Point{r3.Vector{X: 0, Y: -1, Z: 0}}, Point{r3.Vector{X: -1, Y: 0, Z: 0}}},
{Point{r3.Vector{X: 0, Y: 0, Z: -1}}, Point{r3.Vector{X: 1, Y: 0, Z: 0}}, Point{r3.Vector{X: 0, Y: -1, Z: 0}}},
{Point{r3.Vector{X: 0, Y: 1, Z: 0}}, Point{r3.Vector{X: 1, Y: 0, Z: 0}}, Point{r3.Vector{X: 0, Y: 0, Z: -1}}},
}
// faceUVWFaces are the precomputed neighbors of each face.
@@ -405,7 +427,7 @@ func uvwAxis(face, axis int) Point {
return faceUVWAxes[face][axis]
}
// uvwFaces returns the face in the (u,v,w) coordinate system on the given axis
// uvwFace returns the face in the (u,v,w) coordinate system on the given axis
// in the given direction.
func uvwFace(face, axis, direction int) int {
return faceUVWFaces[face][axis][direction]

View File

@@ -14,7 +14,7 @@
package s2
import "github.com/golang/geo/s1"
import "github.com/blevesearch/geo/s1"
// roundAngle returns the value rounded to nearest as an int32.
// This does not match C++ exactly for the case of x.5.

View File

@@ -93,5 +93,5 @@ func WedgeIntersects(a0, ab1, a2, b0, b2 Point) bool {
// Note that it's important to write these conditions as negatives
// (!OrderedCCW(a,b,c,o) rather than Ordered(c,b,a,o)) to get correct
// results when two vertices are the same.
return !(OrderedCCW(a0, b2, b0, ab1) && OrderedCCW(b0, a2, a0, ab1))
return !OrderedCCW(a0, b2, b0, ab1) || !OrderedCCW(b0, a2, a0, ab1)
}
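(The rewritten return is equivalent by De Morgan's law, !(P && Q) == !P || !Q; the OrderedCCW calls and their operand order are unchanged, so the result is identical.)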

View File

@@ -133,6 +133,11 @@ type interim struct {
numTermsPerPostingsList []int // key is postings list id
numLocsPerPostingsList []int // key is postings list id
// store terms that are unnecessary for the term dictionaries but needed in doc values
// eg - encoded geoshapes
// docNum -> fieldID -> term
extraDocValues map[int]map[uint16][]byte
builder *vellum.Builder
builderBuf bytes.Buffer
@@ -188,6 +193,7 @@ func (s *interim) reset() (err error) {
s.tmp1 = s.tmp1[:0]
s.lastNumDocs = 0
s.lastOutSize = 0
s.extraDocValues = nil
return err
}
@@ -308,7 +314,7 @@ func (s *interim) prepareDicts() {
var totTFs int
var totLocs int
visitField := func(field index.Field) {
visitField := func(field index.Field, docNum int) {
fieldID := uint16(s.getOrDefineField(field.Name()))
dict := s.Dicts[fieldID]
@@ -339,16 +345,28 @@ func (s *interim) prepareDicts() {
totTFs += len(tfs)
s.DictKeys[fieldID] = dictKeys
if f, ok := field.(index.GeoShapeField); ok {
if _, exists := s.extraDocValues[docNum]; !exists {
s.extraDocValues[docNum] = make(map[uint16][]byte)
}
s.extraDocValues[docNum][fieldID] = f.EncodedShape()
}
}
for _, result := range s.results {
if s.extraDocValues == nil {
s.extraDocValues = map[int]map[uint16][]byte{}
}
for docNum, result := range s.results {
// walk each composite field
result.VisitComposite(func(field index.CompositeField) {
visitField(field)
visitField(field, docNum)
})
// walk each field
result.VisitFields(visitField)
result.VisitFields(func(field index.Field) {
visitField(field, docNum)
})
}
numPostingsLists := pidNext
@@ -747,6 +765,11 @@ func (s *interim) writeDicts() (fdvIndexOffset uint64, dictOffsets []uint64, err
// write the field doc values
if s.IncludeDocValues[fieldID] {
for docNum, docTerms := range docTermMap {
if fieldTermMap, ok := s.extraDocValues[docNum]; ok {
if sTerm, ok := fieldTermMap[uint16(fieldID)]; ok {
docTerms = append(append(docTerms, sTerm...), termSeparator)
}
}
if len(docTerms) > 0 {
err = fdvEncoder.Add(uint64(docNum), docTerms)
if err != nil {
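The same extraDocValues plumbing repeats below for each vendored zapx segment version (v11 through v16). A minimal sketch of the data shape being introduced, with hypothetical values (only the map layout mirrors the diff; the payload and separator value are assumptions):
package main
import "fmt"
func main() {
	// docNum -> fieldID -> encoded term kept only for doc values,
	// e.g. an encoded geoshape that never enters the term dictionary.
	extraDocValues := map[int]map[uint16][]byte{}
	docNum, fieldID := 0, uint16(3)
	if _, ok := extraDocValues[docNum]; !ok {
		extraDocValues[docNum] = make(map[uint16][]byte)
	}
	extraDocValues[docNum][fieldID] = []byte("encoded-shape") // hypothetical payload
	// At doc-values write time the stored bytes are appended to the
	// per-document terms, followed by a separator byte.
	const termSeparator = byte(0xff) // assumption about zap's separator value
	docTerms := append([]byte("existing-terms"), extraDocValues[docNum][fieldID]...)
	docTerms = append(docTerms, termSeparator)
	fmt.Printf("%q\n", docTerms)
}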

View File

@@ -131,6 +131,11 @@ type interim struct {
numTermsPerPostingsList []int // key is postings list id
numLocsPerPostingsList []int // key is postings list id
// store terms that are unnecessary for the term dictionaries but needed in doc values
// eg - encoded geoshapes
// docNum -> fieldID -> term
extraDocValues map[int]map[uint16][]byte
builder *vellum.Builder
builderBuf bytes.Buffer
@@ -186,6 +191,7 @@ func (s *interim) reset() (err error) {
s.tmp1 = s.tmp1[:0]
s.lastNumDocs = 0
s.lastOutSize = 0
s.extraDocValues = nil
return err
}
@@ -306,7 +312,7 @@ func (s *interim) prepareDicts() {
var totTFs int
var totLocs int
visitField := func(field index.Field) {
visitField := func(field index.Field, docNum int) {
fieldID := uint16(s.getOrDefineField(field.Name()))
dict := s.Dicts[fieldID]
@@ -337,16 +343,28 @@ func (s *interim) prepareDicts() {
totTFs += len(tfs)
s.DictKeys[fieldID] = dictKeys
if f, ok := field.(index.GeoShapeField); ok {
if _, exists := s.extraDocValues[docNum]; !exists {
s.extraDocValues[docNum] = make(map[uint16][]byte)
}
s.extraDocValues[docNum][fieldID] = f.EncodedShape()
}
}
for _, result := range s.results {
if s.extraDocValues == nil {
s.extraDocValues = map[int]map[uint16][]byte{}
}
for docNum, result := range s.results {
// walk each composite field
result.VisitComposite(func(field index.CompositeField) {
visitField(field)
visitField(field, docNum)
})
// walk each field
result.VisitFields(visitField)
result.VisitFields(func(field index.Field) {
visitField(field, docNum)
})
}
numPostingsLists := pidNext
@@ -760,6 +778,11 @@ func (s *interim) writeDicts() (fdvIndexOffset uint64, dictOffsets []uint64, err
fdvEncoder := newChunkedContentCoder(chunkSize, uint64(len(s.results)-1), s.w, false)
if s.IncludeDocValues[fieldID] {
for docNum, docTerms := range docTermMap {
if fieldTermMap, ok := s.extraDocValues[docNum]; ok {
if sTerm, ok := fieldTermMap[uint16(fieldID)]; ok {
docTerms = append(append(docTerms, sTerm...), termSeparator)
}
}
if len(docTerms) > 0 {
err = fdvEncoder.Add(uint64(docNum), docTerms)
if err != nil {

View File

@@ -131,6 +131,11 @@ type interim struct {
numTermsPerPostingsList []int // key is postings list id
numLocsPerPostingsList []int // key is postings list id
// store terms that are unnecessary for the term dictionaries but needed in doc values
// eg - encoded geoshapes
// docNum -> fieldID -> term
extraDocValues map[int]map[uint16][]byte
builder *vellum.Builder
builderBuf bytes.Buffer
@@ -186,6 +191,7 @@ func (s *interim) reset() (err error) {
s.tmp1 = s.tmp1[:0]
s.lastNumDocs = 0
s.lastOutSize = 0
s.extraDocValues = nil
return err
}
@@ -306,7 +312,7 @@ func (s *interim) prepareDicts() {
var totTFs int
var totLocs int
visitField := func(field index.Field) {
visitField := func(field index.Field, docNum int) {
fieldID := uint16(s.getOrDefineField(field.Name()))
dict := s.Dicts[fieldID]
@@ -337,16 +343,28 @@ func (s *interim) prepareDicts() {
totTFs += len(tfs)
s.DictKeys[fieldID] = dictKeys
if f, ok := field.(index.GeoShapeField); ok {
if _, exists := s.extraDocValues[docNum]; !exists {
s.extraDocValues[docNum] = make(map[uint16][]byte)
}
s.extraDocValues[docNum][fieldID] = f.EncodedShape()
}
}
for _, result := range s.results {
if s.extraDocValues == nil {
s.extraDocValues = map[int]map[uint16][]byte{}
}
for docNum, result := range s.results {
// walk each composite field
result.VisitComposite(func(field index.CompositeField) {
visitField(field)
visitField(field, docNum)
})
// walk each field
result.VisitFields(visitField)
result.VisitFields(func(field index.Field) {
visitField(field, docNum)
})
}
numPostingsLists := pidNext
@@ -760,6 +778,11 @@ func (s *interim) writeDicts() (fdvIndexOffset uint64, dictOffsets []uint64, err
fdvEncoder := newChunkedContentCoder(chunkSize, uint64(len(s.results)-1), s.w, false)
if s.IncludeDocValues[fieldID] {
for docNum, docTerms := range docTermMap {
if fieldTermMap, ok := s.extraDocValues[docNum]; ok {
if sTerm, ok := fieldTermMap[uint16(fieldID)]; ok {
docTerms = append(append(docTerms, sTerm...), termSeparator)
}
}
if len(docTerms) > 0 {
err = fdvEncoder.Add(uint64(docNum), docTerms)
if err != nil {

View File

@@ -131,6 +131,11 @@ type interim struct {
numTermsPerPostingsList []int // key is postings list id
numLocsPerPostingsList []int // key is postings list id
// store terms that are unnecessary for the term dictionaries but needed in doc values
// eg - encoded geoshapes
// docNum -> fieldID -> term
extraDocValues map[int]map[uint16][]byte
builder *vellum.Builder
builderBuf bytes.Buffer
@@ -186,6 +191,7 @@ func (s *interim) reset() (err error) {
s.tmp1 = s.tmp1[:0]
s.lastNumDocs = 0
s.lastOutSize = 0
s.extraDocValues = nil
return err
}
@@ -306,7 +312,7 @@ func (s *interim) prepareDicts() {
var totTFs int
var totLocs int
visitField := func(field index.Field) {
visitField := func(field index.Field, docNum int) {
fieldID := uint16(s.getOrDefineField(field.Name()))
dict := s.Dicts[fieldID]
@@ -337,16 +343,28 @@ func (s *interim) prepareDicts() {
totTFs += len(tfs)
s.DictKeys[fieldID] = dictKeys
if f, ok := field.(index.GeoShapeField); ok {
if _, exists := s.extraDocValues[docNum]; !exists {
s.extraDocValues[docNum] = make(map[uint16][]byte)
}
s.extraDocValues[docNum][fieldID] = f.EncodedShape()
}
}
for _, result := range s.results {
if s.extraDocValues == nil {
s.extraDocValues = map[int]map[uint16][]byte{}
}
for docNum, result := range s.results {
// walk each composite field
result.VisitComposite(func(field index.CompositeField) {
visitField(field)
visitField(field, docNum)
})
// walk each field
result.VisitFields(visitField)
result.VisitFields(func(field index.Field) {
visitField(field, docNum)
})
}
numPostingsLists := pidNext
@@ -760,6 +778,11 @@ func (s *interim) writeDicts() (fdvIndexOffset uint64, dictOffsets []uint64, err
fdvEncoder := newChunkedContentCoder(chunkSize, uint64(len(s.results)-1), s.w, false)
if s.IncludeDocValues[fieldID] {
for docNum, docTerms := range docTermMap {
if fieldTermMap, ok := s.extraDocValues[docNum]; ok {
if sTerm, ok := fieldTermMap[uint16(fieldID)]; ok {
docTerms = append(append(docTerms, sTerm...), termSeparator)
}
}
if len(docTerms) > 0 {
err = fdvEncoder.Add(uint64(docNum), docTerms)
if err != nil {

View File

@@ -136,6 +136,11 @@ type interim struct {
numTermsPerPostingsList []int // key is postings list id
numLocsPerPostingsList []int // key is postings list id
// store terms that are unnecessary for the term dictionaries but needed in doc values
// eg - encoded geoshapes
// docNum -> fieldID -> term
extraDocValues map[int]map[uint16][]byte
builder *vellum.Builder
builderBuf bytes.Buffer
@@ -193,6 +198,7 @@ func (s *interim) reset() (err error) {
s.tmp1 = s.tmp1[:0]
s.lastNumDocs = 0
s.lastOutSize = 0
s.extraDocValues = nil
// reset the bytes written stat count
// to avoid leaking of bytesWritten across reuse cycles.
@@ -317,7 +323,7 @@ func (s *interim) prepareDicts() {
var totTFs int
var totLocs int
visitField := func(field index.Field) {
visitField := func(field index.Field, docNum int) {
fieldID := uint16(s.getOrDefineField(field.Name()))
dict := s.Dicts[fieldID]
@@ -348,16 +354,28 @@ func (s *interim) prepareDicts() {
totTFs += len(tfs)
s.DictKeys[fieldID] = dictKeys
if f, ok := field.(index.GeoShapeField); ok {
if _, exists := s.extraDocValues[docNum]; !exists {
s.extraDocValues[docNum] = make(map[uint16][]byte)
}
s.extraDocValues[docNum][fieldID] = f.EncodedShape()
}
}
for _, result := range s.results {
if s.extraDocValues == nil {
s.extraDocValues = map[int]map[uint16][]byte{}
}
for docNum, result := range s.results {
// walk each composite field
result.VisitComposite(func(field index.CompositeField) {
visitField(field)
visitField(field, docNum)
})
// walk each field
result.VisitFields(visitField)
result.VisitFields(func(field index.Field) {
visitField(field, docNum)
})
}
numPostingsLists := pidNext
@@ -796,6 +814,11 @@ func (s *interim) writeDicts() (fdvIndexOffset uint64, dictOffsets []uint64, err
fdvEncoder := newChunkedContentCoder(chunkSize, uint64(len(s.results)-1), s.w, false)
if s.IncludeDocValues[fieldID] {
for docNum, docTerms := range docTermMap {
if fieldTermMap, ok := s.extraDocValues[docNum]; ok {
if sTerm, ok := fieldTermMap[uint16(fieldID)]; ok {
docTerms = append(append(docTerms, sTerm...), termSeparator)
}
}
if len(docTerms) > 0 {
err = fdvEncoder.Add(uint64(docNum), docTerms)
if err != nil {

202
vendor/github.com/golang/geo/LICENSE generated vendored
View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

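Note: the deletion above is a side effect of the geo dependency changes in this bump. As the vendor/modules.txt diff below shows, github.com/blevesearch/geo v0.2.3 now ships its own r1/r2/r3/s1/s2 geometry packages, so the vendored copy of github.com/golang/geo (and its license file) is dropped. A minimal sketch that exercises the updated geo code path through bleve's public API — the field name "location", document ID "hq", and the coordinates are illustrative placeholders, not taken from this repository:

// Geo-search smoke test against the blevesearch/geo-backed code path.
// A sketch only: "location", "hq", and the coordinates are made up.
package main

import (
	"fmt"
	"log"

	"github.com/blevesearch/bleve/v2"
)

func main() {
	// Map a single field as a geo point so bleve indexes it through
	// the blevesearch/geo packages.
	geoField := bleve.NewGeoPointFieldMapping()
	doc := bleve.NewDocumentMapping()
	doc.AddFieldMappingsAt("location", geoField)

	indexMapping := bleve.NewIndexMapping()
	indexMapping.DefaultMapping = doc

	idx, err := bleve.NewMemOnly(indexMapping)
	if err != nil {
		log.Fatal(err)
	}
	defer idx.Close()

	// Index one document with a lon/lat pair.
	err = idx.Index("hq", map[string]interface{}{
		"location": map[string]interface{}{"lon": 6.96, "lat": 50.94},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Query for documents within 10km of the same point.
	q := bleve.NewGeoDistanceQuery(6.96, 50.94, "10km")
	q.SetField("location")

	res, err := idx.Search(bleve.NewSearchRequest(q))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("hits:", res.Total) // expect 1
}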
32
vendor/modules.txt vendored

@@ -161,7 +161,7 @@ github.com/bitly/go-simplejson
# github.com/bits-and-blooms/bitset v1.22.0
## explicit; go 1.16
github.com/bits-and-blooms/bitset
# github.com/blevesearch/bleve/v2 v2.5.0
# github.com/blevesearch/bleve/v2 v2.5.1
## explicit; go 1.23
github.com/blevesearch/bleve/v2
github.com/blevesearch/bleve/v2/analysis
@@ -203,12 +203,16 @@ github.com/blevesearch/bleve/v2/search/scorer
github.com/blevesearch/bleve/v2/search/searcher
github.com/blevesearch/bleve/v2/size
github.com/blevesearch/bleve/v2/util
# github.com/blevesearch/bleve_index_api v1.2.7
# github.com/blevesearch/bleve_index_api v1.2.8
## explicit; go 1.21
github.com/blevesearch/bleve_index_api
# github.com/blevesearch/geo v0.1.20
## explicit; go 1.18
# github.com/blevesearch/geo v0.2.3
## explicit; go 1.21.0
github.com/blevesearch/geo/geojson
github.com/blevesearch/geo/r1
github.com/blevesearch/geo/r2
github.com/blevesearch/geo/r3
github.com/blevesearch/geo/s1
github.com/blevesearch/geo/s2
# github.com/blevesearch/go-faiss v1.0.25
## explicit; go 1.21
@@ -222,7 +226,7 @@ github.com/blevesearch/gtreap
# github.com/blevesearch/mmap-go v1.0.4
## explicit; go 1.13
github.com/blevesearch/mmap-go
# github.com/blevesearch/scorch_segment_api/v2 v2.3.9
# github.com/blevesearch/scorch_segment_api/v2 v2.3.10
## explicit; go 1.21
github.com/blevesearch/scorch_segment_api/v2
# github.com/blevesearch/segment v0.9.1
@@ -241,22 +245,22 @@ github.com/blevesearch/vellum
github.com/blevesearch/vellum/levenshtein
github.com/blevesearch/vellum/regexp
github.com/blevesearch/vellum/utf8
# github.com/blevesearch/zapx/v11 v11.4.1
# github.com/blevesearch/zapx/v11 v11.4.2
## explicit; go 1.21
github.com/blevesearch/zapx/v11
# github.com/blevesearch/zapx/v12 v12.4.1
# github.com/blevesearch/zapx/v12 v12.4.2
## explicit; go 1.21
github.com/blevesearch/zapx/v12
# github.com/blevesearch/zapx/v13 v13.4.1
# github.com/blevesearch/zapx/v13 v13.4.2
## explicit; go 1.21
github.com/blevesearch/zapx/v13
# github.com/blevesearch/zapx/v14 v14.4.1
# github.com/blevesearch/zapx/v14 v14.4.2
## explicit; go 1.21
github.com/blevesearch/zapx/v14
# github.com/blevesearch/zapx/v15 v15.4.1
# github.com/blevesearch/zapx/v15 v15.4.2
## explicit; go 1.21
github.com/blevesearch/zapx/v15
# github.com/blevesearch/zapx/v16 v16.2.2
# github.com/blevesearch/zapx/v16 v16.2.3
## explicit; go 1.21
github.com/blevesearch/zapx/v16
# github.com/bluele/gcache v0.0.2
@@ -671,12 +675,6 @@ github.com/golang-jwt/jwt/v4
# github.com/golang-jwt/jwt/v5 v5.2.2
## explicit; go 1.18
github.com/golang-jwt/jwt/v5
# github.com/golang/geo v0.0.0-20210211234256-740aa86cb551
## explicit; go 1.12
github.com/golang/geo/r1
github.com/golang/geo/r2
github.com/golang/geo/r3
github.com/golang/geo/s1
# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
## explicit
github.com/golang/groupcache/lru
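The zapx v11–v16 bumps above touch the on-disk segment formats that bleve keeps vendored side by side so indexes written by older versions remain readable. As a post-upgrade sanity check, one can open a pre-existing index and run a query against it — a sketch under that assumption, with the index path "search.bleve" and the query term as placeholders:

// Post-upgrade sanity check: open an index created before the bump
// and confirm it can still be searched. Path and term are illustrative.
package main

import (
	"fmt"
	"log"

	"github.com/blevesearch/bleve/v2"
)

func main() {
	idx, err := bleve.Open("search.bleve") // hypothetical index path
	if err != nil {
		log.Fatal(err)
	}
	defer idx.Close()

	req := bleve.NewSearchRequest(bleve.NewMatchQuery("report"))
	res, err := idx.Search(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("matched %d documents\n", res.Total)
}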