Mirror of https://github.com/dolthub/dolt.git
Merge pull request #2388 from dolthub/aaron/nbs-table-reader-limit-read-batch-size
go/store/nbs: Fix bug in table_reader where max read size was 128GB instead of 128MB.
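The fix drops one stray * 1024: the old constant evaluated to GiB rather than MiB. A quick standalone Go check (illustration only, not part of the patch) makes the gap concrete:

package main

import "fmt"

func main() {
	const oldMaxReadSize = 128 * 1024 * 1024 * 1024 // 137438953472 bytes = 128 GiB (the bug)
	const newMaxReadSize = 128 * 1024 * 1024        // 134217728 bytes = 128 MiB (the fix)
	fmt.Println(oldMaxReadSize / newMaxReadSize)    // 1024: the cap was 1024x larger than intended
}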
@@ -41,7 +41,7 @@ import (
 )
 
 // Do not read more than 128MB at a time.
-const maxReadSize = 128 * 1024 * 1024 * 1024
+const maxReadSize = 128 * 1024 * 1024
 
 // CompressedChunk represents a chunk of data in a table file which is still compressed via snappy.
 type CompressedChunk struct {
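The branch name (limit-read-batch-size) suggests maxReadSize caps how many bytes the table reader requests in a single batched read. A minimal sketch of that idea, assuming batches are simply split at the cap (splitRead is a hypothetical helper, not dolt's actual code):

package main

import "fmt"

// Do not read more than 128MB at a time (the corrected cap).
const maxReadSize = 128 * 1024 * 1024

// splitRead breaks a span of `length` bytes starting at `start` into
// (start, length) batches of at most maxReadSize bytes each.
func splitRead(start, length uint64) (batches [][2]uint64) {
	for length > 0 {
		n := length
		if n > maxReadSize {
			n = maxReadSize
		}
		batches = append(batches, [2]uint64{start, n})
		start, length = start+n, length-n
	}
	return batches
}

func main() {
	// A 1 GiB span splits into 8 batches of 128 MiB each.
	fmt.Println(len(splitRead(0, 1<<30))) // 8
}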
@@ -115,7 +115,7 @@ func TestCanReadAhead(t *testing.T) {
 		testCase{offsetRec{offset: 8191, length: 2048}, 0, 4096, 4096, expected{end: 10239, can: true}},
 		testCase{offsetRec{offset: 8191, length: 2048}, 0, 4096, 2048, expected{end: 4096, can: false}},
 		testCase{offsetRec{offset: 2048, length: 2048}, 0, 4096, 2048, expected{end: 4096, can: true}},
-		testCase{offsetRec{offset: 137438953472, length: 2048}, 0, 137438953472, 4096, expected{end: 137438953472, can: false}},
+		testCase{offsetRec{offset: (1 << 27), length: 2048}, 0, 128 * 1024 * 1024, 4096, expected{end: 134217728, can: false}},
 	} {
 		end, can := canReadAhead(c.rec, c.start, c.end, c.blockSize)
 		assert.Equal(t, c.ex.end, end)
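The replaced test row pins the boundary at the corrected cap: 1 << 27 == 128 * 1024 * 1024 == 134217728, whereas the old row's 137438953472 (1 << 37) only made sense against the buggy 128GiB constant. The following is a reconstruction of canReadAhead inferred from the four rows above; it satisfies all of them, but it is a sketch, not necessarily the real implementation:

// Assumed shape of the record type used by the test table.
type offsetRec struct {
	offset uint64
	length uint32
}

// The 128 MiB cap, as fixed above.
const maxReadSize = 128 * 1024 * 1024

// canReadAhead reports whether the batched read [start, end) can be
// extended to also cover rec, returning the (possibly extended) end.
// Rules inferred from the test cases:
//   - a record already inside the span costs nothing extra: can == true;
//   - a gap wider than blockSize is not worth reading through;
//   - the extended span must stay within maxReadSize.
func canReadAhead(rec offsetRec, start, end, blockSize uint64) (uint64, bool) {
	recEnd := rec.offset + uint64(rec.length)
	if recEnd <= end {
		return end, true // already covered by the current span
	}
	if rec.offset > end && rec.offset-end > blockSize {
		return end, false // gap too large to read through
	}
	if recEnd-start > maxReadSize {
		return end, false // extension would exceed the per-batch cap
	}
	return recEnd, true
}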