Merge pull request #2388 from dolthub/aaron/nbs-table-reader-limit-read-batch-size

go/store/nbs: Fix bug in table_reader where max read size was 128GB instead of 128MB.
Aaron Son
2021-11-15 12:37:37 -08:00
committed by GitHub
2 changed files with 2 additions and 2 deletions


@@ -41,7 +41,7 @@ import (
 )
 
 // Do not read more than 128MB at a time.
-const maxReadSize = 128 * 1024 * 1024 * 1024
+const maxReadSize = 128 * 1024 * 1024
 
 // CompressedChunk represents a chunk of data in a table file which is still compressed via snappy.
 type CompressedChunk struct {
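
For context (not part of the diff): the old constant evaluates to 137438953472 bytes, i.e. 128GiB, while the intended cap is 134217728 bytes, i.e. 128MiB. A quick Go check of the arithmetic:

package main

import "fmt"

func main() {
	// Before the fix: one factor of 1024 too many.
	fmt.Println(128 * 1024 * 1024 * 1024) // 137438953472 bytes = 128GiB
	// After the fix: the intended 128MB cap.
	fmt.Println(128 * 1024 * 1024) // 134217728 bytes = 128MiB
}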


@@ -115,7 +115,7 @@ func TestCanReadAhead(t *testing.T) {
 		testCase{offsetRec{offset: 8191, length: 2048}, 0, 4096, 4096, expected{end: 10239, can: true}},
 		testCase{offsetRec{offset: 8191, length: 2048}, 0, 4096, 2048, expected{end: 4096, can: false}},
 		testCase{offsetRec{offset: 2048, length: 2048}, 0, 4096, 2048, expected{end: 4096, can: true}},
-		testCase{offsetRec{offset: 137438953472, length: 2048}, 0, 137438953472, 4096, expected{end: 137438953472, can: false}},
+		testCase{offsetRec{offset: (1 << 27), length: 2048}, 0, 128 * 1024 * 1024, 4096, expected{end: 134217728, can: false}},
 	} {
 		end, can := canReadAhead(c.rec, c.start, c.end, c.blockSize)
 		assert.Equal(t, c.ex.end, end)
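
The updated test case exercises the new 128MB cap: a chunk starting at 1 << 27 (134217728, exactly maxReadSize) cannot be coalesced into a read that already spans [0, 128MB). The sketch below is inferred from these test cases rather than copied from the dolt source; the real canReadAhead in go/store/nbs may differ in detail, but it illustrates how maxReadSize bounds read-ahead batching.

package main

import "fmt"

// Do not read more than 128MB at a time (the value after the fix).
const maxReadSize = 128 * 1024 * 1024

// offsetRec is a simplified stand-in for the table reader's record type:
// a chunk that lives at `offset` and is `length` bytes long.
type offsetRec struct {
	offset uint64
	length uint32
}

// canReadAhead decides whether the chunk described by rec can be folded into
// the batched read currently spanning [start, end). It refuses if the gap to
// the chunk exceeds blockSize, or if the combined read would exceed
// maxReadSize; otherwise it returns the extended end offset.
func canReadAhead(rec offsetRec, start, end, blockSize uint64) (uint64, bool) {
	if rec.offset > end+blockSize {
		// Too far away: seeking costs less than reading the gap.
		return end, false
	}
	if rec.offset+uint64(rec.length)-start > maxReadSize {
		// Folding this chunk in would push the batch past 128MB.
		return end, false
	}
	newEnd := end
	if recEnd := rec.offset + uint64(rec.length); recEnd > newEnd {
		newEnd = recEnd
	}
	return newEnd, true
}

func main() {
	// The new test case: a chunk at the 128MB boundary is not coalesced.
	end, can := canReadAhead(offsetRec{offset: 1 << 27, length: 2048}, 0, 128*1024*1024, 4096)
	fmt.Println(end, can) // 134217728 false
}

With the old 128GB constant, the size check was effectively never triggered for realistic table files, so adjacent chunk reads could be coalesced into far larger single reads than the 128MB the comment promises.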