Merge pull request #2211 from Juneezee/deprecate-ioutil

refactor: move from io/ioutil to io and os package
This commit is contained in:
Zach Musgrave
2021-10-04 14:10:04 -07:00
committed by GitHub
59 changed files with 132 additions and 166 deletions
+2 -2
View File
@@ -17,7 +17,7 @@ package tblcmds
import (
"context"
"fmt"
"io/ioutil"
"io"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/commands"
@@ -91,7 +91,7 @@ func (cmd CpCmd) Exec(ctx context.Context, commandStr string, args []string, dEn
queryStr = fmt.Sprintf("%sCREATE TABLE `%s` LIKE `%s`;", queryStr, newTbl, oldTbl)
queryStr = fmt.Sprintf("%sINSERT INTO `%s` SELECT * FROM `%s`;", queryStr, newTbl, oldTbl)
cli.CliOut = ioutil.Discard // display nothing on success
cli.CliOut = io.Discard // display nothing on success
return commands.SqlCmd{}.Exec(ctx, "", []string{
fmt.Sprintf("--%s", commands.BatchFlag),
fmt.Sprintf(`--%s`, commands.QueryFlag),
+2 -2
View File
@@ -17,7 +17,7 @@ package tblcmds
import (
"context"
"fmt"
"io/ioutil"
"io"
eventsapi "github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi/v1alpha1"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
@@ -90,7 +90,7 @@ func (cmd RmCmd) Exec(ctx context.Context, commandStr string, args []string, dEn
queryStr = fmt.Sprintf("%sDROP TABLE `%s`;", queryStr, tableName)
}
cli.CliOut = ioutil.Discard // display nothing on success
cli.CliOut = io.Discard // display nothing on success
return commands.SqlCmd{}.Exec(ctx, "", []string{
fmt.Sprintf("--%s", commands.BatchFlag),
fmt.Sprintf(`--%s`, commands.QueryFlag),
+1 -2
View File
@@ -16,7 +16,6 @@ package main
import (
"fmt"
"io/ioutil"
"os"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
@@ -28,7 +27,7 @@ import (
func canMoveTempFile() bool {
const testfile = "./testfile"
f, err := ioutil.TempFile("", "")
f, err := os.CreateTemp("", "")
if err != nil {
return false
+2 -2
View File
@@ -17,7 +17,7 @@ package main
import (
"bufio"
"fmt"
"io/ioutil"
"io"
"log"
"os"
@@ -30,7 +30,7 @@ func main() {
// Because this is a git smudge filter, the pointer file contents
// are read through stdin.
r := bufio.NewReader(os.Stdin)
bs, err := ioutil.ReadAll(r)
bs, err := io.ReadAll(r)
if err != nil {
log.Fatal(err)
}
+3 -3
View File
@@ -18,7 +18,7 @@ package config
import (
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/dolthub/dolt/go/cmd/git-dolt/env"
@@ -70,7 +70,7 @@ func Parse(c string) (GitDoltConfig, error) {
// Load loads a GitDoltConfig from the pointer file with the given filename.
func Load(ptrFname string) (GitDoltConfig, error) {
ptrFname = utils.EnsureSuffix(ptrFname, ".git-dolt")
ptrData, err := ioutil.ReadFile(ptrFname)
ptrData, err := os.ReadFile(ptrFname)
if err != nil {
return GitDoltConfig{}, fmt.Errorf("can't find pointer file %s", ptrFname)
}
@@ -87,7 +87,7 @@ func Load(ptrFname string) (GitDoltConfig, error) {
// creating or overwriting it with the given contents.
func Write(ptrFname string, ptrContents string) error {
ptrFname = utils.EnsureSuffix(ptrFname, ".git-dolt")
if err := ioutil.WriteFile(ptrFname, []byte(ptrContents), 0644); err != nil {
if err := os.WriteFile(ptrFname, []byte(ptrContents), 0644); err != nil {
return fmt.Errorf("error writing git-dolt pointer file at %s: %v", ptrFname, err)
}
@@ -15,7 +15,6 @@
package utils
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
@@ -27,7 +26,7 @@ import (
func TestFindGitConfigDir(t *testing.T) {
// Setup
tmpDir, err := ioutil.TempDir("", "git-dolt-test")
tmpDir, err := os.MkdirTemp("", "git-dolt-test")
if err != nil {
t.Errorf("Error creating temp directory: %v", err)
}
+1 -2
View File
@@ -18,7 +18,6 @@ import (
"encoding/base64"
"encoding/json"
"io"
"io/ioutil"
"path/filepath"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
@@ -98,7 +97,7 @@ func JWKCredsWrite(wr io.Writer, dc DoltCreds) error {
}
func JWKCredsRead(rd io.Reader) (DoltCreds, error) {
data, err := ioutil.ReadAll(rd)
data, err := io.ReadAll(rd)
if err != nil {
return DoltCreds{}, err
+2 -2
View File
@@ -16,7 +16,7 @@ package doltdb
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"
@@ -226,7 +226,7 @@ func TestLoadBadLocalFSRepo(t *testing.T) {
}
contents := []byte("not a directory")
ioutil.WriteFile(filepath.Join(testDir, dbfactory.DoltDataDir), contents, 0644)
os.WriteFile(filepath.Join(testDir, dbfactory.DoltDataDir), contents, 0644)
ddb, err := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB, filesys.LocalFS)
assert.Nil(t, ddb, "Should return nil when loading a non-directory data dir file")
@@ -17,7 +17,6 @@ package testcommands
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
@@ -58,12 +57,12 @@ const (
// TODO this is not a proper builder, dbs need to be added before remotes
func NewMultiRepoTestSetup(t *testing.T) *MultiRepoTestSetup {
dir, err := ioutil.TempDir("", "")
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
homeDir, err := ioutil.TempDir(dir, homePrefix)
homeDir, err := os.MkdirTemp(dir, homePrefix)
if err != nil {
t.Fatal(err)
}
+3 -3
View File
@@ -17,7 +17,7 @@ package env
import (
"context"
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
@@ -173,10 +173,10 @@ func TestMigrateWorkingSet(t *testing.T) {
// TODO: t.TempDir breaks on windows because of automatic cleanup (files still in use)
// dir := t.TempDir()
working, err := ioutil.TempDir("", "TestMigrateWorkingSet*")
working, err := os.MkdirTemp("", "TestMigrateWorkingSet*")
require.NoError(t, err)
homeDir, err := ioutil.TempDir("", "TestMigrateWorkingSet*")
homeDir, err := os.MkdirTemp("", "TestMigrateWorkingSet*")
require.NoError(t, err)
dEnv := createFileTestEnv(t, working, homeDir)
@@ -18,7 +18,6 @@ import (
"context"
"errors"
"io"
"io/ioutil"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
@@ -62,12 +61,12 @@ func (dl StreamDataLocation) NewReader(ctx context.Context, root *doltdb.RootVal
}
}
rd, err := csv.NewCSVReader(root.VRW().Format(), ioutil.NopCloser(dl.Reader), csv.NewCSVInfo().SetDelim(delim))
rd, err := csv.NewCSVReader(root.VRW().Format(), io.NopCloser(dl.Reader), csv.NewCSVInfo().SetDelim(delim))
return rd, false, err
case PsvFile:
rd, err := csv.NewCSVReader(root.VRW().Format(), ioutil.NopCloser(dl.Reader), csv.NewCSVInfo().SetDelim("|"))
rd, err := csv.NewCSVReader(root.VRW().Format(), io.NopCloser(dl.Reader), csv.NewCSVInfo().SetDelim("|"))
return rd, false, err
}
@@ -18,7 +18,7 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"io"
"strconv"
"strings"
"sync"
@@ -85,7 +85,7 @@ func TestPipeline(t *testing.T) {
func() {
csvInfo := &csv.CSVFileInfo{Delim: ",", HasHeaderLine: true, Columns: nil, EscapeQuotes: true}
rd, _ := csv.NewCSVReader(types.Format_Default, ioutil.NopCloser(buf), csvInfo)
rd, _ := csv.NewCSVReader(types.Format_Default, io.NopCloser(buf), csvInfo)
wr, _ := csv.NewCSVWriter(iohelp.NopWrCloser(outBuf), schOut, csvInfo)
tc := NewTransformCollection(
@@ -123,7 +123,7 @@ func TestAddingStages(t *testing.T) {
func() {
csvInfo := &csv.CSVFileInfo{Delim: ",", HasHeaderLine: true, Columns: nil, EscapeQuotes: true}
rd, _ := csv.NewCSVReader(types.Format_Default, ioutil.NopCloser(buf), csvInfo)
rd, _ := csv.NewCSVReader(types.Format_Default, io.NopCloser(buf), csvInfo)
wr, _ := csv.NewCSVWriter(iohelp.NopWrCloser(outBuf), schOut, csvInfo)
tc := NewTransformCollection(
@@ -193,7 +193,7 @@ Don,Beddoe,Bewitched (episode Humbug Not to Be Spoken Here - Season 4),1967,true
func() {
csvInfo := &csv.CSVFileInfo{Delim: ",", HasHeaderLine: true, Columns: nil, EscapeQuotes: true}
rd, _ := csv.NewCSVReader(types.Format_Default, ioutil.NopCloser(buf), csvInfo)
rd, _ := csv.NewCSVReader(types.Format_Default, io.NopCloser(buf), csvInfo)
wr, _ := csv.NewCSVWriter(iohelp.NopWrCloser(outBuf), schOut, csvInfo)
addedStages := []NamedTransform{
@@ -281,7 +281,7 @@ func TestAbort(t *testing.T) {
func() {
csvInfo := &csv.CSVFileInfo{Delim: ",", HasHeaderLine: true, Columns: nil, EscapeQuotes: true}
rd, _ := csv.NewCSVReader(types.Format_Default, ioutil.NopCloser(buf), csvInfo)
rd, _ := csv.NewCSVReader(types.Format_Default, io.NopCloser(buf), csvInfo)
wr, _ := csv.NewCSVWriter(iohelp.NopWrCloser(outBuf), schOut, csvInfo)
var wg = sync.WaitGroup{}
@@ -17,7 +17,7 @@ package xlsx
import (
"context"
"fmt"
"io/ioutil"
"io"
"os"
"reflect"
"testing"
@@ -91,7 +91,7 @@ func getBytesFromXlsx() []byte {
}
defer f.Close()
bs, err := ioutil.ReadAll(f)
bs, err := io.ReadAll(f)
if err != nil {
panic(err)
}
@@ -19,7 +19,6 @@ import (
"context"
"errors"
"io"
"io/ioutil"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
@@ -42,7 +41,7 @@ type XLSXReader struct {
func OpenXLSXReaderFromBinary(ctx context.Context, vrw types.ValueReadWriter, r io.ReadCloser, info *XLSXFileInfo) (*XLSXReader, error) {
br := bufio.NewReaderSize(r, ReadBufSize)
contents, err := ioutil.ReadAll(r)
contents, err := io.ReadAll(r)
if err != nil {
return nil, err
}
+2 -3
View File
@@ -16,7 +16,6 @@ package editor
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -28,7 +27,7 @@ import (
//OpenCommitEditor allows user to write/edit commit message in temporary file
func OpenCommitEditor(ed string, initialContents string) (string, error) {
filename := filepath.Join(os.TempDir(), uuid.New().String())
err := ioutil.WriteFile(filename, []byte(initialContents), os.ModePerm)
err := os.WriteFile(filename, []byte(initialContents), os.ModePerm)
if err != nil {
return "", err
@@ -53,7 +52,7 @@ func OpenCommitEditor(ed string, initialContents string) (string, error) {
fmt.Printf("Waiting for command to finish.\n")
err = cmd.Wait()
data, err := ioutil.ReadFile(filename)
data, err := os.ReadFile(filename)
if err != nil {
return "", err
+2 -3
View File
@@ -18,7 +18,6 @@ import (
"bytes"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -257,7 +256,7 @@ func (fs *InMemFS) OpenForRead(fp string) (io.ReadCloser, error) {
fileObj := fs.objs[fp].(*memFile)
buf := bytes.NewReader(fileObj.data)
return ioutil.NopCloser(buf), nil
return io.NopCloser(buf), nil
}
// ReadFile reads the entire contents of a file
@@ -269,7 +268,7 @@ func (fs *InMemFS) ReadFile(fp string) ([]byte, error) {
return nil, err
}
return ioutil.ReadAll(r)
return io.ReadAll(r)
}
type inMemFSWriteCloser struct {
+10 -6
View File
@@ -18,7 +18,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
@@ -85,14 +84,19 @@ func (fs *localFS) Iter(path string, recursive bool, cb FSIterCB) error {
}
if !recursive {
info, err := ioutil.ReadDir(path)
dirEntries, err := os.ReadDir(path)
if err != nil {
return err
}
for _, curr := range info {
stop := cb(filepath.Join(path, curr.Name()), curr.Size(), curr.IsDir())
for _, entry := range dirEntries {
fi, err := entry.Info()
if err != nil {
return err
}
stop := cb(filepath.Join(path, fi.Name()), fi.Size(), fi.IsDir())
if stop {
return nil
@@ -158,7 +162,7 @@ func (fs *localFS) ReadFile(fp string) ([]byte, error) {
return nil, err
}
return ioutil.ReadFile(fp)
return os.ReadFile(fp)
}
// OpenForWrite opens a file for writing. The file will be created if it does not exist, and if it does exist
@@ -184,7 +188,7 @@ func (fs *localFS) WriteFile(fp string, data []byte) error {
return err
}
return ioutil.WriteFile(fp, data, os.ModePerm)
return os.WriteFile(fp, data, os.ModePerm)
}
// MkDirs creates a folder and all the parent folders that are necessary to create it.
+1 -2
View File
@@ -16,7 +16,6 @@ package test
import (
"io"
"io/ioutil"
"os"
"path/filepath"
"testing"
@@ -39,7 +38,7 @@ func TestLDTestUtils(t *testing.T) {
}
fName := "test.data"
err = ioutil.WriteFile(fName, data, os.ModePerm)
err = os.WriteFile(fName, data, os.ModePerm)
if err != nil {
t.Fatal("Couldn't write to current directory")
+1 -2
View File
@@ -17,7 +17,6 @@ package main
import (
"context"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
@@ -227,7 +226,7 @@ func getStdinForSQLBenchmark(fs filesys.Filesys, pathToImportFile string) *os.Fi
log.Fatal(err)
}
tmpfile, err := ioutil.TempFile("", "temp")
tmpfile, err := os.CreateTemp("", "temp")
if err != nil {
log.Fatal(err)
}
+1 -2
View File
@@ -18,7 +18,6 @@ import (
"bytes"
"context"
"io"
"io/ioutil"
)
// Blobstore is an interface for storing and retrieving blobs of data by key
@@ -39,7 +38,7 @@ func GetBytes(ctx context.Context, bs Blobstore, key string, br BlobRange) ([]by
}
defer rc.Close()
data, err := ioutil.ReadAll(rc)
data, err := io.ReadAll(rc)
if err != nil {
return nil, "", err
+1 -2
View File
@@ -19,7 +19,6 @@ import (
"context"
"encoding/binary"
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
@@ -73,7 +72,7 @@ func appendGCSTest(tests []BlobstoreTest) []BlobstoreTest {
}
func appendLocalTest(tests []BlobstoreTest) []BlobstoreTest {
dir, err := ioutil.TempDir("", uuid.New().String())
dir, err := os.MkdirTemp("", uuid.New().String())
if err != nil {
panic("Could not create temp dir")
+3 -4
View File
@@ -18,7 +18,6 @@ import (
"bytes"
"context"
"io"
"io/ioutil"
"sync"
"github.com/google/uuid"
@@ -31,7 +30,7 @@ type byteSliceReadCloser struct {
func newByteSliceReadCloser(data []byte) *byteSliceReadCloser {
reader := bytes.NewReader(data)
return &byteSliceReadCloser{reader, ioutil.NopCloser(reader)}
return &byteSliceReadCloser{reader, io.NopCloser(reader)}
}
// InMemoryBlobstore provides an in memory implementation of the Blobstore interface
@@ -81,7 +80,7 @@ func (bs *InMemoryBlobstore) Put(ctx context.Context, key string, reader io.Read
defer bs.mutex.Unlock()
ver := uuid.New().String()
data, err := ioutil.ReadAll(reader)
data, err := io.ReadAll(reader)
if err != nil {
return "", err
@@ -107,7 +106,7 @@ func (bs *InMemoryBlobstore) CheckAndPut(ctx context.Context, expectedVersion, k
}
newVer := uuid.New().String()
data, err := ioutil.ReadAll(reader)
data, err := io.ReadAll(reader)
if err != nil {
return "", err
+2 -2
View File
@@ -25,7 +25,7 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
@@ -70,7 +70,7 @@ func (s *nbeSuite) TestNomsBlobGet() {
filePath := filepath.Join(s.TempDir, "out")
s.MustRun(main, []string{"blob", "export", hashSpec, filePath})
fileBytes, err := ioutil.ReadFile(filePath)
fileBytes, err := os.ReadFile(filePath)
s.NoError(err)
s.Equal(blobBytes, fileBytes)
+1 -2
View File
@@ -22,7 +22,6 @@ import (
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -110,7 +109,7 @@ func runCat(ctx context.Context, args []string) int {
return 1
}
fileBytes, err := ioutil.ReadFile(chunkFile)
fileBytes, err := os.ReadFile(chunkFile)
if err != nil {
fmt.Fprintln(os.Stderr, "Failed to read "+chunkFile, err)
+2 -2
View File
@@ -24,7 +24,7 @@ package main
import (
"bytes"
"context"
"io/ioutil"
"io"
"testing"
"github.com/stretchr/testify/assert"
@@ -264,7 +264,7 @@ func TestNomsMergeCliResolve(t *testing.T) {
for _, c := range cases {
input := bytes.NewBufferString(c.input)
changeType, newVal, ok := cliResolve(input, ioutil.Discard, c.aChange, c.bChange, c.aVal, c.bVal, types.Path{})
changeType, newVal, ok := cliResolve(input, io.Discard, c.aChange, c.bChange, c.aVal, c.bVal, types.Path{})
if !c.success {
assert.False(t, ok)
} else if assert.True(t, ok) {
+2 -3
View File
@@ -24,7 +24,6 @@ package config
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -93,7 +92,7 @@ func FindNomsConfig() (*Config, error) {
}
func ReadConfig(name string) (*Config, error) {
data, err := ioutil.ReadFile(name)
data, err := os.ReadFile(name)
if err != nil {
return nil, err
}
@@ -118,7 +117,7 @@ func (c *Config) WriteTo(configHome string) (string, error) {
if err := os.MkdirAll(filepath.Dir(file), os.ModePerm); err != nil {
return "", err
}
if err := ioutil.WriteFile(file, []byte(c.writeableString()), os.ModePerm); err != nil {
if err := os.WriteFile(file, []byte(c.writeableString()), os.ModePerm); err != nil {
return "", err
}
return file, nil
+1 -2
View File
@@ -22,7 +22,6 @@
package config
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -228,7 +227,7 @@ func TestBadConfig(t *testing.T) {
path := getPaths(assert, "home.bad")
cfile := writeConfig(assert, ldbConfig, path.home)
// overwrite with something invalid
assert.NoError(ioutil.WriteFile(cfile, []byte("invalid config"), os.ModePerm))
assert.NoError(os.WriteFile(cfile, []byte("invalid config"), os.ModePerm))
assert.NoError(os.Chdir(path.home))
_, err := FindNomsConfig()
assert.Error(err, path.config)
+2 -3
View File
@@ -26,7 +26,6 @@ import (
"context"
"errors"
"io"
"io/ioutil"
"reflect"
"sync"
"testing"
@@ -386,7 +385,7 @@ func (ttf *TestFailingTableFile) NumChunks() int {
}
func (ttf *TestFailingTableFile) Open(ctx context.Context) (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader([]byte{0x00})), errors.New("this is a test error")
return io.NopCloser(bytes.NewReader([]byte{0x00})), errors.New("this is a test error")
}
type TestTableFile struct {
@@ -404,7 +403,7 @@ func (ttf *TestTableFile) NumChunks() int {
}
func (ttf *TestTableFile) Open(ctx context.Context) (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(ttf.data)), nil
return io.NopCloser(bytes.NewReader(ttf.data)), nil
}
type TestTableFileWriter struct {
+2 -3
View File
@@ -24,7 +24,6 @@ package main
import (
"context"
"fmt"
"io/ioutil"
"os"
"regexp"
"sort"
@@ -125,7 +124,7 @@ func main() {
d.PanicIfError(err)
}()
open = func() (chunks.ChunkStore, error) {
f, err := ioutil.TempFile(dir, "")
f, err := os.CreateTemp(dir, "")
d.Chk.NoError(err)
return newFileBlockStore(f)
}
@@ -224,7 +223,7 @@ func main() {
}
func makeTempDir(tmpdir string, t assert.TestingT) (dir string) {
dir, err := ioutil.TempDir(tmpdir, "")
dir, err := os.MkdirTemp(tmpdir, "")
assert.NoError(t, err)
return
}
+1 -2
View File
@@ -26,7 +26,6 @@ import (
"context"
"crypto/rand"
"errors"
"io/ioutil"
"os"
"path/filepath"
"sort"
@@ -59,7 +58,7 @@ type BlockStoreSuite struct {
func (suite *BlockStoreSuite) SetupTest() {
var err error
suite.dir, err = ioutil.TempDir("", "")
suite.dir, err = os.MkdirTemp("", "")
suite.NoError(err)
suite.store, err = NewLocalStore(context.Background(), constants.FormatDefaultString, suite.dir, testMemTableSize)
suite.NoError(err)
+1 -2
View File
@@ -16,7 +16,6 @@ package nbs
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"testing"
@@ -111,7 +110,7 @@ func (suite *TableSinkSuite) TestWriteAndFlushToFile() {
err = sink.FlushToFile(path)
require.NoError(suite.t, err)
data, err := ioutil.ReadFile(path)
data, err := os.ReadFile(path)
require.NoError(suite.t, err)
verifyContents(suite.t, data)
+2 -2
View File
@@ -24,7 +24,7 @@ package nbs
import (
"context"
"errors"
"io/ioutil"
"os"
"github.com/dolthub/dolt/go/libraries/utils/file"
"github.com/dolthub/dolt/go/store/chunks"
@@ -37,7 +37,7 @@ const (
)
func NewCache(ctx context.Context) (*NomsBlockCache, error) {
dir, err := ioutil.TempDir("", "")
dir, err := os.MkdirTemp("", "")
if err != nil {
return nil, err
+1 -2
View File
@@ -23,7 +23,6 @@ package nbs
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
@@ -44,7 +43,7 @@ func TestFDCache(t *testing.T) {
for i := range paths {
name := fmt.Sprintf("file%d", i)
paths[i] = filepath.Join(dir, name)
err := ioutil.WriteFile(paths[i], []byte(name), 0644)
err := os.WriteFile(paths[i], []byte(name), 0644)
require.NoError(t, err)
}
+2 -3
View File
@@ -26,7 +26,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -219,7 +218,7 @@ func (fm fileManifest) UpdateGCGen(ctx context.Context, lastLock addr, newConten
// |-- String --|- String --|...|-- String --|- String --|
// :table 1 hash:table 1 cnt:...:table N hash:table N cnt|
func parseV5Manifest(r io.Reader) (manifestContents, error) {
manifest, err := ioutil.ReadAll(r)
manifest, err := io.ReadAll(r)
if err != nil {
return manifestContents{}, err
@@ -302,7 +301,7 @@ func writeManifest(temp io.Writer, contents manifestContents) error {
// |-- String --|-- String --|-------- String --------|-------- String --------|-- String --|- String --|...|-- String --|- String --|
// | nbs version:Noms version:Base32-encoded lock hash:Base32-encoded root hash:table 1 hash:table 1 cnt:...:table N hash:table N cnt|
func parseV4Manifest(r io.Reader) (manifestContents, error) {
manifest, err := ioutil.ReadAll(r)
manifest, err := io.ReadAll(r)
if err != nil {
return manifestContents{}, err
+4 -4
View File
@@ -23,7 +23,7 @@ package nbs
import (
"context"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
@@ -39,7 +39,7 @@ import (
)
func makeFileManifestTempDir(t *testing.T) fileManifest {
dir, err := ioutil.TempDir("", "")
dir, err := os.MkdirTemp("", "")
require.NoError(t, err)
return fileManifest{dir: dir} //, cache: newManifestCache(defaultManifestCacheSize)}
}
@@ -220,10 +220,10 @@ func tryClobberManifest(dir, contents string) ([]byte, error) {
// clobberManifest simulates another process writing dir/manifestFileName concurrently. It ignores the lock file, so it's up to the caller to ensure correctness.
func clobberManifest(dir, contents string) error {
if err := ioutil.WriteFile(filepath.Join(dir, lockFileName), nil, 0666); err != nil {
if err := os.WriteFile(filepath.Join(dir, lockFileName), nil, 0666); err != nil {
return err
}
return ioutil.WriteFile(filepath.Join(dir, manifestFileName), []byte(contents), 0666)
return os.WriteFile(filepath.Join(dir, manifestFileName), []byte(contents), 0666)
}
func runClobber(dir, contents string) ([]byte, error) {
+1 -2
View File
@@ -26,7 +26,6 @@ import (
"context"
"errors"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -215,7 +214,7 @@ func (ftp *fsTablePersister) ConjoinAll(ctx context.Context, sources chunkSource
func (ftp *fsTablePersister) PruneTableFiles(ctx context.Context, contents manifestContents) error {
ss := contents.getSpecSet()
fileInfos, err := ioutil.ReadDir(ftp.dir)
fileInfos, err := os.ReadDir(ftp.dir)
if err != nil {
return err
+5 -6
View File
@@ -25,7 +25,6 @@ import (
"context"
"crypto/rand"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
@@ -85,7 +84,7 @@ func TestFSTableCacheOnOpen(t *testing.T) {
}
func makeTempDir(t *testing.T) string {
dir, err := ioutil.TempDir("", "")
dir, err := os.MkdirTemp("", "")
require.NoError(t, err)
return dir
}
@@ -97,7 +96,7 @@ func writeTableData(dir string, chunx ...[]byte) (addr, error) {
return addr{}, err
}
err = ioutil.WriteFile(filepath.Join(dir, name.String()), tableData, 0666)
err = os.WriteFile(filepath.Join(dir, name.String()), tableData, 0666)
if err != nil {
return addr{}, err
@@ -126,7 +125,7 @@ func TestFSTablePersisterPersist(t *testing.T) {
src, err := persistTableData(fts, testChunks...)
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
buff, err := ioutil.ReadFile(filepath.Join(dir, mustAddr(src.hash()).String()))
buff, err := os.ReadFile(filepath.Join(dir, mustAddr(src.hash()).String()))
require.NoError(t, err)
ti, err := parseTableIndex(buff)
require.NoError(t, err)
@@ -226,7 +225,7 @@ func TestFSTablePersisterConjoinAll(t *testing.T) {
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
buff, err := ioutil.ReadFile(filepath.Join(dir, mustAddr(src.hash()).String()))
buff, err := os.ReadFile(filepath.Join(dir, mustAddr(src.hash()).String()))
require.NoError(t, err)
ti, err := parseTableIndex(buff)
require.NoError(t, err)
@@ -264,7 +263,7 @@ func TestFSTablePersisterConjoinAllDups(t *testing.T) {
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
buff, err := ioutil.ReadFile(filepath.Join(dir, mustAddr(src.hash()).String()))
buff, err := os.ReadFile(filepath.Join(dir, mustAddr(src.hash()).String()))
require.NoError(t, err)
ti, err := parseTableIndex(buff)
require.NoError(t, err)
+3 -4
View File
@@ -24,7 +24,6 @@ package nbs
import (
"bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
@@ -85,7 +84,7 @@ func TestFSTableCache(t *testing.T) {
var names []addr
for i := byte(0); i < 4; i++ {
name := computeAddr([]byte{i})
require.NoError(t, ioutil.WriteFile(filepath.Join(dir, name.String()), nil, 0666))
require.NoError(t, os.WriteFile(filepath.Join(dir, name.String()), nil, 0666))
names = append(names, name)
}
@@ -103,7 +102,7 @@ func TestFSTableCache(t *testing.T) {
dir := makeTempDir(t)
defer file.RemoveAll(dir)
require.NoError(t, ioutil.WriteFile(filepath.Join(dir, "boo"), nil, 0666))
require.NoError(t, os.WriteFile(filepath.Join(dir, "boo"), nil, 0666))
_, err := newFSTableCache(dir, 1024, 4)
assert.Error(t, err)
})
@@ -114,7 +113,7 @@ func TestFSTableCache(t *testing.T) {
defer file.RemoveAll(dir)
tempFile := filepath.Join(dir, tempTablePrefix+"boo")
require.NoError(t, ioutil.WriteFile(tempFile, nil, 0666))
require.NoError(t, os.WriteFile(tempFile, nil, 0666))
_, err := newFSTableCache(dir, 1024, 4)
require.NoError(t, err)
_, fserr := os.Stat(tempFile)
+2 -3
View File
@@ -24,7 +24,6 @@ package nbs
import (
"bytes"
"context"
"io/ioutil"
"os"
"testing"
@@ -68,12 +67,12 @@ func TestWriteChunks(t *testing.T) {
t.Error(err)
}
dir, err := ioutil.TempDir("", "write_chunks_test")
dir, err := os.MkdirTemp("", "write_chunks_test")
if err != nil {
t.Error(err)
}
err = ioutil.WriteFile(dir+name, data, os.ModePerm)
err = os.WriteFile(dir+name, data, os.ModePerm)
if err != nil {
t.Error(err)
}
+3 -3
View File
@@ -22,7 +22,7 @@
package nbs
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
@@ -34,7 +34,7 @@ import (
func TestMmapTableReader(t *testing.T) {
assert := assert.New(t)
dir, err := ioutil.TempDir("", "")
dir, err := os.MkdirTemp("", "")
require.NoError(t, err)
defer file.RemoveAll(dir)
@@ -49,7 +49,7 @@ func TestMmapTableReader(t *testing.T) {
tableData, h, err := buildTable(chunks)
require.NoError(t, err)
err = ioutil.WriteFile(filepath.Join(dir, h.String()), tableData, 0666)
err = os.WriteFile(filepath.Join(dir, h.String()), tableData, 0666)
require.NoError(t, err)
trc, err := newMmapTableReader(dir, h, uint32(len(chunks)), nil, fc)
+2 -3
View File
@@ -24,7 +24,6 @@ package nbs
import (
"bytes"
"io"
"io/ioutil"
"net/url"
"strconv"
"strings"
@@ -146,7 +145,7 @@ func (m *fakeS3) UploadPartWithContext(ctx aws.Context, input *s3.UploadPartInpu
m.assert.NotNil(input.UploadId, "UploadId is a required field")
m.assert.NotNil(input.Body, "Body is a required field")
data, err := ioutil.ReadAll(input.Body)
data, err := io.ReadAll(input.Body)
m.assert.NoError(err)
m.mu.Lock()
@@ -233,7 +232,7 @@ func (m *fakeS3) GetObjectWithContext(ctx aws.Context, input *s3.GetObjectInput,
}
return &s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader(obj)),
Body: io.NopCloser(bytes.NewReader(obj)),
ContentLength: aws.Int64(int64(len(obj))),
}, nil
}
+2 -2
View File
@@ -23,7 +23,7 @@ package nbs
import (
"context"
"io/ioutil"
"io"
"net"
"os"
"syscall"
@@ -105,7 +105,7 @@ func (fs3 *flakyS3) GetObjectWithContext(ctx aws.Context, input *s3.GetObjectInp
if _, ok := fs3.alreadyFailed[*input.Key]; !ok {
fs3.alreadyFailed[*input.Key] = struct{}{}
output.Body = ioutil.NopCloser(resettingReader{})
output.Body = io.NopCloser(resettingReader{})
}
return output, nil
+2 -2
View File
@@ -23,7 +23,7 @@ package nbs
import (
"context"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
@@ -42,7 +42,7 @@ func TestStats(t *testing.T) {
return store.Stats().(Stats)
}
dir, err := ioutil.TempDir("", "")
dir, err := os.MkdirTemp("", "")
require.NoError(t, err)
store, err := NewLocalStore(context.Background(), constants.FormatDefaultString, dir, testMemTableSize)
require.NoError(t, err)
+1 -2
View File
@@ -25,7 +25,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"reflect"
@@ -1206,7 +1205,7 @@ func newTableFile(cs chunkSource, info tableSpec) tableFile {
return nil, err
}
return ioutil.NopCloser(r), nil
return io.NopCloser(r), nil
},
}
}
+4 -4
View File
@@ -18,7 +18,7 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"io"
"math/rand"
"os"
"path/filepath"
@@ -97,7 +97,7 @@ func TestNBSAsTableFileStore(t *testing.T) {
rd, err := src.Open(context.Background())
require.NoError(t, err)
data, err := ioutil.ReadAll(rd)
data, err := io.ReadAll(rd)
require.NoError(t, err)
err = rd.Close()
@@ -164,7 +164,7 @@ func TestNBSPruneTableFiles(t *testing.T) {
assert.NotEmpty(t, absent)
currTableFiles := func(dirName string) *set.StrSet {
infos, err := ioutil.ReadDir(dirName)
infos, err := os.ReadDir(dirName)
require.NoError(t, err)
curr := set.NewStrSet(nil)
for _, fi := range infos {
@@ -193,7 +193,7 @@ func TestNBSPruneTableFiles(t *testing.T) {
for _, fileName := range absent {
assert.False(t, postGC.Contains(fileName))
}
infos, err := ioutil.ReadDir(nomsDir)
infos, err := os.ReadDir(nomsDir)
require.NoError(t, err)
// assert that we only have files for current sources,
+3 -3
View File
@@ -15,7 +15,7 @@
package nbs
import (
"io/ioutil"
"io"
"os"
"testing"
@@ -36,7 +36,7 @@ func TestParseTableIndex(t *testing.T) {
f, err := os.Open("testdata/0oa7mch34jg1rvghrnhr4shrp2fm4ftd.idx")
require.NoError(t, err)
defer f.Close()
bs, err := ioutil.ReadAll(f)
bs, err := io.ReadAll(f)
require.NoError(t, err)
idx, err := parseTableIndex(bs)
require.NoError(t, err)
@@ -60,7 +60,7 @@ func TestMMapIndex(t *testing.T) {
f, err := os.Open("testdata/0oa7mch34jg1rvghrnhr4shrp2fm4ftd.idx")
require.NoError(t, err)
defer f.Close()
bs, err := ioutil.ReadAll(f)
bs, err := io.ReadAll(f)
require.NoError(t, err)
idx, err := parseTableIndex(bs)
require.NoError(t, err)
+2 -3
View File
@@ -91,7 +91,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
@@ -382,7 +381,7 @@ func (suite *PerfSuite) NewAssert() *assert.Assertions {
// TempFile creates a temporary file, which will be automatically cleaned up by
// the perf test suite. Files will be prefixed with the test's dataset ID
func (suite *PerfSuite) TempFile() *os.File {
f, err := ioutil.TempFile("", suite.tempPrefix())
f, err := os.CreateTemp("", suite.tempPrefix())
require.NoError(suite.T, err)
suite.tempFiles = append(suite.tempFiles, f)
return f
@@ -392,7 +391,7 @@ func (suite *PerfSuite) TempFile() *os.File {
// up by the perf test suite. Directories will be prefixed with the test's
// dataset ID.
func (suite *PerfSuite) TempDir() string {
d, err := ioutil.TempDir("", suite.tempPrefix())
d, err := os.MkdirTemp("", suite.tempPrefix())
require.NoError(suite.T, err)
suite.tempDirs = append(suite.tempDirs, d)
return d
+2 -3
View File
@@ -23,7 +23,6 @@ package suite
import (
"context"
"io/ioutil"
"os"
"testing"
"time"
@@ -174,7 +173,7 @@ func runTestSuite(t *testing.T, mem bool) {
assert := assert.New(t)
// Write test results to our own temporary LDB database.
ldbDir, err := ioutil.TempDir("", "suite.TestSuite")
ldbDir, err := os.MkdirTemp("", "suite.TestSuite")
require.NoError(t, err)
defer file.RemoveAll(ldbDir)
@@ -289,7 +288,7 @@ func TestPrefixFlag(t *testing.T) {
assert := assert.New(t)
// Write test results to a temporary database.
ldbDir, err := ioutil.TempDir("", "suite.TestSuite")
ldbDir, err := os.MkdirTemp("", "suite.TestSuite")
require.NoError(t, err)
defer file.RemoveAll(ldbDir)
+5 -6
View File
@@ -24,7 +24,6 @@ package spec
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
@@ -160,7 +159,7 @@ func TestNBSDatabaseSpec(t *testing.T) {
assert := assert.New(t)
run := func(prefix string) {
tmpDir, err := ioutil.TempDir("", "spec_test")
tmpDir, err := os.MkdirTemp("", "spec_test")
assert.NoError(err)
defer file.RemoveAll(tmpDir)
@@ -262,7 +261,7 @@ func TestForDatabase(t *testing.T) {
assert.Error(err, spec)
}
tmpDir, err := ioutil.TempDir("", "spec_test")
tmpDir, err := os.MkdirTemp("", "spec_test")
assert.NoError(err)
defer file.RemoveAll(tmpDir)
@@ -326,7 +325,7 @@ func TestForDataset(t *testing.T) {
assert.NoError(err)
}
tmpDir, err := ioutil.TempDir("", "spec_test")
tmpDir, err := os.MkdirTemp("", "spec_test")
assert.NoError(err)
defer file.RemoveAll(tmpDir)
@@ -371,7 +370,7 @@ func TestForPath(t *testing.T) {
assert.Error(err)
}
tmpDir, err := ioutil.TempDir("", "spec_test")
tmpDir, err := os.MkdirTemp("", "spec_test")
assert.NoError(err)
defer file.RemoveAll(tmpDir)
@@ -498,7 +497,7 @@ func TestAlreadyPinnedPathSpec(t *testing.T) {
func TestMultipleSpecsSameNBS(t *testing.T) {
assert := assert.New(t)
tmpDir, err := ioutil.TempDir("", "spec_test")
tmpDir, err := os.MkdirTemp("", "spec_test")
assert.NoError(err)
defer file.RemoveAll(tmpDir)
+1 -2
View File
@@ -25,7 +25,6 @@ import (
"bytes"
"context"
"io"
"io/ioutil"
"math/rand"
"strings"
"testing"
@@ -283,7 +282,7 @@ func TestBlobNewParallel(t *testing.T) {
vrw := newTestValueStore()
readAll := func(b Blob) []byte {
data, err := ioutil.ReadAll(b.Reader(context.Background()))
data, err := io.ReadAll(b.Reader(context.Background()))
require.NoError(t, err)
return data
}
@@ -17,8 +17,8 @@ package edits
import (
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"testing"
"github.com/stretchr/testify/assert"
@@ -53,7 +53,7 @@ func testDBE(t *testing.T, kvps []types.KVP) {
ctx := context.Background()
nbf := types.Format_Default
vrw := types.NewMemoryValueStore()
tmpDir, err := ioutil.TempDir("", "TestDiskBackedEdits")
tmpDir, err := os.MkdirTemp("", "TestDiskBackedEdits")
require.NoError(t, err)
newEA := func() types.EditAccumulator {
@@ -18,7 +18,6 @@ import (
"bytes"
"context"
"io"
"io/ioutil"
"math/rand"
"strconv"
"testing"
@@ -78,7 +77,7 @@ func readerForTuples(t *testing.T, nbf *types.NomsBinFormat, vrw types.ValueRead
err := wr.WriteTuples(tuples...)
require.NoError(t, err)
return types.NewTupleReader(nbf, vrw, ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())))
return types.NewTupleReader(nbf, vrw, io.NopCloser(bytes.NewBuffer(buf.Bytes())))
}
func newTuple(t *testing.T, nbf *types.NomsBinFormat, vals ...types.Value) types.Tuple {
@@ -159,7 +158,7 @@ func genReader(t *testing.T, r *rand.Rand, nbf *types.NomsBinFormat, vrw types.V
require.NoError(t, err)
}
return numItems, types.NewTupleReader(nbf, vrw, ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())))
return numItems, types.NewTupleReader(nbf, vrw, io.NopCloser(bytes.NewBuffer(buf.Bytes())))
}
func TestTupleStreamMerger(t *testing.T) {
+1 -2
View File
@@ -24,7 +24,6 @@ package perf
import (
"context"
"io"
"io/ioutil"
"math/rand"
"os"
"testing"
@@ -172,7 +171,7 @@ func (s *perfSuite) testBuild500megBlob(p int) {
s.Pause(func() {
for i := range readers {
f, err := ioutil.TempFile("", "testBuildBlob")
f, err := os.CreateTemp("", "testBuildBlob")
assert.NoError(err)
_, err = f.Write(s.randomBytes(int64(i), size/p))
assert.NoError(err)
+1 -2
View File
@@ -18,7 +18,6 @@ import (
"bytes"
"context"
"io"
"io/ioutil"
"math/rand"
"sync"
"testing"
@@ -213,7 +212,7 @@ func TestTupleReadersAndWriters(t *testing.T) {
require.NoError(t, err)
vrw := NewMemoryValueStore()
rd := NewTupleReader(Format_Default, vrw, ioutil.NopCloser(buf))
rd := NewTupleReader(Format_Default, vrw, io.NopCloser(buf))
read := make([]*Tuple, 2*numTuples)
for i := 0; i < 2; i++ {
@@ -22,7 +22,7 @@
package clienttest
import (
"io/ioutil"
"io"
"os"
"path"
@@ -54,11 +54,11 @@ type ExitError struct {
func (suite *ClientTestSuite) SetupSuite() {
td := tempfiles.MovableTempFileProvider.GetTempDir()
dir, err := ioutil.TempDir(td, "nomstest")
dir, err := os.MkdirTemp(td, "nomstest")
d.Chk.NoError(err)
stdOutput, err := ioutil.TempFile(dir, "out")
stdOutput, err := os.CreateTemp(dir, "out")
d.Chk.NoError(err)
errOutput, err := ioutil.TempFile(dir, "err")
errOutput, err := os.CreateTemp(dir, "err")
d.Chk.NoError(err)
suite.TempDir = dir
@@ -112,7 +112,7 @@ func (suite *ClientTestSuite) Run(m func(), args []string) (stdout string, stder
_, err := suite.out.Seek(0, 0)
d.Chk.NoError(err)
capturedOut, err := ioutil.ReadAll(suite.out)
capturedOut, err := io.ReadAll(suite.out)
d.Chk.NoError(err)
_, err = suite.out.Seek(0, 0)
@@ -122,7 +122,7 @@ func (suite *ClientTestSuite) Run(m func(), args []string) (stdout string, stder
_, err = suite.err.Seek(0, 0)
d.Chk.NoError(err)
capturedErr, err := ioutil.ReadAll(suite.err)
capturedErr, err := io.ReadAll(suite.err)
d.Chk.NoError(err)
_, err = suite.err.Seek(0, 0)
+2 -3
View File
@@ -15,7 +15,6 @@
package tempfiles
import (
"io/ioutil"
"os"
"sync"
@@ -62,7 +61,7 @@ func (tfp *TempFileProviderAt) NewFile(dir, pattern string) (*os.File, error) {
dir = tfp.tempDir
}
f, err := ioutil.TempFile(dir, pattern)
f, err := os.CreateTemp(dir, pattern)
if err == nil {
tfp.filesCreated = append(tfp.filesCreated, f.Name())
@@ -82,7 +81,7 @@ func (tfp *TempFileProviderAt) Clean() {
}
// MovableTempFile is an object that implements TempFileProvider that is used by the nbs to create temp files that
// ultimately will be renamed. It is important to use this instance rather than using os.TempDir, or ioutil.TempFile
// ultimately will be renamed. It is important to use this instance rather than using os.TempDir, or os.CreateTemp
// directly as those may have errors executing a rename against if the volume the default temporary directory lives on
// is different than the volume of the destination of the rename.
var MovableTempFileProvider TempFileProvider = NewTempFileProviderAt(os.TempDir())
+2 -3
View File
@@ -21,7 +21,6 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path"
@@ -110,7 +109,7 @@ func main() {
if err != nil {
log.Fatalf("Error opening -verify file %s: %v\n", *verifyFilename, err)
}
verifyContents, err := ioutil.ReadAll(verifyFile)
verifyContents, err := io.ReadAll(verifyFile)
if err != nil {
log.Fatalf("Error reading -verify file %s: %v\n", *verifyFilename, err)
}
@@ -170,7 +169,7 @@ func PrintLicense(out io.Writer, filepath string) {
if err != nil {
log.Fatalf("Error opening license file [%s] for copying: %v\n", filepath, err)
}
contents, err := ioutil.ReadAll(f)
contents, err := io.ReadAll(f)
if err != nil {
log.Fatalf("Error reading license file [%s] for copying: %v\n", filepath, err)
}
+3 -3
View File
@@ -16,7 +16,7 @@ package main
import (
"fmt"
"io/ioutil"
"io"
"os"
"path/filepath"
"regexp"
@@ -388,7 +388,7 @@ func CheckGo() bool {
panic(err)
}
defer f.Close()
bs, err := ioutil.ReadAll(f)
bs, err := io.ReadAll(f)
if err != nil {
panic(err)
}
@@ -422,7 +422,7 @@ func CheckProto() bool {
panic(err)
}
defer f.Close()
bs, err := ioutil.ReadAll(f)
bs, err := io.ReadAll(f)
if err != nil {
panic(err)
}
+2 -3
View File
@@ -20,7 +20,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
@@ -86,7 +85,7 @@ func writeTableFile(logger func(string), org, repo, fileId string, request *http
}
logger(fileId + " is valid")
data, err := ioutil.ReadAll(request.Body)
data, err := io.ReadAll(request.Body)
if tfd.ContentLength != 0 && tfd.ContentLength != uint64(len(data)) {
return http.StatusBadRequest
@@ -116,7 +115,7 @@ func writeTableFile(logger func(string), org, repo, fileId string, request *http
func writeLocal(logger func(string), org, repo, fileId string, data []byte) error {
path := filepath.Join(org, repo, fileId)
err := ioutil.WriteFile(path, data, os.ModePerm)
err := os.WriteFile(path, data, os.ModePerm)
if err != nil {
logger(fmt.Sprintf("failed to write file %s", path))