Merge pull request #708 from liquidata-inc/andy/table-sch-import-tests

Extra BATS for `table import` & `schema import`
AndyA committed 2020-05-27 17:24:03 -05:00 (committed by GitHub)
5 changed files with 328 additions and 92 deletions

View File

@@ -3,22 +3,13 @@ load $BATS_TEST_DIRNAME/helper/common.bash
setup() {
setup_common
dolt sql <<SQL
CREATE TABLE int_table (
pk LONGTEXT NOT NULL,
c1 LONGTEXT,
c2 LONGTEXT,
c3 LONGTEXT,
c4 LONGTEXT,
c5 LONGTEXT,
PRIMARY KEY (pk)
);
SQL
cat <<DELIM > 1pk5col-ints.csv
pk,c1,c2,c3,c4,c5
0,1,2,3,4,5
1,1,2,3,4,5
DELIM
cat <<DELIM > empty-strings-null-values.csv
pk,headerOne,headerTwo
a,"""""",1
@@ -29,6 +20,29 @@ e,row five,
f,row six,6
g, ,
DELIM
cat <<JSON > name-map.json
{
"one":"pk",
"two":"c1",
"three":"c2",
"four":"c3"
}
JSON
cat <<DELIM > name-map-data.csv
one,two,three,four
0,1,2,3
DELIM
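# Note: per the name-map tests below, the JSON keys are CSV headers and the
# values are the column names to use on import, so importing
# name-map-data.csv with -m=name-map.json yields columns pk,c1,c2,c3.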
cat <<SQL > name-map-sch.sql
CREATE TABLE test (
pk int not null,
c1 float,
c2 float,
c3 float,
primary key(pk)
);
SQL
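# Note: per the schema-file tests below, a schema passed with -s supplies
# the column types (FLOAT here) instead of the types dolt would infer from
# the CSV values.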
}
teardown() {
@@ -71,7 +85,7 @@ teardown() {
}
@test "import data from csv and create the table" {
run dolt table import -c --pk=pk test `batshelper 1pk5col-ints.csv`
run dolt table import -c --pk=pk test 1pk5col-ints.csv
[ "$status" -eq 0 ]
[[ "$output" =~ "Import completed successfully." ]] || false
run dolt ls
@@ -83,21 +97,28 @@ teardown() {
}
@test "use -f to overwrite data in existing table" {
dolt table import -c --pk=pk test `batshelper 1pk5col-ints.csv`
run dolt table import -c --pk=pk test `batshelper 1pk5col-ints.csv`
cat <<DELIM > other.csv
pk,c1,c2,c3,c4,c5
8,1,2,3,4,5
9,1,2,3,4,5
DELIM
dolt table import -c --pk=pk test 1pk5col-ints.csv
run dolt table import -c --pk=pk test 1pk5col-ints.csv
[ "$status" -eq 1 ]
[[ "$output" =~ "test already exists. Use -f to overwrite." ]] || false
run dolt table import -f -c --pk=pk test `batshelper 1pk5col-ints.csv`
run dolt table import -f -c --pk=pk test other.csv
[ "$status" -eq 0 ]
[[ "$output" =~ "Import completed successfully." ]] || false
run dolt ls
[ "$status" -eq 0 ]
[[ "$output" =~ "test" ]] || false
run dolt sql -q "select * from test"
run dolt sql -r csv -q "select * from test"
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 6 ]
[ "${lines[0]}" = "pk,c1,c2,c3,c4,c5" ]
[ "${lines[1]}" = "8,1,2,3,4,5" ]
[ "${lines[2]}" = "9,1,2,3,4,5" ]
[ ! "${lines[1]}" = "0,1,2,3,4,5" ]
[ ! "${lines[2]}" = "1,1,2,3,4,5" ]
}
@test "try to create a table with a bad csv" {
@@ -112,23 +133,31 @@ teardown() {
}
@test "try to create a table with dolt table import with invalid name" {
run dolt table import -c --pk=pk 123 `batshelper 1pk5col-ints.csv`
run dolt table import -c --pk=pk 123 1pk5col-ints.csv
[ "$status" -eq 1 ]
[[ "$output" =~ "not a valid table name" ]] || false
run dolt table import -c --pk=pk dolt_docs `batshelper 1pk5col-ints.csv`
run dolt table import -c --pk=pk dolt_docs 1pk5col-ints.csv
[ "$status" -eq 1 ]
[[ "$output" =~ "not a valid table name" ]] || false
[[ "$output" =~ "reserved" ]] || false
run dolt table import -c --pk=pk dolt_query_catalog `batshelper 1pk5col-ints.csv`
run dolt table import -c --pk=pk dolt_query_catalog 1pk5col-ints.csv
[ "$status" -eq 1 ]
[[ "$output" =~ "not a valid table name" ]] || false
[[ "$output" =~ "reserved" ]] || false
run dolt table import -c --pk=pk dolt_reserved `batshelper 1pk5col-ints.csv`
run dolt table import -c --pk=pk dolt_reserved 1pk5col-ints.csv
[ "$status" -eq 1 ]
[[ "$output" =~ "not a valid table name" ]] || false
[[ "$output" =~ "reserved" ]] || false
}
@test "try to table import with nonexistant --pk arg" {
run dolt table import -c -pk="batmansparents" test 1pk5col-ints.csv
[ "$status" -ne 1 ]
skip "--pk args is not validated to be an existing column"
[[ ! "$output" =~ "panic" ]] || false
[[ "$output" =~ "column 'batmansparents' not found" ]] || false
}
@test "create a table with two primary keys from csv import" {
run dolt table import -c --pk=pk1,pk2 test `batshelper 2pk5col-ints.csv`
[ "$status" -eq 0 ]
@@ -142,7 +171,12 @@ teardown() {
}
@test "import data from psv and create the table" {
run dolt table import -c --pk=pk test `batshelper 1pk5col-ints.psv`
cat <<DELIM > 1pk5col-ints.psv
pk|c1|c2|c3|c4|c5
0|1|2|3|4|5
1|1|2|3|4|5
DELIM
run dolt table import -c --pk=pk test 1pk5col-ints.psv
[ "$status" -eq 0 ]
[[ "$output" =~ "Import completed successfully." ]] || false
run dolt ls
@@ -153,11 +187,90 @@ teardown() {
[ "${#lines[@]}" -eq 6 ]
}
@test "import table using --delim" {
cat <<DELIM > 1pk5col-ints.csv
pk||c1||c2||c3||c4||c5
0||1||2||3||4||5
1||1||2||3||4||5
DELIM
run dolt table import -c --delim="||" test 1pk5col-ints.csv
[ "$status" -eq 0 ]
run dolt sql -r csv -q "select * from test"
[ "$status" -eq 0 ]
[ "${lines[0]}" = "pk,c1,c2,c3,c4,c5" ]
[ "${lines[1]}" = "0,1,2,3,4,5" ]
[ "${lines[2]}" = "1,1,2,3,4,5" ]
}
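# A reading of the test above: --delim overrides the comma delimiter implied
# by the .csv extension, so the "||"-separated file still imports cleanly.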
@test "create a table with a name map" {
run dolt table import -c -pk=pk -m=name-map.json test name-map-data.csv
[ "$status" -eq 0 ]
run dolt sql -r csv -q 'select * from test'
[ "$status" -eq 0 ]
[ "${lines[0]}" = "pk,c1,c2,c3" ]
[ "${lines[1]}" = "0,1,2,3" ]
run dolt schema export test
[ "$status" -eq 0 ]
[[ "$output" =~ "PRIMARY KEY (\`pk\`)" ]] || false
}
@test "use a name map with missing and extra entries" {
cat <<JSON > partial-map.json
{
"one":"pk",
"ten":"c10"
}
JSON
run dolt table import -c -pk=pk -m=partial-map.json test name-map-data.csv
[ "$status" -eq 0 ]
run dolt schema export test
[ "$status" -eq 0 ]
[[ ! "$output" =~ "c10" ]] || false
[[ "${lines[1]}" =~ "pk" ]] || false
[[ "${lines[2]}" =~ "two" ]] || false
[[ "${lines[3]}" =~ "three" ]] || false
[[ "${lines[4]}" =~ "four" ]] || false
}
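# A reading of the test above: headers absent from the map ("two", "three",
# "four") keep their original names, and map entries matching no header
# ("ten":"c10") are ignored rather than treated as errors.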
@test "create a table with a schema file" {
cat <<DELIM > sch-data.csv
pk,c1,c2,c3
0,1,2,3
DELIM
run dolt table import -c -s=name-map-sch.sql test sch-data.csv
[ "$status" -eq 0 ]
run dolt sql -r csv -q 'select * from test'
[ "$status" -eq 0 ]
[ "${lines[0]}" = "pk,c1,c2,c3" ]
[ "${lines[1]}" = "0,1,2,3" ]
run dolt schema export test
[ "$status" -eq 0 ]
[[ "$output" =~ "\`c1\` FLOAT" ]] || false
[[ "$output" =~ "\`c2\` FLOAT" ]] || false
[[ "$output" =~ "\`c3\` FLOAT" ]] || false
[[ "$output" =~ "PRIMARY KEY (\`pk\`)" ]] || false
}
@test "create a table with a name map and a schema file" {
run dolt table import -c -s=name-map-sch.sql -m=name-map.json test name-map-data.csv
[ "$status" -eq 0 ]
run dolt sql -r csv -q 'select * from test'
[ "$status" -eq 0 ]
[ "${lines[0]}" = "pk,c1,c2,c3" ]
[ "${lines[1]}" = "0,1,2,3" ]
run dolt schema export test
[ "$status" -eq 0 ]
[[ "$output" =~ "\`c1\` FLOAT" ]] || false
[[ "$output" =~ "\`c2\` FLOAT" ]] || false
[[ "$output" =~ "\`c3\` FLOAT" ]] || false
[[ "$output" =~ "PRIMARY KEY (\`pk\`)" ]] || false
}
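# A reading of the test above: with both -m and -s, the name map renames the
# CSV headers first and the renamed columns are then matched against the
# schema file; the FLOAT and PRIMARY KEY assertions only hold in that order.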
@test "create a table from CSV with common column name patterns" {
run dolt table import -c --pk=UPPERCASE test `batshelper caps-column-names.csv`
[ "$status" -eq 0 ]
[[ "$output" =~ "Import completed successfully." ]] || false
run dolt sql -q "select * from test"
run dolt sql -r csv -q "select * from test"
[ "$status" -eq 0 ]
[[ "$output" =~ "UPPERCASE" ]] || false
}

View File

@@ -3,6 +3,31 @@ load $BATS_TEST_DIRNAME/helper/common.bash
setup() {
setup_common
cat <<DELIM > 1pk5col-ints.csv
pk,c1,c2,c3,c4,c5
0,1,2,3,4,5
1,1,2,3,4,5
DELIM
cat <<DELIM > 1pksupportedtypes.csv
pk, int, string, boolean, float, uint, uuid
0, 0, "asdf", TRUE, 0.0, 0, "00000000-0000-0000-0000-000000000000"
1, -1, "qwerty", FALSE, -1.0, 1, "00000000-0000-0000-0000-000000000001"
2, 1, "", TRUE, 0.0, 0, "123e4567-e89b-12d3-a456-426655440000"
DELIM
cat <<DELIM > abc.csv
pk,a,b,c
0, red, 1.1, true
1, blue, 2.2, false
DELIM
cat <<DELIM > abc-xyz.csv
pk,a,b,c,x,y,z
0, red, 1.1, true, green, 3.14, -1
1, blue, 2.2, false, yellow, 2.71, -2
DELIM
}
teardown() {
@@ -10,7 +35,7 @@ teardown() {
}
@test "schema import create" {
run dolt schema import -c --pks=pk test `batshelper 1pk5col-ints.csv`
run dolt schema import -c --pks=pk test 1pk5col-ints.csv
[ "$status" -eq 0 ]
[[ "$output" =~ "Created table successfully." ]] || false
run dolt ls
@@ -30,7 +55,7 @@ teardown() {
}
@test "schema import dry run" {
run dolt schema import --dry-run -c --pks=pk test `batshelper 1pk5col-ints.csv`
run dolt schema import --dry-run -c --pks=pk test 1pk5col-ints.csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 9 ]
[[ "${lines[0]}" =~ "test" ]] || false
@@ -47,7 +72,7 @@ teardown() {
}
@test "schema import with a bunch of types" {
run dolt schema import --dry-run -c --pks=pk test `batshelper 1pksupportedtypes.csv`
run dolt schema import --dry-run -c --pks=pk test 1pksupportedtypes.csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 10 ]
[[ "${lines[0]}" =~ "test" ]] || false
@@ -61,14 +86,16 @@ teardown() {
}
@test "schema import with an empty csv" {
run dolt schema import --dry-run -c --pks=pk test `batshelper bad.csv`
cat <<DELIM > empty.csv
DELIM
run dolt schema import --dry-run -c --pks=pk test empty.csv
[ "$status" -eq 1 ]
[[ "$output" =~ "Header line is empty" ]] || false
}
@test "schema import replace" {
dolt schema import -c --pks=pk test `batshelper 1pk5col-ints.csv`
run dolt schema import -r --pks=pk test `batshelper 1pksupportedtypes.csv`
dolt schema import -c --pks=pk test 1pk5col-ints.csv
run dolt schema import -r --pks=pk test 1pksupportedtypes.csv
[ "$status" -eq 0 ]
run dolt schema show
[ "$status" -eq 0 ]
@@ -84,25 +111,30 @@ teardown() {
}
@test "schema import with invalid names" {
run dolt schema import -c --pks=pk 123 `batshelper 1pk5col-ints.csv`
run dolt schema import -c --pks=pk 123 1pk5col-ints.csv
[ "$status" -eq 1 ]
[[ "$output" =~ "not a valid table name" ]] || false
run dolt schema import -c --pks=pk dolt_docs `batshelper 1pk5col-ints.csv`
run dolt schema import -c --pks=pk dolt_docs 1pk5col-ints.csv
[ "$status" -eq 1 ]
[[ "$output" =~ "not a valid table name" ]] || false
[[ "$output" =~ "reserved" ]] || false
run dolt schema import -c --pks=pk dolt_query_catalog `batshelper 1pk5col-ints.csv`
run dolt schema import -c --pks=pk dolt_query_catalog 1pk5col-ints.csv
[ "$status" -eq 1 ]
[[ "$output" =~ "not a valid table name" ]] || false
[[ "$output" =~ "reserved" ]] || false
run dolt schema import -c --pks=pk dolt_reserved `batshelper 1pk5col-ints.csv`
run dolt schema import -c --pks=pk dolt_reserved 1pk5col-ints.csv
[ "$status" -eq 1 ]
[[ "$output" =~ "not a valid table name" ]] || false
[[ "$output" =~ "reserved" ]] || false
}
@test "schema import with multiple primary keys" {
run dolt schema import -c --pks=pk1,pk2 test `batshelper 2pk5col-ints.csv`
cat <<DELIM > 2pk5col-ints.csv
pk1,pk2,c1,c2,c3,c4,c5
0,0,1,2,3,4,5
1,1,1,2,3,4,5
DELIM
run dolt schema import -c --pks=pk1,pk2 test 2pk5col-ints.csv
[ "$status" -eq 0 ]
[[ "$output" =~ "Created table successfully." ]] || false
dolt schema show
@@ -140,11 +172,17 @@ DELIM
}
@test "schema import --keep-types" {
run dolt schema import -c --keep-types --pks=pk test `batshelper 1pk5col-ints.csv`
cat <<DELIM > 1pk5col-strings.csv
pk,c1,c2,c3,c4,c5,c6
"0","foo","bar","baz","car","dog","tim"
"1","1","2","3","4","5","6"
DELIM
run dolt schema import -c --keep-types --pks=pk test 1pk5col-ints.csv
[ "$status" -eq 1 ]
[[ "$output" =~ "parameter keep-types not supported for create operations" ]] || false
dolt schema import -c --pks=pk test `batshelper 1pk5col-ints.csv`
run dolt schema import -r --keep-types --pks=pk test `batshelper 1pk5col-strings.csv`
dolt schema import -c --pks=pk test 1pk5col-ints.csv
run dolt schema import -r --keep-types --pks=pk test 1pk5col-strings.csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 11 ]
[[ "${lines[0]}" =~ "test" ]] || false
@@ -207,6 +245,85 @@ DELIM
}
@test "schema import of two tables" {
dolt schema import -c --pks=pk test1 `batshelper 1pksupportedtypes.csv`
dolt schema import -c --pks=pk test2 `batshelper 1pk5col-ints.csv`
dolt schema import -c --pks=pk test1 1pksupportedtypes.csv
dolt schema import -c --pks=pk test2 1pk5col-ints.csv
}
@test "schema import --update adds new columns" {
dolt table import -c -pk=pk test abc.csv
dolt add test
dolt commit -m "added table"
run dolt schema import -pks=pk -u test abc-xyz.csv
[ "$status" -eq 0 ]
run dolt diff --schema
[ "$status" -eq 0 ]
[[ "$output" =~ "+ \`x\` LONGTEXT" ]] || false
[[ "$output" =~ "+ \`y\` FLOAT" ]] || false
[[ "$output" =~ "+ \`z\` INT" ]] || false
# assert no columns were deleted/replaced
[[ ! "$output" = "- \`" ]] || false
run dolt sql -r csv -q 'select * from test'
[ "$status" -eq 0 ]
[[ "$output" =~ "pk,a,b,c,x,y,z" ]] || false
skip "schema import --update is currently deleting table data"
[[ "$output" =~ "0,red,1.1,true,,," ]] || false
[[ "$output" =~ "1,blue,2.2,false,,," ]] || false
}
@test "schema import --replace adds new columns" {
dolt table import -c -pk=pk test abc.csv
dolt add test
dolt commit -m "added table"
run dolt schema import -pks=pk -r test abc-xyz.csv
[ "$status" -eq 0 ]
run dolt diff --schema
[ "$status" -eq 0 ]
[[ "$output" =~ "+ \`x\` LONGTEXT" ]] || false
[[ "$output" =~ "+ \`y\` FLOAT" ]] || false
[[ "$output" =~ "+ \`z\` INT" ]] || false
# assert no columns were deleted/replaced
[[ ! "$output" = "- \`" ]] || false
run dolt sql -r csv -q 'select count(*) from test'
[ "$status" -eq 0 ]
[[ "$output" =~ "COUNT(*)" ]] || false
[[ "$output" =~ "0" ]] || false
}
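# The count(*) of 0 above documents that --replace rewrites the schema from
# the file without migrating existing rows, leaving the table empty.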
@test "schema import --replace drops missing columns" {
cat <<DELIM > xyz.csv
pk,x,y,z
0,green,3.14,-1
1,yellow,2.71,-2
DELIM
dolt table import -c -pk=pk test abc-xyz.csv
dolt add test
dolt commit -m "added test"
run dolt schema import -pks=pk -r test xyz.csv
[ "$status" -eq 0 ]
run dolt diff --schema
[ "$status" -eq 0 ]
[[ "$output" =~ "- \`a\`" ]] || false
[[ "$output" =~ "- \`b\`" ]] || false
[[ "$output" =~ "- \`c\`" ]] || false
# assert no columns were added
[[ ! "$output" = "+ \`" ]] || false
}
@test "schema import with name map" {
cat <<JSON > name-map.json
{
"a":"aa",
"b":"bb",
"c":"cc"
}
JSON
run dolt schema import -c -pks=pk -m=name-map.json test abc.csv
[ "$status" -eq 0 ]
[[ "$output" =~ "\`pk\` INT" ]] || false
[[ "$output" =~ "\`aa\`" ]] || false
[[ "$output" =~ "\`bb\`" ]] || false
[[ "$output" =~ "\`cc\`" ]] || false
[[ ! "$output" =~ "\`a\`" ]] || false
[[ ! "$output" =~ "\`b\`" ]] || false
[[ ! "$output" =~ "\`c\`" ]] || false
}

View File

@@ -3,6 +3,37 @@ load $BATS_TEST_DIRNAME/helper/common.bash
setup() {
setup_common
cat <<SQL > 1pk5col-ints-sch.sql
CREATE TABLE test (
pk BIGINT NOT NULL COMMENT 'tag:0',
c1 BIGINT COMMENT 'tag:1',
c2 BIGINT COMMENT 'tag:2',
c3 BIGINT COMMENT 'tag:3',
c4 BIGINT COMMENT 'tag:4',
c5 BIGINT COMMENT 'tag:5',
PRIMARY KEY (pk)
);
SQL
cat <<DELIM > 1pk5col-ints.csv
pk,c1,c2,c3,c4,c5
0,1,2,3,4,5
1,1,2,3,4,5
DELIM
cat <<SQL > employees-sch.sql
CREATE TABLE employees (
\`id\` LONGTEXT NOT NULL COMMENT 'tag:0',
\`first name\` LONGTEXT COMMENT 'tag:1',
\`last name\` LONGTEXT COMMENT 'tag:2',
\`title\` LONGTEXT COMMENT 'tag:3',
\`start date\` LONGTEXT COMMENT 'tag:4',
\`end date\` LONGTEXT COMMENT 'tag:5',
PRIMARY KEY (id)
);
SQL
}
teardown() {
@@ -10,17 +41,7 @@ teardown() {
}
@test "update table using csv" {
dolt sql <<SQL
CREATE TABLE test (
pk BIGINT NOT NULL COMMENT 'tag:0',
c1 BIGINT COMMENT 'tag:1',
c2 BIGINT COMMENT 'tag:2',
c3 BIGINT COMMENT 'tag:3',
c4 BIGINT COMMENT 'tag:4',
c5 BIGINT COMMENT 'tag:5',
PRIMARY KEY (pk)
);
SQL
dolt sql < 1pk5col-ints-sch.sql
run dolt table import -u test `batshelper 1pk5col-ints.csv`
[ "$status" -eq 0 ]
[[ "$output" =~ "Rows Processed: 2, Additions: 2, Modifications: 0, Had No Effect: 0" ]] || false
@@ -28,17 +49,7 @@ SQL
}
@test "update table using schema with csv" {
dolt sql <<SQL
CREATE TABLE test (
pk BIGINT NOT NULL COMMENT 'tag:0',
c1 BIGINT COMMENT 'tag:1',
c2 BIGINT COMMENT 'tag:2',
c3 BIGINT COMMENT 'tag:3',
c4 BIGINT COMMENT 'tag:4',
c5 BIGINT COMMENT 'tag:5',
PRIMARY KEY (pk)
);
SQL
dolt sql < 1pk5col-ints-sch.sql
run dolt table import -u -s `batshelper 1pk5col-ints-schema.json` test `batshelper 1pk5col-ints.csv`
[ "$status" -eq 1 ]
[[ "$output" =~ "fatal: schema is not supported for update or replace operations" ]] || false
@@ -61,17 +72,7 @@ SQL
}
@test "update table using json" {
dolt sql <<SQL
CREATE TABLE employees (
\`id\` LONGTEXT NOT NULL COMMENT 'tag:0',
\`first name\` LONGTEXT COMMENT 'tag:1',
\`last name\` LONGTEXT COMMENT 'tag:2',
\`title\` LONGTEXT COMMENT 'tag:3',
\`start date\` LONGTEXT COMMENT 'tag:4',
\`end date\` LONGTEXT COMMENT 'tag:5',
PRIMARY KEY (id)
);
SQL
dolt sql < employees-sch.sql
run dolt table import -u employees `batshelper employees-tbl.json`
[ "$status" -eq 0 ]
[[ "$output" =~ "Rows Processed: 3, Additions: 3, Modifications: 0, Had No Effect: 0" ]] || false
@@ -96,24 +97,14 @@ SQL
}
@test "update table using schema with json" {
dolt sql <<SQL
CREATE TABLE employees (
\`idz\` LONGTEXT NOT NULL COMMENT 'tag:0',
\`first namez\` LONGTEXT COMMENT 'tag:1',
\`last namez\` LONGTEXT COMMENT 'tag:2',
\`titlez\` LONGTEXT COMMENT 'tag:3',
\`start datez\` LONGTEXT COMMENT 'tag:4',
\`end datez\` LONGTEXT COMMENT 'tag:5',
PRIMARY KEY (idz)
);
SQL
run dolt table import -u -s `batshelper employees-sch.json` employees `batshelper employees-tbl.json`
dolt sql < employees-sch.sql
run dolt table import -u -s employees-sch.sql employees `batshelper employees-tbl.json`
[ "$status" -eq 1 ]
[[ "$output" =~ "fatal: schema is not supported for update or replace operations" ]] || false
}
@test "update table with existing imported data with different schema" {
run dolt table import -c -s `batshelper employees-sch.sql` employees `batshelper employees-tbl.json`
run dolt table import -c -s employees-sch.sql employees `batshelper employees-tbl.json`
[ "$status" -eq 0 ]
[[ "$output" =~ "Import completed successfully." ]] || false
run dolt table import -u employees `batshelper employees-tbl-schema-wrong.json`
@@ -127,7 +118,7 @@ SQL
[[ "$output" =~ "The following table could not be found:" ]] || false
}
@test "replace table with a json with columns in different order" {
@test "update table with a json with columns in different order" {
dolt sql <<SQL
CREATE TABLE employees (
\`id\` LONGTEXT NOT NULL COMMENT 'tag:0',
@@ -144,9 +135,17 @@ SQL
[ "$status" -eq 0 ]
[[ "$output" =~ "Rows Processed: 3, Additions: 3, Modifications: 0, Had No Effect: 0" ]] || false
[[ "$output" =~ "Import completed successfully." ]] || false
run dolt schema export employees
[[ "$status" -eq 0 ]]
[[ "${lines[1]}" =~ "id" ]] || false
[[ "${lines[2]}" =~ "first name" ]] || false
[[ "${lines[3]}" =~ "last name" ]] || false
[[ "${lines[4]}" =~ "title" ]] || false
[[ "${lines[5]}" =~ "start date" ]] || false
[[ "${lines[6]}" =~ "end date" ]] || false
}
@test "replace table with a csv with columns in different order" {
@test "update table with a csv with columns in different order" {
dolt sql <<SQL
CREATE TABLE employees (
\`id\` LONGTEXT NOT NULL COMMENT 'tag:0',
@@ -159,8 +158,15 @@ CREATE TABLE employees (
);
SQL
run dolt table import -u employees `batshelper employees-tbl-schema-unordered.csv`
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "Rows Processed: 3, Additions: 3, Modifications: 0, Had No Effect: 0" ]] || false
[[ "$output" =~ "Import completed successfully." ]] || false
run dolt schema export employees
[[ "$status" -eq 0 ]]
[[ "${lines[1]}" =~ "id" ]] || false
[[ "${lines[2]}" =~ "first name" ]] || false
[[ "${lines[3]}" =~ "last name" ]] || false
[[ "${lines[4]}" =~ "title" ]] || false
[[ "${lines[5]}" =~ "start date" ]] || false
[[ "${lines[6]}" =~ "end date" ]] || false
}

View File

@@ -153,7 +153,7 @@ func (cmd ImportCmd) createArgParser() *argparser.ArgParser {
ap.SupportsFlag(keepTypesParam, "", "When a column already exists in the table, and it's also in the {{.LessThan}}file{{.GreaterThan}} provided, use the type from the table.")
ap.SupportsString(fileTypeParam, "", "type", "Explicitly define the type of the file if it can't be inferred from the file extension.")
ap.SupportsString(pksParam, "", "comma-separated-col-names", "List of columns used as the primary key cols. Order of the columns will determine sort order.")
ap.SupportsString(mappingParam, "", "mapping-file", "A file that can map a column name in {{.LessThan}}file{{.GreaterThan}} to a new value.")
ap.SupportsString(mappingParam, "m", "mapping-file", "A file that can map a column name in {{.LessThan}}file{{.GreaterThan}} to a new value.")
ap.SupportsString(floatThresholdParam, "", "float", "Minimum value at which the fractional component of a value must exceed in order to be considered a float.")
ap.SupportsString(delimParam, "", "delimiter", "Specify a delimiter for a csv style file with a non-comma delimiter.")
return ap
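// The added "m" short form means invocations like
// `dolt table import -m=name-map.json ...` (used throughout the new BATS
// tests) parse identically to the long mapping flag.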

View File

@@ -220,7 +220,7 @@ func NameMapperFromFile(mappingFile string, FS filesys.ReadableFS) (NameMapper,
err := filesys.UnmarshalJSONFile(FS, mappingFile, &nm)
if err != nil {
return nil, ErrMappingFileRead
return nil, errhand.BuildDError(ErrMappingFileRead.Error()).AddCause(err).Build()
}
return nm, nil
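// Building a DError with AddCause(err) attaches the underlying read/parse
// failure to ErrMappingFileRead's message rather than returning the bare
// sentinel, so a malformed mapping file can report why it failed.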