Merge pull request #986 from cmasone-attic/csv_refactor

Change csv.Write() signature to broaden uses
This commit is contained in:
cmasone-attic
2016-02-10 12:40:48 -08:00
4 changed files with 26 additions and 26 deletions

View File

@@ -5,7 +5,7 @@ import (
"unicode/utf8"
)
// Returns the rune contained in delimiter or an error.
// StringToRune returns the rune contained in delimiter or an error.
func StringToRune(delimiter string) (rune, error) {
dlimLen := len(delimiter)
if dlimLen == 0 {

1
clients/csv/exporter/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
exporter

View File

@@ -44,7 +44,8 @@ func main() {
}
err = d.Try(func() {
csv.Write(ds, comma, *p, os.Stdout)
nomsList, structDesc := csv.ValueToListAndElemDesc(ds.Head().Value(), ds.Store())
csv.Write(nomsList, structDesc, comma, *p, os.Stdout)
})
if err != nil {
fmt.Println("Failed to export dataset as CSV:")

View File

@@ -5,22 +5,13 @@ import (
"fmt"
"io"
"github.com/attic-labs/noms/chunks"
"github.com/attic-labs/noms/d"
"github.com/attic-labs/noms/dataset"
"github.com/attic-labs/noms/types"
)
// getFieldNamesFromStruct returns the name of every field in structDesc,
// in declaration order, for use as a CSV header row.
func getFieldNamesFromStruct(structDesc types.StructDesc) (fieldNames []string) {
for _, f := range structDesc.Fields {
// Only primitive field kinds can be flattened into a CSV cell;
// d.Exp.Equal asserts this (presumably aborting on failure — see d package).
d.Exp.Equal(true, types.IsPrimitiveKind(f.T.Desc.Kind()),
"Non-primitive CSV export not supported:", f.T.Desc.Describe())
fieldNames = append(fieldNames, f.Name)
}
return
}
func datasetToHeaderAndList(ds *dataset.Dataset) (fieldNames []string, nomsList types.List) {
v := ds.Head().Value()
// ValueToListAndElemDesc ensures that v is a types.List of structs, pulls the types.StructDesc that describes the elements of v out of cs, and returns the List and related StructDesc.
func ValueToListAndElemDesc(v types.Value, cs chunks.ChunkSource) (types.List, types.StructDesc) {
d.Exp.Equal(types.ListKind, v.Type().Desc.Kind(),
"Dataset must be List<>, found: %s", v.Type().Desc.Describe())
@@ -28,29 +19,27 @@ func datasetToHeaderAndList(ds *dataset.Dataset) (fieldNames []string, nomsList
d.Exp.Equal(types.UnresolvedKind, u.Desc.Kind(),
"List<> must be UnresolvedKind, found: %s", u.Desc.Describe())
pkg := types.ReadPackage(u.PackageRef(), ds.Store())
pkg := types.ReadPackage(u.PackageRef(), cs)
d.Exp.Equal(types.PackageKind, pkg.Type().Desc.Kind(),
"Failed to read package: %s", pkg.Type().Desc.Describe())
structDesc := pkg.Types()[u.Ordinal()].Desc
d.Exp.Equal(types.StructKind, structDesc.Kind(),
"Did not find Struct: %s", structDesc.Describe())
fieldNames = getFieldNamesFromStruct(structDesc.(types.StructDesc))
nomsList = v.(types.List)
return
desc := pkg.Types()[u.Ordinal()].Desc
d.Exp.Equal(types.StructKind, desc.Kind(), "Did not find Struct: %s", desc.Describe())
return v.(types.List), desc.(types.StructDesc)
}
func Write(ds *dataset.Dataset, comma rune, concurrency int, output io.Writer) {
fieldNames, nomsList := datasetToHeaderAndList(ds)
// Write takes a types.List l of structs (described by sd) and writes it to output as comma-delimited values.
func Write(l types.List, sd types.StructDesc, comma rune, concurrency int, output io.Writer) {
d.Exp.Equal(types.StructKind, sd.Kind(), "Did not find Struct: %s", sd.Describe())
fieldNames := getFieldNamesFromStruct(sd)
csvWriter := csv.NewWriter(output)
csvWriter.Comma = comma
records := make([][]string, nomsList.Len()+1)
records := make([][]string, l.Len()+1)
records[0] = fieldNames // Write header
nomsList.IterAllP(concurrency, func(v types.Value, index uint64) {
l.IterAllP(concurrency, func(v types.Value, index uint64) {
for _, f := range fieldNames {
records[index+1] = append(
records[index+1],
@@ -63,3 +52,12 @@ func Write(ds *dataset.Dataset, comma rune, concurrency int, output io.Writer) {
err := csvWriter.Error()
d.Exp.Equal(nil, err, "error flushing csv:", err)
}
// getFieldNamesFromStruct returns the name of every field in structDesc,
// in declaration order, for use as a CSV header row.
func getFieldNamesFromStruct(structDesc types.StructDesc) (fieldNames []string) {
for _, f := range structDesc.Fields {
// Only primitive field kinds can be flattened into a CSV cell;
// d.Exp.Equal asserts this (presumably aborting on failure — see d package).
d.Exp.Equal(true, types.IsPrimitiveKind(f.T.Desc.Kind()),
"Non-primitive CSV export not supported:", f.T.Desc.Describe())
fieldNames = append(fieldNames, f.Name)
}
return
}