mirror of
https://github.com/dolthub/dolt.git
synced 2026-05-05 11:21:58 -05:00
@@ -0,0 +1,296 @@
|
||||
// Copyright 2016 Attic Labs, Inc. All rights reserved.
|
||||
// Licensed under the Apache License, version 2.0:
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package diff
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/attic-labs/noms/go/d"
|
||||
"github.com/attic-labs/noms/go/types"
|
||||
)
|
||||
|
||||
// Apply applies a Patch (list of diffs) to a graph. It fulfills the
// following contract:
//   Given 2 Noms graphs: a1 and a2:
//   ApplyPatch(a1, Diff(a1, a2)) == a2
// This is useful for IncrementalUpdate() and possibly other problems. See
// updater.go for more information.
//
// This function uses a patchStack to maintain state of the graph as it cycles
// through the diffs in a patch, applying them to 'root' one by one. Because the
// Difference objects in the patch can be sorted according to their path, each
// one is applied in order. When done in combination with the stack, this enables
// all Differences that change a particular node to be applied to that node
// before it gets assigned back to its parent.
func Apply(root types.Value, patch Patch) types.Value {
	var lastPath types.Path
	stack := patchStack{}
	// Sorting groups diffs that share a path prefix next to each other, so
	// each ancestor node only has to be pushed/popped once.
	sort.Sort(patch)

	// Push the element on the stack that corresponds to the root
	// node.
	stack.push(nil, nil, types.DiffChangeModified, root, nil, nil)

	for _, dif := range patch {
		// get the path where this dif needs to be applied
		p := dif.Path

		// idx will hold the index of the last common element between p and
		// lastPath (p from the last iteration).
		var idx int

		// p can be identical to lastPath in certain cases. For example, when
		// one item gets removed from a list at the same place another item
		// is added to it. In this case, we need to pop the last operation off
		// the stack early and set idx to len(p) - 1.
		// Otherwise, if the paths are different we can call commonPrefixCount()
		if len(p) > 0 && p.Equals(lastPath) {
			stack.pop()
			idx = len(p) - 1
		} else {
			idx = commonPrefixCount(lastPath, p)
		}
		lastPath = p

		// if the stack has elements on it leftover from the last iteration. Pop
		// those elements until the stack only has values in it that are
		// referenced by this p. Popping an element on the stack, folds that
		// value into its parent.
		for idx < stack.Len()-1 {
			stack.pop()
		}

		// tail is the part of the current path that has not yet been pushed
		// onto the stack. Iterate over those pathParts and push those values
		// onto the stack.
		tail := p[idx:]
		for i, pp := range tail {
			top := stack.top()
			parent := top.newestValue()
			oldValue := pp.Resolve(parent)
			var newValue types.Value
			if i == len(tail)-1 { // last pathPart in this path
				// The resolved value is the node being kept/updated; the
				// diff's OldValue is what is being replaced.
				newValue = oldValue
				oldValue = dif.OldValue
			}
			// Any intermediate elements on the stack will have a changeType
			// of modified. Leaf elements will be updated below to reflect the
			// actual changeType.
			stack.push(p, pp, types.DiffChangeModified, oldValue, newValue, dif.NewKeyValue)
		}

		// Update the top element in the stack with changeType from the dif and
		// the NewValue from the diff
		se := stack.top()
		se.newValue = dif.NewValue
		se.changeType = dif.ChangeType
	}

	// We're done applying diffs to the graph. Pop any elements left on the
	// stack and return the new root.
	var newRoot stackElem
	for stack.Len() > 0 {
		newRoot = stack.pop()
	}
	return newRoot.newValue
}
|
||||
|
||||
// updateNode handles the actual update of a node. It uses 'top.pathPart' to get
// the information that it needs to update 'parent' with 'top.newValue'.
// 'top.oldValue' is also used so that Sets can be updated correctly. This
// function is used by the patchStack pop() function to merge values into a new
// graph. It returns the updated parent value.
func (stack *patchStack) updateNode(top *stackElem, parent types.Value) types.Value {
	d.PanicIfTrue(parent == nil)
	switch part := top.pathPart.(type) {
	case types.FieldPath:
		// Struct field: add/modify both become Set; remove becomes Delete.
		switch top.changeType {
		case types.DiffChangeAdded:
			return parent.(types.Struct).Set(part.Name, top.newValue)
		case types.DiffChangeRemoved:
			return parent.(types.Struct).Delete(part.Name)
		case types.DiffChangeModified:
			return parent.(types.Struct).Set(part.Name, top.newValue)
		}
	case types.IndexPath:
		switch el := parent.(type) {
		case types.List:
			// List indexes in the patch refer to positions in the original
			// list; earlier inserts/removes on the same list shift later
			// positions, so adjust by the running offset.
			idx := uint64(part.Index.(types.Number))
			offset := stack.adjustIndexOffset(top.path, top.changeType)
			realIdx := idx + uint64(offset)
			var nv types.Value
			switch top.changeType {
			case types.DiffChangeAdded:
				if realIdx > el.Len() {
					nv = el.Append(top.newValue)
				} else {
					nv = el.Insert(realIdx, top.newValue)
				}
			case types.DiffChangeRemoved:
				nv = el.RemoveAt(realIdx)
			case types.DiffChangeModified:
				nv = el.Set(realIdx, top.newValue)
			}
			return nv
		case types.Map:
			switch top.changeType {
			case types.DiffChangeAdded:
				return el.Set(part.Index, top.newValue)
			case types.DiffChangeRemoved:
				return el.Remove(part.Index)
			case types.DiffChangeModified:
				if part.IntoKey {
					// The key itself changed: re-resolve the current value,
					// drop the old entry, and re-insert under the new key.
					newPart := types.IndexPath{Index: part.Index}
					ov := newPart.Resolve(parent)
					return el.Remove(part.Index).Set(top.newValue, ov)
				}
				return el.Set(part.Index, top.newValue)
			}
		case types.Set:
			// Sets have no in-place modify: remove the old member (if any)
			// and insert the new one (if any).
			if top.oldValue != nil {
				el = el.Remove(top.oldValue)
			}
			if top.newValue != nil {
				el = el.Insert(top.newValue)
			}
			return el
		}
	case types.HashIndexPath:
		switch el := parent.(type) {
		case types.Set:
			switch top.changeType {
			case types.DiffChangeAdded:
				return el.Insert(top.newValue)
			case types.DiffChangeRemoved:
				return el.Remove(top.oldValue)
			case types.DiffChangeModified:
				return el.Remove(top.oldValue).Insert(top.newValue)
			}
		case types.Map:
			// Resolve the (non-primitive) key currently stored under this hash.
			keyPart := types.HashIndexPath{Hash: part.Hash, IntoKey: true}
			k := keyPart.Resolve(parent)
			switch top.changeType {
			case types.DiffChangeAdded:
				// No existing entry for this hash: the key comes from the
				// diff's NewKeyValue (shadows the resolved k, which is nil).
				k := top.newKeyValue
				return el.Set(k, top.newValue)
			case types.DiffChangeRemoved:
				return el.Remove(k)
			case types.DiffChangeModified:
				if part.IntoKey {
					// Key changed: keep the value, swap the key.
					v := el.Get(k)
					return el.Remove(k).Set(top.newValue, v)
				}
				return el.Set(k, top.newValue)
			}
		}
	}
	panic(fmt.Sprintf("unreachable, pp.(type): %T", top.pathPart))
}
|
||||
|
||||
// Returns the count of the number of PathParts that two paths have in a common
|
||||
// prefix. The paths '.field1' and '.field2' have a 0 length common prefix.
|
||||
// Todo: move to types.Path?
|
||||
func commonPrefixCount(p1, p2 types.Path) int {
|
||||
cnt := 0
|
||||
|
||||
for i, pp1 := range p1 {
|
||||
var pp2 types.PathPart
|
||||
if i < len(p2) {
|
||||
pp2 = p2[i]
|
||||
}
|
||||
if pp1 != pp2 {
|
||||
return cnt
|
||||
}
|
||||
cnt += 1
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
|
||||
// stackElem is one frame of a patchStack: the state of a single node on the
// path from the root down to the value currently being patched.
type stackElem struct {
	path        types.Path           // full path from the root to this value
	pathPart    types.PathPart       // from parent Value to this Value
	changeType  types.DiffChangeType // how this node is being changed
	oldValue    types.Value          // can be nil if newValue is not nil
	newValue    types.Value          // can be nil if oldValue is not nil
	newKeyValue types.Value          // replacement key for maps keyed by non-primitive values
}
|
||||
|
||||
// newestValue returns newValue if not nil, otherwise oldValue. This is useful
|
||||
// when merging. Elements on the stack were 'push'ed there with the oldValue.
|
||||
// newValue may have been set when a value was 'pop'ed above it. This method
|
||||
// returns the last value that has been set.
|
||||
func (se stackElem) newestValue() types.Value {
|
||||
if se.newValue != nil {
|
||||
return se.newValue
|
||||
}
|
||||
return se.oldValue
|
||||
}
|
||||
// patchStack tracks the chain of ancestor values while a patch is applied,
// plus the bookkeeping adjustIndexOffset needs to shift list indexes.
type patchStack struct {
	vals     []stackElem
	lastPath types.Path // parent path of the last list element processed
	addCnt   int        // count of adds applied to the current list
	rmCnt    int        // count of removes applied to the current list
}
|
||||
|
||||
func (stack *patchStack) push(p types.Path, pp types.PathPart, changeType types.DiffChangeType, oldValue, newValue, newKeyValue types.Value) {
|
||||
stack.vals = append(stack.vals, stackElem{path: p, pathPart: pp, changeType: changeType, oldValue: oldValue, newValue: newValue, newKeyValue: newKeyValue})
|
||||
}
|
||||
|
||||
func (stack *patchStack) top() *stackElem {
|
||||
return &stack.vals[len(stack.vals)-1]
|
||||
}
|
||||
|
||||
// pop applies the change to the graph. When an element is 'pop'ed from the
// stack, this function uses its pathPart to merge its value into its parent
// (the new top of the stack). The popped frame is returned so the caller can
// read the final root value when the stack empties.
func (stack *patchStack) pop() stackElem {
	top := stack.top()
	stack.vals = stack.vals[:len(stack.vals)-1]
	if stack.Len() > 0 {
		// Fold the popped value into its parent.
		newTop := stack.top()
		parent := newTop.newestValue()
		newTop.newValue = stack.updateNode(top, parent)
	}
	return *top
}
|
||||
|
||||
// Len returns the number of frames currently on the stack.
func (stack *patchStack) Len() int {
	return len(stack.vals)
}
|
||||
|
||||
// adjustIndexOffset returns an offset that needs to be added to list indexes
|
||||
// when applying diffs to lists. Diffs are applied to lists beginning at the 0th
|
||||
// element. Changes to the list mean that subsequent changes to the same list
|
||||
// have to be adjusted accordingly. The stack keeps state for each list as it's
|
||||
// processed so updateNode() can get the correct index.
|
||||
// Whenever a list is encountered, diffs consist of add & remove operations. The
|
||||
// offset is calculated by keeping a count of each add & remove. Due to the way
|
||||
// way diffs are calculated, no offset is ever needed for 'add' operations. The
|
||||
// offset for 'remove' operations are calculated as:
|
||||
// stack.addCnt - stack.rmCnt
|
||||
func (stack *patchStack) adjustIndexOffset(p types.Path, changeType types.DiffChangeType) (res int) {
|
||||
parentPath := p[:len(p)-1]
|
||||
|
||||
// parentPath is different than the last parentPath so reset counters
|
||||
if stack.lastPath == nil || !stack.lastPath.Equals(parentPath) {
|
||||
stack.lastPath = parentPath
|
||||
stack.addCnt = 0
|
||||
stack.rmCnt = 0
|
||||
}
|
||||
|
||||
// offset for 'Add' operations are always 0, 'Remove' ops offset are
|
||||
// calculated here
|
||||
if changeType == types.DiffChangeRemoved {
|
||||
res = stack.addCnt - stack.rmCnt
|
||||
}
|
||||
|
||||
// Bump up the appropriate cnt for this operation.
|
||||
switch changeType {
|
||||
case types.DiffChangeAdded:
|
||||
stack.addCnt += 1
|
||||
case types.DiffChangeRemoved:
|
||||
stack.rmCnt += 1
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -0,0 +1,351 @@
|
||||
// Copyright 2016 Attic Labs, Inc. All rights reserved.
|
||||
// Licensed under the Apache License, version 2.0:
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package diff
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/attic-labs/noms/go/d"
|
||||
"github.com/attic-labs/noms/go/marshal"
|
||||
"github.com/attic-labs/noms/go/types"
|
||||
"github.com/attic-labs/testify/assert"
|
||||
)
|
||||
|
||||
// TestCommonPrefixCount feeds a sorted sequence of realistic paths through
// commonPrefixCount, comparing each path with its predecessor and checking
// the expected shared-prefix length.
func TestCommonPrefixCount(t *testing.T) {
	assert := assert.New(t)

	// Each case is {path, expected prefix count vs the previous path}.
	testCases := [][]interface{}{
		{".value[#94a2oa20oka0jdv5lha03vuvvumul1vb].sizes[#316j9oc39b09fbc2qf3klenm6p1o1d7h]", 0},
		{".value[#94a2oa20oka0jdv5lha03vuvvumul1vb].sizes[#77eavttned7llu1pkvhaei9a9qgcagir]", 3},
		{".value[#94a2oa20oka0jdv5lha03vuvvumul1vb].sizes[#hboaq9581drq4g9jf62d3s06al3us49s]", 3},
		{".value[#94a2oa20oka0jdv5lha03vuvvumul1vb].sizes[#l0hpa7sbr7qutrcfn5173kar4j2847m1]", 3},
		{".value[#9vj5m3049mav94bttcujhgfdfqcavsbn].sizes[#33f6tb4h8agh57s2bqlmi9vbhlkbtmct]", 1},
		{".value[#9vj5m3049mav94bttcujhgfdfqcavsbn].sizes[#a43ne9a8kotcqph4up5pqqdmr1e1qcsl]", 3},
		{".value[#9vj5m3049mav94bttcujhgfdfqcavsbn].sizes[#ppqg6pem2sb64h2i2ptnh8ckj8gogj9h]", 3},
		{".value[#9vj5m3049mav94bttcujhgfdfqcavsbn].sizes[#s7r2vpnqlk20sd72mg8ijerg9cmauaqo]", 3},
		{".value[#bpspmmlc41pk0r144a7682oah0tmge1e].sizes[#9vuc1gg3c3eude5v3j5deqopjsobe3no]", 1},
		{".value[#bpspmmlc41pk0r144a7682oah0tmge1e].sizes[#qo3gfdsf14v3dh0oer82vn1bg4o8nlsc]", 3},
		{".value[#bpspmmlc41pk0r144a7682oah0tmge1e].sizes[#rlidki5ipbjdofsm2rq3a66v908m5fpl]", 3},
		{".value[#bpspmmlc41pk0r144a7682oah0tmge1e].sizes[#st1n96rh89c2vgo090dt9lknd5ip4kck]", 3},
		{".value[#hjh5hpn55591k0gjvgckc14erli968ao].sizes[#267889uv3mtih6fij3fhio2jiqtl6nho]", 1},
		{".value[#hjh5hpn55591k0gjvgckc14erli968ao].sizes[#7ncb7guoip9e400bm2lcvr0dda29o9jn]", 3},
		{".value[#hjh5hpn55591k0gjvgckc14erli968ao].sizes[#afscb0on7rt8bq6eutup8juusmid7i96]", 3},
		{".value[#hjh5hpn55591k0gjvgckc14erli968ao].sizes[#drqe4lr0vdfdtmvejsjun1l3mfv6ums5]", 3},
	}

	var lastPath types.Path

	for i, tc := range testCases {
		path, expected := tc[0].(string), tc[1].(int)
		p, err := types.ParsePath(path)
		assert.NoError(err)
		assert.Equal(expected, commonPrefixCount(lastPath, p), "failed for paths[%d]: %s", i, path)
		lastPath = p
	}
}
|
||||
|
||||
// testFunc extracts, from an updated parent value, the piece a test wants to
// inspect.
type testFunc func(parent types.Value) types.Value

// testKey is a non-primitive (struct) map key used to exercise hash-indexed
// paths.
type testKey struct {
	X, Y int
}
|
||||
|
||||
var (
	// vm caches the shared test values built lazily by testValues().
	vm map[string]types.Value
)
|
||||
|
||||
func vfk(keys ...string) []types.Value {
|
||||
var values []types.Value
|
||||
for _, k := range keys {
|
||||
values = append(values, vm[k])
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
func testValues() map[string]types.Value {
|
||||
if vm == nil {
|
||||
vm = map[string]types.Value{
|
||||
"k1": types.String("k1"),
|
||||
"k2": types.String("k2"),
|
||||
"k3": types.String("k3"),
|
||||
"s1": types.String("string1"),
|
||||
"s2": types.String("string2"),
|
||||
"s3": types.String("string3"),
|
||||
"s4": types.String("string4"),
|
||||
"n1": types.Number(1),
|
||||
"n2": types.Number(2),
|
||||
"n3": types.Number(3.3),
|
||||
"n4": types.Number(4.4),
|
||||
"b1": mustMarshal(true),
|
||||
"b2": mustMarshal(false),
|
||||
"l1": mustMarshal([]string{}),
|
||||
"l2": mustMarshal([]string{"one", "two", "three", "four"}),
|
||||
"l3": mustMarshal([]string{"two", "three", "four", "five"}),
|
||||
"l4": mustMarshal([]string{"two", "three", "four"}),
|
||||
"l5": mustMarshal([]string{"one", "two", "three", "four", "five"}),
|
||||
"l6": mustMarshal([]string{"one", "four"}),
|
||||
"struct1": types.NewStruct("test1", types.StructData{"f1": types.Number(1), "f2": types.Number(2)}),
|
||||
"struct2": types.NewStruct("test1", types.StructData{"f1": types.Number(11111), "f2": types.Number(2)}),
|
||||
"struct3": types.NewStruct("test1", types.StructData{"f1": types.Number(1), "f2": types.Number(2), "f3": types.Number(3)}),
|
||||
"struct4": types.NewStruct("test1", types.StructData{"f2": types.Number(2)}),
|
||||
"m1": mustMarshal(map[string]int{}),
|
||||
"m2": mustMarshal(map[string]int{"k1": 1, "k2": 2, "k3": 3}),
|
||||
"m3": mustMarshal(map[string]int{"k2": 2, "k3": 3, "k4": 4}),
|
||||
"m4": mustMarshal(map[string]int{"k1": 1, "k3": 3}),
|
||||
"m5": mustMarshal(map[string]int{"k1": 1, "k2": 2222, "k3": 3}),
|
||||
"ms1": mustMarshal(map[testKey]int{{1, 1}: 1, {2, 2}: 2, {3, 3}: 3}),
|
||||
"ms2": mustMarshal(map[testKey]int{{1, 1}: 1, {4, 4}: 4, {5, 5}: 5}),
|
||||
}
|
||||
|
||||
vm["mh1"] = types.NewMap(vfk("k1", "struct1", "k2", "l1")...)
|
||||
vm["mh2"] = types.NewMap(vfk("k1", "n1", "k2", "l2", "k3", "l3")...)
|
||||
vm["set1"] = types.NewSet()
|
||||
vm["set2"] = types.NewSet(vfk("s1", "s2")...)
|
||||
vm["set3"] = types.NewSet(vfk("s1", "s2", "s3")...)
|
||||
vm["set1"] = types.NewSet(vfk("s2")...)
|
||||
vm["seth1"] = types.NewSet(vfk("struct1", "struct2", "struct3")...)
|
||||
vm["seth2"] = types.NewSet(vfk("struct2", "struct3")...)
|
||||
vm["setj3"] = types.NewSet(vfk("struct1")...)
|
||||
vm["mk1"] = types.NewMap(vfk("struct1", "s1", "struct2", "s2")...)
|
||||
vm["mk2"] = types.NewMap(vfk("struct1", "s3", "struct4", "s4")...)
|
||||
}
|
||||
return vm
|
||||
}
|
||||
|
||||
func getPatch(g1, g2 types.Value) Patch {
|
||||
dChan := make(chan Difference)
|
||||
sChan := make(chan struct{})
|
||||
go func() {
|
||||
Diff(g1, g2, dChan, sChan, true)
|
||||
close(dChan)
|
||||
}()
|
||||
|
||||
patch := Patch{}
|
||||
for dif := range dChan {
|
||||
patch = append(patch, dif)
|
||||
}
|
||||
return patch
|
||||
}
|
||||
|
||||
func checkApplyPatch(assert *assert.Assertions, g1, expectedG2 types.Value, k1, k2 string) {
|
||||
patch := getPatch(g1, expectedG2)
|
||||
g2 := Apply(g1, patch)
|
||||
assert.True(expectedG2.Equals(g2), "failed to apply diffs for k1: %s and k2: %s", k1, k2)
|
||||
}
|
||||
|
||||
func TestPatches(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
cnt := 0
|
||||
for k1, g1 := range testValues() {
|
||||
for k2, expectedG2 := range testValues() {
|
||||
if k1 != k2 {
|
||||
cnt++
|
||||
checkApplyPatch(assert, g1, expectedG2, k1, k2)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestNestedLists checks that patches apply correctly when the changed values
// are themselves lists nested inside an outer list (including a length change
// of the outer list).
func TestNestedLists(t *testing.T) {
	assert := assert.New(t)

	ol1 := mustMarshal([]string{"one", "two", "three", "four"})
	nl1 := mustMarshal([]string{"two", "three"})
	ol2 := mustMarshal([]int{2, 3})
	nl2 := mustMarshal([]int{1, 2, 3, 4})
	nl3 := mustMarshal([]bool{true, false, true})
	g1 := types.NewList(ol1, ol2)
	g2 := types.NewList(nl1, nl2, nl3)
	checkApplyPatch(assert, g1, g2, "g1", "g2")
}
|
||||
|
||||
// TestUpdateNode exercises patchStack.updateNode directly for each parent
// container type (struct field, list index, map index, hash-indexed map, set
// by index, set by hash), always with a DiffChangeModified change.
func TestUpdateNode(t *testing.T) {
	assert := assert.New(t)

	vs := types.NewTestValueStore()
	defer vs.Close()

	// doTest builds a single-frame stack element for pp, applies it to
	// parent, then uses f to pull the updated piece out and compare to exp.
	doTest := func(pp types.PathPart, parent, ov, nv, exp types.Value, f testFunc) {
		stack := &patchStack{}
		se := &stackElem{path: []types.PathPart{pp}, pathPart: pp, changeType: types.DiffChangeModified, oldValue: ov, newValue: nv}
		updated := stack.updateNode(se, parent)
		testVal := f(updated)
		assert.True(exp.Equals(testVal), "%s != %s", nv, testVal)
	}

	var pp types.PathPart
	oldVal := types.String("Yo")
	newVal := types.String("YooHoo")

	// Struct field update.
	s1 := types.NewStruct("TestStruct", types.StructData{"f1": types.Number(1), "f2": oldVal})
	pp = types.FieldPath{Name: "f2"}
	doTest(pp, s1, oldVal, newVal, newVal, func(parent types.Value) types.Value {
		return parent.(types.Struct).Get("f2")
	})

	// List element update by index.
	l1 := types.NewList(types.String("one"), oldVal, types.String("three"))
	pp = types.IndexPath{Index: types.Number(1)}
	doTest(pp, l1, oldVal, newVal, newVal, func(parent types.Value) types.Value {
		return parent.(types.List).Get(1)
	})

	// Map value update by primitive key.
	m1 := types.NewMap(types.String("k1"), types.Number(1), types.String("k2"), oldVal)
	pp = types.IndexPath{Index: types.String("k2")}
	doTest(pp, m1, oldVal, newVal, newVal, func(parent types.Value) types.Value {
		return parent.(types.Map).Get(types.String("k2"))
	})

	// Map value update addressed by the hash of a struct key.
	k1 := types.NewStruct("Sizes", types.StructData{"height": types.Number(200), "width": types.Number(300)})
	vs.WriteValue(k1)
	m1 = types.NewMap(k1, oldVal)
	pp = types.HashIndexPath{Hash: k1.Hash()}
	doTest(pp, m1, oldVal, newVal, newVal, func(parent types.Value) types.Value {
		return parent.(types.Map).Get(k1)
	})

	// Set member replacement addressed by index path.
	set1 := types.NewSet(oldVal, k1)
	pp = types.IndexPath{Index: oldVal}
	exp := types.NewSet(newVal, k1)
	doTest(pp, set1, oldVal, newVal, exp, func(parent types.Value) types.Value {
		return parent
	})

	// Set member replacement addressed by hash.
	k2 := types.NewStruct("Sizes", types.StructData{"height": types.Number(300), "width": types.Number(500)})
	set1 = types.NewSet(oldVal, k1)
	pp = types.HashIndexPath{Hash: k1.Hash()}
	exp = types.NewSet(oldVal, k2)
	doTest(pp, set1, k1, k2, exp, func(parent types.Value) types.Value {
		return parent
	})
}
|
||||
|
||||
func checkApplyDiffs(a *assert.Assertions, n1, n2 types.Value, leftRight bool) {
|
||||
dChan := make(chan Difference)
|
||||
sChan := make(chan struct{})
|
||||
go func() {
|
||||
Diff(n1, n2, dChan, sChan, leftRight)
|
||||
close(dChan)
|
||||
}()
|
||||
|
||||
difs := Patch{}
|
||||
for dif := range dChan {
|
||||
difs = append(difs, dif)
|
||||
}
|
||||
|
||||
res := Apply(n1, difs)
|
||||
a.True(n2.Equals(res))
|
||||
}
|
||||
|
||||
func tryApplyDiff(a *assert.Assertions, a1, a2 interface{}) {
|
||||
n1 := mustMarshal(a1)
|
||||
n2 := mustMarshal(a2)
|
||||
|
||||
checkApplyDiffs(a, n1, n2, true)
|
||||
checkApplyDiffs(a, n1, n2, false)
|
||||
checkApplyDiffs(a, n2, n1, true)
|
||||
checkApplyDiffs(a, n2, n1, false)
|
||||
}
|
||||
|
||||
// TestUpdateList covers the list-index-offset logic in apply: inserts,
// appends, interleaved edits, deletes, and replacements at both ends of a
// list, each checked in both directions and with both diff strategies.
func TestUpdateList(t *testing.T) {
	a := assert.New(t)

	// insert at beginning
	a1 := []interface{}{"five", "ten", "fifteen"}
	a2 := []interface{}{"one", "two", "three", "five", "ten", "fifteen"}
	tryApplyDiff(a, a1, a2)

	// append at end
	a1 = []interface{}{"five", "ten", "fifteen"}
	a2 = []interface{}{"five", "ten", "fifteen", "twenty", "twenty-five"}
	tryApplyDiff(a, a1, a2)

	// insert interleaved
	a1 = []interface{}{"one", "three", "five", "seven"}
	a2 = []interface{}{"one", "two", "three", "four", "five", "six", "seven"}
	tryApplyDiff(a, a1, a2)

	// delete from beginning and append to end
	a1 = []interface{}{"one", "two", "three", "four", "five"}
	a2 = []interface{}{"four", "five", "six", "seven"}
	tryApplyDiff(a, a1, a2)

	// replace entries at beginning
	a1 = []interface{}{"one", "two", "three", "four", "five"}
	a2 = []interface{}{"3.5", "four", "five"}
	tryApplyDiff(a, a1, a2)

	// replace entries at end
	a1 = []interface{}{"one", "two", "three"}
	a2 = []interface{}{"one", "four"}
	tryApplyDiff(a, a1, a2)

	// insert at beginning, replace at end
	a1 = []interface{}{"five", "ten", "fifteen"}
	a2 = []interface{}{"one", "two", "five", "eight", "eleven", "sixteen", "twenty"}
	tryApplyDiff(a, a1, a2)

	// remove everything
	a1 = []interface{}{"five", "ten", "fifteen"}
	a2 = []interface{}{}
	tryApplyDiff(a, a1, a2)
}
|
||||
|
||||
// TestUpdateMap covers map patches: mixed insert/delete/replace, and deleting
// every entry, each checked in both directions and with both diff strategies.
func TestUpdateMap(t *testing.T) {
	a := assert.New(t)

	// insertions, deletions, and replacements
	a1 := map[string]int{"five": 5, "ten": 10, "fifteen": 15, "twenty": 20}
	a2 := map[string]int{"one": 1, "two": 2, "three": 3, "five": 5, "ten": 10, "fifteen": 15, "twenty": 2020}
	tryApplyDiff(a, a1, a2)

	// delete everything
	a1 = map[string]int{"five": 5, "ten": 10, "fifteen": 15, "twenty": 20}
	a2 = map[string]int{}
	tryApplyDiff(a, a1, a2)
}
|
||||
|
||||
// TestUpdateStruct covers struct patches: field modification (including a
// nested list field) and field addition. Only forward application is checked
// because struct field addition/removal is asymmetric.
func TestUpdateStruct(t *testing.T) {
	a := assert.New(t)

	a1 := types.NewStruct("tStruct", types.StructData{
		"f1": types.Number(1),
		"f2": types.String("two"),
		"f3": mustMarshal([]string{"one", "two", "three"}),
	})
	// All three fields modified.
	a2 := types.NewStruct("tStruct", types.StructData{
		"f1": types.Number(2),
		"f2": types.String("twotwo"),
		"f3": mustMarshal([]interface{}{0, "one", 1, "two", 2, "three", 3}),
	})
	checkApplyDiffs(a, a1, a2, true)
	checkApplyDiffs(a, a1, a2, false)

	// Field f4 added, f1/f3 modified, f2 unchanged.
	a2 = types.NewStruct("tStruct", types.StructData{
		"f1": types.Number(2),
		"f2": types.String("two"),
		"f3": mustMarshal([]interface{}{0, "one", 1, "two", 2, "three", 3}),
		"f4": types.Bool(true),
	})
	checkApplyDiffs(a, a1, a2, true)
	checkApplyDiffs(a, a1, a2, false)
}
|
||||
|
||||
// TestUpdateSet covers set patches with mixed member types (number, string,
// nested list), in both directions and with both diff strategies.
func TestUpdateSet(t *testing.T) {
	a := assert.New(t)

	a1 := types.NewSet(types.Number(1), types.String("two"), mustMarshal([]string{"one", "two", "three"}))
	a2 := types.NewSet(types.Number(3), types.String("three"), mustMarshal([]string{"one", "two", "three", "four"}))

	checkApplyDiffs(a, a1, a2, true)
	checkApplyDiffs(a, a1, a2, false)
	checkApplyDiffs(a, a2, a1, true)
	checkApplyDiffs(a, a2, a1, false)
}
|
||||
|
||||
func mustMarshal(v interface{}) types.Value {
|
||||
v1, err := marshal.Marshal(v)
|
||||
d.Chk.NoError(err)
|
||||
return v1
|
||||
}
|
||||
+16
-3
@@ -24,6 +24,13 @@ type Difference struct {
|
||||
OldValue types.Value
|
||||
// NewValue is Value after the change, can be nil if Value was removed
|
||||
NewValue types.Value
|
||||
// NewKeyValue is used for when elements are added to diffs with a
|
||||
// non-primitive key. The new key must available when the map gets updated.
|
||||
NewKeyValue types.Value
|
||||
}
|
||||
|
||||
func (dif Difference) IsEmpty() bool {
|
||||
return dif.Path == nil && dif.OldValue == nil && dif.NewValue == nil
|
||||
}
|
||||
|
||||
// differ is used internally to hold information necessary for diffing two graphs.
|
||||
@@ -116,7 +123,7 @@ func (d differ) diffLists(p types.Path, v1, v2 types.List) (stop bool) {
|
||||
stop = d.diff(append(p, types.NewIndexPath(idx)), lastEl, newEl)
|
||||
} else {
|
||||
p1 := p.Append(types.NewIndexPath(types.Number(splice.SpAt + i)))
|
||||
dif := Difference{p1, types.DiffChangeModified, v1.Get(splice.SpAt + i), v2.Get(splice.SpFrom + i)}
|
||||
dif := Difference{p1, types.DiffChangeModified, v1.Get(splice.SpAt + i), v2.Get(splice.SpFrom + i), nil}
|
||||
stop = !d.sendDiff(dif)
|
||||
}
|
||||
}
|
||||
@@ -147,7 +154,13 @@ func (d differ) diffLists(p types.Path, v1, v2 types.List) (stop bool) {
|
||||
|
||||
func (d differ) diffMaps(p types.Path, v1, v2 types.Map) bool {
|
||||
return d.diffOrdered(p,
|
||||
func(v types.Value) types.PathPart { return types.NewIndexPath(v) },
|
||||
func(v types.Value) types.PathPart {
|
||||
if types.ValueCanBePathIndex(v) {
|
||||
return types.NewIndexPath(v)
|
||||
} else {
|
||||
return types.NewHashIndexPath(v.Hash())
|
||||
}
|
||||
},
|
||||
func(cc chan<- types.ValueChanged, sc <-chan struct{}) {
|
||||
if d.leftRight {
|
||||
v2.DiffLeftRight(v1, cc, sc)
|
||||
@@ -216,7 +229,7 @@ func (d differ) diffOrdered(p types.Path, ppf pathPartFunc, df diffFunc, kf, v1,
|
||||
|
||||
switch change.ChangeType {
|
||||
case types.DiffChangeAdded:
|
||||
dif := Difference{Path: p1, ChangeType: types.DiffChangeAdded, OldValue: nil, NewValue: v2(change.V)}
|
||||
dif := Difference{Path: p1, ChangeType: types.DiffChangeAdded, OldValue: nil, NewValue: v2(change.V), NewKeyValue: k}
|
||||
stop = !d.sendDiff(dif)
|
||||
case types.DiffChangeRemoved:
|
||||
dif := Difference{Path: p1, ChangeType: types.DiffChangeRemoved, OldValue: v1(change.V), NewValue: nil}
|
||||
|
||||
@@ -0,0 +1,129 @@
|
||||
// Copyright 2016 Attic Labs, Inc. All rights reserved.
|
||||
// Licensed under the Apache License, version 2.0:
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package diff
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/attic-labs/noms/go/types"
|
||||
)
|
||||
|
||||
// Patch is a list of difference objects that can be applied to a graph
// using ApplyPatch(). Patch implements sort.Interface with an order that is
// useful for applying the patch in an efficient way (see Less).
type Patch []Difference
||||
|
||||
func (r Patch) Swap(i, j int) {
|
||||
r[i], r[j] = r[j], r[i]
|
||||
}
|
||||
|
||||
// Len returns the number of Differences in the patch (sort.Interface).
func (r Patch) Len() int {
	return len(r)
}
|
||||
|
||||
// vals ranks change types so that, for an identical path, removes sort before
// modifies, which sort before adds — the order Apply needs to process them.
var vals = map[types.DiffChangeType]int{types.DiffChangeRemoved: 0, types.DiffChangeModified: 1, types.DiffChangeAdded: 2}

// Less orders Differences by path, breaking ties on equal paths by change
// type (sort.Interface).
func (r Patch) Less(i, j int) bool {
	if r[i].Path.Equals(r[j].Path) {
		return vals[r[i].ChangeType] < vals[r[j].ChangeType]
	}
	return pathIsLess(r[i].Path, r[j].Path)
}
|
||||
|
||||
// Utility methods on path
|
||||
// TODO: Should these be on types.Path & types.PathPart?
|
||||
func pathIsLess(p1, p2 types.Path) bool {
|
||||
for i, pp1 := range p1 {
|
||||
if len(p2) == i {
|
||||
return false // p1 > p2
|
||||
}
|
||||
switch pathPartCompare(pp1, p2[i]) {
|
||||
case -1:
|
||||
return true // p1 < p2
|
||||
case 1:
|
||||
return false // p1 > p2
|
||||
}
|
||||
}
|
||||
|
||||
return len(p2) > len(p1) // if true p1 < p2, else p1 == p2
|
||||
}
|
||||
|
||||
func fieldPathCompare(pp types.FieldPath, o types.PathPart) int {
|
||||
switch opp := o.(type) {
|
||||
case types.FieldPath:
|
||||
if pp.Name == opp.Name {
|
||||
return 0
|
||||
}
|
||||
if pp.Name < opp.Name {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
case types.IndexPath:
|
||||
return -1
|
||||
case types.HashIndexPath:
|
||||
return -1
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func indexPathCompare(pp types.IndexPath, o types.PathPart) int {
|
||||
switch opp := o.(type) {
|
||||
case types.FieldPath:
|
||||
return 1
|
||||
case types.IndexPath:
|
||||
if pp.Index.Equals(opp.Index) {
|
||||
if pp.IntoKey == opp.IntoKey {
|
||||
return 0
|
||||
}
|
||||
if pp.IntoKey {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
if pp.Index.Less(opp.Index) {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
case types.HashIndexPath:
|
||||
return -1
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func hashIndexPathCompare(pp types.HashIndexPath, o types.PathPart) int {
|
||||
switch opp := o.(type) {
|
||||
case types.FieldPath:
|
||||
return 1
|
||||
case types.IndexPath:
|
||||
return 1
|
||||
case types.HashIndexPath:
|
||||
switch bytes.Compare(pp.Hash.DigestSlice(), opp.Hash.DigestSlice()) {
|
||||
case -1:
|
||||
return -1
|
||||
case 0:
|
||||
if pp.IntoKey == opp.IntoKey {
|
||||
return 0
|
||||
}
|
||||
if pp.IntoKey {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
case 1:
|
||||
return 1
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func pathPartCompare(pp, pp2 types.PathPart) int {
|
||||
switch pp1 := pp.(type) {
|
||||
case types.FieldPath:
|
||||
return fieldPathCompare(pp1, pp2)
|
||||
case types.IndexPath:
|
||||
return indexPathCompare(pp1, pp2)
|
||||
case types.HashIndexPath:
|
||||
return hashIndexPathCompare(pp1, pp2)
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
@@ -0,0 +1,95 @@
|
||||
// Copyright 2016 Attic Labs, Inc. All rights reserved.
|
||||
// Licensed under the Apache License, version 2.0:
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package diff
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/attic-labs/noms/go/types"
|
||||
"github.com/attic-labs/testify/assert"
|
||||
)
|
||||
|
||||
// TestPatchPathPartCompare checks pathPartCompare's total order: each test
// case pair is (smaller, larger), and the comparator must return -1 for the
// pair, 0 for a part against itself, and 1 for the reversed pair.
func TestPatchPathPartCompare(t *testing.T) {
	assert := assert.New(t)

	// One part of each kind, in each of its forms (value and @key).
	fieldPath1 := mustParsePath(assert, `.field1`)[0]
	fieldPath2 := mustParsePath(assert, `.field2`)[0]
	indexPath1 := mustParsePath(assert, `["field1"]`)[0]
	indexPath2 := mustParsePath(assert, `["field2"]`)[0]
	indexPathKey1 := mustParsePath(assert, `["field1"]@key`)[0]
	indexPathKey2 := mustParsePath(assert, `["field2"]@key`)[0]
	hashIndexPath1 := mustParsePath(assert, `[#01234567890123456789012345678901]`)[0]
	hashIndexPath2 := mustParsePath(assert, `[#0123456789abcdef0123456789abcdef]`)[0]
	hashIndexPathKey1 := mustParsePath(assert, `[#01234567890123456789012345678901]`)[0]
	hashIndexPathKey2 := mustParsePath(assert, `[#0123456789abcdef0123456789abcdef]`)[0]

	// Each pair is ordered (smaller, larger).
	testCases := [][]types.PathPart{
		{fieldPath1, fieldPath2},
		{indexPath1, indexPath2},
		{indexPathKey1, indexPathKey2},
		{hashIndexPath1, hashIndexPath2},
		{hashIndexPathKey1, hashIndexPathKey2},
		{fieldPath2, indexPath1},
		{fieldPath2, indexPathKey1},
		{fieldPath2, hashIndexPath1},
		{fieldPath2, hashIndexPathKey1},
		{indexPath2, hashIndexPath1},
		{indexPath2, hashIndexPathKey1},
	}

	for i, tc := range testCases {
		assert.Equal(-1, pathPartCompare(tc[0], tc[1]), "test case %d failed, pp0: %s, pp1: %s", i, tc[0], tc[1])
		assert.Equal(0, pathPartCompare(tc[0], tc[0]), "test case %d failed, pp0: %s, pp1: %s", i, tc[0], tc[1])
		assert.Equal(1, pathPartCompare(tc[1], tc[0]), "test case %d failed, pp0: %s, pp1: %s", i, tc[0], tc[1])
	}
}
|
||||
|
||||
func TestPatchPathIsLess(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
testCases := [][]string{
|
||||
{``, `["field1"]`},
|
||||
{`["field1"]`, `["field1"].f1`},
|
||||
{`["field1"].f1`, `["field1"]["f1"]`},
|
||||
{`["field1"]["f1"]@key`, `["field1"]["f1"]`},
|
||||
{`["field1"]["f1"]`, `["field1"][#01234567890123456789012345678901]`},
|
||||
{`["field1"][#01234567890123456789012345678901]`, `["field1"][#0123456789abcdef0123456789abcdef]`},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
p0 := mustParsePath(assert, tc[0])
|
||||
p1 := mustParsePath(assert, tc[1])
|
||||
assert.True(pathIsLess(p0, p1), "test case %d failed", i)
|
||||
assert.False(pathIsLess(p0, p0), "test case %d failed", i)
|
||||
assert.False(pathIsLess(p1, p0), "test case %d failed", i)
|
||||
}
|
||||
//p := mustParsePath(assert, `#0123456789abcdef0123456789abcdef.value`)
|
||||
//fmt.Printf("p[0]: %s, type: %T\n", p[0], p[0])
|
||||
}
|
||||
|
||||
func TestPatchSort(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
sortedPaths := Patch{
|
||||
{Path: mustParsePath(assert, `["field1"]`)},
|
||||
{Path: mustParsePath(assert, `["field1"].f1`)},
|
||||
{Path: mustParsePath(assert, `["field1"]["f1"]`), ChangeType: types.DiffChangeRemoved},
|
||||
{Path: mustParsePath(assert, `["field1"]["f1"]`), ChangeType: types.DiffChangeModified},
|
||||
{Path: mustParsePath(assert, `["field1"]["f1"]`), ChangeType: types.DiffChangeAdded},
|
||||
{Path: mustParsePath(assert, `["field1"][#01234567890123456789012345678901]`)},
|
||||
{Path: mustParsePath(assert, `["field1"][#0123456789abcdef0123456789abcdef]`)},
|
||||
}
|
||||
|
||||
rand.Perm(len(sortedPaths))
|
||||
shuffledPaths := Patch{}
|
||||
for _, idx := range rand.Perm(len(sortedPaths)) {
|
||||
shuffledPaths = append(shuffledPaths, sortedPaths[idx])
|
||||
}
|
||||
|
||||
sort.Sort(shuffledPaths)
|
||||
assert.Equal(sortedPaths, shuffledPaths)
|
||||
}
|
||||
@@ -0,0 +1,325 @@
|
||||
// Copyright 2016 Attic Labs, Inc. All rights reserved.
|
||||
// Licensed under the Apache License, version 2.0:
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/attic-labs/noms/go/config"
|
||||
"github.com/attic-labs/noms/go/d"
|
||||
"github.com/attic-labs/noms/go/datas"
|
||||
"github.com/attic-labs/noms/go/diff"
|
||||
"github.com/attic-labs/noms/go/marshal"
|
||||
"github.com/attic-labs/noms/go/spec"
|
||||
"github.com/attic-labs/noms/go/types"
|
||||
"github.com/attic-labs/noms/go/util/exit"
|
||||
"github.com/attic-labs/noms/go/util/status"
|
||||
"github.com/attic-labs/noms/go/util/verbose"
|
||||
flag "github.com/juju/gnuflag"
|
||||
)
|
||||
|
||||
var (
	// lastSourcePathFieldName is the commit-meta field that records the
	// pinned source path used to produce an output commit; getLastInRoot
	// reads it back to enable incremental updates.
	lastSourcePathFieldName = "sourcePath"
	// foundInCacheCnt counts resources that were served from the
	// resourceCache instead of being re-downloaded.
	foundInCacheCnt = &counter{}
)
|
||||
|
||||
// main runs the downloader and exits with a failure status if it did not
// complete successfully.
func main() {
	if !download() {
		exit.Fail()
	}
}
|
||||
|
||||
// usage prints the command-line synopsis and flag defaults.
func usage() {
	fmt.Println("usage: downloader [--cache-ds <dsname>] [--concurrency <int>] <in-path> <outdsname>")
	flag.PrintDefaults()
}
|
||||
|
||||
// RemoteResource is the input shape recognized by the downloader: a resource
// identified only by the URL it can be fetched from. Structs of this shape in
// the input graph are replaced by LocalResource structs.
type RemoteResource struct {
	Url string
}
|
||||
|
||||
// LocalResource records the outcome of fetching a RemoteResource: the source
// URL, whether the download succeeded, the error text on failure, and a ref
// to the stored blob on success.
type LocalResource struct {
	Url        string
	Downloaded bool
	Err        string    `noms:",omitempty"`
	BlobRef    types.Ref `noms:",omitempty"`
}
|
||||
|
||||
// download parses flags and arguments, resolves the input path, sets up the
// optional resource cache, runs the (possibly incremental) download, and
// commits the result to the output dataset. It returns true on success; on
// any error it prints a message and returns false.
func download() (win bool) {
	var cacheDsArg = flag.String("cache-ds", "", "name of photo-cache dataset")
	var concurrencyArg = flag.Uint("concurrency", 4, "number of concurrent HTTP calls to retrieve remote resources")
	verbose.RegisterVerboseFlags(flag.CommandLine)
	flag.Usage = usage
	flag.Parse(false)

	if flag.NArg() != 2 {
		fmt.Fprintln(os.Stderr, "error: missing required argument")
		flag.Usage()
		return
	}
	inPath := flag.Arg(0)
	outDsName := flag.Arg(1)

	if *concurrencyArg < 1 {
		fmt.Fprintln(os.Stderr, "error, concurrency cannot be less than 1")
		flag.Usage()
		return
	}

	// Resolve the in-path arg and get the inRoot
	cfg := config.NewResolver()
	db, inRoot, err := cfg.GetPath(inPath)
	if err != nil || inRoot == nil {
		if err == nil {
			err = errors.New("Could not find referenced value.")
		}
		fmt.Fprintf(os.Stderr, "Invalid input path '%s': %s\n", inPath, err)
		return
	}

	// Reject commit values: the caller almost certainly wants the commit's
	// value, reachable by appending '.value'.
	if datas.IsCommitType(inRoot.Type()) {
		fmt.Fprintln(os.Stderr, "Input cannot be a commit. Consider appending '.value' to your in-path argument")
		return
	}

	// In order to pin the path, we need to get the path after running it through
	// the config file processing.
	resolvedPath := cfg.ResolvePathSpec(inPath)
	inSpec, err := spec.ParsePathSpec(resolvedPath)
	d.PanicIfError(err)
	pinnedPath := pinPath(db, inSpec.Path)
	fmt.Println("Resolved in-path:", resolvedPath, "\nPinned path:", pinnedPath)

	// Get the current head of out-ds. If there is one, assume it was created
	// by an earlier run of this program.
	var lastOutCommit types.Value
	if loc, ok := db.GetDataset(outDsName).MaybeHead(); ok {
		lastOutCommit = loc
		if lastOutCommit != nil {
			fmt.Println("Last out commit:", lastOutCommit.Hash())
		}
	}

	// If there was an earlier version of out-ds, then look in the meta info
	// on the commit to see if there is a record of the inRoot used by the
	// previous run. If so, we can do an incremental sync.
	var lastInRoot types.Value
	if lastOutCommit != nil {
		lastInRoot = getLastInRoot(db, lastOutCommit.(types.Struct))
		if lastInRoot != nil {
			fmt.Println("lastInRoot:", lastInRoot.Hash())
		}
	}

	// Get a resourceCache specified by the cache-ds arg
	var cache *resourceCache
	if *cacheDsArg != "" {
		cache, err = getResourceCache(db, *cacheDsArg)
		if err != nil {
			fmt.Println("error: ", err)
			flag.Usage()
			return
		}
	}

	newRoot := downloadPhotos(db, inRoot, lastInRoot, lastOutCommit, cache, *cacheDsArg, *concurrencyArg)

	// Commit latest value for resourceCache
	if cache != nil {
		d.PanicIfError(cache.commit(db, *cacheDsArg))
	}

	// Commit new root, recording the pinned source path in the meta so the
	// next run can sync incrementally.
	meta := newMeta(db, pinnedPath.String())
	outDs := db.GetDataset(outDsName)
	if _, err = db.Commit(outDs, newRoot, datas.CommitOptions{Meta: meta}); err != nil {
		fmt.Fprintf(os.Stderr, "Could not commit: %s\n", err)
		return
	}

	win = true
	return
}
|
||||
|
||||
// downloadPhotos walks inRoot looking for RemoteResource structs and replaces
// each with a LocalResource produced by downloading its URL, using
// IncrementalUpdate so only values changed since lastInRoot are revisited.
// Returns the updated root; if inRoot is unchanged from lastInRoot it returns
// inRoot untouched.
func downloadPhotos(db datas.Database, inRoot, lastInRoot, lastOutCommit types.Value, cache *resourceCache, cacheDsName string, concurrency uint) (newRoot types.Value) {
	// return true whenever we find a RemoteResource
	shouldCnt := &counter{}
	shouldUpdateCb := func(p types.Path, root, parent, v types.Value) (res bool) {
		shouldCnt.Increment()
		return v != nil && v.Type().Kind() == types.StructKind && v.Type().Desc.(types.StructDesc).Name == "RemoteResource"
	}

	updateCnt := &counter{}
	failedCnt := &counter{}
	rwMutex := sync.RWMutex{}

	// Use info from dif to create a new RemoteResource and return it.
	// Also keeps a counter of how many times this has been called and commits
	// the current state of the cache.
	// Todo: remove rwMutex when issue #2792 is resolved
	updateCb := func(dif diff.Difference) diff.Difference {
		// Downloads run under the read lock so they pause while a periodic
		// cache commit (which takes the write lock) is in progress.
		doDownload := func(db datas.Database, url string, cache *resourceCache) LocalResource {
			rwMutex.RLock()
			defer rwMutex.RUnlock()
			return downloadRemoteResource(db, url, cache)
		}

		var remote RemoteResource
		err := marshal.Unmarshal(dif.OldValue, &remote)
		d.PanicIfError(err)

		localResource := doDownload(db, remote.Url, cache)
		if !localResource.Downloaded {
			failedCnt.Increment()
		}
		newValue, err := marshal.Marshal(localResource)
		d.PanicIfError(err)
		dif.NewValue = newValue

		// Periodically persist the cache so progress survives interruption.
		updateCnt.Increment()
		if cache != nil && updateCnt.Cnt()%1000 == 0 {
			rwMutex.Lock()
			defer rwMutex.Unlock()
			err := cache.commit(db, cacheDsName)
			d.PanicIfError(err)
		}

		status.Printf("walked: %d, updated %d, found in cache: %d, errors retrieving: %d", shouldCnt.Cnt(), updateCnt.Cnt(), foundInCacheCnt.Cnt(), failedCnt.Cnt())
		return dif
	}

	if lastInRoot != nil && lastInRoot.Equals(inRoot) {
		// The current inRoot is the same as the last one we worked on, so there
		// is nothing to do. Just return the inRoot so a new commit can be added
		// latest meta data
		fmt.Println("No change since last run, doing nothing")
		return inRoot
	}

	var lastOutRoot types.Value
	if lastOutCommit != nil {
		lastOutRoot = lastOutCommit.(types.Struct).Get("value")
	}

	newRoot = IncrementalUpdate(db, inRoot, lastInRoot, lastOutRoot, shouldUpdateCb, updateCb, concurrency)
	status.Done()
	return
}
|
||||
|
||||
// downloadRemoteResource takes a url and creates a LocalResource by making an
|
||||
// HTTP call to get the resource and storing it locally.
|
||||
func downloadRemoteResource(db datas.Database, url string, cache *resourceCache) LocalResource {
|
||||
errorstring := ""
|
||||
downloaded := true
|
||||
blobRef, err := downloadAndCacheBlob(db, url, cache)
|
||||
if err != nil {
|
||||
errorstring = err.Error()
|
||||
downloaded = false
|
||||
}
|
||||
return LocalResource{Url: url, Downloaded: downloaded, Err: errorstring, BlobRef: blobRef}
|
||||
}
|
||||
|
||||
// downloadAndCacheBlob wraps downloadBlob in a wrapper that first checks the
// cache to see if the blob has already been stored and then adding the blob to
// a persistent cache once it has been retrieved. With a nil cache it degrades
// to a plain downloadBlob call.
func downloadAndCacheBlob(db datas.Database, url string, cache *resourceCache) (types.Ref, error) {
	if cache == nil {
		return downloadBlob(db, url)
	}

	// Cache key is the hash of the URL string, not the URL itself.
	nurl := types.String(url)
	hs := types.String(nurl.Hash().String())
	if blobRef, ok := cache.get(hs); ok {
		foundInCacheCnt.Increment()
		return blobRef, nil
	}
	blobRef, err := downloadBlob(db, url)
	if err != nil {
		return types.Ref{}, err
	}
	cache.set(hs, blobRef)
	return blobRef, nil
}
|
||||
|
||||
// downloadBlob makes the http call to get the resource and store it in a blob
|
||||
func downloadBlob(db datas.Database, url string) (types.Ref, error) {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return types.Ref{}, err
|
||||
}
|
||||
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
err = fmt.Errorf("bad status from http download request, status code: %d, status: %s", resp.StatusCode, resp.Status)
|
||||
return types.Ref{}, err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
blob := types.NewStreamingBlob(db, resp.Body)
|
||||
return types.NewRef(blob), nil
|
||||
}
|
||||
|
||||
// getLastInRoot checks the lastOutPhotos struct to see if it contains a "meta"
// attribute struct that has an InPath field. If so, and if it's able to resolve
// the path, it's returned to be used for incremental update. Returns nil when
// the meta or field is missing, the path is unparsable, or it fails to resolve.
func getLastInRoot(db datas.Database, lastOutPhotos types.Struct) (res types.Value) {
	var metaV types.Value
	var ok bool
	if metaV, ok = lastOutPhotos.MaybeGet("meta"); !ok {
		return
	}

	meta := metaV.(types.Struct)
	var lastInRootSpecV types.Value
	if lastInRootSpecV, ok = meta.MaybeGet(lastSourcePathFieldName); !ok {
		return
	}

	// The field holds a path string written by newMeta on the previous run.
	lastInRootSpec := string(lastInRootSpecV.(types.String))
	absPath, err := spec.NewAbsolutePath(lastInRootSpec)
	if err != nil {
		return
	}
	res = absPath.Resolve(db)
	return
}
|
||||
|
||||
// newMeta builds a commit-meta struct that records 'source' (the pinned
// source path) under lastSourcePathFieldName, for getLastInRoot to read back.
func newMeta(db datas.Database, source string) types.Struct {
	meta, err := spec.CreateCommitMetaStruct(db, "", "", map[string]string{lastSourcePathFieldName: source}, nil)
	d.PanicIfError(err)
	return meta
}
|
||||
|
||||
// pinPath takes an absolute path. If it begins with a dataset, it changes it
// to begin with a hash of the current dataset head. Panics if the named
// dataset has no head. An already-pinned path is returned unchanged.
func pinPath(db datas.Database, absPath spec.AbsolutePath) spec.AbsolutePath {
	h := absPath.Hash
	if h.IsEmpty() {
		// Empty hash means the path is dataset-relative; pin to the head ref.
		r, ok := db.GetDataset(absPath.Dataset).MaybeHeadRef()
		d.PanicIfFalse(ok)
		h = r.TargetHash()
	}
	return spec.AbsolutePath{Hash: h, Path: absPath.Path}
}
|
||||
|
||||
// counter is a mutex-guarded monotonically increasing uint32 counter, safe
// for use from the concurrent download callbacks.
type counter struct {
	cnt   uint32
	mutex sync.Mutex
}
|
||||
|
||||
func (uc *counter) Cnt() uint32 {
|
||||
uc.mutex.Lock()
|
||||
defer uc.mutex.Unlock()
|
||||
return uc.cnt
|
||||
}
|
||||
func (uc *counter) Increment() {
|
||||
uc.mutex.Lock()
|
||||
defer uc.mutex.Unlock()
|
||||
uc.cnt += 1
|
||||
}
|
||||
@@ -0,0 +1,215 @@
|
||||
// Copyright 2016 Attic Labs, Inc. All rights reserved.
|
||||
// Licensed under the Apache License, version 2.0:
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/attic-labs/noms/go/datas"
|
||||
"github.com/attic-labs/noms/go/spec"
|
||||
"github.com/attic-labs/noms/go/types"
|
||||
"github.com/attic-labs/noms/go/util/clienttest"
|
||||
"github.com/attic-labs/testify/assert"
|
||||
"github.com/attic-labs/testify/suite"
|
||||
)
|
||||
|
||||
// TestDownloader runs the downloader integration suite.
func TestDownloader(t *testing.T) {
	suite.Run(t, &testSuite{})
}
|
||||
|
||||
// testSuite embeds ClientTestSuite, which provides a temporary ldb store
// (s.LdbDir) and helpers to run main with captured stdout/stderr.
type testSuite struct {
	clienttest.ClientTestSuite
}
|
||||
|
||||
// TestMain exercises the downloader end-to-end against an in-process HTTP
// server: an initial run, an incremental run, a no-op run, and the error
// paths (bad concurrency, missing args, commit input, bad input path).
func (s testSuite) TestMain() {
	// commitToDb commits v to dsName, opening and closing its own db handle
	// so each step sees a fresh view of the store.
	commitToDb := func(v types.Value, dsName string, dbSpec string) {
		db, err := spec.GetDatabase(dbSpec)
		s.NoError(err)
		defer db.Close()

		ds := db.GetDataset(dsName)
		db.Commit(ds, v, datas.CommitOptions{})
	}

	// testBlobValue asserts that the LocalResource stored under key holds a
	// blob whose contents match what the test server served for 'expected'.
	testBlobValue := func(db datas.Database, m types.Map, key, expected string) {
		k := types.String(key)
		localResource1 := m.Get(k).(types.Struct)
		blob1 := localResource1.Get("blobRef").(types.Ref).TargetValue(db).(types.Blob)
		s.Equal("BlobText, url: "+expected, stringFromBlob(blob1))
	}

	// mustRunTest runs main and checks its final status line.
	mustRunTest := func(args []string, expected string) {
		stdout, _ := s.MustRun(main, args)
		s.Contains(lastLine(stdout), expected)
	}

	// errorTest runs main expecting exit code 1 and the given stderr text.
	errorTest := func(args []string, expected string) {
		_, stderr, recoveredErr := s.Run(main, args)
		exitError, ok := recoveredErr.(clienttest.ExitError)
		if s.True(ok) {
			s.Equal(exitError.Code, 1)
		}
		s.Contains(stderr, expected)
	}

	dbSpecString := spec.CreateDatabaseSpecString("ldb", s.LdbDir)
	// Test server echoes the request path so blob contents are predictable.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "BlobText, url: "+r.URL.Path)
	}))
	defer ts.Close()

	// First run: one resource; the status line counts the map, key and
	// struct walked plus the one update.
	m := map[string]RemoteResource{
		"k1": RemoteResource{ts.URL + "/one"},
	}
	commitToDb(mustMarshal(m), "in-ds", dbSpecString)
	mustRunTest(
		[]string{"--cache-ds", "cache", dbSpecString + "::in-ds.value", "out-ds"},
		"walked: 3, updated 1, found in cache: 0, errors retrieving: 0",
	)

	// Second run is incremental: only the newly added resource is walked.
	m["k2"] = RemoteResource{ts.URL + "/two"}
	commitToDb(mustMarshal(m), "in-ds", dbSpecString)

	mustRunTest(
		[]string{"--cache-ds", "cache", dbSpecString + "::in-ds.value", "out-ds"},
		"walked: 1, updated 1, found in cache: 0, errors retrieving: 0",
	)

	db, v, err := spec.GetPath(dbSpecString + "::out-ds.value")
	s.NoError(err)
	defer db.Close()

	testBlobValue(db, v.(types.Map), "k1", "/one")
	testBlobValue(db, v.(types.Map), "k2", "/two")

	// Third run: input unchanged, so the downloader does nothing.
	mustRunTest(
		[]string{"--cache-ds", "cache", dbSpecString + "::in-ds.value", "out-ds"},
		"No change since last run, doing nothing",
	)

	errorTest(
		[]string{"--cache-ds", "cache", "--concurrency", "0", dbSpecString + "::in-ds.value", "out-ds"},
		"concurrency cannot be less than 1",
	)

	errorTest(
		[]string{dbSpecString + "::in-ds.value"},
		"missing required argument",
	)

	errorTest(
		[]string{"--cache-ds", "cache", dbSpecString + "::in-ds", "out-ds"},
		"Input cannot be a commit.",
	)

	errorTest(
		[]string{"--cache-ds", "cache", dbSpecString + "::not-there.value", "out-ds"},
		"Could not find referenced value",
	)
}
|
||||
|
||||
// TestDownloadBlob verifies downloadBlob stores the HTTP response body as a
// blob and returns a ref that resolves back to the same contents.
func TestDownloadBlob(t *testing.T) {
	assert := assert.New(t)
	db, err := spec.GetDatabase("mem")
	assert.NoError(err)
	defer db.Close()

	// Server echoes the request URL so the blob contents are predictable.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "Hello, client, url: "+r.URL.String())
	}))
	defer ts.Close()

	r, err := downloadBlob(db, ts.URL+"/one")
	assert.NoError(err)

	blob := r.TargetValue(db)
	assert.NotNil(blob)
	assert.Equal("Hello, client, url: /one", stringFromBlob(blob.(types.Blob)))
}
|
||||
|
||||
// TestPinPath verifies pinPath rewrites a dataset-relative path to a
// hash-anchored one, is idempotent on already-pinned paths, and that the
// pinned path still resolves to the original value.
func TestPinPath(t *testing.T) {
	assert := assert.New(t)
	db, err := spec.GetDatabase("mem")
	assert.NoError(err)
	defer db.Close()

	dsName := "testds"
	ds := db.GetDataset(dsName)
	ds, err = db.CommitValue(ds, types.NewMap(
		types.String("k1"), types.String("v1"),
		types.String("k2"), types.String("v2"),
	))
	assert.NoError(err)

	absPath, err := spec.NewAbsolutePath(`testds.value["k1"]`)
	assert.NoError(err)

	// call pin path on AbsolutePath containing dataset. The expected hash is
	// stable because the committed value and mem store are deterministic.
	pinnedPath := pinPath(db, absPath)
	assert.Equal(`#e4ahkeask7na1s3okcp3rmqt89rkv928.value["k1"]`, pinnedPath.String())

	// call pin path on AbsolutePath that is already pinned
	pinnedPath = pinPath(db, pinnedPath)
	assert.Equal(`#e4ahkeask7na1s3okcp3rmqt89rkv928.value["k1"]`, pinnedPath.String())

	assert.True(types.String("v1").Equals(pinnedPath.Resolve(db)))
}
|
||||
|
||||
func TestGetLastInRoot(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
db, err := spec.GetDatabase("mem")
|
||||
assert.NoError(err)
|
||||
defer db.Close()
|
||||
|
||||
k1 := types.String("k1")
|
||||
|
||||
// commit source ds with no sourcePath field
|
||||
sourceM := types.NewMap(k1, types.String("source commit 1"))
|
||||
sourceDs := db.GetDataset("test-source-ds")
|
||||
sourceDs, err = db.Commit(sourceDs, sourceM, datas.CommitOptions{})
|
||||
assert.NoError(err)
|
||||
lastInRoot := getLastInRoot(db, sourceDs.Head())
|
||||
assert.Nil(lastInRoot)
|
||||
|
||||
pinnedPath, err := spec.NewAbsolutePath("test-source-ds.value")
|
||||
assert.NoError(err)
|
||||
|
||||
// commit dest ds with valid sourcePath field on meta
|
||||
destDs := db.GetDataset("test-dest-ds")
|
||||
destM := sourceM.Set(k1, types.String("dest commit 1"))
|
||||
meta := newMeta(db, pinnedPath.String())
|
||||
destDs, err = db.Commit(destDs, destM, datas.CommitOptions{Meta: meta})
|
||||
assert.NoError(err)
|
||||
root := getLastInRoot(db, destDs.Head())
|
||||
assert.True(sourceM.Equals(root))
|
||||
|
||||
// commit with unparsable path
|
||||
destM = destM.Set(k1, types.String("dest commit 2"))
|
||||
meta = newMeta(db, "bad path")
|
||||
destDs, err = db.Commit(destDs, destM, datas.CommitOptions{Meta: meta})
|
||||
assert.NoError(err)
|
||||
root = getLastInRoot(db, destDs.Head())
|
||||
assert.Nil(lastInRoot)
|
||||
|
||||
// commit with unresolveable path
|
||||
meta = newMeta(db, "#12345678901234567890123456789012.value")
|
||||
destDs, err = db.Commit(destDs, destM, datas.CommitOptions{Meta: meta})
|
||||
assert.NoError(err)
|
||||
root = getLastInRoot(db, destDs.Head())
|
||||
}
|
||||
|
||||
// lastLine returns the last non-empty line of output, or "" if every line
// is empty.
func lastLine(output string) string {
	// Trailing empty lines are exactly trailing newlines, so trimming them
	// leaves the last non-empty line as the final segment.
	trimmed := strings.TrimRight(output, "\n")
	if trimmed == "" {
		return ""
	}
	if i := strings.LastIndex(trimmed, "\n"); i >= 0 {
		return trimmed[i+1:]
	}
	return trimmed
}
|
||||
@@ -0,0 +1,92 @@
|
||||
// Copyright 2016 Attic Labs, Inc. All rights reserved.
|
||||
// Licensed under the Apache License, version 2.0:
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/attic-labs/noms/go/datas"
|
||||
"github.com/attic-labs/noms/go/spec"
|
||||
"github.com/attic-labs/noms/go/types"
|
||||
"github.com/attic-labs/noms/go/marshal"
|
||||
)
|
||||
|
||||
// resourceCache is a persistent Map<String, Ref<Blob>> keyed by URL-hash,
// guarded by rwMutex for concurrent use by the download workers.
type resourceCache struct {
	cache   types.Map // gets updated when set is called
	orig    types.Map // original state of the map, commit is a noop if orig==cache
	rwMutex sync.RWMutex
}
|
||||
|
||||
// checkCacheType verifies that c has the noms type Map<String, Ref<Blob>>.
// It returns nil when the shape matches and a descriptive error otherwise.
// The error is preassigned so each failed check can bail with a bare return.
func checkCacheType(c types.Value) (err error) {
	err = errors.New("resourceCache value is not Map<String, Ref<Blob>>")
	var m types.Map

	// Must be a Map at all.
	if err1 := marshal.Unmarshal(c, &m); err1 != nil {
		return
	}
	// Key type must be String.
	keyType := c.Type().Desc.(types.CompoundDesc).ElemTypes[0]
	if keyType.Kind() != types.StringKind {
		return
	}
	// Value type must be Ref<Blob>.
	valueType := c.Type().Desc.(types.CompoundDesc).ElemTypes[1]
	if valueType.Kind() != types.RefKind {
		return
	}
	if valueType.Desc.(types.CompoundDesc).ElemTypes[0].Kind() != types.BlobKind {
		return
	}

	err = nil
	return
}
|
||||
|
||||
// getResourceCache loads the cache map from the head of dataset dsname,
// validating its type; if the dataset has no head it starts with an empty
// map. Returns an error only when an existing head has the wrong type.
func getResourceCache(db datas.Database, dsname string) (*resourceCache, error) {
	m, ok := db.GetDataset(dsname).MaybeHeadValue()
	if ok {
		if err := checkCacheType(m); err != nil {
			return nil, err
		}
	} else {
		m = types.NewMap()
	}
	// orig tracks the loaded state so commit can no-op when nothing changed.
	return &resourceCache{cache: m.(types.Map), orig: m.(types.Map)}, nil
}
|
||||
|
||||
// commit persists the cache map to dataset dsname if it has changed since it
// was loaded or last committed. Holds the write lock for the duration, so
// concurrent get/set calls block while committing.
func (c *resourceCache) commit(db datas.Database, dsname string) error {
	c.rwMutex.Lock()
	defer c.rwMutex.Unlock()
	if !c.cache.Equals(c.orig) {
		// Best-effort meta; an error here is deliberately ignored.
		meta, _ := spec.CreateCommitMetaStruct(db, "", "", nil, nil)
		dset := db.GetDataset(dsname)
		commitOptions := datas.CommitOptions{Meta: meta}
		_, err := db.Commit(dset, c.cache, commitOptions)
		if err == nil {
			// Mark the committed state so the next commit can no-op.
			c.orig = c.cache
		}
		return err
	}
	return nil
}
|
||||
|
||||
func (c *resourceCache) get(k types.String) (types.Ref, bool) {
|
||||
c.rwMutex.RLock()
|
||||
defer c.rwMutex.RUnlock()
|
||||
if v, ok := c.cache.MaybeGet(k); ok {
|
||||
return v.(types.Ref), true
|
||||
}
|
||||
return types.Ref{}, false
|
||||
}
|
||||
|
||||
func (c *resourceCache) set(k types.String, v types.Ref) {
|
||||
c.rwMutex.Lock()
|
||||
defer c.rwMutex.Unlock()
|
||||
c.cache = c.cache.Set(k, v)
|
||||
}
|
||||
|
||||
func (c *resourceCache) len() uint64 {
|
||||
return c.cache.Len()
|
||||
}
|
||||
@@ -0,0 +1,130 @@
|
||||
// Copyright 2016 Attic Labs, Inc. All rights reserved.
|
||||
// Licensed under the Apache License, version 2.0:
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/attic-labs/noms/go/d"
|
||||
"github.com/attic-labs/noms/go/spec"
|
||||
"github.com/attic-labs/noms/go/types"
|
||||
"github.com/attic-labs/noms/go/util/clienttest"
|
||||
"github.com/attic-labs/testify/suite"
|
||||
)
|
||||
|
||||
func randomBytes(blen int) []byte {
|
||||
key := make([]byte, blen)
|
||||
|
||||
_, err := rand.Read(key)
|
||||
d.Chk.NoError(err)
|
||||
return key
|
||||
}
|
||||
|
||||
func randomString(slen int) string {
|
||||
bs := randomBytes(slen)
|
||||
return base64.StdEncoding.EncodeToString(bs)
|
||||
}
|
||||
|
||||
// randomBlob writes a blob of slen random (base64) characters into the
// suite's ldb store and returns it.
func randomBlob(s *resourceCacheTestSuite, slen int) types.Blob {
	str := spec.CreateDatabaseSpecString("ldb", s.LdbDir)
	db, err := spec.GetDatabase(str)
	s.NoError(err)
	defer db.Close()

	s1 := randomString(slen)
	blob := types.NewStreamingBlob(db, strings.NewReader(s1))
	return blob
}
|
||||
|
||||
func stringFromBlob(blob types.Blob) string {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(blob.Reader())
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// resourceCacheTestSuite embeds ClientTestSuite, which provides a temporary
// ldb store (s.LdbDir) shared across the cache tests.
type resourceCacheTestSuite struct {
	clienttest.ClientTestSuite
}
|
||||
|
||||
// TestResourceCache runs the resourceCache suite.
func TestResourceCache(t *testing.T) {
	suite.Run(t, &resourceCacheTestSuite{})
}
|
||||
|
||||
// TestResourceCacheGet exercises the load/set/get/commit round trip: each
// cache1 call reopens the store, loads the cache, optionally sets a value,
// reads it back, and commits, returning the dataset's head ref before and
// after so callers can assert whether the commit changed anything.
func (s *resourceCacheTestSuite) TestResourceCacheGet() {
	dsName := "testCache"
	cache1 := func(k types.String, v types.Blob, setNewValue bool) (types.Ref, types.Ref) {
		str := spec.CreateDatabaseSpecString("ldb", s.LdbDir)
		db, err := spec.GetDatabase(str)
		s.NoError(err)
		defer db.Close()

		// Head ref before this round (zero Ref when the ds doesn't exist).
		hr, _ := db.GetDataset(dsName).MaybeHeadRef()
		rc, err := getResourceCache(db, dsName)
		s.NoError(err)

		r := db.WriteValue(v)
		if setNewValue {
			rc.set(k, r)
		}
		cachedVal, ok := rc.get(k)
		s.True(ok)
		s.Equal(r, cachedVal)

		err = rc.commit(db, dsName)
		s.NoError(err)
		hr1 := db.GetDataset(dsName).HeadRef()
		return hr, hr1
	}

	blob1 := randomBlob(s, 30)
	blob2 := randomBlob(s, 30)
	s1 := stringFromBlob(blob1)

	// First set: dataset didn't exist before, exists after.
	hr1, hr2 := cache1(types.String("key1"), blob1, true)
	s.True(types.Ref{} == hr1)
	s.False(types.Ref{} == hr2)

	// Read-only round: commit is a no-op, head unchanged.
	hr1, hr2 = cache1(types.String("key1"), blob1, false)
	s.True(hr1.Equals(hr2))

	// New key: commit advances the head.
	hr1, hr2 = cache1(types.String("key2"), blob2, true)
	s.False(hr1.Equals(hr2))

	// Reload the cache fresh and verify persisted contents.
	str := spec.CreateDatabaseSpecString("ldb", s.LdbDir)
	db, err := spec.GetDatabase(str)
	s.NoError(err)
	rc, err := getResourceCache(db, dsName)
	s.NoError(err)
	s.Equal(uint64(2), rc.len())
	br1, _ := rc.get("key1")
	b1 := br1.TargetValue(db).(types.Blob)
	s2 := stringFromBlob(b1)

	s.Equal(s1, s2)
}
|
||||
|
||||
func (s *resourceCacheTestSuite) TestCheckCacheType() {
|
||||
blob1 := randomBlob(s, 30)
|
||||
|
||||
badTestCases := []types.Value {
|
||||
types.NewStruct("testStruct", types.StructData{"f1": types.String("f1value")}),
|
||||
types.NewMap(types.Number(1), types.NewRef(blob1)),
|
||||
types.NewMap(types.String("s1"), types.String("badtype")),
|
||||
types.NewMap(types.String("s1"), types.NewRef(types.String("badtype"))),
|
||||
}
|
||||
|
||||
for _, tc := range badTestCases {
|
||||
err := checkCacheType(tc)
|
||||
s.Error(err)
|
||||
}
|
||||
|
||||
c1 := types.NewMap(types.String("s1"), types.NewRef(blob1))
|
||||
err := checkCacheType(c1)
|
||||
s.NoError(err)
|
||||
}
|
||||
@@ -0,0 +1,82 @@
|
||||
// Copyright 2016 Attic Labs, Inc. All rights reserved.
|
||||
// Licensed under the Apache License, version 2.0:
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/attic-labs/noms/go/d"
|
||||
"github.com/attic-labs/noms/go/hash"
|
||||
"github.com/attic-labs/noms/go/types"
|
||||
)
|
||||
|
||||
// TreeWalkCallback defines a function prototype submitting successive values
// from TreeWalk to the caller. It receives the path from the walk's root to
// the current value, the value's parent, and the value itself. At any point,
// the function can return true to prevent traversing any of the current
// value's children.
type TreeWalkCallback func(p types.Path, parent, v types.Value) (stop bool)
||||
|
||||
// TreeWalk walks over a noms graph starting at 'v' and calls 'twcb' for each
// value it encounters. It calls 'twcb' with a path from the original value to
// the current value, the current value's parent, and the current value. TreeWalk
// also takes a ValueReader so that it can traverse values across refs; each
// ref target is visited at most once.
func TreeWalk(vr types.ValueReader, p types.Path, v types.Value, twcb TreeWalkCallback) {
	var processVal func(p types.Path, parent, v types.Value)
	var processRef func(p types.Path, parent types.Value, r types.Ref)
	// visited records ref target hashes already traversed, so shared or
	// cyclic ref structures are not walked twice.
	visited := map[hash.Hash]bool{}

	processVal = func(p types.Path, parent, v types.Value) {
		// Refs are resolved transparently rather than reported as values.
		if sr, ok := v.(types.Ref); ok {
			processRef(p, parent, sr)
			return
		}

		// A true return from the callback prunes this value's children.
		if !twcb(p, parent, v) {
			switch value := v.(type) {
			case types.List:
				// List elements are addressed by numeric index.
				value.IterAll(func(c types.Value, index uint64) {
					p1 := p.Append(types.NewIndexPath(types.Number(index)))
					processVal(p1, v, c)
				})
			case types.Set:
				// Set elements are addressed by their hash.
				value.IterAll(func(c types.Value) {
					p1 := p.Append(types.NewHashIndexPath(c.Hash()))
					processVal(p1, v, c)
				})
			case types.Map:
				// Both key and value of each entry are visited; the key gets
				// an @key-style path, the value a plain index path. Keys that
				// cannot appear literally in a path are addressed by hash.
				value.IterAll(func(k, c types.Value) {
					var kp1, vp1 types.Path
					if types.ValueCanBePathIndex(k) {
						kp1 = p.Append(types.NewIndexIntoKeyPath(k))
						vp1 = p.Append(types.NewIndexPath(k))
					} else {
						kp1 = p.Append(types.NewHashIndexIntoKeyPath(k.Hash()))
						vp1 = p.Append(types.NewHashIndexPath(k.Hash()))
					}
					processVal(kp1, v, k)
					processVal(vp1, v, c)
				})
			case types.Struct:
				// Struct fields are addressed by field name.
				value.Type().Desc.(types.StructDesc).IterFields(func(name string, typ *types.Type) {
					p1 := p.Append(types.NewFieldPath(name))
					c := value.Get(name)
					processVal(p1, v, c)
				})
			}
		}
	}

	// Todo: this resolves the path through the ref transparently. Is that right?
	processRef = func(p types.Path, parent types.Value, r types.Ref) {
		if visited[r.TargetHash()] {
			return
		}
		visited[r.TargetHash()] = true

		target := r.TargetHash()
		c := vr.ReadValue(target)
		// A dangling ref indicates a corrupt store; fail loudly.
		d.PanicIfTrue(c == nil)
		processVal(p, parent, c)
	}

	processVal(p, nil, v)
}
|
||||
@@ -0,0 +1,104 @@
|
||||
// Copyright 2016 Attic Labs, Inc. All rights reserved.
|
||||
// Licensed under the Apache License, version 2.0:
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/attic-labs/noms/go/marshal"
|
||||
"github.com/attic-labs/noms/go/types"
|
||||
"github.com/attic-labs/testify/assert"
|
||||
)
|
||||
|
||||
// Point is a simple two-field struct used as a non-primitive map key and
// list element in the TreeWalk tests.
type Point struct {
	X int
	Y int
}
|
||||
|
||||
// TStruct is the test fixture marshaled into a noms graph: maps with
// primitive and struct keys, plus lists of structs and strings.
type TStruct struct {
	M1 map[string]int
	M2 map[Point]string
	L1 []Point
	L2 []string
}
|
||||
|
||||
func mustMarshal(v interface{}) types.Value {
|
||||
v1, _ := marshal.Marshal(v)
|
||||
return v1
|
||||
}
|
||||
|
||||
func TestTreeWalk(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
store := types.NewTestValueStore()
|
||||
|
||||
m1 := map[string]int{"map1-key1": 1, "map1-key2": 2}
|
||||
m2 := map[Point]string{{100, 200}: "map2-val1", {300, 400}: "map2-val2"}
|
||||
l1 := []Point{{11, 12}, {13, 14}}
|
||||
l2 := []string{"list2-val1", "list2-val2"}
|
||||
ts := TStruct{M1: m1, M2: m2, L1: l1, L2: l2}
|
||||
tv := mustMarshal(ts).(types.Struct)
|
||||
|
||||
nomsL1 := mustMarshal(l1).(types.List)
|
||||
nomsL2 := mustMarshal(l2).(types.List)
|
||||
nomsM1 := mustMarshal(m1).(types.Map)
|
||||
nomsM2 := mustMarshal(m2).(types.Map)
|
||||
nomsP1 := mustMarshal(Point{100, 200})
|
||||
nomsP2 := mustMarshal(Point{300, 400})
|
||||
|
||||
nomsR1 := store.WriteValue(nomsP1)
|
||||
nomsS1 := types.NewSet(types.String("one"), types.Number(2), nomsP1)
|
||||
tv = tv.Set("r1", nomsR1).Set("s1", nomsS1)
|
||||
store.WriteValue(tv)
|
||||
|
||||
expected := map[string][]types.Value{
|
||||
`.l1`: {tv, nomsL1},
|
||||
`.l1[0]`: {nomsL1, nomsL1.Get(0)},
|
||||
`.l1[0].x`: {nomsL1.Get(0), types.Number(11)},
|
||||
`.l1[0].y`: {nomsL1.Get(0), types.Number(12)},
|
||||
`.l1[1]`: {nomsL1, nomsL1.Get(1)},
|
||||
`.l1[1].x`: {nomsL1.Get(1), types.Number(13)},
|
||||
`.l1[1].y`: {nomsL1.Get(1), types.Number(14)},
|
||||
`.l2`: {tv, nomsL2},
|
||||
`.l2[0]`: {nomsL2, nomsL2.Get(0)},
|
||||
`.l2[1]`: {nomsL2, nomsL2.Get(1)},
|
||||
`.m1`: {tv, nomsM1},
|
||||
`.m1["map1-key1"]@key`: {nomsM1, types.String("map1-key1")},
|
||||
`.m1["map1-key1"]`: {nomsM1, types.Number(1)},
|
||||
`.m1["map1-key2"]@key`: {nomsM1, types.String("map1-key2")},
|
||||
`.m1["map1-key2"]`: {nomsM1, types.Number(2)},
|
||||
`.m2`: {tv, nomsM2},
|
||||
fmt.Sprintf(`.m2[#%s]@key`, nomsP1.Hash()): {nomsM2, nomsP1},
|
||||
fmt.Sprintf(`.m2[#%s]@key.x`, nomsP1.Hash()): {nomsP1, types.Number(100)},
|
||||
fmt.Sprintf(`.m2[#%s]@key.y`, nomsP1.Hash()): {nomsP1, types.Number(200)},
|
||||
fmt.Sprintf(`.m2[#%s]`, nomsP1.Hash()): {nomsM2, types.String("map2-val1")},
|
||||
fmt.Sprintf(`.m2[#%s]@key`, nomsP2.Hash()): {nomsM2, nomsP2},
|
||||
fmt.Sprintf(`.m2[#%s]@key.x`, nomsP2.Hash()): {nomsP2, types.Number(300)},
|
||||
fmt.Sprintf(`.m2[#%s]@key.y`, nomsP2.Hash()): {nomsP2, types.Number(400)},
|
||||
fmt.Sprintf(`.m2[#%s]`, nomsP2.Hash()): {nomsM2, types.String("map2-val2")},
|
||||
`.s1`: {tv, nomsS1},
|
||||
fmt.Sprintf(`.s1[#%s]`, types.String("one").Hash()): {nomsS1, types.String("one")},
|
||||
fmt.Sprintf(`.s1[#%s]`, types.Number(2).Hash()): {nomsS1, types.Number(2)},
|
||||
fmt.Sprintf(`.s1[#%s]`, nomsP1.Hash()): {nomsS1, nomsP1},
|
||||
fmt.Sprintf(`.s1[#%s].x`, nomsP1.Hash()): {nomsP1, types.Number(100)},
|
||||
fmt.Sprintf(`.s1[#%s].y`, nomsP1.Hash()): {nomsP1, types.Number(200)},
|
||||
`.r1`: {tv, nomsP1},
|
||||
`.r1.x`: {nomsP1, types.Number(100)},
|
||||
`.r1.y`: {nomsP1, types.Number(200)},
|
||||
}
|
||||
|
||||
cnt := 0
|
||||
callback := func(p types.Path, parent, value types.Value) bool {
|
||||
if vs, ok := expected[p.String()]; ok {
|
||||
cnt += 1
|
||||
assert.True(parent.Equals(vs[0]), "p: %s, parent: %s, value: %s", p, types.EncodedValue(parent), types.EncodedValue(value))
|
||||
assert.True(value.Equals(vs[1]), "p: %s, parent: %s, value: %s", p, types.EncodedValue(parent), types.EncodedValue(value))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
TreeWalk(store, nil, tv, callback)
|
||||
assert.Equal(len(expected), cnt)
|
||||
}
|
||||
@@ -0,0 +1,176 @@
|
||||
// Copyright 2016 Attic Labs, Inc. All rights reserved.
|
||||
// Licensed under the Apache License, version 2.0:
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/attic-labs/noms/go/diff"
|
||||
"github.com/attic-labs/noms/go/types"
|
||||
)
|
||||
|
||||
// Update() and IncrementalUpdate() are useful for transforming a graph and then
|
||||
// for efficiently re-applying the same transform function whenever the original
|
||||
// graph is updated.
|
||||
//
|
||||
// For example, imagine a large dataset (let's call it Graph-A1) is imported
|
||||
// into a Noms database. Then some update function 'U' is applied to that
|
||||
// graph which results in graph: 'Graph-B1'. Over time data is added and changed
|
||||
// in the original dataset and that gets imported as the next version of the
|
||||
// original graph: Graph-A2. In most cases, we'll want to reapply the transform
|
||||
// function to Graph-A2 and create Graph-B2 which now has the latest data with
|
||||
// the update function applied to it. Here's how that may look in a diagram:
|
||||
//
|
||||
// dataset1 dataset2
|
||||
//
|
||||
// Graph-A2 ----> U(Graph-A2) ----> Graph-B2
|
||||
// | |
|
||||
// | |
|
||||
// V V
|
||||
// Graph-A1 ----> U(Graph-A1) ----> Graph-B1
|
||||
//
|
||||
// The problem here is that, if Graph-A1 is large and the diffs between Graph-A2
|
||||
// and Graph-A1 are small, then Update(Graph-A2) is duplicating a lot of
|
||||
// effort.
|
||||
//
|
||||
// IncrementalUpdate relies on diff.Diff() and diff.Apply() (see
// apply_patch.go) to do this more efficiently. Rather than applying the transform
// function to the entire Graph-A2, IncrementalUpdate gets the Diff of Graph-A1
// and Graph-A2. It then applies the update function to just those diffs, which
// creates a new "Patch" that can then be applied directly to Graph-B1 to
// generate Graph-B2. That results in a diagram like the one below:
|
||||
//
|
||||
// dataset1 dataset2
|
||||
//
|
||||
// Graph-A3 ----> U(Diff(Graph-A2, Graph-A3)) ----> Graph-B3
|
||||
// | |
|
||||
// | |
|
||||
// V V
|
||||
// Graph-A2 ----> U(Diff(GraphA1, Graph-A2)) ----> Graph-B2
|
||||
// | |
|
||||
// | |
|
||||
// V V
|
||||
// Graph-A1 ----> U(Graph-A1) ----> Graph-B1
|
||||
//
|
||||
|
||||
// ShouldUpdateCallback defines a function that is called on each node in a
// graph. If it returns true, the node is added to a Difference object and sent
// to the 'found' channel for processing; the walk does not descend into that
// node's children.
type ShouldUpdateCallback func(p types.Path, root, parent, value types.Value) bool
|
||||
|
||||
// UpdateCallback defines a function that takes a Difference and returns a
// modified Difference that's suitable for patching into the target graph.
type UpdateCallback func(diff diff.Difference) diff.Difference
|
||||
|
||||
// IncrementalUpdate takes lastInRoot(GraphA1), inRoot(GraphA2) and
// lastOutRoot(GraphB1) as arguments and returns a types.Value(GraphB2). Invoking
// this function with either lastInRoot or lastOutRoot having a nil value is the
// same as calling Update directly.
func IncrementalUpdate(vr types.ValueReader, inRoot, lastInRoot, lastOutRoot types.Value, shouldUpdateCb ShouldUpdateCallback, updateCb UpdateCallback, concurrency uint) types.Value {
	// Without a previous input/output pair there is nothing to diff against,
	// so fall back to a full update of inRoot.
	if lastInRoot == nil || lastOutRoot == nil {
		return Update(vr, inRoot, shouldUpdateCb, updateCb, concurrency)
	}

	// Get the differences between lastInRoot and inRoot. diff.Diff streams
	// Differences into dChan; the goroutine closes dChan when the diff is
	// complete so the range loop below terminates.
	// NOTE(review): sChan (the stop channel) is created but never signaled;
	// presumably diff.Diff requires a non-nil channel — confirm.
	dChan := make(chan diff.Difference, 128)
	sChan := make(chan struct{})
	go func() {
		diff.Diff(lastInRoot, inRoot, dChan, sChan, true)
		close(dChan)
	}()

	patch := diff.Patch{}
	for d := range dChan {
		// Transform each NewValue in Differences and add new diff to patch.
		newValue := Update(vr, d.NewValue, shouldUpdateCb, updateCb, concurrency)

		// If NewValue is nil (a removal), transform the OldValue instead
		// because we may need that to find an object to delete in the new
		// graph.
		var oldValue types.Value
		if d.NewValue == nil {
			oldValue = Update(vr, d.OldValue, shouldUpdateCb, updateCb, concurrency)
		}
		dif := diff.Difference{Path: d.Path, ChangeType: d.ChangeType, OldValue: oldValue, NewValue: newValue}
		patch = append(patch, dif)
	}

	// Apply the transformed patch to the previous output graph to produce
	// the new output graph.
	return diff.Apply(lastOutRoot, patch)
}
|
||||
|
||||
// Update walks through each node in a graph starting at root. It calls
// shouldUpdate() on each node it encounters. If true is returned it wraps the
// value in a Difference object and sends it to 'foundChan' to be 'transformed'
// by the caller. Any nodes that are sent to foundChan are processed and the
// results sent to 'updatedChan' when done.
// The 'concurrency' argument determines how many concurrent routines are
// started for the updateCb to run in. If 'concurrency' > 1, updateCb must be
// thread-safe.
func Update(vr types.ValueReader, root types.Value, shouldUpdateCb ShouldUpdateCallback, updateCb UpdateCallback, concurrency uint) types.Value {
	// foundChan: nodes that shouldUpdateCb selected, awaiting transformation.
	// updatedChan: transformed Differences, awaiting collection into the patch.
	foundChan := make(chan diff.Difference, 128)
	updatedChan := make(chan diff.Difference, 128)

	// Collector goroutine: reads all the updated Differences from updatedChan
	// into this patch. Only this goroutine writes 'patch'; updateDoneChan
	// signals when collection has drained.
	patch := diff.Patch{}
	updateDoneChan := make(chan struct{})
	go func() {
		for dlr := range updatedChan {
			patch = append(patch, dlr)
		}
		updateDoneChan <- struct{}{}
	}()

	// Create 'concurrency' goroutines for processing diffs from foundChan.
	transformWg := sync.WaitGroup{}
	for i := uint(0); i < concurrency; i++ {
		transformWg.Add(1)
		go func() {
			// runCallback shields the worker from a panicking updateCb; on
			// panic it returns the zero Difference, which is then dropped by
			// the IsEmpty() check below.
			// NOTE(review): the panic is only printed, not propagated — a
			// failing transform is silently skipped; confirm this is intended.
			runCallback := func(dif diff.Difference) diff.Difference {
				defer func() {
					if r := recover(); r != nil {
						fmt.Println("Recovered in runCallback", r)
					}
				}()
				return updateCb(dif)
			}

			for dif := range foundChan {
				d1 := runCallback(dif)
				if !d1.IsEmpty() {
					updatedChan <- d1
				}
			}
			transformWg.Done()
		}()
	}

	// The treewalk callback calls shouldUpdateCb on each node being
	// traversed. If true is returned, the Value is wrapped in a Difference
	// object and sent to foundChan for processing. TreeWalk does not traverse
	// any children of Values sent to foundChan (twcb returns true).
	twcb := func(p types.Path, parent, v types.Value) bool {
		if shouldUpdateCb(p, root, parent, v) {
			// Copy the path: TreeWalk may reuse/extend its backing array.
			p1 := make(types.Path, len(p))
			copy(p1, p)
			foundChan <- diff.Difference{Path: p1, ChangeType: types.DiffChangeModified, OldValue: v}
			return true
		}
		return false
	}

	// Shutdown sequence: finish the walk, close foundChan so workers drain
	// and exit, wait for them, close updatedChan so the collector finishes,
	// then wait for the collector.
	TreeWalk(vr, types.Path{}, root, twcb)
	close(foundChan)
	transformWg.Wait()
	close(updatedChan)
	<-updateDoneChan

	// If no diffs were generated, then no transforms were made; just return
	// the original root.
	if len(patch) == 0 {
		return root
	}

	return diff.Apply(root, patch)
}
|
||||
@@ -0,0 +1,255 @@
|
||||
// Copyright 2016 Attic Labs, Inc. All rights reserved.
|
||||
// Licensed under the Apache License, version 2.0:
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/attic-labs/noms/go/diff"
|
||||
"github.com/attic-labs/noms/go/marshal"
|
||||
"github.com/attic-labs/noms/go/types"
|
||||
"github.com/attic-labs/testify/assert"
|
||||
)
|
||||
|
||||
// orig is the struct the update tests search for (noms struct name "Orig").
type orig struct {
	AValue string
}
|
||||
|
||||
// replaced is what transformCb substitutes for each orig it finds.
type replaced struct {
	AValue string
}
|
||||
|
||||
// origstruct embeds orig values in maps and fields to exercise updates at
// several nesting depths within a struct.
type origstruct struct {
	F1 map[string]interface{}
	F2 map[string]interface{}
	F3 orig
	F4 orig
	F5 string
}
|
||||
|
||||
// Noms types for the orig and replaced fixtures; getPaths uses these to find
// instances of each in a graph via subtype checks.
var (
	origType     = mustMarshal(orig{"v1"}).Type()
	replacedType = mustMarshal(replaced{"v1"}).Type()
)
|
||||
|
||||
func shouldUpdateCb(p types.Path, root, parent, v types.Value) (res bool) {
|
||||
return v != nil && v.Type().Kind() == types.StructKind && v.Type().Desc.(types.StructDesc).Name == "Orig"
|
||||
}
|
||||
|
||||
// transformCb is the UpdateCallback used by these tests: it reads the aValue
// field from the Orig struct in dif.OldValue and returns a Difference whose
// NewValue is a Replaced struct carrying the same string.
func transformCb(dif diff.Difference) diff.Difference {
	s1 := string(dif.OldValue.(types.Struct).Get("aValue").(types.String))
	// NOTE(review): the Marshal error is discarded; a failure would produce a
	// nil NewValue — confirm that is acceptable for test code.
	nv, _ := marshal.Marshal(replaced{s1})
	return diff.Difference{Path: dif.Path, ChangeType: types.DiffChangeModified, OldValue: dif.OldValue, NewValue: nv}
}
|
||||
|
||||
func getPaths(vr types.ValueReader, a1 types.Value, typ *types.Type) ([]types.Path, []types.Path) {
|
||||
paths := []types.Path{}
|
||||
typedPaths := []types.Path{}
|
||||
TreeWalk(vr, nil, a1, func(p types.Path, parent, v types.Value) bool {
|
||||
paths = append(paths, p)
|
||||
if types.IsSubtype(v.Type(), typ) {
|
||||
typedPaths = append(typedPaths, p)
|
||||
}
|
||||
return false
|
||||
})
|
||||
return paths, typedPaths
|
||||
|
||||
}
|
||||
|
||||
func checkParallellGraphs(assert *assert.Assertions, a, b types.Value) {
|
||||
vs := types.NewTestValueStore()
|
||||
paths1, typedPaths1 := getPaths(vs, a, origType)
|
||||
paths2, typedPaths2 := getPaths(vs, b, replacedType)
|
||||
assert.Equal(len(paths1), len(paths2))
|
||||
assert.Equal(len(typedPaths1), len(typedPaths2))
|
||||
|
||||
for i, p := range paths1 {
|
||||
assert.True(p.Equals(paths2[i]), "p1: %s != p2: %s", p, paths2[i])
|
||||
}
|
||||
|
||||
for i, p := range typedPaths1 {
|
||||
assert.True(p.Equals(typedPaths2[i]), "p1: %s != p2: %s", p, paths2[i])
|
||||
}
|
||||
}
|
||||
|
||||
func updateTest(assert *assert.Assertions, a1, a2 types.Value) {
|
||||
vs := types.NewTestValueStore()
|
||||
b1 := Update(vs, a1, shouldUpdateCb, transformCb, 1)
|
||||
checkParallellGraphs(assert, a1, b1)
|
||||
b2 := IncrementalUpdate(vs, a2, a1, b1, shouldUpdateCb, transformCb, 1)
|
||||
checkParallellGraphs(assert, a2, b2)
|
||||
}
|
||||
|
||||
// updateSetTest is like updateTest but only compares path counts, not the
// paths themselves. Set members are addressed by value hash, so replacing a
// member changes its path — presumably why exact path comparison via
// checkParallellGraphs is not used here (TODO confirm).
func updateSetTest(assert *assert.Assertions, a1, a2 types.Value) {
	vs := types.NewTestValueStore()
	b1 := Update(vs, a1, shouldUpdateCb, transformCb, 1)
	paths1, typedPaths1 := getPaths(vs, a1, origType)
	paths2, typedPaths2 := getPaths(vs, b1, replacedType)
	assert.Equal(len(paths1), len(paths2))
	assert.Equal(len(typedPaths1), len(typedPaths2))

	b2 := IncrementalUpdate(vs, a2, a1, b1, shouldUpdateCb, transformCb, 1)
	paths1, typedPaths1 = getPaths(vs, a2, origType)
	paths2, typedPaths2 = getPaths(vs, b2, replacedType)
	assert.Equal(len(paths1), len(paths2))
	assert.Equal(len(typedPaths1), len(typedPaths2))
}
|
||||
|
||||
// TestUpdateList exercises Update/IncrementalUpdate over maps of lists where
// Orig structs appear at the start, middle, end, and every position of a
// list, and where lists shrink, grow, and change between versions.
func TestUpdateList(t *testing.T) {
	assert := assert.New(t)

	vs := types.NewTestValueStore()
	defer vs.Close()

	a1 := mustMarshal(map[string][]interface{}{
		"l1": []interface{}{"five", "ten", "fifteen"},
		"l2": []interface{}{orig{"o1"}, "two", "three", "four"},
		"l3": []interface{}{"one", orig{"o1"}, "three", "four"},
		"l4": []interface{}{"one", "two", "three", orig{"o1"}},
		"l5": []interface{}{orig{"o1"}, orig{"o1"}, orig{"o1"}, orig{"o1"}},
	})

	a2 := mustMarshal(map[string][]interface{}{
		"l1": []interface{}{"one", "two", "five", "eight", "eleven", "sixteen"},
		"l2": []interface{}{"two", "three", "four", orig{"o2"}, orig{"o3"}},
		"l3": []interface{}{"one", orig{"o2"}, "three", "four"},
		"l4": []interface{}{"one", "two", "three", "xyxyxy"},
		"l5": []interface{}{orig{"o2"}, orig{"o1"}, orig{"o1"}, orig{"o1"}},
	})

	updateTest(assert, a1, a2)
}
|
||||
|
||||
// TestUpdateSet exercises updates of Orig structs held directly in sets and
// nested inside maps contained in sets; set versions both replace and remove
// Orig members between a1 and a2.
func TestUpdateSet(t *testing.T) {
	assert := assert.New(t)

	a1 := types.NewMap(
		ts("s1"), types.NewSet(ts("one"), ts("two"), mustMarshal(orig{"s1"}), mustMarshal(orig{"s2"})),
		ts("s2"), types.NewSet(
			types.NewMap(
				ts("k11"), ts("v11"),
				ts("k12"), mustMarshal(orig{"s3"}),
			),
			types.NewMap(
				ts("k21"), ts("v21"),
				ts("k22"), mustMarshal(orig{"v4"}),
			),
		),
	)

	a2 := types.NewMap(
		ts("s1"), types.NewSet(ts("one"), ts("two"), mustMarshal(orig{"s44"}), mustMarshal(orig{"s55"})),
		ts("s2"), types.NewSet(
			types.NewMap(
				ts("k11"), ts("v11"),
				ts("k12"), mustMarshal(orig{"s3"}),
			),
			types.NewMap(
				ts("k21"), ts("v21"),
				ts("k22"), ts("changed"),
			),
		),
	)

	updateSetTest(assert, a1, a2)
}
|
||||
|
||||
// TestUpdateMap exercises map updates where entries with Orig values are
// removed, retained, and added, and a non-Orig entry is introduced.
func TestUpdateMap(t *testing.T) {
	assert := assert.New(t)

	a1 := mustMarshal(map[string]orig{
		"o1": orig{"o1"},
		"o2": orig{"o2"},
	})

	a2 := mustMarshal(map[string]interface{}{
		"o2": orig{"o2"},
		"s1": "new field",
		"o3": orig{"o3"},
	})

	updateTest(assert, a1, a2)
}
|
||||
|
||||
// TestUpdateMapMixed exercises lists that mix strings and Orig structs, where
// one list gains an Orig and the other loses its Orig entirely.
func TestUpdateMapMixed(t *testing.T) {
	assert := assert.New(t)

	a1 := mustMarshal(map[string][]interface{}{
		"l1": {"one", "two", orig{"o1"}},
		"l2": {"one", "two", orig{"o2"}},
	})

	a2 := mustMarshal(map[string][]interface{}{
		"l1": {"one", "two", orig{"o11"}, orig{"o12"}},
		"l2": {"one"},
	})
	updateTest(assert, a1, a2)
}
|
||||
|
||||
// TestUpdateMapNonPrimitives exercises lists containing only Orig structs,
// including a duplicated struct value shared between both lists in a1.
func TestUpdateMapNonPrimitives(t *testing.T) {
	assert := assert.New(t)

	a1 := mustMarshal(map[string][]interface{}{
		"l1": {orig{"o11"}, orig{"o12"}, orig{"o13"}},
		"l2": {orig{"o21"}, orig{"o12"}, orig{"o23"}},
	})

	a2 := mustMarshal(map[string][]interface{}{
		"l1": {orig{"o11"}, orig{"o14"}},
		"l2": {orig{"o21"}, orig{"o222"}, orig{"o23"}},
	})
	updateTest(assert, a1, a2)
}
|
||||
|
||||
// TestUpdateStruct exercises updates of Orig values nested in struct fields
// and in maps held by struct fields, plus (a2 -> a3) adding a brand-new
// struct field whose value is an Orig.
func TestUpdateStruct(t *testing.T) {
	assert := assert.New(t)

	a1 := mustMarshal(map[string]origstruct{
		"t1": origstruct{
			F1: map[string]interface{}{"o1": orig{"o1"}, "two": 2},
			F2: map[string]interface{}{"o2": orig{"o2"}},
			F3: orig{"o3"},
			F4: orig{"o4"},
			F5: "field 5",
		},
		"t2": origstruct{
			F1: map[string]interface{}{"one": 1, "two": 2},
			F2: map[string]interface{}{"o2": orig{"o2"}},
			F3: orig{"o23"},
			F4: orig{"o24"},
			F5: "field 25",
		},
	}).(types.Map)

	a2 := mustMarshal(map[string]origstruct{
		"t1": origstruct{
			F1: map[string]interface{}{"two": 2},
			F2: map[string]interface{}{"o1": orig{"o1"}, "o2": orig{"o2"}},
			F3: orig{"o33"},
			F4: orig{"o4"},
			F5: "field 55",
		},
		"t2": origstruct{
			F1: map[string]interface{}{"one": 1, "two": 2},
			F2: map[string]interface{}{"o2": orig{"o2"}, "o3": orig{"o3"}},
			F3: orig{"o233"},
			F4: orig{"o24"},
			F5: "field 25",
		},
	}).(types.Map)

	// add a field
	a3 := types.NewMap(
		ts("t1"), a2.Get(ts("t1")).(types.Struct).Set("x1", mustMarshal(orig{"x1"})),
		ts("t2"), a2.Get(ts("t2")),
	)

	updateTest(assert, a1, a2)
	updateTest(assert, a2, a3)
}
|
||||
|
||||
// ts is shorthand for converting a Go string to a types.String in fixtures.
func ts(s1 string) types.String {
	return types.String(s1)
}
|
||||
Reference in New Issue
Block a user