Merge branch 'main' into concurrent-branches-map

This commit is contained in:
Alex Giurgiu
2023-11-21 13:25:17 +02:00
52 changed files with 896 additions and 444 deletions
+2 -2
View File
@@ -208,7 +208,7 @@ func printBackups(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) errhand.Ver
return errhand.BuildDError("Unable to get backups from the local directory").AddCause(err).Build()
}
for _, r := range backups {
for _, r := range backups.Snapshot() {
if apr.Contains(cli.VerboseFlag) {
paramStr := make([]byte, 0)
if len(r.Params) > 0 {
@@ -256,7 +256,7 @@ func syncBackup(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgParseR
return errhand.BuildDError("Unable to get backups from the local directory").AddCause(err).Build()
}
b, ok := backups[backupName]
b, ok := backups.Get(backupName)
if !ok {
return errhand.BuildDError("error: unknown backup: '%s' ", backupName).Build()
}
+47 -9
View File
@@ -53,6 +53,7 @@ import (
const (
LocalConnectionUser = "__dolt_local_user__"
ApiSqleContextKey = "__sqle_context__"
)
// ExternalDisableUsers is called by implementing applications to disable users. This is not used by Dolt itself,
@@ -384,7 +385,7 @@ func Serve(
}
ctxFactory := func() (*sql.Context, error) { return sqlEngine.NewDefaultContext(ctx) }
authenticator := newAuthenticator(ctxFactory, sqlEngine.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb)
authenticator := newAccessController(ctxFactory, sqlEngine.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb)
args = sqle.WithUserPasswordAuth(args, authenticator)
args.TLSConfig = serverConf.TLSConfig
@@ -587,29 +588,66 @@ func acquireGlobalSqlServerLock(port int, dEnv *env.DoltEnv) (*env.DBLock, error
return &lck, nil
}
// remotesapiAuth facilitates the implementation of remotesrv.AccessControl for the remotesapi server.
type remotesapiAuth struct {
// ctxFactory is a function that returns a new sql.Context. This will create a new context every time it is called,
// so it should be called once per API request.
ctxFactory func() (*sql.Context, error)
rawDb *mysql_db.MySQLDb
}
func newAuthenticator(ctxFactory func() (*sql.Context, error), rawDb *mysql_db.MySQLDb) remotesrv.Authenticator {
func newAccessController(ctxFactory func() (*sql.Context, error), rawDb *mysql_db.MySQLDb) remotesrv.AccessControl {
return &remotesapiAuth{ctxFactory, rawDb}
}
func (r *remotesapiAuth) Authenticate(creds *remotesrv.RequestCredentials) bool {
err := commands.ValidatePasswordWithAuthResponse(r.rawDb, creds.Username, creds.Password)
// ApiAuthenticate checks the provided credentials against the database and returns a SQL context if the credentials are
// valid. If the credentials are invalid, then a nil context is returned. Failures to authenticate are logged.
func (r *remotesapiAuth) ApiAuthenticate(ctx context.Context) (context.Context, error) {
creds, err := remotesrv.ExtractBasicAuthCreds(ctx)
if err != nil {
return false
return nil, err
}
ctx, err := r.ctxFactory()
err = commands.ValidatePasswordWithAuthResponse(r.rawDb, creds.Username, creds.Password)
if err != nil {
return false
return nil, fmt.Errorf("API Authentication Failure: %v", err)
}
address := creds.Address
if strings.Index(address, ":") > 0 {
address, _, err = net.SplitHostPort(creds.Address)
if err != nil {
return nil, fmt.Errorf("Invlaid Host string for authentication: %s", creds.Address)
}
}
sqlCtx, err := r.ctxFactory()
if err != nil {
return nil, fmt.Errorf("API Runtime error: %v", err)
}
sqlCtx.Session.SetClient(sql.Client{User: creds.Username, Address: address, Capabilities: 0})
updatedCtx := context.WithValue(ctx, ApiSqleContextKey, sqlCtx)
return updatedCtx, nil
}
func (r *remotesapiAuth) ApiAuthorize(ctx context.Context) (bool, error) {
sqlCtx, ok := ctx.Value(ApiSqleContextKey).(*sql.Context)
if !ok {
return false, fmt.Errorf("Runtime error: could not get SQL context from context")
}
ctx.Session.SetClient(sql.Client{User: creds.Username, Address: creds.Address, Capabilities: 0})
privOp := sql.NewDynamicPrivilegedOperation(plan.DynamicPrivilege_CloneAdmin)
return r.rawDb.UserHasPrivileges(ctx, privOp)
authorized := r.rawDb.UserHasPrivileges(sqlCtx, privOp)
if !authorized {
return false, fmt.Errorf("API Authorization Failure: %s has not been granted CLONE_ADMIN access", sqlCtx.Session.Client().User)
}
return true, nil
}
func LoadClusterTLSConfig(cfg cluster.Config) (*tls.Config, error) {
+1 -1
View File
@@ -59,7 +59,7 @@ require (
github.com/cespare/xxhash v1.1.0
github.com/creasty/defaults v1.6.0
github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2
github.com/dolthub/go-mysql-server v0.17.1-0.20231116204523-f1ffd56a6d12
github.com/dolthub/go-mysql-server v0.17.1-0.20231117210301-8d70c233f221
github.com/dolthub/swiss v0.1.0
github.com/goccy/go-json v0.10.2
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
+2 -2
View File
@@ -181,8 +181,8 @@ github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-icu-regex v0.0.0-20230524105445-af7e7991c97e h1:kPsT4a47cw1+y/N5SSCkma7FhAPw7KeGmD6c9PBZW9Y=
github.com/dolthub/go-icu-regex v0.0.0-20230524105445-af7e7991c97e/go.mod h1:KPUcpx070QOfJK1gNe0zx4pA5sicIK1GMikIGLKC168=
github.com/dolthub/go-mysql-server v0.17.1-0.20231116204523-f1ffd56a6d12 h1:pwkcOC2bcQ50mGPpi2MATwyz/cKrGeXJhPFhgfkHTzg=
github.com/dolthub/go-mysql-server v0.17.1-0.20231116204523-f1ffd56a6d12/go.mod h1:Z3EbOzC1yoK9MoYBxl6LDksV8GRRyjjHDZTu2lWpT/E=
github.com/dolthub/go-mysql-server v0.17.1-0.20231117210301-8d70c233f221 h1:4tXnkotftNS4nxPTCfwy5WFPPq+YFG3xBp5nfDz0Zto=
github.com/dolthub/go-mysql-server v0.17.1-0.20231117210301-8d70c233f221/go.mod h1:Z3EbOzC1yoK9MoYBxl6LDksV8GRRyjjHDZTu2lWpT/E=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto=
github.com/dolthub/jsonpath v0.0.2-0.20230525180605-8dc13778fd72 h1:NfWmngMi1CYUWU4Ix8wM+USEhjc+mhPlT9JUR/anvbQ=
@@ -25,11 +25,14 @@ import (
// DirToDBName takes the physical directory name, |dirName|, and replaces any unsupported characters to create a
// valid logical database name. For example, spaces are replaced with underscores.
func DirToDBName(dirName string) string {
// this environment variable is used whether to replace hyphens in the database name with underscores.
var translateHyphensToUnderscores = os.Getenv(dconfig.EnvDbNameReplaceHyphens) != ""
// this environment variable determines whether to replace hyphen and space characters in the database name with underscores.
if os.Getenv(dconfig.EnvDbNameReplace) == "" {
return dirName
}
dbName := strings.TrimSpace(dirName)
dbName = strings.Map(func(r rune) rune {
if unicode.IsSpace(r) || (translateHyphensToUnderscores && r == '-') {
if unicode.IsSpace(r) || r == '-' {
return '_'
}
return r
+9 -3
View File
@@ -33,6 +33,7 @@ import (
)
var GRPCDialProviderParam = "__DOLT__grpc_dial_provider"
var GRPCUsernameAuthParam = "__DOLT__grpc_username"
type GRPCRemoteConfig struct {
Endpoint string
@@ -100,10 +101,15 @@ func (fact DoltRemoteFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFo
var NoCachingParameter = "__dolt__NO_CACHING"
func (fact DoltRemoteFactory) newChunkStore(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}, dp GRPCDialProvider) (chunks.ChunkStore, error) {
var user string
if userParam := params[GRPCUsernameAuthParam]; userParam != nil {
user = userParam.(string)
}
cfg, err := dp.GetGRPCDialParams(grpcendpoint.Config{
Endpoint: urlObj.Host,
Insecure: fact.insecure,
WithEnvCreds: true,
Endpoint: urlObj.Host,
Insecure: fact.insecure,
UserIdForOsEnvAuth: user,
WithEnvCreds: true,
})
if err != nil {
return nil, err
+1 -1
View File
@@ -42,5 +42,5 @@ const (
EnvDoltAssistAgree = "DOLT_ASSIST_AGREE"
EnvDoltAuthorDate = "DOLT_AUTHOR_DATE"
EnvDoltCommitterDate = "DOLT_COMMITTER_DATE"
EnvDbNameReplaceHyphens = "DOLT_DBNAME_REPLACE_HYPHENS"
EnvDbNameReplace = "DOLT_DBNAME_REPLACE"
)
+16 -13
View File
@@ -115,12 +115,7 @@ func createRepoState(fs filesys.Filesys) (*RepoState, error) {
// deep copy remotes and backups ¯\_(ツ)_/¯ (see commit c59cbead)
if repoState != nil {
repoState.Remotes = repoState.Remotes.DeepCopy()
backups := make(map[string]Remote, len(repoState.Backups))
for n, r := range repoState.Backups {
backups[n] = r
}
repoState.Backups = backups
repoState.Backups = repoState.Backups.DeepCopy()
}
return repoState, rsErr
@@ -863,7 +858,7 @@ func (dEnv *DoltEnv) GetRemotes() (*concurrentmap.Map[string, Remote], error) {
// CheckRemoteAddressConflict checks whether any backups or remotes share the given URL. Returns the first remote if multiple match.
// Returns NoRemote and false if none match.
func CheckRemoteAddressConflict(absUrl string, remotes *concurrentmap.Map[string, Remote], backups map[string]Remote) (Remote, bool) {
func CheckRemoteAddressConflict(absUrl string, remotes *concurrentmap.Map[string, Remote], backups *concurrentmap.Map[string, Remote]) (Remote, bool) {
if remotes != nil {
var rm *Remote
remotes.Iter(func(key string, value Remote) bool {
@@ -878,9 +873,17 @@ func CheckRemoteAddressConflict(absUrl string, remotes *concurrentmap.Map[string
}
}
for _, r := range backups {
if r.Url == absUrl {
return r, true
if backups != nil {
var rm *Remote
backups.Iter(func(key string, value Remote) bool {
if value.Url == absUrl {
rm = &value
return false
}
return true
})
if rm != nil {
return *rm, true
}
}
return NoRemote, false
@@ -910,7 +913,7 @@ func (dEnv *DoltEnv) AddRemote(r Remote) error {
return dEnv.RepoState.Save(dEnv.FS)
}
func (dEnv *DoltEnv) GetBackups() (map[string]Remote, error) {
func (dEnv *DoltEnv) GetBackups() (*concurrentmap.Map[string, Remote], error) {
if dEnv.RSLoadErr != nil {
return nil, dEnv.RSLoadErr
}
@@ -919,7 +922,7 @@ func (dEnv *DoltEnv) GetBackups() (map[string]Remote, error) {
}
func (dEnv *DoltEnv) AddBackup(r Remote) error {
if _, ok := dEnv.RepoState.Backups[r.Name]; ok {
if _, ok := dEnv.RepoState.Backups.Get(r.Name); ok {
return ErrBackupAlreadyExists
}
@@ -976,7 +979,7 @@ func (dEnv *DoltEnv) RemoveRemote(ctx context.Context, name string) error {
}
func (dEnv *DoltEnv) RemoveBackup(ctx context.Context, name string) error {
backup, ok := dEnv.RepoState.Backups[name]
backup, ok := dEnv.RepoState.Backups.Get(name)
if !ok {
return ErrBackupNotFound
}
+1 -1
View File
@@ -53,7 +53,7 @@ func createTestEnv(isInitialized bool, hasLocalConfig bool) (*DoltEnv, *filesys.
initialDirs = append(initialDirs, doltDataDir)
mainRef := ref.NewBranchRef(DefaultInitBranch)
repoState := &RepoState{Head: ref.MarshalableRef{Ref: mainRef}, Remotes: concurrentmap.New[string, Remote](), Branches: concurrentmap.New[string, BranchConfig]()}
repoState := &RepoState{Head: ref.MarshalableRef{Ref: mainRef}, Remotes: concurrentmap.New[string, Remote](), Backups: concurrentmap.New[string, Remote](), Branches: concurrentmap.New[string, BranchConfig]()}
repoStateData, err := json.Marshal(repoState)
if err != nil {
+34 -3
View File
@@ -16,8 +16,10 @@ package env
import (
"crypto/tls"
"errors"
"net"
"net/http"
"os"
"runtime"
"strings"
"unicode"
@@ -25,7 +27,9 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"github.com/dolthub/dolt/go/libraries/doltcore/creds"
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
"github.com/dolthub/dolt/go/libraries/doltcore/dconfig"
"github.com/dolthub/dolt/go/libraries/doltcore/grpcendpoint"
)
@@ -88,9 +92,18 @@ func (p GRPCDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (dbfacto
if config.Creds != nil {
opts = append(opts, grpc.WithPerRPCCredentials(config.Creds))
} else if config.WithEnvCreds {
rpcCreds, err := p.getRPCCreds(endpoint)
if err != nil {
return dbfactory.GRPCRemoteConfig{}, err
var rpcCreds credentials.PerRPCCredentials
var err error
if config.UserIdForOsEnvAuth != "" {
rpcCreds, err = p.getRPCCredsFromOSEnv(config.UserIdForOsEnvAuth)
if err != nil {
return dbfactory.GRPCRemoteConfig{}, err
}
} else {
rpcCreds, err = p.getRPCCreds(endpoint)
if err != nil {
return dbfactory.GRPCRemoteConfig{}, err
}
}
if rpcCreds != nil {
opts = append(opts, grpc.WithPerRPCCredentials(rpcCreds))
@@ -103,6 +116,24 @@ func (p GRPCDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (dbfacto
}, nil
}
// getRPCCredsFromOSEnv returns RPC Credentials for the specified username, using the DOLT_REMOTE_PASSWORD environment variable as the password.
func (p GRPCDialProvider) getRPCCredsFromOSEnv(username string) (credentials.PerRPCCredentials, error) {
if username == "" {
return nil, errors.New("Runtime error: username must be provided to getRPCCredsFromOSEnv")
}
pass, found := os.LookupEnv(dconfig.EnvDoltRemotePassword)
if !found {
return nil, errors.New("error: must set DOLT_REMOTE_PASSWORD environment variable to use --user param")
}
c := creds.DoltCredsForPass{
Username: username,
Password: pass,
}
return c.RPCCreds(), nil
}
// getRPCCreds returns any RPC credentials available to this dial provider. If a DoltEnv has been configured
// in this dial provider, it will be used to load custom user credentials, otherwise nil will be returned.
func (p GRPCDialProvider) getRPCCreds(endpoint string) (credentials.PerRPCCredentials, error) {
+2 -2
View File
@@ -209,7 +209,7 @@ func (m MemoryRepoState) SetCWBHeadRef(_ context.Context, r ref.MarshalableRef)
}
func (m MemoryRepoState) GetRemotes() (*concurrentmap.Map[string, Remote], error) {
return &concurrentmap.Map[string, Remote]{}, nil
return concurrentmap.New[string, Remote](), nil
}
func (m MemoryRepoState) AddRemote(r Remote) error {
@@ -232,7 +232,7 @@ func (m MemoryRepoState) TempTableFilesDir() (string, error) {
return os.TempDir(), nil
}
func (m MemoryRepoState) GetBackups() (map[string]Remote, error) {
func (m MemoryRepoState) GetBackups() (*concurrentmap.Map[string, Remote], error) {
panic("cannot get backups on in memory database")
}
+5 -5
View File
@@ -38,7 +38,7 @@ func TestDirToDBName(t *testing.T) {
" real - name ": "real_name",
}
err := os.Setenv(dconfig.EnvDbNameReplaceHyphens, "true")
err := os.Setenv(dconfig.EnvDbNameReplace, "true")
require.NoError(t, err)
for dirName, expected := range replaceHyphenTests {
@@ -49,10 +49,10 @@ func TestDirToDBName(t *testing.T) {
allowHyphenTests := map[string]string{
"irs": "irs",
"corona-virus": "corona-virus",
" fake - name ": "fake_-_name",
" fake - name ": " fake - name ",
}
err = os.Setenv(dconfig.EnvDbNameReplaceHyphens, "")
err = os.Setenv(dconfig.EnvDbNameReplace, "")
require.NoError(t, err)
for dirName, expected := range allowHyphenTests {
@@ -133,7 +133,7 @@ func TestMultiEnvForDirectory(t *testing.T) {
expected := []envCmp{
{
name: "test---name_123",
name: " test---name _ 123",
doltDir: dEnv.GetDoltDir(),
},
}
@@ -164,7 +164,7 @@ func TestMultiEnvForDirectoryWithMultipleRepos(t *testing.T) {
assert.Len(t, mrEnv.envs, 3)
expected := make(map[string]string)
expected["test---name_123"] = dEnv.GetDoltDir()
expected[" test---name _ 123"] = dEnv.GetDoltDir()
expected["abc"] = subEnv1.GetDoltDir()
expected["def"] = subEnv2.GetDoltDir()
+10
View File
@@ -132,6 +132,16 @@ func (r *Remote) GetRemoteDBWithoutCaching(ctx context.Context, nbf *types.NomsB
return doltdb.LoadDoltDBWithParams(ctx, nbf, r.Url, filesys2.LocalFS, params)
}
func (r Remote) WithParams(params map[string]string) Remote {
fetchSpecs := make([]string, len(r.FetchSpecs))
copy(fetchSpecs, r.FetchSpecs)
for k, v := range r.Params {
params[k] = v
}
r.Params = params
return r
}
// PushOptions contains information needed for push for
// one or more branches or a tag for a specific remote database.
type PushOptions struct {
+17 -8
View File
@@ -32,7 +32,7 @@ type RepoStateReader interface {
CWBHeadRef() (ref.DoltRef, error)
CWBHeadSpec() (*doltdb.CommitSpec, error)
GetRemotes() (*concurrentmap.Map[string, Remote], error)
GetBackups() (map[string]Remote, error)
GetBackups() (*concurrentmap.Map[string, Remote], error)
GetBranches() (*concurrentmap.Map[string, BranchConfig], error)
}
@@ -71,7 +71,7 @@ type BranchConfig struct {
type RepoState struct {
Head ref.MarshalableRef `json:"head"`
Remotes *concurrentmap.Map[string, Remote] `json:"remotes"`
Backups map[string]Remote `json:"backups"`
Backups *concurrentmap.Map[string, Remote] `json:"backups"`
Branches *concurrentmap.Map[string, BranchConfig] `json:"branches"`
// |staged|, |working|, and |merge| are legacy fields left over from when Dolt repos stored this info in the repo
// state file, not in the DB directly. They're still here so that we can migrate existing repositories forward to the
@@ -86,7 +86,7 @@ type RepoState struct {
type repoStateLegacy struct {
Head ref.MarshalableRef `json:"head"`
Remotes *concurrentmap.Map[string, Remote] `json:"remotes"`
Backups map[string]Remote `json:"backups"`
Backups *concurrentmap.Map[string, Remote] `json:"backups"`
Branches *concurrentmap.Map[string, BranchConfig] `json:"branches"`
Staged string `json:"staged,omitempty"`
Working string `json:"working,omitempty"`
@@ -112,7 +112,7 @@ type mergeState struct {
}
func (rs *repoStateLegacy) toRepoState() *RepoState {
return &RepoState{
newRS := &RepoState{
Head: rs.Head,
Remotes: rs.Remotes,
Backups: rs.Backups,
@@ -121,6 +121,15 @@ func (rs *repoStateLegacy) toRepoState() *RepoState {
working: rs.Working,
merge: rs.Merge,
}
if newRS.Remotes == nil {
newRS.Remotes = concurrentmap.New[string, Remote]()
}
if newRS.Backups == nil {
newRS.Backups = concurrentmap.New[string, Remote]()
}
return newRS
}
func (rs *repoStateLegacy) save(fs filesys.ReadWriteFS) error {
@@ -162,7 +171,7 @@ func CloneRepoState(fs filesys.ReadWriteFS, r Remote) (*RepoState, error) {
working: hashStr,
Remotes: remotes,
Branches: concurrentmap.New[string, BranchConfig](),
Backups: make(map[string]Remote),
Backups: concurrentmap.New[string, Remote](),
}
err := rs.Save(fs)
@@ -184,7 +193,7 @@ func CreateRepoState(fs filesys.ReadWriteFS, br string) (*RepoState, error) {
Head: ref.MarshalableRef{Ref: headRef},
Remotes: concurrentmap.New[string, Remote](),
Branches: concurrentmap.New[string, BranchConfig](),
Backups: make(map[string]Remote),
Backups: concurrentmap.New[string, Remote](),
}
err = rs.Save(fs)
@@ -224,9 +233,9 @@ func (rs *RepoState) RemoveRemote(r Remote) {
}
func (rs *RepoState) AddBackup(r Remote) {
rs.Backups[r.Name] = r
rs.Backups.Set(r.Name, r)
}
func (rs *RepoState) RemoveBackup(r Remote) {
delete(rs.Backups, r.Name)
rs.Backups.Delete(r.Name)
}
@@ -27,6 +27,12 @@ type Config struct {
Creds credentials.PerRPCCredentials
WithEnvCreds bool
// If this is non-empty, and WithEnvCreds is true, then the caller is
// requesting to use username/password authentication instead of JWT
// authentication against the gRPC endpoint. Currently, the password
// comes from the OS environment variable DOLT_REMOTE_PASSWORD.
UserIdForOsEnvAuth string
// If non-nil, this is used for transport level security in the dial
// options, instead of a default option based on `Insecure`.
TLSConfig *tls.Config
+258 -158
View File
@@ -68,7 +68,6 @@ func mergeProllyTable(ctx context.Context, tm *TableMerger, mergedSch schema.Sch
}
leftRows := durable.ProllyMapFromIndex(lr)
valueMerger := newValueMerger(mergedSch, tm.leftSch, tm.rightSch, tm.ancSch, leftRows.Pool(), tm.ns)
leftMapping := valueMerger.leftMapping
// We need a sql.Context to apply column default values in merges; if we don't have one already,
// create one, since this code also gets called from the CLI merge code path.
@@ -77,32 +76,12 @@ func mergeProllyTable(ctx context.Context, tm *TableMerger, mergedSch schema.Sch
sqlCtx = sql.NewContext(ctx)
}
// Migrate primary index data to rewrite the values on the left side of the merge if necessary
schemasDifferentSize := len(tm.leftSch.GetAllCols().GetColumns()) != len(mergedSch.GetAllCols().GetColumns())
if rewriteRows || schemasDifferentSize || leftMapping.IsIdentityMapping() == false {
if err := migrateDataToMergedSchema(sqlCtx, tm, valueMerger, mergedSch); err != nil {
return nil, nil, err
}
// After we migrate the data on the left-side to the new, merged schema, we reset
// the left mapping to an identity mapping, since it's a direct mapping now.
// However, columns that did not exist on the left schema shouldn't be updated, because they
// still don't exist on the migrated data.
for i, col := range mergedSch.GetNonPKCols().GetColumns() {
if findNonPKColumnMappingByTagOrName(tm.leftSch, col) != -1 {
valueMerger.leftMapping[i] = i
}
}
}
// After we've migrated the existing data to the new schema, it's safe for us to update the schema on the table
mergeTbl, err = tm.leftTbl.UpdateSchema(sqlCtx, mergedSch)
if err != nil {
return nil, nil, err
}
rebuildPrimaryIndex := rewriteRows || schemasDifferentSize || !valueMerger.leftMapping.IsIdentityMapping()
rebuidSecondaryIndexes := rewriteRows
var stats *MergeStats
mergeTbl, stats, err = mergeProllyTableData(sqlCtx, tm, mergedSch, mergeTbl, valueMerger, rewriteRows)
mergeTbl, stats, err = mergeProllyTableData(sqlCtx, tm, mergedSch, mergeTbl, valueMerger, rebuildPrimaryIndex, rebuidSecondaryIndexes)
if err != nil {
return nil, nil, err
}
@@ -127,10 +106,12 @@ func mergeProllyTable(ctx context.Context, tm *TableMerger, mergedSch schema.Sch
// to the right-side, we apply it to the left-side by merging it into the left-side's primary index
// as well as any secondary indexes, and also checking for unique constraints incrementally. When
// conflicts are detected, this function attempts to resolve them automatically if possible, and
// if not, they are recorded as conflicts in the table's artifacts. If |rebuildIndexes| is set to
// true, then secondary indexes will be rebuilt, instead of being incrementally merged together. This
// is less efficient, but safer, especially when type changes have been applied to a table's schema.
func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Schema, mergeTbl *doltdb.Table, valueMerger *valueMerger, rebuildIndexes bool) (*doltdb.Table, *MergeStats, error) {
// if not, they are recorded as conflicts in the table's artifacts. If |rebuildPrimaryIndex| is set to
// true, then every row in the primary index will be recomputed. This is usually because the right side
// introduced a schema change. If |rebuildSecondaryIndexes| is true, then the secondary indexes will be
// rebuilt instead of being incrementally merged together. This is less efficient, but safer, especially
// when type changes have been applied to a table's schema.
func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Schema, mergeTbl *doltdb.Table, valueMerger *valueMerger, rebuildPrimaryIndex, rebuildSecondaryIndexes bool) (*doltdb.Table, *MergeStats, error) {
iter, err := threeWayDiffer(ctx, tm, valueMerger)
if err != nil {
return nil, nil, err
@@ -211,6 +192,17 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch
s.ConstraintViolations += cnt
switch diff.Op {
case tree.DiffOpLeftAdd, tree.DiffOpLeftModify:
// In the event that the right side introduced a schema change, account for it here.
// We still have to migrate when the diff is `tree.DiffOpLeftModify` because of the corner case where
// the right side contains a schema change but the changed column is null, so row bytes don't change.
if rebuildPrimaryIndex {
err = pri.merge(ctx, diff, tm.leftSch)
if err != nil {
return nil, nil, err
}
}
case tree.DiffOpDivergentModifyConflict, tree.DiffOpDivergentDeleteConflict:
// In this case, a modification or delete was made to one side, and a conflicting delete or modification
// was made to the other side, so these cannot be automatically resolved.
@@ -219,13 +211,17 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch
if err != nil {
return nil, nil, err
}
err = pri.merge(ctx, diff, tm.leftSch)
if err != nil {
return nil, nil, err
}
case tree.DiffOpRightAdd:
s.Adds++
err = pri.merge(ctx, diff, tm.rightSch)
if err != nil {
return nil, nil, err
}
err = sec.merge(ctx, diff, tm.rightSch)
err = sec.merge(ctx, diff, tm.leftSch, tm.rightSch, tm, finalSch)
if err != nil {
return nil, nil, err
}
@@ -235,17 +231,17 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch
if err != nil {
return nil, nil, err
}
err = sec.merge(ctx, diff, tm.rightSch)
err = sec.merge(ctx, diff, tm.leftSch, tm.rightSch, tm, finalSch)
if err != nil {
return nil, nil, err
}
case tree.DiffOpRightDelete:
case tree.DiffOpRightDelete, tree.DiffOpDivergentDeleteResolved:
s.Deletes++
err = pri.merge(ctx, diff, tm.rightSch)
if err != nil {
return nil, nil, err
}
err = sec.merge(ctx, diff, tm.rightSch)
err = sec.merge(ctx, diff, tm.leftSch, tm.rightSch, tm, finalSch)
if err != nil {
return nil, nil, err
}
@@ -257,7 +253,7 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch
if err != nil {
return nil, nil, err
}
err = sec.merge(ctx, diff, nil)
err = sec.merge(ctx, diff, tm.leftSch, tm.rightSch, tm, finalSch)
if err != nil {
return nil, nil, err
}
@@ -276,6 +272,12 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch
}
}
// After we've resolved all the diffs, it's safe for us to update the schema on the table
mergeTbl, err = tm.leftTbl.UpdateSchema(ctx, finalSch)
if err != nil {
return nil, nil, err
}
finalRows, err := pri.finalize(ctx)
if err != nil {
return nil, nil, err
@@ -286,7 +288,7 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch
return nil, nil, err
}
finalIdxs, err := mergeProllySecondaryIndexes(ctx, tm, leftIdxs, rightIdxs, finalSch, finalRows, conflicts.ae, rebuildIndexes)
finalIdxs, err := mergeProllySecondaryIndexes(ctx, tm, leftIdxs, rightIdxs, finalSch, finalRows, conflicts.ae, rebuildSecondaryIndexes)
if err != nil {
return nil, nil, err
}
@@ -398,7 +400,7 @@ func (cv checkValidator) validateDiff(ctx *sql.Context, diff tree.ThreeWayDiff)
var valueTuple val.Tuple
var valueDesc val.TupleDesc
switch diff.Op {
case tree.DiffOpLeftDelete, tree.DiffOpRightDelete, tree.DiffOpConvergentDelete:
case tree.DiffOpLeftDelete, tree.DiffOpRightDelete, tree.DiffOpConvergentDelete, tree.DiffOpDivergentDeleteResolved:
// no need to validate check constraints for deletes
return 0, nil
case tree.DiffOpDivergentDeleteConflict, tree.DiffOpDivergentModifyConflict:
@@ -420,14 +422,18 @@ func (cv checkValidator) validateDiff(ctx *sql.Context, diff tree.ThreeWayDiff)
}
for checkName, checkExpression := range cv.checkExpressions {
// If the row came from the right side of the merge, then remap it (if necessary) to the final schema.
// This isn't necessary for left-side changes, because we already migrated the primary index data to
// the merged schema, and we skip keyless tables, since their value tuples require different mapping
// Remap the value to the final schema before checking.
// We skip keyless tables, since their value tuples require different mapping
// logic and we don't currently support merges to keyless tables that contain schema changes anyway.
newTuple := valueTuple
if !cv.valueMerger.keyless && (diff.Op == tree.DiffOpRightAdd || diff.Op == tree.DiffOpRightModify) {
newTupleBytes := remapTuple(valueTuple, valueDesc, cv.valueMerger.rightMapping)
newTuple = val.NewTuple(cv.valueMerger.syncPool, newTupleBytes...)
if !cv.valueMerger.keyless {
if diff.Op == tree.DiffOpRightAdd || diff.Op == tree.DiffOpRightModify {
newTupleBytes := remapTuple(valueTuple, valueDesc, cv.valueMerger.rightMapping)
newTuple = val.NewTuple(cv.valueMerger.syncPool, newTupleBytes...)
} else if diff.Op == tree.DiffOpLeftAdd || diff.Op == tree.DiffOpLeftModify {
newTupleBytes := remapTuple(valueTuple, valueDesc, cv.valueMerger.leftMapping)
newTuple = val.NewTuple(cv.valueMerger.syncPool, newTupleBytes...)
}
}
row, err := index.BuildRow(ctx, diff.Key, newTuple, cv.sch, cv.valueMerger.ns)
@@ -545,11 +551,23 @@ func newUniqValidator(ctx *sql.Context, sch schema.Schema, tm *TableMerger, vm *
// validateDiff processes |diff| and checks for any unique constraint violations that need to be updated. The number
// of violations recorded along with any error encountered is returned. Processing |diff| may resolve existing unique
// constraint violations, in which case the violations returned may be a negative number.
func (uv uniqValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff) (violations int, err error) {
func (uv uniqValidator) validateDiff(ctx *sql.Context, diff tree.ThreeWayDiff) (violations int, err error) {
var value val.Tuple
switch diff.Op {
case tree.DiffOpRightAdd, tree.DiffOpRightModify:
value = diff.Right
// Don't remap the value to the merged schema if the table is keyless or if the mapping is an identity mapping.
if !uv.valueMerger.keyless && !uv.valueMerger.rightMapping.IsIdentityMapping() {
modifiedValue := remapTuple(value, uv.tm.rightSch.GetValueDescriptor(), uv.valueMerger.rightMapping)
value = val.NewTuple(uv.valueMerger.syncPool, modifiedValue...)
}
case tree.DiffOpLeftAdd, tree.DiffOpLeftModify:
value = diff.Left
// Don't remap the value to the merged schema if the table is keyless or if the mapping is an identity mapping.
if !uv.valueMerger.keyless && !uv.valueMerger.leftMapping.IsIdentityMapping() {
modifiedValue := remapTuple(value, uv.tm.leftSch.GetValueDescriptor(), uv.valueMerger.leftMapping)
value = val.NewTuple(uv.valueMerger.syncPool, modifiedValue...)
}
case tree.DiffOpRightDelete:
// If we see a row deletion event from the right side, we grab the original/base value so that we can update our
// local copy of the secondary index.
@@ -560,12 +578,6 @@ func (uv uniqValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff
return
}
// Don't remap the value to the merged schema if the table is keyless or if the mapping is an identity mapping.
if !uv.valueMerger.keyless && !uv.valueMerger.rightMapping.IsIdentityMapping() {
modifiedValue := remapTuple(value, uv.tm.rightSch.GetValueDescriptor(), uv.valueMerger.rightMapping)
value = val.NewTuple(uv.valueMerger.syncPool, modifiedValue...)
}
// For a row deletion... we need to remove any unique constraint violations that were previously recorded for
// this row.
if diff.Op == tree.DiffOpRightDelete {
@@ -594,7 +606,7 @@ func (uv uniqValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff
}
// After detecting any unique constraint violations, we need to update our indexes with the updated row
if diff.Op == tree.DiffOpRightAdd || diff.Op == tree.DiffOpRightModify || diff.Op == tree.DiffOpDivergentModifyResolved {
if diff.Op != tree.DiffOpRightDelete {
for _, idx := range uv.indexes {
err := idx.insertRow(ctx, diff.Key, value)
if err != nil {
@@ -920,6 +932,7 @@ func (nv nullValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff
}
}
}
count = len(violations)
}
return
}
@@ -1055,6 +1068,8 @@ func (m *primaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, sourceSc
return m.mut.Put(ctx, diff.Key, newTupleValue)
case tree.DiffOpRightDelete:
return m.mut.Put(ctx, diff.Key, diff.Right)
case tree.DiffOpDivergentDeleteResolved:
return m.mut.Delete(ctx, diff.Key)
case tree.DiffOpDivergentModifyResolved:
// any generated columns need to be re-resolved because their computed values may have changed as a result of
// the merge
@@ -1084,6 +1099,31 @@ func (m *primaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, sourceSc
}
return m.mut.Put(ctx, diff.Key, merged)
case tree.DiffOpLeftAdd, tree.DiffOpLeftModify, tree.DiffOpDivergentModifyConflict, tree.DiffOpDivergentDeleteConflict:
// If the right side has a schema change, then newly added rows from the left must be migrated to the new schema.
// Rows with unresolvable conflicts must also be migrated to the new schema so that they can be resolved manually.
if diff.Left == nil {
return m.mut.Put(ctx, diff.Key, nil)
}
newTupleValue := diff.Left
if schema.IsKeyless(sourceSch) {
if m.valueMerger.leftMapping.IsIdentityMapping() == false {
return fmt.Errorf("cannot merge keyless tables with reordered columns")
}
} else {
defaults, err := resolveDefaults(ctx, m.tableMerger.name, m.finalSch, m.tableMerger.leftSch)
if err != nil {
return err
}
tempTupleValue, err := remapTupleWithColumnDefaults(ctx, diff.Key, newTupleValue, sourceSch.GetValueDescriptor(),
m.valueMerger.leftMapping, m.tableMerger, m.tableMerger.leftSch, m.finalSch, defaults, m.valueMerger.syncPool, false)
if err != nil {
return err
}
newTupleValue = tempTupleValue
}
return m.mut.Put(ctx, diff.Key, newTupleValue)
default:
return fmt.Errorf("unexpected diffOp for editing primary index: %s", diff.Op)
}
@@ -1209,7 +1249,7 @@ func newSecondaryMerger(ctx *sql.Context, tm *TableMerger, valueMerger *valueMer
}, nil
}
func (m *secondaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, sourceSch schema.Schema) error {
func (m *secondaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, leftSchema, rightSchema schema.Schema, tm *TableMerger, finalSchema schema.Schema) error {
var err error
for _, idx := range m.leftIdxes {
switch diff.Op {
@@ -1218,12 +1258,13 @@ func (m *secondaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, source
err = applyEdit(ctx, idx, diff.Key, diff.Left, diff.Merged)
case tree.DiffOpRightAdd, tree.DiffOpRightModify:
// Just as with the primary index, we need to map right-side changes to the final, merged schema.
if sourceSch == nil {
if rightSchema == nil {
return fmt.Errorf("no source schema specified to map right-side changes to merged schema")
}
newTupleValue := diff.Right
if schema.IsKeyless(sourceSch) {
baseTupleValue := diff.Base
if schema.IsKeyless(rightSchema) {
if m.valueMerger.rightMapping.IsIdentityMapping() == false {
return fmt.Errorf("cannot merge keyless tables with reordered columns")
}
@@ -1233,11 +1274,12 @@ func (m *secondaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, source
return err
}
// Convert right value to result schema
tempTupleValue, err := remapTupleWithColumnDefaults(
ctx,
diff.Key,
diff.Right,
sourceSch.GetValueDescriptor(),
m.valueMerger.rightSchema.GetValueDescriptor(),
m.valueMerger.rightMapping,
m.tableMerger,
m.tableMerger.rightSch,
@@ -1250,9 +1292,33 @@ func (m *secondaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, source
return err
}
newTupleValue = tempTupleValue
if diff.Base != nil {
defaults, err := resolveDefaults(ctx, m.tableMerger.name, m.mergedSchema, m.tableMerger.ancSch)
if err != nil {
return err
}
// Convert base value to result schema
baseTupleValue, err = remapTupleWithColumnDefaults(
ctx,
diff.Key,
diff.Base,
// Only the right side was modified, so the base schema must be the same as the left schema
leftSchema.GetValueDescriptor(),
m.valueMerger.baseMapping,
tm,
m.tableMerger.ancSch,
finalSchema,
defaults,
m.valueMerger.syncPool,
false)
if err != nil {
return err
}
}
}
err = applyEdit(ctx, idx, diff.Key, diff.Base, newTupleValue)
err = applyEdit(ctx, idx, diff.Key, baseTupleValue, newTupleValue)
case tree.DiffOpRightDelete:
err = applyEdit(ctx, idx, diff.Key, diff.Base, diff.Right)
default:
@@ -1480,9 +1546,12 @@ func mergeTableArtifacts(ctx context.Context, tm *TableMerger, mergeTbl *doltdb.
// a three-way cell edit (tree.DiffOpDivergentModifyResolved).
type valueMerger struct {
numCols int
baseVD, rightVD, resultVD val.TupleDesc
resultSchema schema.Schema
baseVD, leftVD, rightVD, resultVD val.TupleDesc
leftSchema, rightSchema, resultSchema schema.Schema
leftMapping, rightMapping, baseMapping val.OrdinalMapping
baseToLeftMapping val.OrdinalMapping
baseToRightMapping val.OrdinalMapping
baseToResultMapping val.OrdinalMapping
syncPool pool.BuffPool
keyless bool
ns tree.NodeStore
@@ -1491,18 +1560,26 @@ type valueMerger struct {
func newValueMerger(merged, leftSch, rightSch, baseSch schema.Schema, syncPool pool.BuffPool, ns tree.NodeStore) *valueMerger {
leftMapping, rightMapping, baseMapping := generateSchemaMappings(merged, leftSch, rightSch, baseSch)
baseToLeftMapping, baseToRightMapping, baseToResultMapping := generateSchemaMappings(baseSch, leftSch, rightSch, merged)
return &valueMerger{
numCols: merged.GetNonPKCols().Size(),
baseVD: baseSch.GetValueDescriptor(),
rightVD: rightSch.GetValueDescriptor(),
resultVD: merged.GetValueDescriptor(),
resultSchema: merged,
leftMapping: leftMapping,
rightMapping: rightMapping,
baseMapping: baseMapping,
syncPool: syncPool,
keyless: schema.IsKeyless(merged),
ns: ns,
numCols: merged.GetNonPKCols().Size(),
baseVD: baseSch.GetValueDescriptor(),
rightVD: rightSch.GetValueDescriptor(),
resultVD: merged.GetValueDescriptor(),
leftVD: leftSch.GetValueDescriptor(),
resultSchema: merged,
leftMapping: leftMapping,
rightMapping: rightMapping,
baseMapping: baseMapping,
baseToLeftMapping: baseToLeftMapping,
baseToRightMapping: baseToRightMapping,
baseToResultMapping: baseToResultMapping,
leftSchema: leftSch,
rightSchema: rightSch,
syncPool: syncPool,
keyless: schema.IsKeyless(merged),
ns: ns,
}
}
@@ -1552,89 +1629,6 @@ func findNonPKColumnMappingByTagOrName(sch schema.Schema, col schema.Column) int
}
}
// migrateDataToMergedSchema migrates the data from the left side of the merge of a table to the merged schema. This
// currently only includes updating the primary index. This is necessary when a schema change is
// being applied, so that when the new schema is used to pull out data from the table, it will be in the right order.
func migrateDataToMergedSchema(ctx *sql.Context, tm *TableMerger, vm *valueMerger, mergedSch schema.Schema) error {
lr, err := tm.leftTbl.GetRowData(ctx)
if err != nil {
return err
}
leftRows := durable.ProllyMapFromIndex(lr)
mut := leftRows.Rewriter(mergedSch.GetKeyDescriptor(), mergedSch.GetValueDescriptor())
mapIter, err := mut.IterAll(ctx)
if err != nil {
return err
}
leftSch, err := tm.leftTbl.GetSchema(ctx)
if err != nil {
return err
}
valueDescriptor := leftSch.GetValueDescriptor()
defaults, err := resolveDefaults(ctx, tm.name, mergedSch, tm.leftSch)
if err != nil {
return err
}
for {
keyTuple, valueTuple, err := mapIter.Next(ctx)
if err == io.EOF {
break
} else if err != nil {
return err
}
newValueTuple, err := remapTupleWithColumnDefaults(
ctx,
keyTuple,
valueTuple,
valueDescriptor,
vm.leftMapping,
tm,
tm.leftSch,
mergedSch,
defaults,
vm.syncPool,
false,
)
if err != nil {
return err
}
err = mut.Put(ctx, keyTuple, newValueTuple)
if err != nil {
return err
}
}
m, err := mut.Map(ctx)
if err != nil {
return err
}
newIndex := durable.IndexFromProllyMap(m)
newTable, err := tm.leftTbl.UpdateRows(ctx, newIndex)
if err != nil {
return err
}
tm.leftTbl = newTable
// NOTE: We don't handle migrating secondary index data to the new schema here. In most cases, a schema change
// won't affect secondary index data, but there are a few cases where we do rebuild the indexes. Dropping
// a column *should* keep the index, but remove that column from it and rebuild it, but Dolt/GMS currently
// drop the index completely if a used column is removed.
// https://github.com/dolthub/dolt/issues/5641
//
// Most of the currently supported type changes for a schema merge don't require rebuilding secondary
// indexes either. The exception is converting a column to BINARY, which requires us to rewrite the data
// and ensure it's right-padded with null bytes, up to the column length. This is handled at the end of
// the table merge. If we detect that the table rows needed to be rewritten, then we'll rebuild all indexes
// on the table, just to be safe.
return nil
}
// tryMerge performs a cell-wise merge given left, right, and base cell value
// tuples. It returns the merged cell value tuple and a bool indicating if a
// conflict occurred. tryMerge should only be called if left and right produce
@@ -1647,15 +1641,20 @@ func (m *valueMerger) tryMerge(ctx context.Context, left, right, base val.Tuple)
return nil, false, nil
}
if base != nil && (left == nil) != (right == nil) {
// One row deleted, the other modified
return nil, false, nil
for i := 0; i < len(m.baseToRightMapping); i++ {
isConflict, err := m.processBaseColumn(ctx, i, left, right, base)
if err != nil {
return nil, false, err
}
if isConflict {
return nil, false, nil
}
}
// Because we have non-identical diffs, left and right are guaranteed to be
// non-nil at this point.
if left == nil || right == nil {
panic("found nil left / right which should never occur")
if base != nil && (left == nil) != (right == nil) {
// One row deleted, the other modified
// We just validated that this is not a conflict.
return nil, true, nil
}
mergedValues := make([][]byte, m.numCols)
@@ -1673,6 +1672,99 @@ func (m *valueMerger) tryMerge(ctx context.Context, left, right, base val.Tuple)
return val.NewTuple(m.syncPool, mergedValues...), true, nil
}
// processBaseColumn returns whether column |i| of the base schema,
// if removed on one side, causes a conflict when merged with the other side.
//
// Columns that survive into the merged schema are handled by processColumn;
// this function only inspects base columns whose fate depends on a column
// drop or a row deletion on one of the two branches.
func (m *valueMerger) processBaseColumn(ctx context.Context, i int, left, right, base val.Tuple) (conflict bool, err error) {
	if base == nil {
		// We're resolving an insertion. This can be done entirely in `processColumn`.
		return false, nil
	}
	baseCol := base.GetField(i)
	if left == nil {
		// Left side deleted the row. Thus, right side must have modified the row in order for there to be a conflict to resolve.
		rightCol, rightColIdx, rightColExists := getColumn(&right, &m.baseToRightMapping, i)
		if !rightColExists {
			// Right side deleted the column while left side deleted the row. This is not a conflict.
			return false, nil
		}
		// This is a conflict if the value on the right changed.
		// But if the right side only changed its representation (from ALTER COLUMN) and still has the same value,
		// then this can be resolved.
		baseCol, err = convert(ctx, m.baseVD, m.rightVD, m.rightSchema, i, rightColIdx, base, baseCol, m.ns)
		if err != nil {
			return false, err
		}
		if m.resultVD.Comparator().CompareValues(i, baseCol, rightCol, m.rightVD.Types[rightColIdx]) == 0 {
			// right column did not change, so there is no conflict.
			return false, nil
		}
		// conflicting modifications
		return true, nil
	}
	if right == nil {
		// Right side deleted the row. Thus, left side must have modified the row in order for there to be a conflict to resolve.
		leftCol, leftColIdx, leftColExists := getColumn(&left, &m.baseToLeftMapping, i)
		if !leftColExists {
			// Left side deleted the column while right side deleted the row. This is not a conflict.
			return false, nil
		}
		// This is a conflict if the value on the left changed.
		// But if the left side only changed its representation (from ALTER COLUMN) and still has the same value,
		// then this can be resolved.
		baseCol, err = convert(ctx, m.baseVD, m.leftVD, m.leftSchema, i, leftColIdx, base, baseCol, m.ns)
		if err != nil {
			return false, err
		}
		if m.resultVD.Comparator().CompareValues(i, baseCol, leftCol, m.leftVD.Types[leftColIdx]) == 0 {
			// left column did not change, so there is no conflict.
			return false, nil
		}
		// conflicting modifications
		return true, nil
	}
	// Both rows still exist: determine whether the base column survived on each side.
	rightCol, rightColIdx, rightColExists := getColumn(&right, &m.baseToRightMapping, i)
	leftCol, leftColIdx, leftColExists := getColumn(&left, &m.baseToLeftMapping, i)
	if leftColExists && rightColExists {
		// This column also exists in the merged schema, and will be processed there.
		return false, nil
	}
	if !leftColExists && !rightColExists {
		// This column is a convergent deletion. There is no conflict.
		return false, nil
	}
	// Exactly one side dropped the column; there is a conflict only if the side
	// that kept the column also changed its value relative to the base.
	var modifiedCol []byte
	var modifiedColIdx int
	var modifiedSchema schema.Schema
	var modifiedVD val.TupleDesc
	if !leftColExists {
		modifiedCol, modifiedColIdx = rightCol, rightColIdx
		modifiedSchema = m.rightSchema
		modifiedVD = m.rightVD
	} else {
		modifiedCol, modifiedColIdx = leftCol, leftColIdx
		modifiedSchema = m.leftSchema
		modifiedVD = m.leftVD
	}
	baseCol, err = convert(ctx, m.baseVD, modifiedVD, modifiedSchema, i, modifiedColIdx, base, baseCol, m.ns)
	if err != nil {
		return false, err
	}
	// BUGFIX: index the surviving side's type descriptor with that side's column
	// index (modifiedColIdx), not the base index |i| — matching the row-deletion
	// branches above, which use rightColIdx/leftColIdx. Using |i| can reference
	// the wrong (or an out-of-range) column in the surviving schema.
	if modifiedVD.Comparator().CompareValues(i, baseCol, modifiedCol, modifiedVD.Types[modifiedColIdx]) == 0 {
		return false, nil
	}
	return true, nil
}
// processColumn returns the merged value of column |i| of the merged schema,
// based on the |left|, |right|, and |base| schema.
func (m *valueMerger) processColumn(ctx context.Context, i int, left, right, base val.Tuple) (result []byte, conflict bool, err error) {
@@ -1711,6 +1803,11 @@ func (m *valueMerger) processColumn(ctx context.Context, i int, left, right, bas
return rightCol, false, nil
}
leftCol, err = convert(ctx, m.leftVD, m.resultVD, m.resultSchema, leftColIdx, i, left, leftCol, m.ns)
if err != nil {
return nil, false, err
}
if m.resultVD.Comparator().CompareValues(i, leftCol, rightCol, resultType) == 0 {
// columns are equal, return either.
return leftCol, false, nil
@@ -1762,7 +1859,10 @@ func (m *valueMerger) processColumn(ctx context.Context, i int, left, right, bas
rightModified = m.resultVD.Comparator().CompareValues(i, rightCol, baseCol, resultType) != 0
}
// The left value was previously converted in `migrateDataToMergedSchema`, so we don't need to convert it here.
leftCol, err = convert(ctx, m.leftVD, m.resultVD, m.resultSchema, leftColIdx, i, left, leftCol, m.ns)
if err != nil {
return nil, true, nil
}
if m.resultVD.Comparator().CompareValues(i, leftCol, rightCol, resultType) == 0 {
// columns are equal, return either.
return leftCol, false, nil
@@ -71,6 +71,19 @@ func GetMutableSecondaryIdxsWithPending(ctx *sql.Context, sch schema.Schema, tab
return nil, err
}
m := durable.ProllyMapFromIndex(idx)
// If the schema has changed, don't reuse the index.
// TODO: This isn't technically required, but correctly handling updating secondary indexes when only some
// of the table's rows have been updated is difficult to get right.
// Dropping the index is potentially slower but guaranteed to be correct.
if !m.KeyDesc().Equals(index.Schema().GetKeyDescriptorWithNoConversion()) {
continue
}
if !m.ValDesc().Equals(index.Schema().GetValueDescriptor()) {
continue
}
if schema.IsKeyless(sch) {
m = prolly.ConvertToSecondaryKeylessIndex(m)
}
@@ -173,16 +173,6 @@ var testCases = []testCase{
true,
false,
},
{
"dropping a column should be equivalent to setting a column to null",
build(1, 2, 0),
build(2, 1),
build(1, 1, 1),
3, 2, 3,
build(2, 2),
true,
false,
},
// TODO (dhruv): need to fix this test case for new storage format
//{
// "add rows but one holds a new column",
@@ -268,24 +268,25 @@ var columnAddDropTests = []schemaMergeTest{
skip: true,
},
{
// Skipped because the differ currently doesn't try to merge the dropped column.
// (https://github.com/dolthub/dolt/issues/6747)
name: "one side sets to non-NULL, other drops NULL, plus data change",
ancestor: singleRow(1, 2, nil),
left: singleRow(1, 3),
right: singleRow(1, 2, 3),
dataConflict: true,
skip: true,
},
{
// Skipped because the differ currently doesn't try to merge the dropped column.
// (https://github.com/dolthub/dolt/issues/6747)
name: "one side sets to non-NULL, other drops non-NULL",
ancestor: singleRow(1, 2, 3),
left: singleRow(1, 2),
right: singleRow(1, 2, 4),
dataConflict: true,
skip: true,
},
{
name: "one side drops column, other deletes row",
ancestor: []sql.Row{row(1, 2, 3), row(4, 5, 6)},
left: []sql.Row{row(1, 2), row(4, 5)},
right: []sql.Row{row(1, 2, 3)},
merged: []sql.Row{row(1, 2)},
},
},
},
@@ -304,54 +305,39 @@ var columnAddDropTests = []schemaMergeTest{
merged: singleRow(1, 3),
},
{
// Skipped because the differ currently doesn't try to merge the dropped column.
// (https://github.com/dolthub/dolt/issues/6747)
name: "one side sets to NULL, other drops non-NULL",
ancestor: singleRow(1, 2, 3),
left: singleRow(1, 3),
right: singleRow(1, nil, 3),
dataConflict: true,
skip: true,
},
{
// Skipped because the differ currently doesn't try to merge the dropped column.
// (https://github.com/dolthub/dolt/issues/6747)
name: "one side sets to NULL, other drops non-NULL, plus data change",
ancestor: singleRow(1, 2, 4),
left: singleRow(1, 3),
right: singleRow(1, nil, 4),
dataConflict: true,
skip: true,
},
{
// Skipped because the differ currently doesn't try to merge the dropped column.
// (https://github.com/dolthub/dolt/issues/6747)
name: "one side sets to non-NULL, other drops NULL, plus data change",
ancestor: singleRow(1, nil, 3),
left: singleRow(1, 3),
right: singleRow(1, 2, 3),
dataConflict: true,
skip: true,
},
{
// Skipped because the differ currently doesn't try to merge the dropped column.
// (https://github.com/dolthub/dolt/issues/6747)
name: "one side sets to non-NULL, other drops NULL, plus data change",
ancestor: singleRow(1, nil, 3),
left: singleRow(1, 4),
right: singleRow(1, 2, 3),
dataConflict: true,
skip: true,
},
{
// Skipped because the differ currently doesn't try to merge the dropped column.
// (https://github.com/dolthub/dolt/issues/6747)
name: "one side sets to non-NULL, other drops non-NULL",
ancestor: singleRow(1, 2, 3),
left: singleRow(1, 3),
right: singleRow(1, 4, 3),
dataConflict: true,
skip: true,
},
},
},
@@ -547,6 +533,29 @@ var columnAddDropTests = []schemaMergeTest{
skipNewFmt: true,
skipOldFmt: true,
},
{
name: "right side drops and adds column of same type",
ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, c int, a int)")),
left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, c int, a int)")),
right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, c int, b int)")),
merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, c int, b int)")),
dataTests: []dataTest{
{
name: "left side modifies dropped column",
ancestor: singleRow(1, 1, 2),
left: singleRow(1, 1, 3),
right: singleRow(1, 2, 2),
dataConflict: true,
},
},
},
{
name: "right side drops and adds column of different type",
ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, c int, a int)")),
left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, c int, a int)")),
right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, c int, b text)")),
merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, c int, b text)")),
},
}
var columnDefaultTests = []schemaMergeTest{
@@ -735,6 +744,27 @@ var typeChangeTests = []schemaMergeTest{
right: singleRow(1, "hello world", 1, "hello world"),
dataConflict: true,
},
{
name: "delete and schema change on left",
ancestor: singleRow(1, "test", 1, "test"),
left: nil,
right: singleRow(1, "test", 1, "test"),
merged: nil,
},
{
name: "schema change on left, delete on right",
ancestor: singleRow(1, "test", 1, "test"),
left: singleRow(1, "test", 1, "test"),
right: nil,
merged: nil,
},
{
name: "schema and value change on left, delete on right",
ancestor: singleRow(1, "test", 1, "test"),
left: singleRow(1, "hello", 1, "hello"),
right: nil,
dataConflict: true,
},
},
},
}
@@ -952,7 +982,7 @@ func testSchemaMergeHelper(t *testing.T, tests []schemaMergeTest, flipSides bool
require.NoError(t, err)
foundDataConflict = foundDataConflict || hasConflict
}
require.Equal(t, expectDataConflict, foundDataConflict)
require.True(t, foundDataConflict, "Expected data conflict, but didn't find one.")
} else {
for name, addr := range exp {
a, ok := act[name]
@@ -978,17 +1008,19 @@ func testSchemaMergeHelper(t *testing.T, tests []schemaMergeTest, flipSides bool
runTest(t, test, false)
})
for _, data := range test.dataTests {
test.ancestor.rows = data.ancestor
test.left.rows = data.left
test.right.rows = data.right
test.merged.rows = data.merged
test.skipNewFmt = test.skipNewFmt || data.skip
test.skipFlipOnNewFormat = test.skipFlipOnNewFormat || data.skipFlip
// Copy the test so that the values from one data test don't affect subsequent data tests.
dataDest := test
dataDest.ancestor.rows = data.ancestor
dataDest.left.rows = data.left
dataDest.right.rows = data.right
dataDest.merged.rows = data.merged
dataDest.skipNewFmt = dataDest.skipNewFmt || data.skip
dataDest.skipFlipOnNewFormat = dataDest.skipFlipOnNewFormat || data.skipFlip
t.Run(data.name, func(t *testing.T) {
if data.skip {
t.Skip()
}
runTest(t, test, data.dataConflict)
runTest(t, dataDest, data.dataConflict)
})
}
})
+39
View File
@@ -30,6 +30,8 @@ import (
"strings"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/hash"
@@ -397,3 +399,40 @@ func getFileReaderAt(path string, offset int64, length int64) (io.ReadCloser, in
r := closerReaderWrapper{io.LimitReader(f, length), f}
return r, fSize, nil
}
// ExtractBasicAuthCreds extracts the username and password from the incoming request. It returns RequestCredentials
// populated with necessary information to authenticate the request. nil and an error will be returned if any error
// occurs.
//
// If the request carries no authorization header, the request is attributed to the
// default "root" user with an empty password.
func ExtractBasicAuthCreds(ctx context.Context) (*RequestCredentials, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return nil, errors.New("no metadata in context")
	}

	// Default identity used when the client supplied no (or multiple) authorization headers,
	// preserving the previous behavior.
	username := "root"
	password := ""

	if auths := md.Get("authorization"); len(auths) == 1 {
		auth := auths[0]
		if !strings.HasPrefix(auth, "Basic ") {
			return nil, fmt.Errorf("bad request: authorization header did not start with 'Basic '")
		}
		authTrim := strings.TrimPrefix(auth, "Basic ")
		uDec, err := base64.URLEncoding.DecodeString(authTrim)
		if err != nil {
			return nil, fmt.Errorf("incoming request authorization header failed to decode: %v", err)
		}
		// Basic credentials are "user:pass" where the password may itself contain
		// colons, so split on the first ':' only. The previous strings.Split version
		// panicked with an index-out-of-range when no colon was present and silently
		// truncated passwords containing ':'.
		user, pass, found := strings.Cut(string(uDec), ":")
		if !found {
			return nil, fmt.Errorf("bad request: authorization header credentials were missing the ':' separator")
		}
		username = user
		password = pass
	}

	addr, ok := peer.FromContext(ctx)
	if !ok {
		return nil, errors.New("incoming request had no peer")
	}
	return &RequestCredentials{Username: username, Password: password, Address: addr.Addr.String()}, nil
}
+28 -41
View File
@@ -16,14 +16,10 @@ package remotesrv
import (
"context"
"encoding/base64"
"strings"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
)
@@ -34,12 +30,20 @@ type RequestCredentials struct {
}
type ServerInterceptor struct {
Lgr *logrus.Entry
Authenticator Authenticator
Lgr *logrus.Entry
AccessController AccessControl
}
type Authenticator interface {
Authenticate(creds *RequestCredentials) bool
// AccessControl is an interface that provides authentication and authorization for the gRPC server.
type AccessControl interface {
// ApiAuthenticate checks the incoming request for authentication credentials and validates them. If the user's
// identity checks out, the returned context will have the sqlContext within it, which contains the user's ID.
// If the user is not legitimate, an error is returned.
ApiAuthenticate(ctx context.Context) (context.Context, error)
// ApiAuthorize checks that the authenticated user has sufficient privileges to perform the requested action.
// Currently, CLONE_ADMIN is required. True and a nil error returned if the user is authorized, otherwise false
// with an error.
ApiAuthorize(ctx context.Context) (bool, error)
}
func (si *ServerInterceptor) Stream() grpc.StreamServerInterceptor {
@@ -69,40 +73,23 @@ func (si *ServerInterceptor) Options() []grpc.ServerOption {
}
}
// authenticate checks the incoming request for authentication credentials and validates them. If the user is
// legitimate, an authorization check is performed. If no error is returned, the user should be allowed to proceed.
func (si *ServerInterceptor) authenticate(ctx context.Context) error {
if md, ok := metadata.FromIncomingContext(ctx); ok {
var username string
var password string
auths := md.Get("authorization")
if len(auths) != 1 {
username = "root"
} else {
auth := auths[0]
if !strings.HasPrefix(auth, "Basic ") {
si.Lgr.Info("incoming request had malformed authentication header")
return status.Error(codes.Unauthenticated, "unauthenticated")
}
authTrim := strings.TrimPrefix(auth, "Basic ")
uDec, err := base64.URLEncoding.DecodeString(authTrim)
if err != nil {
si.Lgr.Infof("incoming request authorization header failed to decode: %v", err)
return status.Error(codes.Unauthenticated, "unauthenticated")
}
userPass := strings.Split(string(uDec), ":")
username = userPass[0]
password = userPass[1]
}
addr, ok := peer.FromContext(ctx)
if !ok {
si.Lgr.Info("incoming request had no peer")
return status.Error(codes.Unauthenticated, "unauthenticated")
}
if authed := si.Authenticator.Authenticate(&RequestCredentials{Username: username, Password: password, Address: addr.Addr.String()}); !authed {
return status.Error(codes.Unauthenticated, "unauthenticated")
}
return nil
ctx, err := si.AccessController.ApiAuthenticate(ctx)
if err != nil {
si.Lgr.Warnf("authentication failed: %s", err.Error())
status.Error(codes.Unauthenticated, "unauthenticated")
return err
}
return status.Error(codes.Unauthenticated, "unauthenticated 1")
// Have a valid user in the context. Check authorization.
if authorized, err := si.AccessController.ApiAuthorize(ctx); !authorized {
si.Lgr.Warnf("authorization failed: %s", err.Error())
status.Error(codes.PermissionDenied, "unauthorized")
return err
}
// Access Granted.
return nil
}
+7
View File
@@ -77,8 +77,15 @@ type Schema interface {
GetMapDescriptors() (keyDesc, valueDesc val.TupleDesc)
// GetKeyDescriptor returns the key tuple descriptor for this schema.
// If a column has a type that can't appear in a key (such as "address" columns),
// that column will get converted to equivalent types that can. (Example: text -> varchar)
GetKeyDescriptor() val.TupleDesc
// GetKeyDescriptorWithNoConversion returns a descriptor for the columns used in the key.
// Unlike `GetKeyDescriptor`, it doesn't attempt to convert columns if they can't appear in a key,
// and returns them as they are.
GetKeyDescriptorWithNoConversion() val.TupleDesc
// GetValueDescriptor returns the value tuple descriptor for this schema.
GetValueDescriptor() val.TupleDesc
+12 -3
View File
@@ -409,6 +409,15 @@ func (si *schemaImpl) GetMapDescriptors() (keyDesc, valueDesc val.TupleDesc) {
// GetKeyDescriptor implements the Schema interface.
// It converts columns whose types cannot appear in a key (address columns such
// as text/blob/geometry) into key-compatible equivalents; see
// getKeyColumnsDescriptor for the conversion details.
func (si *schemaImpl) GetKeyDescriptor() val.TupleDesc {
	return si.getKeyColumnsDescriptor(true)
}
// GetKeyDescriptorWithNoConversion implements the Schema interface.
// Unlike GetKeyDescriptor, it returns the key columns exactly as declared,
// without converting address columns to key-compatible types.
func (si *schemaImpl) GetKeyDescriptorWithNoConversion() val.TupleDesc {
	return si.getKeyColumnsDescriptor(false)
}
func (si *schemaImpl) getKeyColumnsDescriptor(convertAddressColumns bool) val.TupleDesc {
if IsKeyless(si) {
return val.KeylessTupleDesc
}
@@ -420,17 +429,17 @@ func (si *schemaImpl) GetKeyDescriptor() val.TupleDesc {
sqlType := col.TypeInfo.ToSqlType()
queryType := sqlType.Type()
var t val.Type
if queryType == query.Type_BLOB {
if convertAddressColumns && queryType == query.Type_BLOB {
t = val.Type{
Enc: val.Encoding(EncodingFromSqlType(query.Type_VARBINARY)),
Nullable: columnMissingNotNullConstraint(col),
}
} else if queryType == query.Type_TEXT {
} else if convertAddressColumns && queryType == query.Type_TEXT {
t = val.Type{
Enc: val.Encoding(EncodingFromSqlType(query.Type_VARCHAR)),
Nullable: columnMissingNotNullConstraint(col),
}
} else if queryType == query.Type_GEOMETRY {
} else if convertAddressColumns && queryType == query.Type_GEOMETRY {
t = val.Type{
Enc: val.Encoding(serial.EncodingCell),
Nullable: columnMissingNotNullConstraint(col),
+1 -1
View File
@@ -419,7 +419,7 @@ func (db Database) getTableInsensitive(ctx *sql.Context, head *doltdb.Commit, ds
sess, db.RevisionQualifiedName(),
concurrentmap.New[string, env.Remote](),
concurrentmap.New[string, env.BranchConfig](),
map[string]env.Remote{})
concurrentmap.New[string, env.Remote]())
ws, err := sess.WorkingSet(ctx, db.RevisionQualifiedName())
if err != nil {
return nil, false, err
@@ -224,7 +224,7 @@ func syncBackupViaName(ctx *sql.Context, dbData env.DbData, sess *dsess.DoltSess
return err
}
b, ok := backups[backupName]
b, ok := backups.Get(backupName)
if !ok {
return fmt.Errorf("error: unknown backup: '%s'; %v", backupName, backups)
}
@@ -21,6 +21,7 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/libraries/doltcore/branch_control"
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
@@ -73,6 +74,12 @@ func doDoltFetch(ctx *sql.Context, args []string) (int, error) {
return cmdFailure, err
}
if user, hasUser := apr.GetValue(cli.UserFlag); hasUser {
remote = remote.WithParams(map[string]string{
dbfactory.GRPCUsernameAuthParam: user,
})
}
srcDB, err := sess.Provider().GetRemoteDB(ctx, dbData.Ddb.ValueReadWriter().Format(), remote, false)
if err != nil {
return 1, err
@@ -24,6 +24,7 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/libraries/doltcore/branch_control"
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
@@ -93,6 +94,12 @@ func doDoltPull(ctx *sql.Context, args []string) (int, int, error) {
return noConflictsOrViolations, threeWayMerge, err
}
if user, hasUser := apr.GetValue(cli.UserFlag); hasUser {
pullSpec.Remote = pullSpec.Remote.WithParams(map[string]string{
dbfactory.GRPCUsernameAuthParam: user,
})
}
srcDB, err := sess.Provider().GetRemoteDB(ctx, dbData.Ddb.ValueReadWriter().Format(), pullSpec.Remote, false)
if err != nil {
return noConflictsOrViolations, threeWayMerge, fmt.Errorf("failed to get remote db; %w", err)
@@ -42,8 +42,8 @@ type InitialDbState struct {
ReadOnly bool
DbData env.DbData
Remotes *concurrentmap.Map[string, env.Remote]
Backups *concurrentmap.Map[string, env.Remote]
Branches *concurrentmap.Map[string, env.BranchConfig]
Backups map[string]env.Remote
// If err is set, this InitialDbState is partially invalid, but may be
// usable to initialize a database at a revision specifier, for
@@ -33,8 +33,8 @@ type SessionStateAdapter struct {
session *DoltSession
dbName string
remotes *concurrentmap.Map[string, env.Remote]
backups map[string]env.Remote
branches *concurrentmap.Map[string, env.BranchConfig]
backups *concurrentmap.Map[string, env.Remote]
}
func (s SessionStateAdapter) SetCWBHeadRef(ctx context.Context, newRef ref.MarshalableRef) error {
@@ -45,7 +45,7 @@ var _ env.RepoStateReader = SessionStateAdapter{}
var _ env.RepoStateWriter = SessionStateAdapter{}
var _ env.RootsProvider = SessionStateAdapter{}
func NewSessionStateAdapter(session *DoltSession, dbName string, remotes *concurrentmap.Map[string, env.Remote], branches *concurrentmap.Map[string, env.BranchConfig], backups map[string]env.Remote) SessionStateAdapter {
func NewSessionStateAdapter(session *DoltSession, dbName string, remotes *concurrentmap.Map[string, env.Remote], branches *concurrentmap.Map[string, env.BranchConfig], backups *concurrentmap.Map[string, env.Remote]) SessionStateAdapter {
if branches == nil {
branches = concurrentmap.New[string, env.BranchConfig]()
}
@@ -92,7 +92,7 @@ func (s SessionStateAdapter) GetRemotes() (*concurrentmap.Map[string, env.Remote
return s.remotes, nil
}
func (s SessionStateAdapter) GetBackups() (map[string]env.Remote, error) {
func (s SessionStateAdapter) GetBackups() (*concurrentmap.Map[string, env.Remote], error) {
return s.backups, nil
}
@@ -147,7 +147,7 @@ func (s SessionStateAdapter) AddRemote(remote env.Remote) error {
}
func (s SessionStateAdapter) AddBackup(backup env.Remote) error {
if _, ok := s.backups[backup.Name]; ok {
if _, ok := s.backups.Get(backup.Name); ok {
return env.ErrBackupAlreadyExists
}
@@ -170,7 +170,7 @@ func (s SessionStateAdapter) AddBackup(backup env.Remote) error {
return fmt.Errorf("%w: '%s' -> %s", env.ErrRemoteAddressConflict, bac.Name, bac.Url)
}
s.backups[backup.Name] = backup
s.backups.Set(backup.Name, backup)
repoState.AddBackup(backup)
return repoState.Save(fs)
}
@@ -202,11 +202,11 @@ func (s SessionStateAdapter) RemoveRemote(_ context.Context, name string) error
}
func (s SessionStateAdapter) RemoveBackup(_ context.Context, name string) error {
backup, ok := s.backups[name]
backup, ok := s.backups.Get(name)
if !ok {
return env.ErrBackupNotFound
}
delete(s.backups, backup.Name)
s.backups.Delete(backup.Name)
fs, err := s.session.Provider().FileSystemForDatabase(s.dbName)
if err != nil {
@@ -218,12 +218,12 @@ func (s SessionStateAdapter) RemoveBackup(_ context.Context, name string) error
return err
}
backup, ok = repoState.Backups[name]
backup, ok = repoState.Backups.Get(name)
if !ok {
// sanity check
return env.ErrBackupNotFound
}
delete(repoState.Backups, name)
repoState.Backups.Delete(name)
return repoState.Save(fs)
}
@@ -5335,13 +5335,11 @@ var DoltCherryPickTests = []queries.ScriptTest{
SetUpScript: []string{
"SET @@autocommit=0;",
"create table t (pk int primary key, v varchar(100));",
"insert into t values (0, 'zero');",
"insert into t values (1, 'one');",
"call dolt_commit('-Am', 'create table t');",
"call dolt_checkout('-b', 'branch1');",
"insert into t values (1, \"one\");",
"call dolt_commit('-am', 'adding row 1');",
"insert into t values (2, \"two\");",
"call dolt_commit('-am', 'adding row 2');",
"update t set v=\"uno\" where pk=1;",
"call dolt_commit('-Am', 'updating row 1 -> uno');",
"alter table t drop column v;",
"call dolt_commit('-am', 'drop column v');",
"call dolt_checkout('main');",
@@ -5353,13 +5351,12 @@ var DoltCherryPickTests = []queries.ScriptTest{
},
{
Query: "select * from dolt_conflicts;",
Expected: []sql.Row{{"t", uint64(2)}},
Expected: []sql.Row{{"t", uint64(1)}},
},
{
Query: "select base_pk, base_v, our_pk, our_diff_type, their_pk, their_diff_type from dolt_conflicts_t;",
Expected: []sql.Row{
{1, "one", nil, "removed", 1, "modified"},
{2, "two", nil, "removed", 2, "modified"},
{1, "uno", 1, "modified", 1, "modified"},
},
},
{
@@ -5372,7 +5369,7 @@ var DoltCherryPickTests = []queries.ScriptTest{
},
{
Query: "select * from t;",
Expected: []sql.Row{{0, "zero"}},
Expected: []sql.Row{{1, "one"}},
},
},
},
@@ -5382,13 +5379,11 @@ var DoltCherryPickTests = []queries.ScriptTest{
"SET @@autocommit=1;",
"SET @@dolt_allow_commit_conflicts=1;",
"create table t (pk int primary key, v varchar(100));",
"insert into t values (0, 'zero');",
"insert into t values (1, 'one');",
"call dolt_commit('-Am', 'create table t');",
"call dolt_checkout('-b', 'branch1');",
"insert into t values (1, \"one\");",
"call dolt_commit('-am', 'adding row 1');",
"insert into t values (2, \"two\");",
"call dolt_commit('-am', 'adding row 2');",
"update t set v=\"uno\" where pk=1;",
"call dolt_commit('-Am', 'updating row 1 -> uno');",
"alter table t drop column v;",
"call dolt_commit('-am', 'drop column v');",
"call dolt_checkout('main');",
@@ -5400,13 +5395,12 @@ var DoltCherryPickTests = []queries.ScriptTest{
},
{
Query: "select * from dolt_conflicts;",
Expected: []sql.Row{{"t", uint64(2)}},
Expected: []sql.Row{{"t", uint64(1)}},
},
{
Query: "select base_pk, base_v, our_pk, our_diff_type, their_pk, their_diff_type from dolt_conflicts_t;",
Expected: []sql.Row{
{1, "one", nil, "removed", 1, "modified"},
{2, "two", nil, "removed", 2, "modified"},
{1, "uno", 1, "modified", 1, "modified"},
},
},
{
@@ -5419,7 +5413,7 @@ var DoltCherryPickTests = []queries.ScriptTest{
},
{
Query: "select * from t;",
Expected: []sql.Row{{0, "zero"}},
Expected: []sql.Row{{1, "one"}},
},
},
},
@@ -5428,13 +5422,11 @@ var DoltCherryPickTests = []queries.ScriptTest{
SetUpScript: []string{
"SET @@autocommit=0;",
"create table t (pk int primary key, v varchar(100));",
"insert into t values (0, 'zero');",
"insert into t values (1, 'one');",
"call dolt_commit('-Am', 'create table t');",
"call dolt_checkout('-b', 'branch1');",
"insert into t values (1, \"one\");",
"call dolt_commit('-am', 'adding row 1');",
"insert into t values (2, \"two\");",
"call dolt_commit('-am', 'adding row 2');",
"update t set v=\"uno\" where pk=1;",
"call dolt_commit('-Am', 'updating row 1 -> uno');",
"alter table t drop column v;",
"call dolt_commit('-am', 'drop column v');",
"call dolt_checkout('main');",
@@ -5446,7 +5438,7 @@ var DoltCherryPickTests = []queries.ScriptTest{
},
{
Query: "select * from dolt_conflicts;",
Expected: []sql.Row{{"t", uint64(2)}},
Expected: []sql.Row{{"t", uint64(1)}},
},
{
Query: "select * from dolt_status",
@@ -5455,8 +5447,7 @@ var DoltCherryPickTests = []queries.ScriptTest{
{
Query: "select base_pk, base_v, our_pk, our_diff_type, their_pk, their_diff_type from dolt_conflicts_t;",
Expected: []sql.Row{
{1, "one", nil, "removed", 1, "modified"},
{2, "two", nil, "removed", 2, "modified"},
{1, "uno", 1, "modified", 1, "modified"},
},
},
{
@@ -5473,7 +5464,7 @@ var DoltCherryPickTests = []queries.ScriptTest{
},
{
Query: "select * from t;",
Expected: []sql.Row{{0}},
Expected: []sql.Row{{1}},
},
{
Query: "call dolt_commit('-am', 'committing cherry-pick');",
@@ -5563,13 +5554,11 @@ var DoltCherryPickTests = []queries.ScriptTest{
"SET @@autocommit=1;",
"SET @@dolt_allow_commit_conflicts=1;",
"create table t (pk int primary key, v varchar(100));",
"insert into t values (0, 'zero');",
"insert into t values (1, 'one');",
"call dolt_commit('-Am', 'create table t');",
"call dolt_checkout('-b', 'branch1');",
"insert into t values (1, \"one\");",
"call dolt_commit('-am', 'adding row 1');",
"insert into t values (2, \"two\");",
"call dolt_commit('-am', 'adding row 2');",
"update t set v=\"uno\" where pk=1;",
"call dolt_commit('-Am', 'updating row 1 -> uno');",
"alter table t drop column v;",
"call dolt_commit('-am', 'drop column v');",
"call dolt_checkout('main');",
@@ -5581,13 +5570,12 @@ var DoltCherryPickTests = []queries.ScriptTest{
},
{
Query: "select * from dolt_conflicts;",
Expected: []sql.Row{{"t", uint64(2)}},
Expected: []sql.Row{{"t", uint64(1)}},
},
{
Query: "select base_pk, base_v, our_pk, our_diff_type, their_pk, their_diff_type from dolt_conflicts_t;",
Expected: []sql.Row{
{1, "one", nil, "removed", 1, "modified"},
{2, "two", nil, "removed", 2, "modified"},
{1, "uno", 1, "modified", 1, "modified"},
},
},
{
@@ -5600,7 +5588,7 @@ var DoltCherryPickTests = []queries.ScriptTest{
},
{
Query: "select * from t;",
Expected: []sql.Row{{0, "zero"}},
Expected: []sql.Row{{1, "one"}},
},
{
Query: "select * from dolt_status;",
@@ -1695,6 +1695,36 @@ var MergeScripts = []queries.ScriptTest{
},
},
},
{
Name: "resolving a deleted and modified row handles constraint checks",
SetUpScript: []string{
"create table test(a int primary key, b int, c int );",
"alter table test add check (b < 4);",
"insert into test values (1, 2, 3);",
"call dolt_add('test');",
"call dolt_commit('-m', 'create test table');",
"call dolt_checkout('-b', 'other');",
"alter table test drop column c;",
"call dolt_add('test');",
"call dolt_commit('-m', 'drop column');",
"call dolt_checkout('main');",
"delete from test;",
"call dolt_add('test');",
"call dolt_commit('-m', 'remove row');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "CALL DOLT_MERGE('other');",
Expected: []sql.Row{{doltCommit, 0, 0}},
},
{
Query: "select * from test",
Expected: []sql.Row{},
},
},
},
{
Name: "Pk convergent updates to sec diff congruent",
SetUpScript: []string{
@@ -157,4 +157,26 @@ var RevertScripts = []queries.ScriptTest{
},
},
},
{
Name: "dolt_revert() automatically resolves some conflicts",
SetUpScript: []string{
"create table tableA (id int primary key, col1 varchar(255));",
"call dolt_add('.');",
"call dolt_commit('-m', 'new table');",
"insert into tableA values (1, 'test')",
"call dolt_add('.');",
"call dolt_commit('-m', 'new row');",
"call dolt_branch('new_row');",
"ALTER TABLE tableA MODIFY col1 TEXT",
"call dolt_add('.');",
"call dolt_commit('-m', 'change col');",
"call dolt_revert('new_row');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "select * from tableA",
Expected: []sql.Row{},
},
},
},
}
@@ -1261,6 +1261,37 @@ var SchemaChangeTestsTypeChanges = []MergeScriptTest{
},
},
},
{
Name: "VARCHAR to TEXT widening with right side modification",
AncSetUpScript: []string{
"set autocommit = 0;",
"CREATE table t (pk int primary key, col1 varchar(10), col2 int);",
"INSERT into t values (1, '123', 10);",
"alter table t add index idx1 (col1(10));",
},
RightSetUpScript: []string{
"alter table t modify column col1 TEXT;",
"UPDATE t SET col2 = 40 WHERE col2 = 10",
"INSERT into t values (2, '12345678901234567890', 20);",
},
LeftSetUpScript: []string{
"INSERT into t values (3, '321', 30);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{doltCommit, 0, 0}},
},
{
Query: "select * from t order by pk;",
Expected: []sql.Row{
{1, "123", 40},
{2, "12345678901234567890", 20},
{3, "321", 30},
},
},
},
},
{
Name: "VARCHAR to TEXT widening",
AncSetUpScript: []string{
@@ -2059,6 +2090,45 @@ var SchemaChangeTestsSchemaConflicts = []MergeScriptTest{
},
},
},
{
Name: "adding a not-null constraint and default value to a column, alongside table rewrite",
AncSetUpScript: []string{
"set dolt_force_transaction_commit = on;",
"create table t (pk int primary key, col1 int);",
"insert into t values (1, null), (2, null);",
},
RightSetUpScript: []string{
"update t set col1 = 9999 where col1 is null;",
"alter table t modify column col1 int not null default 9999;",
"alter table t add column col2 int default 100",
"insert into t values (3, 30, 200), (4, 40, 300);",
},
LeftSetUpScript: []string{
"insert into t values (5, null), (6, null);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{"", 0, 1}},
},
{
Query: "select * from t;",
Expected: []sql.Row{
{1, 9999, 100},
{2, 9999, 100},
{3, 30, 200},
{4, 40, 300},
},
},
{
Query: "select pk, violation_type from dolt_constraint_violations_t",
Expected: []sql.Row{
{5, uint16(4)},
{6, uint16(4)},
},
},
},
},
{
Name: "adding a not-null constraint to one side",
AncSetUpScript: []string{
+3 -3
View File
@@ -78,10 +78,10 @@ func RemoteSrvServerArgs(ctxFactory func(context.Context) (*sql.Context, error),
return args, nil
}
func WithUserPasswordAuth(args remotesrv.ServerArgs, auth remotesrv.Authenticator) remotesrv.ServerArgs {
func WithUserPasswordAuth(args remotesrv.ServerArgs, authnz remotesrv.AccessControl) remotesrv.ServerArgs {
si := remotesrv.ServerInterceptor{
Lgr: args.Logger,
Authenticator: auth,
Lgr: args.Logger,
AccessController: authnz,
}
args.Options = append(args.Options, si.Options()...)
return args
+31 -11
View File
@@ -161,7 +161,16 @@ func (d *ThreeWayDiffer[K, O]) Next(ctx context.Context) (ThreeWayDiff, error) {
if d.lDiff.To == nil && d.rDiff.To == nil {
res = d.newConvergentEdit(d.lDiff.Key, d.lDiff.To, d.lDiff.Type)
} else if d.lDiff.To == nil || d.rDiff.To == nil {
res = d.newDivergentDeleteConflict(d.lDiff.Key, d.lDiff.From, d.lDiff.To, d.rDiff.To)
// Divergent delete. Attempt to resolve.
_, ok, err := d.resolveCb(ctx, val.Tuple(d.lDiff.To), val.Tuple(d.rDiff.To), val.Tuple(d.lDiff.From))
if err != nil {
return ThreeWayDiff{}, err
}
if !ok {
res = d.newDivergentDeleteConflict(d.lDiff.Key, d.lDiff.From, d.lDiff.To, d.rDiff.To)
} else {
res = d.newDivergentDeleteResolved(d.lDiff.Key, d.lDiff.From, d.lDiff.To, d.rDiff.To)
}
} else if d.lDiff.Type == d.rDiff.Type && bytes.Equal(d.lDiff.To, d.rDiff.To) {
res = d.newConvergentEdit(d.lDiff.Key, d.lDiff.To, d.lDiff.Type)
} else {
@@ -209,16 +218,17 @@ type DiffOp uint16
const (
DiffOpLeftAdd DiffOp = iota // leftAdd
DiffOpRightAdd // rightAdd
DiffOpLeftDelete // leftDelete
DiffOpRightDelete // rightDelete
DiffOpLeftModify // leftModify
DiffOpRightModify // rightModify
DiffOpConvergentAdd // convergentAdd
DiffOpConvergentDelete // convergentDelete
DiffOpConvergentModify // convergentModify
DiffOpDivergentModifyResolved // divergentModifyResolved
DiffOpDivergentDeleteConflict // divergentDeleteConflict
DiffOpDivergentModifyConflict // divergentModifyConflict
DiffOpLeftDelete //leftDelete
DiffOpRightDelete //rightDelete
DiffOpLeftModify //leftModify
DiffOpRightModify //rightModify
DiffOpConvergentAdd //convergentAdd
DiffOpConvergentDelete //convergentDelete
DiffOpConvergentModify //convergentModify
DiffOpDivergentModifyResolved //divergentModifyResolved
DiffOpDivergentDeleteConflict //divergentDeleteConflict
DiffOpDivergentModifyConflict //divergentModifyConflict
DiffOpDivergentDeleteResolved //divergentDeleteResolved
)
// ThreeWayDiff is a generic object for encoding a three way diff.
@@ -308,6 +318,16 @@ func (d *ThreeWayDiffer[K, O]) newDivergentDeleteConflict(key, base, left, right
}
}
func (d *ThreeWayDiffer[K, O]) newDivergentDeleteResolved(key, base, left, right Item) ThreeWayDiff {
return ThreeWayDiff{
Op: DiffOpDivergentDeleteResolved,
Key: val.Tuple(key),
Base: val.Tuple(base),
Left: val.Tuple(left),
Right: val.Tuple(right),
}
}
func (d *ThreeWayDiffer[K, O]) newDivergentClashConflict(key, base, left, right Item) ThreeWayDiff {
return ThreeWayDiff{
Op: DiffOpDivergentModifyConflict,
+4 -13
View File
@@ -179,9 +179,10 @@ teardown() {
dolt commit -am "add row 100 to other (on branch2)"
# This ALTER TABLE statement modifies other rows that aren't included in the cherry-picked
# commit row (100, 200, 300) is modified to (100, 300). This shows up as a conflict
# commit row (100, 200, 300) is modified to (100, 400). This shows up as a conflict
# in the cherry-pick (modified row on one side, row doesn't exist on the other side).
dolt sql -q "ALTER TABLE other DROP COLUMN c1;"
dolt sql -q "UPDATE other SET c2 = 400 WHERE pk = 100"
dolt sql -q "INSERT INTO other VALUES (10, 30);"
dolt sql -q "INSERT INTO test VALUES (100, 'q');"
dolt commit -am "alter table, add row 10 to other, add row 100 to test (on branch2)"
@@ -203,7 +204,7 @@ teardown() {
run dolt conflicts cat .
[ $status -eq 0 ]
[[ $output =~ "| - | ours | 100 | 200 | 300 |" ]] || false
[[ $output =~ "| * | theirs | 100 | NULL | 300 |" ]] || false
[[ $output =~ "| * | theirs | 100 | NULL | 400 |" ]] || false
# Assert the data we expect is in the table
run dolt sql -r csv -q "SELECT * from other;"
@@ -577,24 +578,14 @@ teardown() {
dolt commit -am "alter table test drop column v"
# Dropping column v on branch1 modifies all rows in the table, and those rows
# don't exist on main, so they are reported as conflicts the rows were modified in
# the target commit, but they don't exist on the target root.
# don't exist on main, so they would be a conflict. However, cell-wise merging is able to resolve the conflict.
dolt checkout main
run dolt cherry-pick branch1
[ $status -eq 1 ]
run dolt conflicts cat .
[ $status -eq 0 ]
[[ $output =~ '| | base | 1 | a |' ]] || false
[[ $output =~ '| - | ours | 1 | a |' ]] || false
[[ $output =~ '| * | theirs | 1 | NULL |' ]] || false
# Resolve and assert that column v is dropped
dolt conflicts resolve --ours .
run dolt sql -q "SHOW CREATE TABLE test;"
[ $status -eq 0 ]
[[ ! $output =~ '`v` varchar(10)' ]] || false
dolt commit -am "cherry-picked column drop"
}
@test "cherry-pick: commit with ALTER TABLE rename column" {
@@ -3,7 +3,7 @@ load $BATS_TEST_DIRNAME/helper/common.bash
setup() {
setup_common
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
database_name=dolt_repo_$$
dolt sql -q "CREATE TABLE test(pk int PRIMARY KEY, color varchar(200))"
+1 -1
View File
@@ -6,7 +6,7 @@ setup() {
skiponwindows "Missing dependencies"
setup_common
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
}
teardown() {
+22
View File
@@ -16,6 +16,28 @@ teardown() {
[[ "$output" =~ "No tables to export." ]] || false
}
@test "dump: roundtrip on database with leading space character and hyphen" {
mkdir ' test-db'
cd ' test-db'
dolt init
create_tables
insert_data_into_tables
run dolt dump
[ "$status" -eq 0 ]
[[ "$output" =~ "Successfully exported data." ]] || false
[ -f doltdump.sql ]
mkdir roundtrip
cd roundtrip
dolt init
dolt sql < ../doltdump.sql
run dolt sql -q "show databases"
[ $status -eq 0 ]
[[ $output =~ "| test-db" ]] || false
}
@test "dump: SQL type - with multiple tables" {
dolt sql -q "CREATE TABLE new_table(pk int primary key);"
dolt sql -q "INSERT INTO new_table VALUES (1);"
+2 -2
View File
@@ -130,7 +130,7 @@ SQL
}
@test "sql-checkout: DOLT_CHECKOUT updates the head ref session var" {
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
run dolt sql <<SQL
call dolt_checkout('-b', 'feature-branch');
select @@dolt_repo_$$_head_ref;
@@ -141,7 +141,7 @@ SQL
}
@test "sql-checkout: CALL DOLT_CHECKOUT updates the head ref session var" {
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
run dolt sql <<SQL
CALL DOLT_CHECKOUT('-b', 'feature-branch');
select @@dolt_repo_$$_head_ref;
+2 -15
View File
@@ -134,11 +134,11 @@ teardown() {
[[ $output =~ "not found" ]] || false
}
@test "sql-client: handle dashes for implicit database with hyphen disabled" {
@test "sql-client: handle dashes for implicit database" {
make_repo test-dashes
cd test-dashes
PORT=$( definePORT )
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
dolt sql-server --user=root --port=$PORT &
SERVER_PID=$! # will get killed by teardown_common
sleep 5 # not using python wait so this works on windows
@@ -148,19 +148,6 @@ teardown() {
[[ $output =~ " test_dashes " ]] || false
}
@test "sql-client: handle dashes for implicit database with hyphen allowed" {
make_repo test-dashes
cd test-dashes
PORT=$( definePORT )
dolt sql-server --user=root --port=$PORT &
SERVER_PID=$! # will get killed by teardown_common
sleep 5 # not using python wait so this works on windows
run dolt sql-client -u root -P $PORT -q "show databases"
[ $status -eq 0 ]
[[ $output =~ " test-dashes " ]] || false
}
@test "sql-client: select statement prints accurate query timing" {
cd repo1
start_sql_server repo1
+4 -4
View File
@@ -212,7 +212,7 @@ SQL
}
@test "sql-commit: DOLT_COMMIT updates session variables" {
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
head_variable=@@dolt_repo_$$_head
head_commit=$(get_head_commit)
run dolt sql << SQL
@@ -233,7 +233,7 @@ SQL
}
@test "sql-commit: CALL DOLT_COMMIT updates session variables" {
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
head_variable=@@dolt_repo_$$_head
head_commit=$(get_head_commit)
run dolt sql << SQL
@@ -254,7 +254,7 @@ SQL
}
@test "sql-commit: DOLT_COMMIT with unstaged tables leaves them in the working set" {
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
head_variable=@@dolt_repo_$$_head
run dolt sql << SQL
@@ -314,7 +314,7 @@ SQL
}
@test "sql-commit: CALL DOLT_COMMIT with unstaged tables leaves them in the working set" {
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
head_variable=@@dolt_repo_$$_head
run dolt sql << SQL
@@ -321,3 +321,32 @@ SQL
[[ "$output" =~ "def,metabase,utf8mb4,utf8mb4_unicode_ci,,NO" ]] || false
cd ..
}
@test "sql-create-database: creating database with hyphen and space characters replaced" {
mkdir 'test- dashes'
cd 'test- dashes'
dolt init
export DOLT_DBNAME_REPLACE="true"
# aliasing with 'a' allows check on the exact length of the database name
run dolt sql << SQL
USE test_dashes;
SELECT DATABASE() AS a;
SQL
[ $status -eq 0 ]
[[ $output =~ "| test_dashes |" ]] || false
}
@test "sql-create-database: creating database with hyphen and space characters allowed" {
mkdir ' test- db _ '
cd ' test- db _ '
dolt init
# aliasing with 'a' allows check on the exact length of the database name
run dolt sql << SQL
USE \` test- db _ \`;
SELECT DATABASE() AS a;
SQL
[ $status -eq 0 ]
[[ $output =~ "| test- db _ |" ]] || false
}
+2 -1
View File
@@ -1038,9 +1038,10 @@ SQL
dolt sql -q "INSERT INTO other VALUES (100, 200, 300);"
dolt commit -am "add row 100 to other (on branch2)"
# This ALTER TABLE statement modifies other rows that aren't included in the cherry-picked
# commit row (100, 200, 300) is modified to (100, 300). This shows up as a conflict
# commit row (100, 200, 300) is modified to (100, 400). This shows up as a conflict
# in the cherry-pick (modified row on one side, row doesn't exist on the other side).
dolt sql -q "ALTER TABLE other DROP COLUMN c1;"
dolt sql -q "UPDATE other SET c2 = 400 WHERE pk = 100"
dolt sql -q "INSERT INTO other VALUES (10, 30);"
dolt sql -q "INSERT INTO test VALUES (100, 'q');"
dolt commit -am "alter table, add row 10 to other, add row 100 to test (on branch2)"
+2 -2
View File
@@ -197,7 +197,7 @@ SQL
}
@test "sql-merge: DOLT_MERGE correctly returns head and working session variables." {
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
dolt sql << SQL
call dolt_commit('-a', '-m', 'Step 1');
call dolt_checkout('-b', 'feature-branch');
@@ -317,7 +317,7 @@ SQL
}
@test "sql-merge: DOLT_MERGE -no-ff correctly changes head and working session variables." {
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
dolt sql << SQL
call dolt_commit('-a', '-m', 'Step 1');
call dolt_checkout('-b', 'feature-branch');
+2 -2
View File
@@ -280,7 +280,7 @@ SQL
}
@test "sql-reset: CALL DOLT_RESET --hard properly maintains session variables." {
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
head_variable=@@dolt_repo_$$_head
head_hash=$(get_head_commit)
run dolt sql << SQL
@@ -318,7 +318,7 @@ SQL
}
@test "sql-reset: CALL DOLT_RESET soft maintains staged session variable" {
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
working_hash_var=@@dolt_repo_$$_working
run dolt sql -q "SELECT $working_hash_var"
working_hash=$output
@@ -162,12 +162,10 @@ select count(*) from vals;
}
@test "sql-server-remotesrv: clone/fetch/pull from remotesapi port with authentication" {
skip "only support authenticating fetch with dolthub for now."
mkdir remote
cd remote
dolt init
dolt --privilege-file=privs.json sql -q "CREATE USER user IDENTIFIED BY 'pass0'"
dolt --privilege-file=privs.json sql -q "CREATE USER user0 IDENTIFIED BY 'pass0'"
dolt sql -q 'create table vals (i int);'
dolt sql -q 'insert into vals (i) values (1), (2), (3), (4), (5);'
dolt add vals
@@ -187,7 +185,7 @@ select count(*) from vals;
run dolt sql -q 'select count(*) from vals'
[[ "$output" =~ "5" ]] || false
dolt --port 3307 --host localhost -u $DOLT_REMOTE_USER -p $DOLT_REMOTE_PASSWORD sql -q "
dolt --port 3307 --host localhost --no-tls -u $DOLT_REMOTE_USER -p $DOLT_REMOTE_PASSWORD sql -q "
use remote;
call dolt_checkout('-b', 'new_branch');
insert into vals (i) values (6), (7), (8), (9), (10);
@@ -202,7 +200,7 @@ call dolt_commit('-am', 'add some vals');
# No auth fetch
run dolt fetch
[[ "$status" != 0 ]] || false
[[ "$output" =~ "Unauthenticated" ]] || false
[[ "$output" =~ "Access denied for user 'root'" ]] || false
# # With auth fetch
run dolt fetch -u $DOLT_REMOTE_USER
@@ -216,7 +214,7 @@ call dolt_commit('-am', 'add some vals');
run dolt checkout new_branch
[[ "$status" -eq 0 ]] || false
dolt --port 3307 --host localhost -u $DOLT_REMOTE_USER -p $DOLT_REMOTE_PASSWORD sql -q "
dolt --port 3307 --host localhost --no-tls -u $DOLT_REMOTE_USER -p $DOLT_REMOTE_PASSWORD sql -q "
use remote;
call dolt_checkout('new_branch');
insert into vals (i) values (11);
@@ -226,7 +224,7 @@ call dolt_commit('-am', 'add one val');
# No auth pull
run dolt pull
[[ "$status" != 0 ]] || false
[[ "$output" =~ "Unauthenticated" ]] || false
[[ "$output" =~ "Access denied for user 'root'" ]] || false
# With auth pull
run dolt pull -u $DOLT_REMOTE_USER
@@ -236,8 +234,6 @@ call dolt_commit('-am', 'add one val');
}
@test "sql-server-remotesrv: clone/fetch/pull from remotesapi port with clone_admin authentication" {
skip "only support authenticating fetch with dolthub for now."
mkdir remote
cd remote
dolt init
@@ -250,11 +246,12 @@ call dolt_commit('-am', 'add one val');
srv_pid=$!
sleep 2 # wait for server to start so we don't lock it out
run dolt --port 3307 --host localhost -u user0 -p pass0 sql -q "
CREATE USER clone_admin_user@'%' IDENTIFIED BY 'pass1';
GRANT CLONE_ADMIN ON *.* TO clone_admin_user@'%';
run dolt sql -q "
CREATE USER clone_admin_user@'localhost' IDENTIFIED BY 'pass1';
GRANT CLONE_ADMIN ON *.* TO clone_admin_user@'localhost';
select user from mysql.user;
"
[ $status -eq 0 ]
[[ $output =~ user0 ]] || false
[[ $output =~ clone_admin_user ]] || false
@@ -268,12 +265,10 @@ select user from mysql.user;
run dolt sql -q 'select count(*) from vals'
[[ "$output" =~ "5" ]] || false
dolt --port 3307 --host localhost -u user0 -p pass0 sql -q "
use remote;
dolt --port 3307 --host localhost -u user0 -p pass0 --no-tls --use-db remote sql -q "
call dolt_checkout('-b', 'new_branch');
insert into vals (i) values (6), (7), (8), (9), (10);
call dolt_commit('-am', 'add some vals');
"
call dolt_commit('-am', 'add some vals');"
run dolt branch -v -a
[ "$status" -eq 0 ]
@@ -283,7 +278,7 @@ call dolt_commit('-am', 'add some vals');
# No auth fetch
run dolt fetch
[[ "$status" != 0 ]] || false
[[ "$output" =~ "Unauthenticated" ]] || false
[[ "$output" =~ "Access denied for user 'root'" ]] || false
# # With auth fetch
run dolt fetch -u clone_admin_user
@@ -297,17 +292,15 @@ call dolt_commit('-am', 'add some vals');
run dolt checkout new_branch
[[ "$status" -eq 0 ]] || false
dolt --port 3307 --host localhost -u user0 -p pass0 sql -q "
use remote;
dolt sql -q "
call dolt_checkout('new_branch');
insert into vals (i) values (11);
call dolt_commit('-am', 'add one val');
"
call dolt_commit('-am', 'add one val');"
# No auth pull
run dolt pull
[[ "$status" != 0 ]] || false
[[ "$output" =~ "Unauthenticated" ]] || false
[[ "$output" =~ "Access denied for user 'root'" ]] || false
# With auth pull
run dolt pull -u clone_admin_user
@@ -334,7 +327,7 @@ call dolt_commit('-am', 'add one val');
cd ../
run dolt clone http://localhost:50051/remote repo1
[[ "$status" != 0 ]] || false
[[ "$output" =~ "Unauthenticated" ]] || false
[[ "$output" =~ "Access denied for user 'root'" ]] || false
}
@test "sql-server-remotesrv: dolt clone with incorrect authentication returns error" {
@@ -361,10 +354,10 @@ call dolt_commit('-am', 'add one val');
export DOLT_REMOTE_PASSWORD="wrong-password"
run dolt clone http://localhost:50051/remote repo1 -u $DOLT_REMOTE_USER
[[ "$status" != 0 ]] || false
[[ "$output" =~ "Unauthenticated" ]] || false
[[ "$output" =~ "Access denied for user 'user0'" ]] || false
export DOLT_REMOTE_PASSWORD="pass0"
run dolt clone http://localhost:50051/remote repo1 -u doesnt_exist
[[ "$status" != 0 ]] || false
[[ "$output" =~ "Unauthenticated" ]] || false
[[ "$output" =~ "Access denied for user 'doesnt_exist'" ]] || false
}
+1 -1
View File
@@ -1789,7 +1789,7 @@ behavior:
@test "sql-server: dropping database with '-' in it but replaced with underscore" {
skiponwindows "Missing dependencies"
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
mkdir my-db
cd my-db
dolt init
+1 -1
View File
@@ -3,7 +3,7 @@ load $BATS_TEST_DIRNAME/helper/common.bash
setup() {
setup_common
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
dolt sql <<SQL
CREATE TABLE one_pk (
pk BIGINT NOT NULL,
+16 -16
View File
@@ -60,7 +60,7 @@ teardown() {
}
@test "undrop: undrop root database with hyphen replaced in its name" {
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
setup_remote_server
# Create a new Dolt database directory to use as a root database
# NOTE: We use hyphens here to test how db dirs are renamed.
@@ -111,24 +111,24 @@ call dolt_commit('-Am', 'creating table t1');
EOF
run dolt sql -q "show databases;"
[ $status -eq 0 ]
[[ $output =~ "test-_db-1" ]] || false
[[ $output =~ " test- db-1 " ]] || false
# Drop the root database
dolt sql -q "drop database \`test-_db-1\`;"
dolt sql -q "drop database \` test- db-1 \`;"
run dolt sql -q "show databases;"
[ $status -eq 0 ]
[[ ! $output =~ "test-_db-1" ]] || false
[[ ! $output =~ " test- db-1 " ]] || false
# Undrop the test-_db-1 database
# Undrop the ' test- db-1 ' database
# NOTE: After being undropped, the database is no longer the root database,
# but contained in a subdirectory like a non-root database.
dolt sql -q "call dolt_undrop('test-_db-1');"
dolt sql -q "call dolt_undrop(' test- db-1 ');"
run dolt sql -q "show databases;"
[ $status -eq 0 ]
[[ $output =~ "test-_db-1" ]] || false
[[ $output =~ " test- db-1 " ]] || false
# Sanity check querying some data
run dolt sql -r csv -q "select * from \`test-_db-1\`.t1;"
run dolt sql -r csv -q "select * from \` test- db-1 \`.t1;"
[ $status -eq 0 ]
[[ $output =~ "1,one" ]] || false
}
@@ -136,7 +136,7 @@ EOF
# Asserts that a non-root database can be dropped and then restored with dolt_undrop(), even when
# the case of the database name given to dolt_undrop() doesn't match the original case.
@test "undrop: undrop non-root database with hyphen replaced in its name" {
export DOLT_DBNAME_REPLACE_HYPHENS="true"
export DOLT_DBNAME_REPLACE="true"
setup_remote_server
dolt sql << EOF
use drop_me_2;
@@ -170,28 +170,28 @@ EOF
@test "undrop: undrop non-root database with hyphen allowed in its name" {
setup_remote_server
dolt sql << EOF
use \`drop-_me-2\`;
use \` drop- me-2 \`;
create table t1 (pk int primary key, c1 varchar(200));
insert into t1 values (1, "one");
call dolt_commit('-Am', 'creating table t1');
EOF
run dolt sql -q "show databases;"
[ $status -eq 0 ]
[[ $output =~ "drop-_me-2" ]] || false
[[ $output =~ " drop- me-2 " ]] || false
dolt sql -q "drop database \`drop-_me-2\`;"
dolt sql -q "drop database \` drop- me-2 \`;"
run dolt sql -q "show databases;"
[ $status -eq 0 ]
[[ ! $output =~ "drop-_me-2" ]] || false
[[ ! $output =~ " drop- me-2 " ]] || false
# Call dolt_undrop() with non-matching case for the database name to
# ensure dolt_undrop() works with case-insensitive database names.
dolt sql -q "call dolt_undrop('DrOp-_mE-2');"
dolt sql -q "call dolt_undrop(' DrOp- mE-2 ');"
run dolt sql -q "show databases;"
[ $status -eq 0 ]
[[ $output =~ "drop-_me-2" ]] || false
[[ $output =~ " drop- me-2 " ]] || false
run dolt sql -r csv -q "select * from \`drop-_me-2\`.t1;"
run dolt sql -r csv -q "select * from \` drop- me-2 \`.t1;"
[ $status -eq 0 ]
[[ $output =~ "1,one" ]] || false
}
@@ -92,7 +92,7 @@ teardown() {
[ "$status" -eq 0 ]
[[ "${lines[1]}" =~ "| pk | a | b | x | y |" ]] || false
[[ "${lines[2]}" =~ "+----+------+-----+---+-----+" ]] || false
[[ "${lines[3]}" =~ "| 0 | asdf | 1.1 | 0 | 121 |" ]] || false
[[ "${lines[3]}" =~ "| 0 | asdf | 1.1 | 1 | 121 |" ]] || false
[[ "${lines[4]}" =~ "| 2 | asdf | 1.1 | 0 | 121 |" ]] || false
[[ "${lines[5]}" =~ "| 3 | data | 1.1 | 0 | 121 |" ]] || false
}
@@ -119,7 +119,7 @@ teardown() {
[ "$status" -eq 0 ]
[[ "${lines[1]}" =~ "| pk | a | b | w | z |" ]] || false
[[ "${lines[2]}" =~ "+----+------+-----+---+-----+" ]] || false
[[ "${lines[3]}" =~ "| 0 | asdf | 1.1 | 0 | 122 |" ]] || false
[[ "${lines[3]}" =~ "| 0 | asdf | 1.1 | 1 | 122 |" ]] || false
[[ "${lines[4]}" =~ "| 1 | asdf | 1.1 | 0 | 122 |" ]] || false
[[ "${lines[5]}" =~ "| 4 | data | 1.1 | 0 | 122 |" ]] || false
@@ -153,8 +153,8 @@ EOF
+---+----+------+-----+------+------+------+------+
| | pk | a | b | w | z | x | y |
+---+----+------+-----+------+------+------+------+
| < | 0 | asdf | 1.1 | 0 | 122 | NULL | NULL |
| > | 0 | asdf | 1.1 | NULL | NULL | 0 | 121 |
| < | 0 | asdf | 1.1 | 1 | 122 | NULL | NULL |
| > | 0 | asdf | 1.1 | NULL | NULL | 1 | 121 |
| - | 1 | asdf | 1.1 | 0 | 122 | NULL | NULL |
| + | 2 | asdf | 1.1 | NULL | NULL | 0 | 121 |
| + | 3 | data | 1.1 | NULL | NULL | 0 | 121 |
@@ -45,6 +45,7 @@ dolt branch init
dolt branch other
dolt sql <<SQL
DELETE FROM abc WHERE pk=1;
UPDATE abc SET x = 1 WHERE pk = 0;
INSERT INTO abc VALUES (3, 'data', 1.1, 0, 0);
ALTER TABLE abc DROP COLUMN w;
ALTER TABLE abc ADD COLUMN y BIGINT;
@@ -56,6 +57,7 @@ dolt commit -m "made changes to $DEFAULT_BRANCH"
dolt checkout other
dolt sql <<SQL
DELETE FROM abc WHERE pk=2;
UPDATE abc SET w = 1 WHERE pk = 0;
INSERT INTO abc VALUES (4, 'data', 1.1, 0, 0);
ALTER TABLE abc DROP COLUMN x;
ALTER TABLE abc ADD COLUMN z BIGINT;