Merge remote-tracking branch 'origin/main' into aaron/auto-gc-periodic-check

This commit is contained in:
Aaron Son
2025-10-07 01:35:00 -07:00
27 changed files with 971 additions and 301 deletions
+3
View File
@@ -88,6 +88,9 @@ jobs:
- name: Install expect
if: matrix.os == 'ubuntu-22.04'
run: sudo apt-get install -y expect
- name: Install pcre2grep
if: matrix.os == 'ubuntu-22.04'
run: sudo apt-get install -y pcre2-utils
- name: Install Maven
working-directory: ./.ci_bin
run: |
+6 -12
View File
@@ -38,7 +38,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/servercfg"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
dsqle "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
dblr "github.com/dolthub/dolt/go/libraries/doltcore/sqle/binlogreplication"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dprocedures"
@@ -54,7 +53,7 @@ import (
// SqlEngine packages up the context necessary to run sql queries against dsqle.
type SqlEngine struct {
provider *dsqle.DoltDatabaseProvider
provider *sqle.DoltDatabaseProvider
ContextFactory sql.ContextFactory
dsessFactory sessionFactory
engine *gms.Engine
@@ -82,7 +81,7 @@ type SqlEngineConfig struct {
JwksConfig []servercfg.JwksConfig
SystemVariables SystemVariables
ClusterController *cluster.Controller
AutoGCController *dsqle.AutoGCController
AutoGCController *sqle.AutoGCController
BinlogReplicaController binlogreplication.BinlogReplicaController
EventSchedulerStatus eventscheduler.SchedulerStatus
}
@@ -122,7 +121,7 @@ func NewSqlEngine(
bThreads := sql.NewBackgroundThreads()
var runAsyncThreads sqle.RunAsyncThreads
dbs, runAsyncThreads, err = dsqle.ApplyReplicationConfig(ctx, mrEnv, cli.CliOut, dbs...)
dbs, runAsyncThreads, err = sqle.ApplyReplicationConfig(ctx, mrEnv, cli.CliOut, dbs...)
if err != nil {
return nil, err
}
@@ -149,11 +148,6 @@ func NewSqlEngine(
all := make([]dsess.SqlDatabase, len(dbs))
copy(all, dbs)
// this is overwritten only for server sessions
for _, db := range dbs {
db.DbData().Ddb.SetCommitHookLogger(ctx, cli.CliOut)
}
clusterDB := config.ClusterController.ClusterDatabase()
if clusterDB != nil {
all = append(all, clusterDB.(dsess.SqlDatabase))
@@ -161,7 +155,7 @@ func NewSqlEngine(
}
b := env.GetDefaultInitBranch(mrEnv.Config())
pro, err := dsqle.NewDoltDatabaseProviderWithDatabases(b, mrEnv.FileSystem(), all, locations)
pro, err := sqle.NewDoltDatabaseProviderWithDatabases(b, mrEnv.FileSystem(), all, locations)
if err != nil {
return nil, err
}
@@ -474,7 +468,7 @@ func configureBinlogPrimaryController(engine *gms.Engine) error {
// configureEventScheduler configures the event scheduler with the |engine| for executing events, a |sessFactory|
// for creating sessions, and a DoltDatabaseProvider, |pro|.
func configureEventScheduler(config *SqlEngineConfig, engine *gms.Engine, ctxFactory sql.ContextFactory, sessFactory sessionFactory, pro *dsqle.DoltDatabaseProvider) error {
func configureEventScheduler(config *SqlEngineConfig, engine *gms.Engine, ctxFactory sql.ContextFactory, sessFactory sessionFactory, pro *sqle.DoltDatabaseProvider) error {
// getCtxFunc is used to create new session with a new context for event scheduler.
getCtxFunc := func() (*sql.Context, error) {
sess, err := sessFactory(sql.NewBaseSession(), pro)
@@ -513,7 +507,7 @@ func sqlContextFactory(ctx context.Context, opts ...sql.ContextOption) *sql.Cont
}
// doltSessionFactory returns a sessionFactory that creates a new DoltSession
func doltSessionFactory(pro *dsqle.DoltDatabaseProvider, statsPro sql.StatsProvider, config config.ReadWriteConfig, bc *branch_control.Controller, gcSafepointController *gcctx.GCSafepointController, autocommit bool) sessionFactory {
func doltSessionFactory(pro *sqle.DoltDatabaseProvider, statsPro sql.StatsProvider, config config.ReadWriteConfig, bc *branch_control.Controller, gcSafepointController *gcctx.GCSafepointController, autocommit bool) sessionFactory {
return func(mysqlSess *sql.BaseSession, provider sql.DatabaseProvider) (*dsess.DoltSession, error) {
doltSession, err := dsess.NewDoltSession(mysqlSess, pro, config, bc, statsPro, writer.NewWriteSession, gcSafepointController)
if err != nil {
+4
View File
@@ -266,6 +266,10 @@ func buildInitialRebaseMsg(sqlCtx *sql.Context, queryist cli.Queryist, rebaseBra
if !ok {
return "", fmt.Errorf("unexpected type for commit_message; expected string, found %T", commitMessage)
}
// Match Git's behavior and filter out newlines
commitMessage = strings.Replace(commitMessage, "\n", " ", -1)
buffer.WriteString(fmt.Sprintf("%s %s %s\n", action, commitHash, commitMessage))
}
buffer.WriteString("\n")
+1 -1
View File
@@ -15,5 +15,5 @@
package doltversion
const (
Version = "1.59.13"
Version = "1.59.17"
)
+2 -2
View File
@@ -61,7 +61,7 @@ require (
github.com/dolthub/dolt-mcp v0.2.2-0.20250917171427-13e4520d1c36
github.com/dolthub/eventsapi_schema v0.0.0-20250915094920-eadfd39051ca
github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2
github.com/dolthub/go-mysql-server v0.20.1-0.20250930235107-1b5cc8168991
github.com/dolthub/go-mysql-server v0.20.1-0.20251003202417-9979526e55c8
github.com/dolthub/gozstd v0.0.0-20240423170813-23a2903bca63
github.com/edsrzf/mmap-go v1.2.0
github.com/esote/minmaxheap v1.0.0
@@ -199,4 +199,4 @@ require (
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
)
go 1.25
go 1.25.0
+2 -2
View File
@@ -213,8 +213,8 @@ github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-icu-regex v0.0.0-20250916051405-78a38d478790 h1:zxMsH7RLiG+dlZ/y0LgJHTV26XoiSJcuWq+em6t6VVc=
github.com/dolthub/go-icu-regex v0.0.0-20250916051405-78a38d478790/go.mod h1:F3cnm+vMRK1HaU6+rNqQrOCyR03HHhR1GWG2gnPOqaE=
github.com/dolthub/go-mysql-server v0.20.1-0.20250930235107-1b5cc8168991 h1:DLaVeMz9j90y5p65RHP3mtKaDiobba3RLdWFCh33S/M=
github.com/dolthub/go-mysql-server v0.20.1-0.20250930235107-1b5cc8168991/go.mod h1:cO1zdcQORX4p1al2S1LvZzxKLsx+1KK+8GTlTsy9oSs=
github.com/dolthub/go-mysql-server v0.20.1-0.20251003202417-9979526e55c8 h1:r54ksXOt1SDgztJsHU3r+W9ZYZjYUTIguGYUcrM9bMk=
github.com/dolthub/go-mysql-server v0.20.1-0.20251003202417-9979526e55c8/go.mod h1:EeYR0apo+8j2Dyxmn2ghkPlirO2S5mT1xHBrA+Efys8=
github.com/dolthub/gozstd v0.0.0-20240423170813-23a2903bca63 h1:OAsXLAPL4du6tfbBgK0xXHZkOlos63RdKYS3Sgw/dfI=
github.com/dolthub/gozstd v0.0.0-20240423170813-23a2903bca63/go.mod h1:lV7lUeuDhH5thVGDCKXbatwKy2KW80L4rMT46n+Y2/Q=
github.com/dolthub/ishell v0.0.0-20240701202509-2b217167d718 h1:lT7hE5k+0nkBdj/1UOSFwjWpNxf+LCApbRHgnCA17XE=
-8
View File
@@ -19,7 +19,6 @@ import (
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
@@ -2072,13 +2071,6 @@ func (ddb *DoltDB) PrependCommitHooks(ctx context.Context, hooks ...CommitHook)
return ddb
}
func (ddb *DoltDB) SetCommitHookLogger(ctx context.Context, wr io.Writer) *DoltDB {
if ddb.db.Database != nil {
ddb.db = ddb.db.SetCommitHookLogger(ctx, wr)
}
return ddb
}
func (ddb *DoltDB) ExecuteCommitHooks(ctx context.Context, datasetId string) error {
ds, err := ddb.db.GetDataset(ctx, datasetId)
if err != nil {
+10 -17
View File
@@ -16,7 +16,6 @@ package doltdb
import (
"context"
"io"
"sync"
"github.com/dolthub/dolt/go/store/datas"
@@ -33,12 +32,13 @@ type hooksDatabase struct {
// CommitHook is an abstraction for executing arbitrary commands after atomic database commits
type CommitHook interface {
// Execute is arbitrary read-only function whose arguments are new Dataset commit into a specific Database
// Execute is arbitrary read-only function whose arguments are new Dataset commit into a specific Database.
// The returned values are a callback function and an error. The callback function is
// a Wait function which is optional. If returned, it will be registered with the ReplicationStatusController. If the
// hook implements NotifyWaitFailedCommitHook, it will be notified if the Wait function returns an error.
// The |Error| returned is actually ignored by the caller. It exists primarily for unit testing. Any error which
// happens in a hook should be logged by the hook itself.
Execute(ctx context.Context, ds datas.Dataset, db *DoltDB) (func(context.Context) error, error)
// HandleError is an bridge function to handle Execute errors
HandleError(ctx context.Context, err error) error
// SetLogger lets clients specify an output stream for HandleError
SetLogger(ctx context.Context, wr io.Writer) error
// ExecuteForWorkingSets returns whether or not the hook should be executed for working set updates
ExecuteForWorkingSets() bool
}
@@ -57,13 +57,6 @@ func (db hooksDatabase) SetCommitHooks(ctx context.Context, postHooks []CommitHo
return db
}
func (db hooksDatabase) SetCommitHookLogger(ctx context.Context, wr io.Writer) hooksDatabase {
for _, h := range db.hooks {
h.SetLogger(ctx, wr)
}
return db
}
func (db hooksDatabase) withReplicationStatusController(rsc *ReplicationStatusController) hooksDatabase {
db.rsc = rsc
return db
@@ -91,10 +84,9 @@ func (db hooksDatabase) ExecuteCommitHooks(ctx context.Context, ds datas.Dataset
wg.Add(1)
go func() {
defer wg.Done()
f, err := hook.Execute(ctx, ds, db.db)
if err != nil {
hook.HandleError(ctx, err)
}
// The error returned is intentionally ignored here. Hooks are expected to log errors themselves. The interface returns
// the error primarily for testing purposes.
f, _ := hook.Execute(ctx, ds, db.db)
if rsc != nil {
rsc.Wait[i+ioff] = f
if nf, ok := hook.(NotifyWaitFailedCommitHook); ok {
@@ -110,6 +102,7 @@ func (db hooksDatabase) ExecuteCommitHooks(ctx context.Context, ds datas.Dataset
if rsc != nil {
j := ioff
for i := ioff; i < len(rsc.Wait); i++ {
// compact out any nil entries
if rsc.Wait[i] != nil {
rsc.Wait[j] = rsc.Wait[i]
rsc.NotifyWaitFailed[j] = rsc.NotifyWaitFailed[i]
+65
View File
@@ -81,6 +81,18 @@ func expectSingleValue(sqlCtx *sql.Context, comparison string, value *string, qu
return compareNullValue(comparison, row[0], AssertionExpectedSingleValue), nil
}
// Check if the expected value is a boolean string, and if so, coerce the actual value to boolean, with the exception
// of "0" and "1", which are valid integers and are covered below.
if *value != "0" && *value != "1" {
if expectedBool, err := strconv.ParseBool(*value); err == nil {
actualBool, boolErr := getInterfaceAsBool(row[0])
if boolErr != nil {
return fmt.Sprintf("Could not convert value to boolean: %v", boolErr), nil
}
return compareBooleans(comparison, expectedBool, actualBool, AssertionExpectedSingleValue), nil
}
}
switch actualValue := row[0].(type) {
case int8:
expectedInt, err := strconv.ParseInt(*value, 10, 64)
@@ -346,6 +358,59 @@ func compareDecimals(comparison string, expectedValue, realValue decimal.Decimal
return ""
}
// getInterfaceAsBool returns the value interface{} as a bool
// This is necessary because the query engine may return a tinyint column as a bool, int, or other types.
// Based on GetTinyIntColAsBool from commands/utils.go, which we can't depend on here due to package cycles.
func getInterfaceAsBool(col interface{}) (bool, error) {
switch v := col.(type) {
case bool:
return v, nil
case int:
return v == 1, nil
case int8:
return v == 1, nil
case int16:
return v == 1, nil
case int32:
return v == 1, nil
case int64:
return v == 1, nil
case uint:
return v == 1, nil
case uint8:
return v == 1, nil
case uint16:
return v == 1, nil
case uint32:
return v == 1, nil
case uint64:
return v == 1, nil
case string:
return v == "1", nil
default:
return false, fmt.Errorf("unexpected type %T, was expecting bool, int, or string", v)
}
}
// compareBooleans is a function used for comparing boolean values.
// It takes in a comparison string from one of: "==", "!="
// It returns a string. The string is empty if the assertion passed, or has a message explaining the failure otherwise
func compareBooleans(comparison string, expectedValue, realValue bool, assertionType string) string {
switch comparison {
case "==":
if expectedValue != realValue {
return fmt.Sprintf("Assertion failed: %s equal to %t, got %t", assertionType, expectedValue, realValue)
}
case "!=":
if expectedValue == realValue {
return fmt.Sprintf("Assertion failed: %s not equal to %t, got %t", assertionType, expectedValue, realValue)
}
default:
return fmt.Sprintf("%s is not a valid comparison for boolean values. Only '==' and '!=' are supported", comparison)
}
return ""
}
// compareNullValue is a function used for comparing a null value.
// It takes in a comparison string from one of: "==", "!="
// It returns a string. The string is empty if the assertion passed, or has a message explaining the failure otherwise
-9
View File
@@ -17,7 +17,6 @@ package sqle
import (
"context"
"errors"
"io"
"sync"
"time"
@@ -303,14 +302,6 @@ func (h *autoGCCommitHook) requestGC(ctx context.Context) error {
}
}
func (h *autoGCCommitHook) HandleError(ctx context.Context, err error) error {
return nil
}
func (h *autoGCCommitHook) SetLogger(ctx context.Context, wr io.Writer) error {
return nil
}
func (h *autoGCCommitHook) ExecuteForWorkingSets() bool {
return true
}
@@ -18,7 +18,6 @@ import (
"context"
"errors"
"fmt"
"io"
"sync"
"sync/atomic"
"time"
@@ -524,14 +523,6 @@ func (h *commithook) NotifyWaitFailed() {
h.fastFailReplicationWait = true
}
func (h *commithook) HandleError(ctx context.Context, err error) error {
return nil
}
func (h *commithook) SetLogger(ctx context.Context, wr io.Writer) error {
return nil
}
func (h *commithook) ExecuteForWorkingSets() bool {
return true
}
+209 -61
View File
@@ -22,16 +22,20 @@ import (
"time"
"github.com/dolthub/go-mysql-server/sql"
"github.com/sirupsen/logrus"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/types"
)
type PushOnWriteHook struct {
out io.Writer
destDB *doltdb.DoltDB
destDb *doltdb.DoltDB
tmpDir string
}
@@ -39,16 +43,29 @@ var _ doltdb.CommitHook = (*PushOnWriteHook)(nil)
// NewPushOnWriteHook creates a ReplicateHook, parameterized by the backup database
// and a local tempfile for pushing
func NewPushOnWriteHook(destDB *doltdb.DoltDB, tmpDir string) *PushOnWriteHook {
func NewPushOnWriteHook(tmpDir string, logger io.Writer) *PushOnWriteHook {
return &PushOnWriteHook{
destDB: destDB,
tmpDir: tmpDir,
out: logger,
}
}
// Execute implements CommitHook, replicates head updates to the destDb field
func (ph *PushOnWriteHook) Execute(ctx context.Context, ds datas.Dataset, db *doltdb.DoltDB) (func(context.Context) error, error) {
return nil, pushDataset(ctx, ph.destDB, db, ds, ph.tmpDir)
func (ph *PushOnWriteHook) Execute(ctx context.Context, ds datas.Dataset, srcDb *doltdb.DoltDB) (func(context.Context) error, error) {
if ph.destDb == nil {
e := fmt.Errorf("PushOnWriteHook invoked with nil destDB")
logrus.Errorf("runtime error: %v", e)
return nil, e
}
err := pushDataset(ctx, ph.destDb, srcDb, ds, ph.tmpDir)
if ph.out != nil && err != nil {
// if we can't write to the output, there's not much we can do.
_, _ = ph.out.Write([]byte(fmt.Sprintf("error pushing: %+v", err)))
}
return nil, err
}
func pushDataset(ctx context.Context, destDB, srcDB *doltdb.DoltDB, ds datas.Dataset, tmpDir string) error {
@@ -72,36 +89,22 @@ func pushDataset(ctx context.Context, destDB, srcDB *doltdb.DoltDB, ds datas.Dat
return destDB.SetHead(ctx, rf, addr)
}
// HandleError implements CommitHook
func (ph *PushOnWriteHook) HandleError(ctx context.Context, err error) error {
if ph.out != nil {
_, err := ph.out.Write([]byte(fmt.Sprintf("error pushing: %+v", err)))
if err != nil {
return err
}
}
return nil
}
func (*PushOnWriteHook) ExecuteForWorkingSets() bool {
return false
}
// SetLogger implements CommitHook
func (ph *PushOnWriteHook) SetLogger(ctx context.Context, wr io.Writer) error {
ph.out = wr
return nil
}
type PushArg struct {
ds datas.Dataset
db *doltdb.DoltDB
hash hash.Hash
ds datas.Dataset
srcDb *doltdb.DoltDB
destDb *doltdb.DoltDB
hash hash.Hash
}
type AsyncPushOnWriteHook struct {
out io.Writer
ch chan PushArg
destDb *doltdb.DoltDB
}
const (
@@ -114,10 +117,10 @@ const (
var _ doltdb.CommitHook = (*AsyncPushOnWriteHook)(nil)
// NewAsyncPushOnWriteHook creates an AsyncReplicateHook
func NewAsyncPushOnWriteHook(destDB *doltdb.DoltDB, tmpDir string, logger io.Writer) (*AsyncPushOnWriteHook, RunAsyncThreads) {
func NewAsyncPushOnWriteHook(tmpDir string, logger io.Writer) (*AsyncPushOnWriteHook, RunAsyncThreads) {
ch := make(chan PushArg, asyncPushBufferSize)
runThreads := func(bThreads *sql.BackgroundThreads, ctxF func(context.Context) (*sql.Context, error)) error {
return RunAsyncReplicationThreads(bThreads, ctxF, ch, destDB, tmpDir, logger)
return RunAsyncReplicationThreads(bThreads, ctxF, ch, tmpDir, logger)
}
return &AsyncPushOnWriteHook{ch: ch}, runThreads
}
@@ -127,25 +130,23 @@ func (*AsyncPushOnWriteHook) ExecuteForWorkingSets() bool {
}
// Execute implements CommitHook, replicates head updates to the destDb field
func (ah *AsyncPushOnWriteHook) Execute(ctx context.Context, ds datas.Dataset, db *doltdb.DoltDB) (func(context.Context) error, error) {
func (ah *AsyncPushOnWriteHook) Execute(ctx context.Context, ds datas.Dataset, srcDb *doltdb.DoltDB) (func(context.Context) error, error) {
if ah.destDb == nil {
e := fmt.Errorf("AsyncPushOnWriteHook invoked with nil destDB")
logrus.Errorf("runtime error: %v", e)
return nil, e
}
addr, _ := ds.MaybeHeadAddr()
// TODO: Unconditional push here seems dangerous.
ah.ch <- PushArg{ds: ds, db: db, hash: addr}
return nil, ctx.Err()
}
ah.ch <- PushArg{ds: ds, srcDb: srcDb, destDb: ah.destDb, hash: addr}
// HandleError implements CommitHook
func (ah *AsyncPushOnWriteHook) HandleError(ctx context.Context, err error) error {
if ah.out != nil {
ah.out.Write([]byte(err.Error()))
err := ctx.Err()
if err != nil {
_, _ = ah.out.Write([]byte(err.Error()))
}
return nil
}
// SetLogger implements CommitHook
func (ah *AsyncPushOnWriteHook) SetLogger(ctx context.Context, wr io.Writer) error {
ah.out = wr
return nil
return nil, err
}
type LogHook struct {
@@ -156,38 +157,23 @@ type LogHook struct {
var _ doltdb.CommitHook = (*LogHook)(nil)
// NewLogHook is a noop that logs to a writer when invoked
func NewLogHook(msg []byte) *LogHook {
return &LogHook{msg: msg}
func NewLogHook(msg []byte, logger io.Writer) *LogHook {
return &LogHook{msg: msg, out: logger}
}
// Execute implements CommitHook, writes message to log channel
func (lh *LogHook) Execute(ctx context.Context, ds datas.Dataset, db *doltdb.DoltDB) (func(context.Context) error, error) {
if lh.out != nil {
_, err := lh.out.Write(lh.msg)
return nil, err
_, _ = lh.out.Write(lh.msg)
}
return nil, nil
}
// HandleError implements CommitHook
func (lh *LogHook) HandleError(ctx context.Context, err error) error {
if lh.out != nil {
lh.out.Write([]byte(err.Error()))
}
return nil
}
// SetLogger implements CommitHook
func (lh *LogHook) SetLogger(ctx context.Context, wr io.Writer) error {
lh.out = wr
return nil
}
func (*LogHook) ExecuteForWorkingSets() bool {
return false
}
func RunAsyncReplicationThreads(bThreads *sql.BackgroundThreads, ctxF func(context.Context) (*sql.Context, error), ch chan PushArg, destDB *doltdb.DoltDB, tmpDir string, logger io.Writer) error {
func RunAsyncReplicationThreads(bThreads *sql.BackgroundThreads, ctxF func(context.Context) (*sql.Context, error), ch chan PushArg, tmpDir string, logger io.Writer) error {
mu := &sync.Mutex{}
var newHeads = make(map[string]PushArg, asyncPushBufferSize)
@@ -254,7 +240,7 @@ func RunAsyncReplicationThreads(bThreads *sql.BackgroundThreads, ctxF func(conte
defer sql.SessionEnd(sqlCtx.Session)
sql.SessionCommandBegin(sqlCtx.Session)
defer sql.SessionCommandEnd(sqlCtx.Session)
err := pushDataset(sqlCtx, destDB, newCm.db, newCm.ds, tmpDir)
err := pushDataset(sqlCtx, newCm.destDb, newCm.srcDb, newCm.ds, tmpDir)
if err != nil {
logger.Write([]byte("replication failed: " + err.Error()))
}
@@ -292,3 +278,165 @@ func RunAsyncReplicationThreads(bThreads *sql.BackgroundThreads, ctxF func(conte
return nil
}
// DynamicPushOnWriteHook is a CommitHook that conditionally invokes either a PushOnWriteHook or an AsyncPushOnWriteHook
// based on the values of the `dolt_replicate_to_remote` and `dolt_async_replication` system variables. If
// `dolt_replicate_to_remote` is empty, replication is disabled and the hook does nothing.
//
//
// Each time the Execute method is invoked, the current values of the system variables are checked. If they differ from the
// last invocation, the internal PushOnWriteHook or AsyncPushOnWriteHook is updated to reflect the new configuration.
type DynamicPushOnWriteHook struct {
mu sync.Mutex
dEnv *env.DoltEnv
tempDir string
logger io.Writer
// Values below protected with mu Mutex
remote string
async bool
// Wrappers around the two hook types. We update the fields of these structs as needed based on the current config.
syncHook PushOnWriteHook
asyncHook AsyncPushOnWriteHook
}
var _ doltdb.CommitHook = (*DynamicPushOnWriteHook)(nil)
// NewDynamicPushOnWriteHook creates a DynamicPushOnWriteHook, parameterized by the environment and a logger. The configuration
// options can result in errors at construction time, for example if the provided remote does not exist. Once the
// process is up and running, bad configuration detected at execution time is logged and replication is skipped.
func NewDynamicPushOnWriteHook(ctx context.Context, dEnv *env.DoltEnv, logger io.Writer) (*DynamicPushOnWriteHook, RunAsyncThreads, error) {
remote, async, err := getReplicationVals()
if err != nil {
return nil, nil, err
}
tmpDir, err := dEnv.TempTableFilesDir()
if err != nil {
return nil, nil, err
}
a, newThreads := NewAsyncPushOnWriteHook(tmpDir, logger)
p := NewPushOnWriteHook(tmpDir, logger)
if remote != "" {
destDb, err := getDestinationDb(ctx, dEnv, remote)
if err != nil {
return nil, nil, err
}
p.destDb = destDb
a.destDb = destDb
}
return &DynamicPushOnWriteHook{
dEnv: dEnv,
tempDir: tmpDir,
logger: logger,
remote: remote,
async: async,
syncHook: *p,
asyncHook: *a,
}, newThreads, nil
}
// getReplicationVals gets the current values of the `dolt_replicate_to_remote` and `dolt_async_replication` system
// variables.
func getReplicationVals() (string, bool, error) {
_, val, ok := sql.SystemVariables.GetGlobal(dsess.ReplicateToRemote)
if !ok {
return "", false, sql.ErrUnknownSystemVariable.New(dsess.ReplicateToRemote)
}
remoteName, ok := val.(string)
if !ok {
return "", false, sql.ErrInvalidSystemVariableValue.New(val)
}
async := false
if _, val, ok = sql.SystemVariables.GetGlobal(dsess.AsyncReplication); ok && val == dsess.SysVarTrue {
async = true
}
return remoteName, async, nil
}
// getDestinationDb gets the target doltdb for replication, based on a provided remote name.
func getDestinationDb(ctx context.Context, dEnv *env.DoltEnv, remoteName string) (*doltdb.DoltDB, error) {
remotes, err := dEnv.GetRemotes()
if err != nil {
return nil, err
}
rem, ok := remotes.Get(remoteName)
if !ok {
return nil, fmt.Errorf("%w: '%s'", env.ErrRemoteNotFound, remoteName)
}
destDb, err := rem.GetRemoteDB(ctx, types.Format_Default, dEnv)
if err != nil {
return nil, err
}
return destDb, nil
}
func (m *DynamicPushOnWriteHook) Execute(ctx context.Context, ds datas.Dataset, db *doltdb.DoltDB) (func(context.Context) error, error) {
remoteName, async, err := getReplicationVals()
// We only need the lock if the remote configuration has changed.
hook, err := func() (doltdb.CommitHook, error) {
m.mu.Lock()
defer m.mu.Unlock()
if m.remote == remoteName && m.async == async {
// No change in config since last execution.
if m.remote == "" {
// replication disabled
return nil, nil
}
if async {
return &m.asyncHook, nil
}
return &m.syncHook, nil
}
if remoteName == "" {
// replication disabled
m.remote = ""
m.async = false
m.asyncHook.destDb = nil
m.syncHook.destDb = nil
return nil, nil
}
m.remote = remoteName
destDb, err := getDestinationDb(ctx, m.dEnv, m.remote)
if err != nil {
return nil, err
}
m.syncHook.destDb = destDb
m.asyncHook.destDb = destDb
if async {
return &m.asyncHook, nil
}
return &m.syncHook, nil
}()
if err != nil {
logrus.Warnf("replication hook failed: %v", err)
return nil, err
}
if hook == nil {
// replication disabled
return nil, nil
}
logrus.Debugf("replication hook invoked. pushing to '%s' (asyn=%t)", remoteName, async)
return hook.Execute(ctx, ds, db)
}
func (m *DynamicPushOnWriteHook) ExecuteForWorkingSets() bool {
return false
}
+10 -29
View File
@@ -17,8 +17,6 @@ package sqle
import (
"bytes"
"context"
"errors"
"io"
"path/filepath"
"testing"
"time"
@@ -133,8 +131,11 @@ func TestPushOnWriteHook(t *testing.T) {
t.Error("Failed to commit")
}
logger := &bytes.Buffer{}
// setup hook
hook := NewPushOnWriteHook(destDB, tmpDir)
hook := NewPushOnWriteHook(tmpDir, logger)
hook.destDb = destDB
ddb.PrependCommitHooks(ctx, hook)
t.Run("replicate to remote", func(t *testing.T) {
@@ -157,30 +158,16 @@ func TestPushOnWriteHook(t *testing.T) {
destHash, _ := destCommit.HashOf()
assert.Equal(t, srcHash, destHash)
})
t.Run("replicate handle error logs to writer", func(t *testing.T) {
var buffer = &bytes.Buffer{}
err = hook.SetLogger(ctx, buffer)
assert.NoError(t, err)
msg := "prince charles is a vampire"
hook.HandleError(ctx, errors.New(msg))
assert.Contains(t, buffer.String(), msg)
})
}
func TestLogHook(t *testing.T) {
msg := []byte("hello")
var err error
t.Run("new log hook", func(t *testing.T) {
ctx := context.Background()
hook := NewLogHook(msg)
var buffer = &bytes.Buffer{}
err = hook.SetLogger(ctx, buffer)
assert.NoError(t, err)
hook := NewLogHook(msg, buffer)
hook.Execute(ctx, datas.Dataset{}, nil)
assert.Equal(t, buffer.Bytes(), msg)
assert.Equal(t, msg, buffer.Bytes())
})
}
@@ -230,7 +217,8 @@ func TestAsyncPushOnWrite(t *testing.T) {
t.Run("replicate to remote", func(t *testing.T) {
bThreads := sql.NewBackgroundThreads()
defer bThreads.Shutdown()
hook, runThreads := NewAsyncPushOnWriteHook(destDB, tmpDir, &buffer.Buffer{})
hook, runThreads := NewAsyncPushOnWriteHook(tmpDir, &buffer.Buffer{})
hook.destDb = destDB
require.NotNil(t, hook)
require.NotNil(t, runThreads)
runThreads(bThreads, func(ctx context.Context) (*sql.Context, error) {
@@ -303,7 +291,8 @@ func TestAsyncPushOnWrite(t *testing.T) {
destDB.PrependCommitHooks(context.Background(), counts)
bThreads := sql.NewBackgroundThreads()
hook, runThreads := NewAsyncPushOnWriteHook(destDB, tmpDir, &buffer.Buffer{})
hook, runThreads := NewAsyncPushOnWriteHook(tmpDir, &buffer.Buffer{})
hook.destDb = destDB
runThreads(bThreads, func(ctx context.Context) (*sql.Context, error) {
return sql.NewContext(ctx), nil
})
@@ -347,14 +336,6 @@ func (c *countingCommitHook) Execute(ctx context.Context, ds datas.Dataset, db *
return nil, nil
}
func (c *countingCommitHook) HandleError(ctx context.Context, err error) error {
return nil
}
func (c *countingCommitHook) SetLogger(ctx context.Context, wr io.Writer) error {
return nil
}
func (c *countingCommitHook) ExecuteForWorkingSets() bool {
return false
}
@@ -16,7 +16,6 @@ package sqle
import (
"context"
"io"
"testing"
sqle "github.com/dolthub/go-mysql-server"
@@ -110,7 +109,7 @@ func TestDatabaseProvider(t *testing.T) {
require.Len(t, hooks, 2)
_, ok := hooks[0].(*snoopingCommitHook)
assert.True(t, ok, "expect hook to be snoopingCommitHook, it is %T", hooks[0])
_, ok = hooks[1].(*PushOnWriteHook)
_, ok = hooks[1].(*DynamicPushOnWriteHook)
assert.True(t, ok, "expect hook to be PushOnWriteHook, it is %T", hooks[1])
})
t.Run("AsyncPushOnWrite", func(t *testing.T) {
@@ -133,7 +132,7 @@ func TestDatabaseProvider(t *testing.T) {
require.Len(t, hooks, 2)
_, ok := hooks[0].(*snoopingCommitHook)
assert.True(t, ok, "expect hook to be snoopingCommitHook, it is %T", hooks[0])
_, ok = hooks[1].(*AsyncPushOnWriteHook)
_, ok = hooks[1].(*DynamicPushOnWriteHook)
assert.True(t, ok, "expect hook to be AsyncPushOnWriteHook, it is %T", hooks[1])
})
})
@@ -147,14 +146,6 @@ func (*snoopingCommitHook) Execute(ctx context.Context, ds datas.Dataset, db *do
return nil, nil
}
func (*snoopingCommitHook) HandleError(ctx context.Context, err error) error {
return nil
}
func (*snoopingCommitHook) SetLogger(ctx context.Context, wr io.Writer) error {
return nil
}
func (*snoopingCommitHook) ExecuteForWorkingSets() bool {
return true
}
@@ -1611,6 +1611,47 @@ var DoltRebaseScriptTests = []queries.ScriptTest{
},
},
},
{
Name: "dolt_rebase: handles multi-line commit messages",
SetUpScript: []string{
`CALL dolt_commit('--allow-empty', '-m', 'empty commit 1');`,
`CALL dolt_commit('--allow-empty', '-m', 'empty
commit
2');`,
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT message FROM dolt_log LIMIT 1;",
Expected: []sql.Row{
{"empty \ncommit \n2"},
},
},
{
Query: "CALL dolt_rebase('-i', '--empty=keep', 'HEAD~1');",
Expected: []sql.Row{
{0, "interactive rebase started on branch dolt_rebase_main; adjust the rebase plan in the dolt_rebase table, then continue rebasing by calling dolt_rebase('--continue')"},
},
},
{
Query: "SELECT * from dolt_rebase;",
Expected: []sql.Row{
{"1", "pick", doltCommit, "empty \ncommit \n2"},
},
},
{
Query: "CALL dolt_rebase('--continue');",
Expected: []sql.Row{
{0, "Successfully rebased and updated refs/heads/main"},
},
},
{
Query: "SELECT message FROM dolt_log LIMIT 1;",
Expected: []sql.Row{
{"empty \ncommit \n2"},
},
},
},
},
}
var DoltRebaseMultiSessionScriptTests = []queries.ScriptTest{
@@ -307,6 +307,24 @@ var DoltTestRunFunctionScripts = []queries.ScriptTest{
},
},
},
{
Name: "Can expect single boolean",
SetUpScript: []string{
"CREATE TABLE booleans (b BOOLEAN)",
"INSERT INTO booleans VALUES (true)",
"INSERT INTO dolt_tests VALUES ('should pass', 'boolean tests', 'select * from booleans;', 'expected_single_value', '==', 'true'), " +
"('should fail', 'boolean tests', 'select * from booleans;', 'expected_single_value', '==', 'false')",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * FROM dolt_test_run('boolean tests')",
Expected: []sql.Row{
{"should fail", "boolean tests", "select * from booleans;", "FAIL", "Assertion failed: expected_single_value equal to false, got true"},
{"should pass", "boolean tests", "select * from booleans;", "PASS", ""},
},
},
},
},
{
Name: "Can handle null values correctly",
SetUpScript: []string{
+2 -42
View File
@@ -27,47 +27,10 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/store/types"
)
func getPushOnWriteHook(ctx context.Context, dEnv *env.DoltEnv, logger io.Writer) (doltdb.CommitHook, RunAsyncThreads, error) {
_, val, ok := sql.SystemVariables.GetGlobal(dsess.ReplicateToRemote)
if !ok {
return nil, nil, sql.ErrUnknownSystemVariable.New(dsess.ReplicateToRemote)
} else if val == "" {
return nil, nil, nil
}
remoteName, ok := val.(string)
if !ok {
return nil, nil, sql.ErrInvalidSystemVariableValue.New(val)
}
remotes, err := dEnv.GetRemotes()
if err != nil {
return nil, nil, err
}
rem, ok := remotes.Get(remoteName)
if !ok {
return nil, nil, fmt.Errorf("%w: '%s'", env.ErrRemoteNotFound, remoteName)
}
ddb, err := rem.GetRemoteDB(ctx, types.Format_Default, dEnv)
if err != nil {
return nil, nil, err
}
tmpDir, err := dEnv.TempTableFilesDir()
if err != nil {
return nil, nil, err
}
if _, val, ok = sql.SystemVariables.GetGlobal(dsess.AsyncReplication); ok && val == dsess.SysVarTrue {
hook, runThreads := NewAsyncPushOnWriteHook(ddb, tmpDir, logger)
return hook, runThreads, nil
}
return NewPushOnWriteHook(ddb, tmpDir), nil, nil
return NewDynamicPushOnWriteHook(ctx, dEnv, logger)
}
type RunAsyncThreads func(*sql.BackgroundThreads, func(context.Context) (*sql.Context, error)) error
@@ -80,14 +43,11 @@ func GetCommitHooks(ctx context.Context, dEnv *env.DoltEnv, logger io.Writer) ([
if err != nil {
path, _ := dEnv.FS.Abs(".")
logrus.Errorf("error loading replication for database at %s, replication disabled: %v", path, err)
postCommitHooks = append(postCommitHooks, NewLogHook([]byte(err.Error()+"\n")))
postCommitHooks = append(postCommitHooks, NewLogHook([]byte(err.Error()+"\n"), logger))
} else if hook != nil {
postCommitHooks = append(postCommitHooks, hook)
}
for _, h := range postCommitHooks {
_ = h.SetLogger(ctx, logger)
}
return postCommitHooks, runThreads, nil
}
+4
View File
@@ -0,0 +1,4 @@
Make a developer build for one of the supported platforms,
similar to how buildpgobinaries would build them.
Does not include profile-guided optimization.
+23
View File
@@ -0,0 +1,23 @@
#!/bin/bash

# Build a developer dolt binary for a single OS/arch tuple inside a golang
# docker container, delegating the actual cross-compile to buildindocker.sh.
# Does not include profile-guided optimization (see the PGO release script).
#
# Usage: build.sh <tuple>, where <tuple> is one of the platforms listed below.
set -e
set -o pipefail

# Run from the repository root so the bind mount below picks up go.mod.
# Quote the path: an unquoted cd breaks if the checkout path contains spaces.
script_dir=$(dirname "$0")
cd "$script_dir/../.."

GO_BUILD_VERSION=1.25.1

if (( $# != 1 )); then
  echo "usage: build.sh linux-arm64|linux-amd64|darwin-arm64|darwin-amd64|windows-amd64"
  exit 2
fi

TUPLE=$1

docker run --rm \
  -v "$(pwd)":/src \
  -e OS_ARCH_TUPLES="$TUPLE" \
  golang:"$GO_BUILD_VERSION"-trixie \
  /src/utils/publishrelease/buildindocker.sh
+124
View File
@@ -0,0 +1,124 @@
#!/bin/bash
# Run this from within a golang docker container.
# Expects two env variables:
# * GO_BUILD_FLAGS - any extra flags to be passed to `go build`
# * OS_ARCH_TUPLES - the arch tuples to build releases for
#
# Expects the go.mod source root to be in /src.
# Will place the built binaries in /src/out.
set -e
set -o pipefail
# Default to building every supported platform when OS_ARCH_TUPLES is unset,
# and reject any tuple we do not have a toolchain for.
KNOWN_OS_ARCH_TUPLES="darwin-amd64 darwin-arm64 windows-amd64 linux-amd64 linux-arm64"
if [[ -z "$OS_ARCH_TUPLES" ]]; then
OS_ARCH_TUPLES="$KNOWN_OS_ARCH_TUPLES"
fi
for tuple in $OS_ARCH_TUPLES; do
found=0
for known in $KNOWN_OS_ARCH_TUPLES; do
if [[ $tuple == $known ]]; then
found=1
fi
done
if (( found == 0 )); then
echo "buildindocker.sh: Unknown OS_ARCH_TUPLE $tuple supplied. Known tuples: $KNOWN_OS_ARCH_TUPLES."
exit 2
fi
done
# Packaging/compression tools plus the mingw (windows) and clang (darwin)
# cross compilers; the musl (linux) toolchains come from the tarball below.
apt-get update && apt-get install -y p7zip-full pigz curl xz-utils mingw-w64 clang-19
cd /
# Fetch the prebuilt cross toolchains and static ICU libraries used by the
# icu_static build tag.
curl -o optcross.tar.xz https://dolthub-tools.s3.us-west-2.amazonaws.com/optcross/"$(uname -m)"-linux_20250327_0.0.3_trixie.tar.xz
tar Jxf optcross.tar.xz
curl -o icustatic.tar.xz https://dolthub-tools.s3.us-west-2.amazonaws.com/icustatic/20250327_0.0.3_trixie.tar.xz
tar Jxf icustatic.tar.xz
export PATH=/opt/cross/bin:"$PATH"
cd /src
BINS="dolt"
# Per-platform compiler/assembler/linker settings, keyed by tuple.
declare -A platform_cc
platform_cc["linux-arm64"]="aarch64-linux-musl-gcc"
platform_cc["linux-amd64"]="x86_64-linux-musl-gcc"
platform_cc["darwin-arm64"]="clang-19 --target=aarch64-darwin --sysroot=/opt/cross/darwin-sysroot -mmacosx-version-min=12.0"
platform_cc["darwin-amd64"]="clang-19 --target=x86_64-darwin --sysroot=/opt/cross/darwin-sysroot -mmacosx-version-min=12.0"
platform_cc["windows-amd64"]="x86_64-w64-mingw32-gcc"
declare -A platform_cxx
platform_cxx["linux-arm64"]="aarch64-linux-musl-g++"
platform_cxx["linux-amd64"]="x86_64-linux-musl-g++"
platform_cxx["darwin-arm64"]="clang++-19 --target=aarch64-darwin --sysroot=/opt/cross/darwin-sysroot -mmacosx-version-min=12.0 --stdlib=libc++"
platform_cxx["darwin-amd64"]="clang++-19 --target=x86_64-darwin --sysroot=/opt/cross/darwin-sysroot -mmacosx-version-min=12.0 --stdlib=libc++"
platform_cxx["windows-amd64"]="x86_64-w64-mingw32-g++"
declare -A platform_as
platform_as["linux-arm64"]="aarch64-linux-musl-as"
platform_as["linux-amd64"]="x86_64-linux-musl-as"
platform_as["darwin-arm64"]="clang-19 --target=aarch64-darwin --sysroot=/opt/cross/darwin-sysroot -mmacosx-version-min=12.0"
platform_as["darwin-amd64"]="clang-19 --target=x86_64-darwin --sysroot=/opt/cross/darwin-sysroot -mmacosx-version-min=12.0"
platform_as["windows-amd64"]="x86_64-w64-mingw32-as"
# Note: the extldflags below for the MacOS builds specify an SDK version of 14.4
# This corresponds to our currently installed toolchain, but should change if the
# toolchain changes.
declare -A platform_go_ldflags
platform_go_ldflags["linux-arm64"]="-s -w"
platform_go_ldflags["linux-amd64"]="-s -w"
platform_go_ldflags["darwin-arm64"]="-s -w -compressdwarf=false -extldflags -Wl,-platform_version,macos,12.0,14.4"
platform_go_ldflags["darwin-amd64"]="-s -w -compressdwarf=false -extldflags -Wl,-platform_version,macos,12.0,14.4"
platform_go_ldflags["windows-amd64"]="-s -w"
declare -A platform_cgo_ldflags
platform_cgo_ldflags["linux-arm64"]="-static -s"
platform_cgo_ldflags["linux-amd64"]="-static -s"
platform_cgo_ldflags["darwin-arm64"]=""
platform_cgo_ldflags["darwin-amd64"]=""
platform_cgo_ldflags["windows-amd64"]="-static-libgcc -static-libstdc++"
# Build each requested tuple into out/dolt-<os>-<arch>/ and package it
# (zip+7z for windows, tar.gz elsewhere).
for tuple in $OS_ARCH_TUPLES; do
os=`echo $tuple | sed 's/-.*//'`
arch=`echo $tuple | sed 's/.*-//'`
o="out/dolt-$os-$arch"
mkdir -p "$o/bin"
cp Godeps/LICENSES "$o/"
for bin in $BINS; do
echo Building "$o/$bin"
obin="$bin"
if [ "$os" = windows ]; then
obin="$bin.exe"
fi
CGO_ENABLED=1 \
GOOS="$os" \
GOARCH="$arch" \
CC="${platform_cc[${tuple}]}" \
CXX="${platform_cxx[${tuple}]}" \
AS="${platform_as[${tuple}]}" \
CGO_LDFLAGS="${platform_cgo_ldflags[${tuple}]}" \
go build \
$GO_BUILD_FLAGS \
-ldflags="${platform_go_ldflags[${tuple}]}" \
-tags icu_static \
-trimpath \
-o "$o/bin/$obin" "./cmd/$bin/"
done
if [ "$os" = windows ]; then
(cd out && 7z a "dolt-$os-$arch.zip" "dolt-$os-$arch" && 7z a "dolt-$os-$arch.7z" "dolt-$os-$arch")
else
tar cf - -C out "dolt-$os-$arch" | pigz -9 > "out/dolt-$os-$arch.tar.gz"
fi
done
# Emit out/install.sh with the __DOLT_VERSION__ placeholder replaced by the
# version constant from doltversion/version.go. The `eval echo` strips the
# surrounding Go string quotes from the parsed token.
render_install_sh() {
local parsed=(`grep "Version = " ./cmd/dolt/doltversion/version.go`)
local DOLT_VERSION=`eval echo ${parsed[2]}`
sed 's|__DOLT_VERSION__|'"$DOLT_VERSION"'|' utils/publishrelease/install.sh
}
render_install_sh > out/install.sh
chmod 755 out/install.sh
+6 -97
View File
@@ -9,100 +9,9 @@ cd $script_dir/../..
[ ! -z "$GO_BUILD_VERSION" ] || (echo "Must supply GO_BUILD_VERSION"; exit 1)
[ ! -z "$PROFILE" ] || (echo "Must supply PROFILE"; exit 1)
docker run --rm -v `pwd`:/src -v "$PROFILE":/cpu.pprof golang:"$GO_BUILD_VERSION"-trixie /bin/bash -c '
set -e
set -o pipefail
apt-get update && apt-get install -y p7zip-full pigz curl xz-utils mingw-w64 clang-19
cd /
curl -o optcross.tar.xz https://dolthub-tools.s3.us-west-2.amazonaws.com/optcross/"$(uname -m)"-linux_20250327_0.0.3_trixie.tar.xz
tar Jxf optcross.tar.xz
curl -o icustatic.tar.xz https://dolthub-tools.s3.us-west-2.amazonaws.com/icustatic/20250327_0.0.3_trixie.tar.xz
tar Jxf icustatic.tar.xz
export PATH=/opt/cross/bin:"$PATH"
cd /src
BINS="dolt"
OS_ARCH_TUPLES="darwin-amd64 darwin-arm64 windows-amd64 linux-amd64 linux-arm64"
declare -A platform_cc
platform_cc["linux-arm64"]="aarch64-linux-musl-gcc"
platform_cc["linux-amd64"]="x86_64-linux-musl-gcc"
platform_cc["darwin-arm64"]="clang-19 --target=aarch64-darwin --sysroot=/opt/cross/darwin-sysroot -mmacosx-version-min=12.0"
platform_cc["darwin-amd64"]="clang-19 --target=x86_64-darwin --sysroot=/opt/cross/darwin-sysroot -mmacosx-version-min=12.0"
platform_cc["windows-amd64"]="x86_64-w64-mingw32-gcc"
declare -A platform_cxx
platform_cxx["linux-arm64"]="aarch64-linux-musl-g++"
platform_cxx["linux-amd64"]="x86_64-linux-musl-g++"
platform_cxx["darwin-arm64"]="clang++-19 --target=aarch64-darwin --sysroot=/opt/cross/darwin-sysroot -mmacosx-version-min=12.0 --stdlib=libc++"
platform_cxx["darwin-amd64"]="clang++-19 --target=x86_64-darwin --sysroot=/opt/cross/darwin-sysroot -mmacosx-version-min=12.0 --stdlib=libc++"
platform_cxx["windows-amd64"]="x86_64-w64-mingw32-g++"
declare -A platform_as
platform_as["linux-arm64"]="aarch64-linux-musl-as"
platform_as["linux-amd64"]="x86_64-linux-musl-as"
platform_as["darwin-arm64"]="clang-19 --target=aarch64-darwin --sysroot=/opt/cross/darwin-sysroot -mmacosx-version-min=12.0"
platform_as["darwin-amd64"]="clang-19 --target=x86_64-darwin --sysroot=/opt/cross/darwin-sysroot -mmacosx-version-min=12.0"
platform_as["windows-amd64"]="x86_64-w64-mingw32-as"
# Note: the extldflags below for the MacOS builds specify an SDK version of 14.4
# This corresponds to our currently installed toolchain, but should change if the
# toolchain changes.
declare -A platform_go_ldflags
platform_go_ldflags["linux-arm64"]="-s -w"
platform_go_ldflags["linux-amd64"]="-s -w"
platform_go_ldflags["darwin-arm64"]="-s -w -compressdwarf=false -extldflags -Wl,-platform_version,macos,12.0,14.4"
platform_go_ldflags["darwin-amd64"]="-s -w -compressdwarf=false -extldflags -Wl,-platform_version,macos,12.0,14.4"
platform_go_ldflags["windows-amd64"]="-s -w"
declare -A platform_cgo_ldflags
platform_cgo_ldflags["linux-arm64"]="-static -s"
platform_cgo_ldflags["linux-amd64"]="-static -s"
platform_cgo_ldflags["darwin-arm64"]=""
platform_cgo_ldflags["darwin-amd64"]=""
platform_cgo_ldflags["windows-amd64"]="-static-libgcc -static-libstdc++"
for tuple in $OS_ARCH_TUPLES; do
os=`echo $tuple | sed 's/-.*//'`
arch=`echo $tuple | sed 's/.*-//'`
o="out/dolt-$os-$arch"
mkdir -p "$o/bin"
cp Godeps/LICENSES "$o/"
for bin in $BINS; do
echo Building "$o/$bin"
obin="$bin"
if [ "$os" = windows ]; then
obin="$bin.exe"
fi
CGO_ENABLED=1 \
GOOS="$os" \
GOARCH="$arch" \
CC="${platform_cc[${tuple}]}" \
CXX="${platform_cxx[${tuple}]}" \
AS="${platform_as[${tuple}]}" \
CGO_LDFLAGS="${platform_cgo_ldflags[${tuple}]}" \
go build \
-pgo=/cpu.pprof \
-ldflags="${platform_go_ldflags[${tuple}]}" \
-tags icu_static \
-trimpath \
-o "$o/bin/$obin" "./cmd/$bin/"
done
if [ "$os" = windows ]; then
(cd out && 7z a "dolt-$os-$arch.zip" "dolt-$os-$arch" && 7z a "dolt-$os-$arch.7z" "dolt-$os-$arch")
else
tar cf - -C out "dolt-$os-$arch" | pigz -9 > "out/dolt-$os-$arch.tar.gz"
fi
done
render_install_sh() {
local parsed=(`grep "Version = " ./cmd/dolt/doltversion/version.go`)
local DOLT_VERSION=`eval echo ${parsed[2]}`
sed '\''s|__DOLT_VERSION__|'\''"$DOLT_VERSION"'\''|'\'' utils/publishrelease/install.sh
}
render_install_sh > out/install.sh
chmod 755 out/install.sh
'
docker run --rm \
-v `pwd`:/src \
-v "$PROFILE":/cpu.pprof \
-e GO_BUILD_FLAGS='-pgo=/cpu.pprof' \
golang:"$GO_BUILD_VERSION"-trixie \
/src/utils/publishrelease/buildindocker.sh
+321
View File
@@ -63,7 +63,328 @@ get_conflict_tables() {
'
}
@test "ignore: simple matches" {
    dolt sql <<SQL
CREATE TABLE ignoreme (pk int);
CREATE TABLE dontignore (pk int);
CREATE TABLE nomatch (pk int);
SQL

    dolt add -A

    # A table matching a dolt_ignore pattern ends up ignored; everything
    # else is staged by `dolt add -A`.
    ignored=$(get_ignored_tables)
    staged=$(get_staged_tables)

    grep -q "ignoreme" <<< "$ignored" || false
    grep -q "dontignore" <<< "$staged" || false
    grep -q "nomatch" <<< "$staged" || false
}
@test "ignore: specific overrides" {
    dolt sql <<SQL
CREATE TABLE please_ignore (pk int);
CREATE TABLE please_ignore_too (pk int);
CREATE TABLE do_not_ignore (pk int);
CREATE TABLE commit_me (pk int);
CREATE TABLE commit_me_not(pk int);
SQL

    dolt add -A

    # More specific dolt_ignore patterns override broader ones, so tables
    # split between the ignored set and the staged set accordingly.
    staged=$(get_staged_tables)
    ignored=$(get_ignored_tables)

    grep -q "please_ignore" <<< "$ignored" || false
    grep -q "please_ignore_too" <<< "$ignored" || false
    grep -q "do_not_ignore" <<< "$staged" || false
    grep -q "commit_me" <<< "$staged" || false
    grep -q "commit_me_not" <<< "$ignored" || false
}
# When a table matches both an "ignore" pattern and a "don't ignore" pattern
# with equal specificity, `dolt add -A` must fail and name both patterns.
@test "ignore: conflict" {
dolt sql <<SQL
CREATE TABLE commit_ignore (pk int);
SQL
run dolt add -A
[ "$status" -eq 1 ]
[[ "$output" =~ "the table commit_ignore matches conflicting patterns in dolt_ignore" ]] || false
[[ "$output" =~ "ignored: *_ignore" ]] || false
[[ "$output" =~ "not ignored: commit_*" ]] || false
}
# A '?' in a dolt_ignore pattern matches exactly one character: "test" and
# "test1" are ignored, while "test11" (two extra chars) is staged.
# The grep anchors ($) distinguish the exact table names from prefixes.
@test "ignore: question mark" {
dolt sql <<SQL
CREATE TABLE test (pk int);
CREATE TABLE test1 (pk int);
CREATE TABLE test11 (pk int);
SQL
dolt add -A
ignored=$(get_ignored_tables)
staged=$(get_staged_tables)
[[ ! -z $(echo "$ignored" | grep "test$") ]] || false
[[ ! -z $(echo "$ignored" | grep "test1$") ]] || false
[[ ! -z $(echo "$staged" | grep "test11$") ]] || false
}
@test "ignore: don't stash ignored tables" {
dolt sql <<SQL
CREATE TABLE ignoreme (pk int);
SQL
run dolt stash -u
[ "$status" -eq 0 ]
[[ "$output" =~ "No local changes to save" ]] || false
}
@test "ignore: error when trying to stash table with dolt_ignore conflict" {
dolt sql <<SQL
CREATE TABLE commit_ignore (pk int);
SQL
run dolt stash -u
[ "$status" -eq 1 ]
[[ "$output" =~ "the table commit_ignore matches conflicting patterns in dolt_ignore" ]] || false
[[ "$output" =~ "ignored: *_ignore" ]] || false
[[ "$output" =~ "not ignored: commit_*" ]] || false
}
@test "ignore: stash ignored and untracked tables when --all is passed" {
dolt sql <<SQL
CREATE TABLE ignoreme (pk int);
CREATE TABLE dontignore (pk int);
SQL
dolt stash -a
working=$(get_working_tables)
ignored=$(get_ignored_tables)
[[ -z $(echo "$ignored" | grep "ignoreme") ]] || false
[[ -z $(echo "$working" | grep "dontignore") ]] || false
dolt stash pop
working=$(get_working_tables)
ignored=$(get_ignored_tables)
[[ ! -z $(echo "$ignored" | grep "ignoreme") ]] || false
[[ ! -z $(echo "$working" | grep "dontignore") ]] || false
}
@test "ignore: stash table with dolt_ignore conflict when --all is passed" {
dolt sql <<SQL
CREATE TABLE commit_ignore (pk int);
SQL
dolt stash -a
conflicts=$(get_conflict_tables)
[[ -z $(echo "$conflicts" | grep "commit_ignore") ]] || false
dolt stash pop
conflicts=$(get_conflict_tables)
[[ ! -z $(echo "$conflicts" | grep "commit_ignore") ]] || false
}
@test "ignore: allow staging ignored tables if 'add --force' is supplied" {
dolt sql <<SQL
CREATE TABLE ignoreme (pk int);
SQL
dolt add -A --force
staged=$(get_staged_tables)
[[ ! -z $(echo "$staged" | grep "ignoreme") ]] || false
}
@test "ignore: don't auto-stage ignored tables" {
    dolt sql <<SQL
CREATE TABLE ignoreme (pk int);
CREATE TABLE nomatch (pk int);
SQL

    dolt commit -m "commit1" -A

    run dolt show

    [ "$status" -eq 0 ]
    # The ignored table must not appear in the commit's diff. The original
    # assertion `! [["$output" =~ ...]]` was missing the spaces required
    # around `[[`, so bash ran a nonexistent command and `!` inverted the
    # 127 exit status — the check always passed regardless of output.
    [[ ! "$output" =~ "diff --dolt a/ignoreme b/ignoreme" ]] || false
}
@test "ignore: dolt status doesn't show ignored tables when --ignored is not supplied" {
    dolt sql <<SQL
CREATE TABLE ignoreme (pk int);
CREATE TABLE nomatch (pk int);
SQL

    run dolt status

    [ "$status" -eq 0 ]
    [[ "$output" =~ "nomatch" ]] || false
    # Negative assertions: the original `! [["$output" =~ ...]]` form lacked
    # the spaces `[[` requires as a shell word, so bash ran a nonexistent
    # command and `!` turned the failure into success — the checks were
    # silent no-ops. Negate inside `[[ ]]` instead.
    [[ ! "$output" =~ "Ignored tables" ]] || false
    [[ ! "$output" =~ "ignoreme" ]] || false
}
# Note: test renamed — it exercises the case where --ignored IS supplied,
# but was previously titled "... when --ignored is not supplied".
@test "ignore: dolt status shows ignored tables when --ignored is supplied" {
    dolt sql <<SQL
CREATE TABLE ignoreme (pk int);
CREATE TABLE nomatch (pk int);
SQL

    run dolt status --ignored

    [ "$status" -eq 0 ]
    [[ "$output" =~ "nomatch" ]] || false
    [[ "$output" =~ "Ignored tables" ]] || false
    [[ "$output" =~ "ignoreme" ]] || false
}
@test "ignore: don't display new but ignored tables in dolt diff" {
    dolt sql <<SQL
CREATE TABLE ignoreme (pk int);
CREATE TABLE nomatch (pk int);
SQL

    run dolt diff

    [ "$status" -eq 0 ]
    [[ "$output" =~ "nomatch" ]] || false
    # Fixed: `! [["$output" =~ ...]]` was missing the spaces `[[` needs to be
    # parsed as a keyword; the assertion always passed. Negate inside `[[ ]]`.
    [[ ! "$output" =~ "ignoreme" ]] || false
}
@test "ignore: don't display new but ignored tables in reverse diff" {
    dolt sql <<SQL
CREATE TABLE ignoreme (pk int);
CREATE TABLE nomatch (pk int);
SQL

    run dolt diff -R

    [ "$status" -eq 0 ]
    [[ "$output" =~ "nomatch" ]] || false
    # Fixed: `! [["$output" =~ ...]]` was missing the spaces `[[` needs to be
    # parsed as a keyword; the assertion always passed. Negate inside `[[ ]]`.
    [[ ! "$output" =~ "ignoreme" ]] || false
}
@test "ignore: DO display modified ignored tables in dolt diff after staging" {
dolt sql <<SQL
CREATE TABLE ignoreme (pk int);
SQL
dolt add --force ignoreme
dolt sql <<SQL
INSERT INTO ignoreme VALUES (1);
SQL
run dolt diff
[ "$status" -eq 0 ]
echo "$output"
[[ "$output" =~ "ignoreme" ]] || false
}
@test "ignore: DO display modified ignored tables in reverse diff after staging" {
dolt sql <<SQL
CREATE TABLE ignoreme (pk int);
SQL
dolt add --force ignoreme
dolt sql <<SQL
INSERT INTO ignoreme VALUES (1);
SQL
run dolt diff -R
[ "$status" -eq 0 ]
echo "$output"
[[ "$output" =~ "ignoreme" ]] || false
}
@test "ignore: DO display modified ignored tables in dolt diff after committing" {
dolt sql <<SQL
CREATE TABLE ignoreme (pk int);
SQL
dolt add --force ignoreme
dolt commit -m "commit1"
dolt sql <<SQL
INSERT INTO ignoreme VALUES (1);
SQL
run dolt diff
[ "$status" -eq 0 ]
echo "$output"
[[ "$output" =~ "ignoreme" ]] || false
}
@test "ignore: detect when equivalent patterns have different values" {
dolt sql <<SQL
INSERT INTO dolt_ignore VALUES
("**_test", true),
("*_test", false),
("*_foo", true),
("%_foo", false);
CREATE TABLE a_test (pk int);
CREATE TABLE a_foo (pk int);
SQL
conflict=$(get_conflict_tables)
echo "$conflict"
[[ ! -z $(echo "$conflict" | grep "a_test") ]] || false
[[ ! -z $(echo "$conflict" | grep "a_foo") ]] || false
}
@test "ignore: allow using dolt_ignore with AS OF" {
+33
View File
@@ -21,9 +21,15 @@ teardown() {
teardown_common
}
# sets up the EDITOR env var with a script that takes the input file from
# the process invoking the editor and copies it to the editor-input.txt
# file for tests to check, and then copies the file specified as an argument
# to this function, as the output for the editor, sent back to the process
# that invoked the editor.
setupCustomEditorScript() {
touch rebaseScript.sh
echo "#!/bin/bash" >> rebaseScript.sh
echo "cp \$1 editor-input.txt" >> rebaseScript.sh
if [ $# -eq 1 ]; then
echo "mv $1 \$1" >> rebaseScript.sh
fi
@@ -131,6 +137,33 @@ setupCustomEditorScript() {
[[ "$output" =~ "main commit 2" ]] || false
}
# bats test_tags=no_lambda
# skip bats on lambda, since we don't have the pcre2grep utility there
@test "rebase: multi-line commit messages" {
setupCustomEditorScript
# Create a multi-line commit message
dolt checkout b1
dolt commit --allow-empty -m "multi
line
commit
message"
# Run rebase (with the default plan, custom editor makes no changes)
run dolt rebase --empty=keep -i main
[ "$status" -eq 0 ]
[[ "$output" =~ "Successfully rebased and updated refs/heads/b1" ]] || false
# Assert that the newlines were removed in the rebase plan editor
grep "multi line commit message" editor-input.txt
# Assert that the commit log still shows the multi-line message
run dolt log -n1
[ "$status" -eq 0 ]
echo "$output" > tmp.out
pcre2grep -nM "multi\s*\R+\s*line\s*\R+\s*commit\s*\R+\s*message" tmp.out
}
@test "rebase: failed rebase will abort and clean up" {
setupCustomEditorScript "invalidRebasePlan.txt"
dolt checkout b1
+57
View File
@@ -22,6 +22,8 @@ teardown() {
teardown_common
rm -rf $TMPDIRS
cd $BATS_TMPDIR
stop_sql_server
}
@test "replication: configuration errors" {
@@ -1029,3 +1031,58 @@ SQL
[ "$status" -eq 0 ]
[[ "$output" =~ "1,1" ]] || false
}
@test "replication: sql-server remote replication config changes update in real time" {
# This test is a little different from others in this file. We configure branch
# replication to push to remote1 or backup1. Then we create a commit, and fetch from
# the appropriate replica to ensure the configuration was honored.
cd repo1
start_sql_server
dolt sql -q "set @@persist.dolt_replicate_to_remote = 'remote1'"
dolt commit --allow-empty -m "push one to remote1"
dolt sql -q "set @@persist.dolt_replicate_to_remote = 'backup1'"
dolt commit --allow-empty -m "push one to backup1"
cd ../
dolt clone file://./rem1 remote1_clone
cd remote1_clone
run dolt log -n 1
[ "$status" -eq 0 ]
[[ "$output" =~ "push one to remote1" ]] || false
[[ ! "$output" =~ "push one to backup1" ]] || false
cd ../
dolt clone file://./bac1 backup1_clone
cd backup1_clone
run dolt log -n 1
[ "$status" -eq 0 ]
[[ "$output" =~ "push one to backup1" ]] || false
# We can also update the `dolt_async_replication` setting and have it take effect without
# restarting the server. We don't have a great way to differentiate between async and sync
# but we'll at least turn it on to make sure it continues to replicate.
cd ../repo1
dolt sql -q "set @@persist.dolt_async_replication = 1"
dolt commit --allow-empty -m "async push to backup1"
sleep 5 # allow for async replication to complete
dolt sql -q "set @@persist.dolt_replicate_to_remote = 'remote1'"
dolt commit --allow-empty -m "async push to remote1"
sleep 5
cd ../remote1_clone
dolt pull
run dolt log -n 1
[ "$status" -eq 0 ]
[[ "$output" =~ "async push to remote1" ]] || false
cd ../backup1_clone
dolt pull
run dolt log -n 1
[ "$status" -eq 0 ]
[[ "$output" =~ "async push to backup1" ]] || false
[[ ! "$output" =~ "async push to remote1" ]] || false
}
@@ -1,6 +1,6 @@
module github.com/dolthub/dolt/integration-tests/go-sql-server-driver
go 1.25
go 1.25.0
require (
github.com/dolthub/dolt/go v0.40.4
@@ -0,0 +1,22 @@
import java.sql.*;

// Regression test for https://github.com/dolthub/dolt/issues/9890:
// fetching column metadata through mysql-connector-java must not fail on
// Dolt's collation handling. Exits 0 on success, 1 on any SQLException.
public class MySQLConnectorTest_Collation {
    public static void main(String[] args) {
        String user = args[0];
        String port = args[1];
        String db = args[2];
        String url = "jdbc:mysql://127.0.0.1:" + port + "/" + db;
        // try-with-resources closes the connection and result set even when
        // the metadata call throws (the original leaked both).
        try (Connection conn = DriverManager.getConnection(url, user, "");
             ResultSet columns = conn.getMetaData().getColumns(null, null, null, null)) {
            // Result intentionally unread: merely retrieving the column
            // metadata exercises the collation code path under test.
        } catch (SQLException ex) {
            System.out.println("An error occurred.");
            ex.printStackTrace();
            System.exit(1);
        }
        System.exit(0);
    }
}
@@ -52,6 +52,11 @@ teardown() {
java -cp $BATS_TEST_DIRNAME/java:$BATS_TEST_DIRNAME/java/mysql-connector-java-8.0.21.jar MySQLConnectorTest $USER $PORT $REPO_NAME
}
# Regression test for https://github.com/dolthub/dolt/issues/9890: compile
# and run the collation metadata client against the running sql-server.
@test "mysql-connector-java client collations" {
javac $BATS_TEST_DIRNAME/java/MySQLConnectorTest_Collation.java
java -cp $BATS_TEST_DIRNAME/java:$BATS_TEST_DIRNAME/java/mysql-connector-java-8.0.21.jar MySQLConnectorTest_Collation $USER $PORT $REPO_NAME
}
@test "node mysql client" {
node $BATS_TEST_DIRNAME/node/index.js $USER $PORT $REPO_NAME
node $BATS_TEST_DIRNAME/node/knex.js $USER $PORT $REPO_NAME