Merge latest main branch into binary-as-hex feature branch

- Resolves conflicts in sql.go by integrating both:
  1. Main branch's query catalog refactoring (direct SQL queries, no dEnv dependency)
  2. Binary-as-hex parameter support from feature branch
- Maintains SaveQuery function name from main branch
- Preserves binaryAsHex parameter threading through all query execution paths (sketched below)
- Updates function signatures to match main branch's simplified approach
Author: elianddb
Date: 2025-07-30 19:21:55 +00:00
38 changed files with 763 additions and 944 deletions
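
For context on the binary-as-hex behavior preserved above, here is a minimal, hypothetical sketch of the formatting rule it implies: when the flag is set, []byte column values are rendered as 0x-prefixed hex literals. The helper name formatBinaryValue is illustrative only and is not taken from this commit.

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// formatBinaryValue is a hypothetical helper: when binaryAsHex is set,
// []byte column values are printed as uppercase 0x-prefixed hex literals;
// everything else falls through to default formatting.
func formatBinaryValue(v interface{}, binaryAsHex bool) string {
	if b, ok := v.([]byte); ok && binaryAsHex {
		return "0x" + strings.ToUpper(hex.EncodeToString(b))
	}
	return fmt.Sprintf("%v", v)
}

func main() {
	fmt.Println(formatBinaryValue([]byte{0xde, 0xad, 0xbe, 0xef}, true)) // 0xDEADBEEF
	fmt.Println(formatBinaryValue("plain text", true))                   // plain text
}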

View File

@@ -27,6 +27,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions/dolt_ci"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/store/val"
)
var viewDocs = cli.CommandDocumentationContent{
@@ -202,7 +203,15 @@ func getSavedQueries(sqlCtx *sql.Context, queryist cli.Queryist) (map[string]str
return nil, err
}
for _, row := range rows {
savedQueries[row[2].(string)] = row[3].(string)
queryName, err := row[2].(*val.TextStorage).Unwrap(sqlCtx)
if err != nil {
return nil, err
}
queryStatement, err := row[3].(*val.TextStorage).Unwrap(sqlCtx)
if err != nil {
return nil, err
}
savedQueries[queryName] = queryStatement
}
}
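
The unwrapping above reflects that text columns may now come back wrapped in *val.TextStorage. A hedged sketch of a helper that tolerates both the wrapped and plain-string forms follows; the helper name textColAsString is hypothetical, and the Unwrap signature is assumed from its use in the hunk above.

package example

import (
	"fmt"

	"github.com/dolthub/go-mysql-server/sql"

	"github.com/dolthub/dolt/go/store/val"
)

// textColAsString is a hypothetical helper (not part of this commit) showing
// the pattern above: a text column may arrive either as a plain string or as
// a *val.TextStorage that must be unwrapped with the session context.
func textColAsString(ctx *sql.Context, col interface{}) (string, error) {
	switch v := col.(type) {
	case string:
		return v, nil
	case *val.TextStorage:
		return v.Unwrap(ctx)
	default:
		return "", fmt.Errorf("unexpected type %T, expected string or *val.TextStorage", col)
	}
}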

View File

@@ -54,7 +54,7 @@ import (
// SqlEngine packages up the context necessary to run sql queries against dsqle.
type SqlEngine struct {
provider sql.DatabaseProvider
provider *dsqle.DoltDatabaseProvider
ContextFactory sql.ContextFactory
dsessFactory sessionFactory
engine *gms.Engine
@@ -435,13 +435,17 @@ func (se *SqlEngine) FileSystem() filesys.Filesys {
}
func (se *SqlEngine) Close() error {
var err error
if se.engine != nil {
if se.engine.Analyzer.Catalog.BinlogReplicaController != nil {
dblr.DoltBinlogReplicaController.Close()
}
return se.engine.Close()
err = se.engine.Close()
}
return nil
if se.provider != nil {
se.provider.Close()
}
return err
}
// configureBinlogReplicaController configures the binlog replication controller with the |engine|.
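
The rewritten Close above keeps the engine's close error while still closing the provider unconditionally. A small illustrative model of that ordering, with stand-in types rather than the actual SqlEngine:

package example

// closer is a stand-in for the SqlEngine shutdown above: close the engine
// first and remember its error, then close the provider regardless, so
// provider resources are released even when the engine close fails.
type closer struct {
	engine   interface{ Close() error }
	provider interface{ Close() }
}

func (c *closer) Close() error {
	var err error
	if c.engine != nil {
		err = c.engine.Close()
	}
	if c.provider != nil {
		c.provider.Close()
	}
	return err
}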

View File

@@ -15,6 +15,7 @@
package commands
import (
"bytes"
"context"
"fmt"
"io"
@@ -32,6 +33,8 @@ import (
"github.com/dolthub/vitess/go/vt/vterrors"
"github.com/fatih/color"
"github.com/flynn-archive/go-shlex"
"github.com/gocraft/dbr/v2"
"github.com/gocraft/dbr/v2/dialect"
textunicode "golang.org/x/text/encoding/unicode"
"golang.org/x/text/transform"
"gopkg.in/src-d/go-errors.v1"
@@ -42,10 +45,10 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
dsqle "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/libraries/utils/iohelp"
"github.com/dolthub/dolt/go/libraries/utils/osutil"
"github.com/dolthub/dolt/go/store/val"
eventsapi "github.com/dolthub/eventsapi_schema/dolt/services/eventsapi/v1alpha1"
)
@@ -234,13 +237,13 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
if query, queryOK := apr.GetValue(QueryFlag); queryOK {
if apr.Contains(saveFlag) {
return execSaveQuery(sqlCtx, dEnv, queryist, apr, query, format, usage, binaryAsHex)
return SaveQuery(sqlCtx, queryist, apr, query, format, usage, binaryAsHex)
}
return queryMode(sqlCtx, queryist, apr, query, format, usage, binaryAsHex)
} else if savedQueryName, exOk := apr.GetValue(executeFlag); exOk {
return executeSavedQuery(sqlCtx, queryist, dEnv, savedQueryName, format, usage, binaryAsHex)
return executeSavedQuery(sqlCtx, queryist, savedQueryName, format, usage, binaryAsHex)
} else if apr.Contains(listSavedFlag) {
return listSavedQueries(sqlCtx, queryist, dEnv, format, usage)
return listSavedQueries(sqlCtx, queryist, format, usage)
} else {
// Run in either batch mode for piped input, or shell mode for interactive
isTty := false
@@ -351,49 +354,38 @@ func (cmd SqlCmd) handleLegacyArguments(ap *argparser.ArgParser, commandStr stri
}
func listSavedQueries(ctx *sql.Context, qryist cli.Queryist, dEnv *env.DoltEnv, format engine.PrintResultFormat, usage cli.UsagePrinter) int {
if !dEnv.Valid() {
return sqlHandleVErrAndExitCode(qryist, errhand.BuildDError("error: --%s must be used in a dolt database directory.", listSavedFlag).Build(), usage)
}
workingRoot, err := dEnv.WorkingRoot(ctx)
if err != nil {
return sqlHandleVErrAndExitCode(qryist, errhand.VerboseErrorFromError(err), usage)
}
hasQC, err := workingRoot.HasTable(ctx, doltdb.TableName{Name: doltdb.DoltQueryCatalogTableName})
if err != nil {
verr := errhand.BuildDError("error: Failed to read from repository.").AddCause(err).Build()
return sqlHandleVErrAndExitCode(qryist, verr, usage)
}
if !hasQC {
return 0
}
func listSavedQueries(ctx *sql.Context, qryist cli.Queryist, format engine.PrintResultFormat, usage cli.UsagePrinter) int {
query := "SELECT * FROM " + doltdb.DoltQueryCatalogTableName
return sqlHandleVErrAndExitCode(qryist, execSingleQuery(ctx, qryist, query, format, false), usage)
}
func executeSavedQuery(ctx *sql.Context, qryist cli.Queryist, dEnv *env.DoltEnv, savedQueryName string, format engine.PrintResultFormat, usage cli.UsagePrinter, binaryAsHex bool) int {
if !dEnv.Valid() {
return sqlHandleVErrAndExitCode(qryist, errhand.BuildDError("error: --%s must be used in a dolt database directory.", executeFlag).Build(), usage)
}
func executeSavedQuery(ctx *sql.Context, qryist cli.Queryist, savedQueryName string, format engine.PrintResultFormat, usage cli.UsagePrinter, binaryAsHex bool) int {
var buffer bytes.Buffer
buffer.WriteString("SELECT query FROM dolt_query_catalog where id = ?")
searchQuery, err := dbr.InterpolateForDialect(buffer.String(), []interface{}{savedQueryName}, dialect.MySQL)
workingRoot, err := dEnv.WorkingRoot(ctx)
rows, err := GetRowsForSql(qryist, ctx, searchQuery)
if err != nil {
return sqlHandleVErrAndExitCode(qryist, errhand.VerboseErrorFromError(err), usage)
} else if len(rows) == 0 {
err = fmt.Errorf("saved query %s not found", savedQueryName)
return sqlHandleVErrAndExitCode(qryist, errhand.VerboseErrorFromError(err), usage)
}
sq, err := dtables.RetrieveFromQueryCatalog(ctx, workingRoot, savedQueryName)
if err != nil {
return sqlHandleVErrAndExitCode(qryist, errhand.VerboseErrorFromError(err), usage)
var query string
if ts, ok := rows[0][0].(*val.TextStorage); ok {
query, err = ts.Unwrap(ctx)
if err != nil {
return sqlHandleVErrAndExitCode(qryist, errhand.VerboseErrorFromError(err), usage)
}
} else {
if s, ok := rows[0][0].(string); ok {
query = s
}
}
cli.PrintErrf("Executing saved query '%s':\n%s\n", savedQueryName, sq.Query)
return sqlHandleVErrAndExitCode(qryist, execSingleQuery(ctx, qryist, sq.Query, format, binaryAsHex), usage)
cli.PrintErrf("Executing saved query '%s':\n%s\n", savedQueryName, query)
return sqlHandleVErrAndExitCode(qryist, execSingleQuery(ctx, qryist, query, format, binaryAsHex), usage)
}
func queryMode(
@@ -416,11 +408,7 @@ func queryMode(
return 0
}
func execSaveQuery(ctx *sql.Context, dEnv *env.DoltEnv, qryist cli.Queryist, apr *argparser.ArgParseResults, query string, format engine.PrintResultFormat, usage cli.UsagePrinter, binaryAsHex bool) int {
if !dEnv.Valid() {
return sqlHandleVErrAndExitCode(qryist, errhand.BuildDError("error: --%s must be used in a dolt database directory.", saveFlag).Build(), usage)
}
func SaveQuery(ctx *sql.Context, qryist cli.Queryist, apr *argparser.ArgParseResults, query string, format engine.PrintResultFormat, usage cli.UsagePrinter, binaryAsHex bool) int {
saveName := apr.GetValueOrDefault(saveFlag, "")
verr := execSingleQuery(ctx, qryist, query, format, binaryAsHex)
@@ -428,23 +416,31 @@ func execSaveQuery(ctx *sql.Context, dEnv *env.DoltEnv, qryist cli.Queryist, apr
return sqlHandleVErrAndExitCode(qryist, verr, usage)
}
workingRoot, err := dEnv.WorkingRoot(ctx)
order := int32(1)
rows, err := GetRowsForSql(qryist, ctx, "SELECT MAX(display_order) FROM dolt_query_catalog")
if err != nil {
return sqlHandleVErrAndExitCode(qryist, errhand.BuildDError("error: failed to get working root").AddCause(err).Build(), usage)
return sqlHandleVErrAndExitCode(qryist, errhand.VerboseErrorFromError(err), usage)
}
if len(rows) > 0 && rows[0][0] != nil {
order, err = getInt32ColAsInt32(rows[0][0])
if err != nil {
return sqlHandleVErrAndExitCode(qryist, errhand.VerboseErrorFromError(err), usage)
}
order++
}
saveMessage := apr.GetValueOrDefault(messageFlag, "")
newRoot, verr := saveQuery(ctx, workingRoot, query, saveName, saveMessage)
if verr != nil {
return sqlHandleVErrAndExitCode(qryist, verr, usage)
}
err = dEnv.UpdateWorkingRoot(ctx, newRoot)
var buffer bytes.Buffer
buffer.WriteString("INSERT INTO dolt_query_catalog VALUES (?, ?, ?, ?, ?) ON DUPLICATE KEY UPDATE query = ?, description = ?")
params := []interface{}{saveName, order, saveName, query, saveMessage, query, saveMessage}
insertQuery, err := dbr.InterpolateForDialect(buffer.String(), params, dialect.MySQL)
if err != nil {
return sqlHandleVErrAndExitCode(qryist, errhand.BuildDError("error: failed to update working root").AddCause(err).Build(), usage)
return sqlHandleVErrAndExitCode(qryist, errhand.VerboseErrorFromError(err), usage)
}
return 0
_, err = GetRowsForSql(qryist, ctx, insertQuery)
return sqlHandleVErrAndExitCode(qryist, errhand.VerboseErrorFromError(err), usage)
}
// execSingleQuery runs a single query and prints the results. This is not intended for use in interactive modes, especially
@@ -620,16 +616,6 @@ func validateSqlArgs(apr *argparser.ArgParseResults) error {
return nil
}
// Saves the query given to the catalog with the name and message given.
func saveQuery(ctx *sql.Context, root doltdb.RootValue, query string, name string, message string) (doltdb.RootValue, errhand.VerboseError) {
_, newRoot, err := dtables.NewQueryCatalogEntryWithNameAsID(ctx, root, name, query, message)
if err != nil {
return nil, errhand.BuildDError("Couldn't save query").AddCause(err).Build()
}
return newRoot, nil
}
// execBatchMode runs all the queries in the input reader
func execBatchMode(ctx *sql.Context, qryist cli.Queryist, input io.Reader, continueOnErr bool, format engine.PrintResultFormat, binaryAsHex bool) error {
scanner := NewStreamScanner(input)
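
Both SaveQuery and executeSavedQuery above build their statements with dbr.InterpolateForDialect so user-supplied values are escaped before the query string reaches the Queryist. A small sketch of that pattern; buildSavedQueryLookup is an illustrative name, not part of the commit.

package example

import (
	"github.com/gocraft/dbr/v2"
	"github.com/gocraft/dbr/v2/dialect"
)

// buildSavedQueryLookup expands the placeholder into a concrete MySQL
// statement, escaping the saved-query name, exactly as the lookup in
// executeSavedQuery above does before calling GetRowsForSql.
func buildSavedQueryLookup(savedQueryName string) (string, error) {
	return dbr.InterpolateForDialect(
		"SELECT query FROM dolt_query_catalog WHERE id = ?",
		[]interface{}{savedQueryName},
		dialect.MySQL,
	)
}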

View File

@@ -377,7 +377,7 @@ func GetInt8ColAsBool(col interface{}) (bool, error) {
}
}
// getInt64ColAsInt64 returns the value of an int64 column as a string
// getInt64ColAsInt64 returns the value of an int64 column as an int64
// This is necessary because Queryist may return an int64 column as an int64 (when using SQLEngine)
// or as a string (when using ConnectionQueryist).
func getInt64ColAsInt64(col interface{}) (int64, error) {
@@ -399,7 +399,7 @@ func getInt64ColAsInt64(col interface{}) (int64, error) {
}
}
// getUint64ColAsUint64 returns the value of an uint64 column as a string
// getUint64ColAsUint64 returns the value of an uint64 column as a uint64
// This is necessary because Queryist may return an uint64 column as an uint64 (when using SQLEngine)
// or as a string (when using ConnectionQueryist).
func getUint64ColAsUint64(col interface{}) (uint64, error) {
@@ -421,6 +421,28 @@ func getUint64ColAsUint64(col interface{}) (uint64, error) {
}
}
// getInt32ColAsInt32 returns the value of an int32 column as an int32
// This is necessary because Queryist may return an int32 column as an int32 (when using SQLEngine)
// or as a string (when using ConnectionQueryist).
func getInt32ColAsInt32(col interface{}) (int32, error) {
switch v := col.(type) {
case int:
return int32(v), nil
case int32:
return v, nil
case uint32:
return int32(v), nil
case string:
iv, err := strconv.ParseInt(v, 10, 32)
if err != nil {
return 0, err
}
return int32(iv), nil
default:
return 0, fmt.Errorf("unexpected type %T, was expecting int32, uint32 or string", v)
}
}
// getStrBoolColAsBool returns the value of the input as a bool. This is required because depending on if we
// go over the wire or not we may get a string or a bool when we expect a bool.
func getStrBoolColAsBool(col interface{}) (bool, error) {

View File

@@ -16,5 +16,5 @@
package doltversion
const (
Version = "1.57.2"
Version = "1.57.3"
)

View File

@@ -13,7 +13,7 @@ require (
github.com/dolthub/fslock v0.0.3
github.com/dolthub/ishell v0.0.0-20240701202509-2b217167d718
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81
github.com/dolthub/vitess v0.0.0-20250611225316-90a5898bfe26
github.com/dolthub/vitess v0.0.0-20250729231828-87bce34800ce
github.com/dustin/go-humanize v1.0.1
github.com/fatih/color v1.13.0
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
@@ -60,7 +60,7 @@ require (
github.com/dolthub/aws-sdk-go-ini-parser v0.0.0-20250305001723-2821c37f6c12
github.com/dolthub/eventsapi_schema v0.0.0-20250725194025-a087efa1ee55
github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2
github.com/dolthub/go-mysql-server v0.20.1-0.20250725204146-7bc2294a9cb2
github.com/dolthub/go-mysql-server v0.20.1-0.20250730053026-797f25fcfc4f
github.com/dolthub/gozstd v0.0.0-20240423170813-23a2903bca63
github.com/esote/minmaxheap v1.0.0
github.com/goccy/go-json v0.10.2

View File

@@ -240,8 +240,10 @@ github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-icu-regex v0.0.0-20250327004329-6799764f2dad h1:66ZPawHszNu37VPQckdhX1BPPVzREsGgNxQeefnlm3g=
github.com/dolthub/go-icu-regex v0.0.0-20250327004329-6799764f2dad/go.mod h1:ylU4XjUpsMcvl/BKeRRMXSH7e7WBrPXdSLvnRJYrxEA=
github.com/dolthub/go-mysql-server v0.20.1-0.20250725204146-7bc2294a9cb2 h1:1ID8uBiBAVS1422/VZNrqaD3PPFgbxGSbu/iEe2kBOA=
github.com/dolthub/go-mysql-server v0.20.1-0.20250725204146-7bc2294a9cb2/go.mod h1:6dT48ffh+d12GMuwuabrRaR9tK/XlNc21hiU7BVRZ5Q=
github.com/dolthub/go-mysql-server v0.20.1-0.20250729231836-5203d53bc3c8 h1:i4Z3VqPFWy8CNxBW4vEIe9jX/XqEqsy6cLVuuV8reVg=
github.com/dolthub/go-mysql-server v0.20.1-0.20250729231836-5203d53bc3c8/go.mod h1:mqpOPgp1LPDxhE/tZlL1OOiYW12Xd6e89OWm4eN7854=
github.com/dolthub/go-mysql-server v0.20.1-0.20250730053026-797f25fcfc4f h1:Cc6mAobw+9VXf5+47ubp1z3YLilGK0k10u8cI0tSfwI=
github.com/dolthub/go-mysql-server v0.20.1-0.20250730053026-797f25fcfc4f/go.mod h1:/11huCxrsYYVHJFrUC+RzHtrTig0kJz6lzSTc0xqTIQ=
github.com/dolthub/gozstd v0.0.0-20240423170813-23a2903bca63 h1:OAsXLAPL4du6tfbBgK0xXHZkOlos63RdKYS3Sgw/dfI=
github.com/dolthub/gozstd v0.0.0-20240423170813-23a2903bca63/go.mod h1:lV7lUeuDhH5thVGDCKXbatwKy2KW80L4rMT46n+Y2/Q=
github.com/dolthub/ishell v0.0.0-20240701202509-2b217167d718 h1:lT7hE5k+0nkBdj/1UOSFwjWpNxf+LCApbRHgnCA17XE=
@@ -250,8 +252,10 @@ github.com/dolthub/jsonpath v0.0.2-0.20240227200619-19675ab05c71 h1:bMGS25NWAGTE
github.com/dolthub/jsonpath v0.0.2-0.20240227200619-19675ab05c71/go.mod h1:2/2zjLQ/JOOSbbSboojeg+cAwcRV0fDLzIiWch/lhqI=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81 h1:7/v8q9XGFa6q5Ap4Z/OhNkAMBaK5YeuEzwJt+NZdhiE=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81/go.mod h1:siLfyv2c92W1eN/R4QqG/+RjjX5W2+gCTRjZxBjI3TY=
github.com/dolthub/vitess v0.0.0-20250611225316-90a5898bfe26 h1:9Npf0JYVCrwe9edTfYD/pjIncCePNDiu4j50xLcV334=
github.com/dolthub/vitess v0.0.0-20250611225316-90a5898bfe26/go.mod h1:1gQZs/byeHLMSul3Lvl3MzioMtOW1je79QYGyi2fd70=
github.com/dolthub/vitess v0.0.0-20250729225143-5ab74d1f0182 h1:LrUmlwHlQLSu8OIL60APDGeUoPBoezeo8fTDRAe9EJg=
github.com/dolthub/vitess v0.0.0-20250729225143-5ab74d1f0182/go.mod h1:1gQZs/byeHLMSul3Lvl3MzioMtOW1je79QYGyi2fd70=
github.com/dolthub/vitess v0.0.0-20250729231828-87bce34800ce h1:WAOnVqPsJF+y5Js5LFGzEUFOb9nnenBIcaOeRni8rZw=
github.com/dolthub/vitess v0.0.0-20250729231828-87bce34800ce/go.mod h1:1gQZs/byeHLMSul3Lvl3MzioMtOW1je79QYGyi2fd70=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=

View File

@@ -30,15 +30,6 @@ import (
type ctxKey int
type ctxValue int
const (
doltCICtxKey ctxKey = iota
)
const (
doltCICtxValueUnspecified ctxValue = iota
doltCICtxValueAllow
)
const (
// DoltNamespace is the name prefix of dolt system tables. We reserve all tables that begin with dolt_ for system use.
DoltNamespace = "dolt"
@@ -130,6 +121,7 @@ var getWriteableSystemTables = func() []string {
ProceduresTableName,
IgnoreTableName,
GetRebaseTableName(),
GetQueryCatalogTableName(),
// TODO: find way to make these writable by the dolt process
// TODO: but not by user
@@ -374,6 +366,8 @@ var GetStashesTableName = func() string {
return StashesTableName
}
var GetQueryCatalogTableName = func() string { return DoltQueryCatalogTableName }
const (
// LogTableName is the log system table name
LogTableName = "dolt_log"

View File

@@ -1572,7 +1572,7 @@ func (m *secondaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, leftSc
// If the left-side has the delete, the index is already correct and no work needs to be done.
// If the right-side has the delete, remove the key from the index.
if diff.Right == nil {
err = applyEdit(ctx, idx, diff.Key, diff.Base, nil)
err = applyEdit(ctx, idx, diff.Key, diff.Left, nil)
}
default:
// Any changes to the left-side of the merge are not needed, since we currently

View File

@@ -367,7 +367,10 @@ func createCVsForPartialKeyMatches(
kb := val.NewTupleBuilder(primaryKD, primaryIdx.NodeStore())
for k, _, err := itr.Next(ctx); err == nil; k, _, err = itr.Next(ctx) {
for k, _, err := itr.Next(ctx); err != io.EOF; k, _, err = itr.Next(ctx) {
if err != nil {
return err
}
// convert secondary idx entry to primary row key
// the pks of the table are the last keys of the index
@@ -390,14 +393,20 @@ func createCVsForPartialKeyMatches(
return err
}
// If a value wasn't found, then there is a row in the secondary index
// that can't be found in the primary index. This is never expected, so
// we return an error.
if value == nil {
return fmt.Errorf("unable to find row from secondary index in the primary index, with key: %v", primaryIdxKey)
}
// If a value was found, then there is a row in the child table that references the
// deleted parent row, so we report a constraint violation.
err = receiver.ProllyFKViolationFound(ctx, primaryIdxKey, value)
if err != nil {
return err
}
}
if err != nil && err != io.EOF {
return err
}
return nil
}
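
The loop change above switches the termination condition from err == nil to err != io.EOF so that real iteration errors are returned instead of silently ending the scan. A generic sketch of that iteration shape, using a stand-in iterator interface rather than the actual prolly types:

package example

import (
	"context"
	"io"
)

// rowIter is a stand-in for the prolly map iterator used above; Next returns
// io.EOF once iteration is exhausted.
type rowIter interface {
	Next(ctx context.Context) (key, value []byte, err error)
}

// drain illustrates the corrected loop shape: iterate until io.EOF, returning
// any other error immediately instead of treating it as end-of-data.
func drain(ctx context.Context, itr rowIter, visit func(k, v []byte) error) error {
	for {
		k, v, err := itr.Next(ctx)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if err := visit(k, v); err != nil {
			return err
		}
	}
}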

View File

@@ -49,6 +49,9 @@ func (db database) Schema() string {
return ""
}
func (db database) Close() {
}
func (db database) GetTableInsensitive(ctx *sql.Context, tblName string) (sql.Table, bool, error) {
tblName = strings.ToLower(tblName)
if tblName == StatusTableName {

View File

@@ -140,6 +140,10 @@ func (db Database) ValidateSchema(sch sql.Schema) error {
return nil
}
func (db Database) Close() {
db.gs.Close()
}
// Revision implements dsess.RevisionDatabase
func (db Database) Revision() string {
return db.revision
@@ -803,6 +807,17 @@ func (db Database) getTableInsensitive(ctx *sql.Context, head *doltdb.Commit, ds
if !resolve.UseSearchPath || isDoltgresSystemTable {
dt, found = dtables.NewBackupsTable(db, lwrName), true
}
case doltdb.DoltQueryCatalogTableName:
backingTable, _, err := db.getTable(ctx, root, doltdb.DoltQueryCatalogTableName)
if err != nil {
return nil, false, err
}
if backingTable == nil {
dt, found = dtables.NewEmptyQueryCatalogTable(ctx), true
} else {
versionableTable := backingTable.(dtables.VersionableTable)
dt, found = dtables.NewQueryCatalogTable(ctx, versionableTable), true
}
}
if found {

View File

@@ -210,6 +210,12 @@ func (p *DoltDatabaseProvider) FileSystem() filesys.Filesys {
return p.fs
}
func (p *DoltDatabaseProvider) Close() {
for _, db := range p.databases {
db.Close()
}
}
// Installs an InitDatabaseHook which configures new databases--those
// created with `CREATE DATABASE` and `call dolt_clone` for
// example--for push replication. Pull-on-read replication is already

View File

@@ -15,12 +15,17 @@
package dsess
import (
"context"
"fmt"
"sync"
"testing"
"github.com/dolthub/go-mysql-server/sql"
"github.com/stretchr/testify/assert"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess/mutexmap"
"github.com/dolthub/dolt/go/store/hash"
)
func TestCoerceAutoIncrementValue(t *testing.T) {
@@ -73,3 +78,41 @@ func TestCoerceAutoIncrementValue(t *testing.T) {
})
}
}
func TestInitWithRoots(t *testing.T) {
t.Run("EmptyRoots", func(t *testing.T) {
ait := AutoIncrementTracker{
dbName: "test_database",
sequences: &sync.Map{},
mm: mutexmap.NewMutexMap(),
init: make(chan struct{}),
cancelInit: make(chan struct{}),
}
go ait.initWithRoots(context.Background())
assert.NoError(t, ait.waitForInit())
})
t.Run("CloseCancelsInit", func(t *testing.T) {
ait := AutoIncrementTracker{
dbName: "test_database",
sequences: &sync.Map{},
mm: mutexmap.NewMutexMap(),
init: make(chan struct{}),
cancelInit: make(chan struct{}),
}
go ait.initWithRoots(context.Background(), blockingRoot{})
ait.Close()
assert.Error(t, ait.waitForInit())
})
}
type blockingRoot struct {
}
func (blockingRoot) ResolveRootValue(ctx context.Context) (doltdb.RootValue, error) {
<-ctx.Done()
return nil, context.Cause(ctx)
}
func (blockingRoot) HashOf() (hash.Hash, error) {
return hash.Hash{}, nil
}

View File

@@ -17,6 +17,7 @@ package dsess
import (
"context"
"errors"
"fmt"
"io"
"math"
"strings"
@@ -51,8 +52,18 @@ type AutoIncrementTracker struct {
sequences *sync.Map // map[string]uint64
mm *mutexmap.MutexMap
lockMode LockMode
init chan struct{}
initErr error
// AutoIncrementTracker is lazily initialized by loading
// tracker state for every given |root|. On first access, we
// block on initialization being completed and we terminally
// return |initErr| if there was any error initializing.
init chan struct{}
initErr error
// To clean up effectively we need to stop all access to
// storage. As part of that, we have the possibility to cancel
// async initialization and block on the process completing.
cancelInit chan struct{}
}
var _ globalstate.AutoIncrementTracker = &AutoIncrementTracker{}
@@ -63,10 +74,11 @@ var _ globalstate.AutoIncrementTracker = &AutoIncrementTracker{}
// branches that don't have a local working set)
func NewAutoIncrementTracker(ctx context.Context, dbName string, roots ...doltdb.Rootish) (*AutoIncrementTracker, error) {
ait := AutoIncrementTracker{
dbName: dbName,
sequences: &sync.Map{},
mm: mutexmap.NewMutexMap(),
init: make(chan struct{}),
dbName: dbName,
sequences: &sync.Map{},
mm: mutexmap.NewMutexMap(),
init: make(chan struct{}),
cancelInit: make(chan struct{}),
}
gcSafepointController := getGCSafepointController(ctx)
ctx = context.Background()
@@ -100,6 +112,11 @@ func loadAutoIncValue(sequences *sync.Map, tableName string) uint64 {
return current.(uint64)
}
func (a *AutoIncrementTracker) Close() {
close(a.cancelInit)
<-a.init
}
// Current returns the next value to be generated in the auto increment sequence for the table named
func (a *AutoIncrementTracker) Current(tableName string) (uint64, error) {
err := a.waitForInit()
@@ -483,30 +500,45 @@ func (a *AutoIncrementTracker) waitForInit() error {
// |initWithRoots| is called with appropriately outlives the end of
// the method and that it participates in GC lifecycle callbacks
// appropriately, if that is necessary.
func (a *AutoIncrementTracker) initWithRoots(ctx context.Context, roots ...doltdb.Rootish) error {
func (a *AutoIncrementTracker) initWithRoots(ctx context.Context, roots ...doltdb.Rootish) {
defer close(a.init)
eg, egCtx := errgroup.WithContext(ctx)
// Cancel the parent context so that the errgroup work will
// complete with an error if we see cancelInit closed.
finishedCh := make(chan struct{})
defer close(finishedCh)
ctx, cancel := context.WithCancelCause(ctx)
go func() {
select {
case <-a.cancelInit:
fmt.Printf("canceling it...\n")
cancel(errors.New("initialization canceled. did not complete successfully."))
case <-finishedCh:
}
}()
eg, ctx := errgroup.WithContext(ctx)
eg.SetLimit(128)
for _, root := range roots {
eg.Go(func() error {
if egCtx.Err() != nil {
return egCtx.Err()
if ctx.Err() != nil {
return context.Cause(ctx)
}
r, rerr := root.ResolveRootValue(egCtx)
if rerr != nil {
return rerr
r, err := root.ResolveRootValue(ctx)
if err != nil {
return err
}
return r.IterTables(egCtx, func(tableName doltdb.TableName, table *doltdb.Table, sch schema.Schema) (bool, error) {
return r.IterTables(ctx, func(tableName doltdb.TableName, table *doltdb.Table, sch schema.Schema) (bool, error) {
if !schema.HasAutoIncrement(sch) {
return false, nil
}
seq, iErr := table.GetAutoIncrementValue(egCtx)
if iErr != nil {
return true, iErr
seq, err := table.GetAutoIncrementValue(ctx)
if err != nil {
return true, err
}
tableNameStr := tableName.ToLower().Name
@@ -519,7 +551,7 @@ func (a *AutoIncrementTracker) initWithRoots(ctx context.Context, roots ...doltd
})
}
return eg.Wait()
a.initErr = eg.Wait()
}
// incrementAutoIncVal determines whether to increment the auto-increment value.
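
The tracker changes above make initialization asynchronous and cancellable: init is closed when the background work finishes, cancelInit is closed by Close, and later accesses block on waitForInit and observe initErr. A reduced, illustrative model of that coordination follows; names and structure are simplified, and this is not the production code.

package example

import (
	"context"
	"errors"

	"golang.org/x/sync/errgroup"
)

// lazyInit models the lifecycle above: init closes when background
// initialization finishes, cancelInit is closed by Close to abort it, and
// initErr records the terminal result.
type lazyInit struct {
	init       chan struct{}
	cancelInit chan struct{}
	initErr    error
}

func newLazyInit() *lazyInit {
	return &lazyInit{
		init:       make(chan struct{}),
		cancelInit: make(chan struct{}),
	}
}

// run executes the given work concurrently, stopping early if Close is called.
func (l *lazyInit) run(ctx context.Context, work ...func(context.Context) error) {
	defer close(l.init)

	finished := make(chan struct{})
	defer close(finished)

	ctx, cancel := context.WithCancelCause(ctx)
	go func() {
		select {
		case <-l.cancelInit:
			cancel(errors.New("initialization canceled"))
		case <-finished:
			cancel(nil) // release the context once init completes normally
		}
	}()

	eg, ctx := errgroup.WithContext(ctx)
	for _, w := range work {
		w := w // capture for pre-Go 1.22 loop semantics
		eg.Go(func() error {
			if ctx.Err() != nil {
				return context.Cause(ctx)
			}
			return w(ctx)
		})
	}
	l.initErr = eg.Wait()
}

// Close aborts any in-flight initialization and waits for run to return,
// mirroring AutoIncrementTracker.Close above.
func (l *lazyInit) Close() {
	close(l.cancelInit)
	<-l.init
}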

View File

@@ -100,7 +100,7 @@ func NewGlobalStateStoreForDb(ctx context.Context, dbName string, db *doltdb.Dol
}
type GlobalStateImpl struct {
aiTracker globalstate.AutoIncrementTracker
aiTracker *AutoIncrementTracker
mu *sync.Mutex
}
@@ -109,3 +109,7 @@ var _ globalstate.GlobalState = GlobalStateImpl{}
func (g GlobalStateImpl) AutoIncrementTracker(ctx *sql.Context) (globalstate.AutoIncrementTracker, error) {
return g.aiTracker, nil
}
func (g GlobalStateImpl) Close() {
g.aiTracker.Close()
}

View File

@@ -138,4 +138,12 @@ type SqlDatabase interface {
DoltDatabases() []*doltdb.DoltDB
// Schema returns the schema of the database.
Schema() string
// Clean up any global resources associated with the
// SqlDatabase itself. For DoltDatabases, this notably does
// not close the DoltDB, for example, but should shut down
// background threads not managed through
// sql.BackgroundThreads but which could be accessing or
// mutating database state.
Close()
}

View File

@@ -15,8 +15,6 @@
package dtables
import (
"fmt"
"github.com/dolthub/go-mysql-server/sql"
sqlTypes "github.com/dolthub/go-mysql-server/sql/types"
"github.com/dolthub/vitess/go/sqltypes"
@@ -198,82 +196,13 @@ func getDoltDocsTableName() doltdb.TableName {
// StatementBegin is called before the first operation of a statement. Integrators should mark the state of the data
// in some way that it may be returned to in the case of an error.
func (iw *docsWriter) StatementBegin(ctx *sql.Context) {
dbName := ctx.GetCurrentDatabase()
dSess := dsess.DSessFromSess(ctx.Session)
// TODO: this needs to use a revision qualified name
roots, _ := dSess.GetRoots(ctx, dbName)
dbState, ok, err := dSess.LookupDbState(ctx, dbName)
name := getDoltDocsTableName()
prevHash, tableWriter, err := createWriteableSystemTable(ctx, name, iw.it.Schema())
if err != nil {
iw.errDuringStatementBegin = err
return
}
if !ok {
iw.errDuringStatementBegin = fmt.Errorf("no root value found in session")
return
}
prevHash, err := roots.Working.HashOf()
if err != nil {
iw.errDuringStatementBegin = err
return
}
iw.prevHash = &prevHash
docsTableName := getDoltDocsTableName()
found, err := roots.Working.HasTable(ctx, docsTableName)
if err != nil {
iw.errDuringStatementBegin = err
return
}
if !found {
// TODO: This is effectively a duplicate of the schema declaration above in a different format.
// We should find a way to not repeat ourselves.
newSchema := doltdb.DocsSchema
// underlying table doesn't exist. Record this, then create the table.
newRootValue, err := doltdb.CreateEmptyTable(ctx, roots.Working, docsTableName, newSchema)
if err != nil {
iw.errDuringStatementBegin = err
return
}
if dbState.WorkingSet() == nil {
iw.errDuringStatementBegin = doltdb.ErrOperationNotSupportedInDetachedHead
return
}
// We use WriteSession.SetWorkingSet instead of DoltSession.SetWorkingRoot because we want to avoid modifying the root
// until the end of the transaction, but we still want the WriteSession to be able to find the newly
// created table.
if ws := dbState.WriteSession(); ws != nil {
err = ws.SetWorkingSet(ctx, dbState.WorkingSet().WithWorkingRoot(newRootValue))
if err != nil {
iw.errDuringStatementBegin = err
return
}
}
err = dSess.SetWorkingRoot(ctx, dbName, newRootValue)
if err != nil {
iw.errDuringStatementBegin = err
return
}
}
if ws := dbState.WriteSession(); ws != nil {
tableWriter, err := ws.GetTableWriter(ctx, docsTableName, dbName, dSess.SetWorkingRoot, false)
if err != nil {
iw.errDuringStatementBegin = err
return
}
iw.tableWriter = tableWriter
tableWriter.StatementBegin(ctx)
}
iw.prevHash = prevHash
iw.tableWriter = tableWriter
}
// DiscardChanges is called if a statement encounters an error, and all current changes since the statement beginning

View File

@@ -189,80 +189,14 @@ func (iw *ignoreWriter) Delete(ctx *sql.Context, r sql.Row) error {
// StatementBegin is called before the first operation of a statement. Integrators should mark the state of the data
// in some way that it may be returned to in the case of an error.
func (iw *ignoreWriter) StatementBegin(ctx *sql.Context) {
dbName := ctx.GetCurrentDatabase()
dSess := dsess.DSessFromSess(ctx.Session)
// TODO: this needs to use a revision qualified name
roots, _ := dSess.GetRoots(ctx, dbName)
dbState, ok, err := dSess.LookupDbState(ctx, dbName)
name := doltdb.TableName{Name: doltdb.IgnoreTableName, Schema: iw.it.schemaName}
prevHash, tableWriter, err := createWriteableSystemTable(ctx, name, iw.it.Schema())
if err != nil {
iw.errDuringStatementBegin = err
return
}
if !ok {
iw.errDuringStatementBegin = fmt.Errorf("no root value found in session")
return
}
prevHash, err := roots.Working.HashOf()
if err != nil {
iw.errDuringStatementBegin = err
return
}
iw.prevHash = &prevHash
tname := doltdb.TableName{Name: doltdb.IgnoreTableName, Schema: iw.it.schemaName}
found, err := roots.Working.HasTable(ctx, tname)
if err != nil {
iw.errDuringStatementBegin = err
return
}
if !found {
sch := sql.NewPrimaryKeySchema(iw.it.Schema())
doltSch, err := sqlutil.ToDoltSchema(ctx, roots.Working, tname, sch, roots.Head, sql.Collation_Default)
if err != nil {
iw.errDuringStatementBegin = err
return
}
// underlying table doesn't exist. Record this, then create the table.
newRootValue, err := doltdb.CreateEmptyTable(ctx, roots.Working, tname, doltSch)
if err != nil {
iw.errDuringStatementBegin = err
return
}
if dbState.WorkingSet() == nil {
iw.errDuringStatementBegin = doltdb.ErrOperationNotSupportedInDetachedHead
return
}
// We use WriteSession.SetWorkingSet instead of DoltSession.SetWorkingRoot because we want to avoid modifying the root
// until the end of the transaction, but we still want the WriteSession to be able to find the newly
// created table.
if ws := dbState.WriteSession(); ws != nil {
err = ws.SetWorkingSet(ctx, dbState.WorkingSet().WithWorkingRoot(newRootValue))
if err != nil {
iw.errDuringStatementBegin = err
return
}
}
dSess.SetWorkingRoot(ctx, dbName, newRootValue)
}
if ws := dbState.WriteSession(); ws != nil {
tableWriter, err := ws.GetTableWriter(ctx, tname, dbName, dSess.SetWorkingRoot, false)
if err != nil {
iw.errDuringStatementBegin = err
return
}
iw.tableWriter = tableWriter
tableWriter.StatementBegin(ctx)
}
iw.prevHash = prevHash
iw.tableWriter = tableWriter
}
// DiscardChanges is called if a statement encounters an error, and all current changes since the statement beginning
@@ -290,3 +224,74 @@ func (iw ignoreWriter) Close(ctx *sql.Context) error {
}
return nil
}
// CreateWriteableSystemTable is a helper function that creates a writeable system table (dolt_ignore, dolt_docs...) if it does not exist
// Then returns the hash of the previous working root, and a TableWriter.
func createWriteableSystemTable(ctx *sql.Context, tblName doltdb.TableName, tblSchema sql.Schema) (*hash.Hash, dsess.TableWriter, error) {
dbName := ctx.GetCurrentDatabase()
dSess := dsess.DSessFromSess(ctx.Session)
roots, _ := dSess.GetRoots(ctx, dbName)
dbState, ok, err := dSess.LookupDbState(ctx, dbName)
if err != nil {
return nil, nil, err
}
if !ok {
return nil, nil, fmt.Errorf("no root value found in session")
}
prevHash, err := roots.Working.HashOf()
if err != nil {
return nil, nil, err
}
found, err := roots.Working.HasTable(ctx, tblName)
if err != nil {
return nil, nil, err
}
if !found {
sch := sql.NewPrimaryKeySchema(tblSchema)
doltSch, err := sqlutil.ToDoltSchema(ctx, roots.Working, tblName, sch, roots.Head, sql.Collation_Default)
if err != nil {
return nil, nil, err
}
// underlying table doesn't exist. Record this, then create the table.
newRootValue, err := doltdb.CreateEmptyTable(ctx, roots.Working, tblName, doltSch)
if err != nil {
return nil, nil, err
}
if dbState.WorkingSet() == nil {
return nil, nil, doltdb.ErrOperationNotSupportedInDetachedHead
}
// We use WriteSession.SetWorkingSet instead of DoltSession.SetWorkingRoot because we want to avoid modifying the root
// until the end of the transaction, but we still want the WriteSession to be able to find the newly
// created table.
if ws := dbState.WriteSession(); ws != nil {
err = ws.SetWorkingSet(ctx, dbState.WorkingSet().WithWorkingRoot(newRootValue))
if err != nil {
return nil, nil, err
}
} else {
return nil, nil, fmt.Errorf("could not create dolt_ignore table, database does not allow writing")
}
}
var tableWriter dsess.TableWriter
if ws := dbState.WriteSession(); ws != nil {
tableWriter, err = ws.GetTableWriter(ctx, tblName, dbName, dSess.SetWorkingRoot, false)
if err != nil {
return nil, nil, err
}
tableWriter.StatementBegin(ctx)
} else {
return nil, nil, fmt.Errorf("could not create dolt_ignore table, database does not allow writing")
}
return &prevHash, tableWriter, nil
}

View File

@@ -15,396 +15,189 @@
package dtables
import (
"context"
"io"
"github.com/google/uuid"
"gopkg.in/src-d/go-errors.v1"
"github.com/dolthub/go-mysql-server/sql"
sqlTypes "github.com/dolthub/go-mysql-server/sql/types"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/resolve"
"github.com/dolthub/dolt/go/store/hash"
)
var queryCatalogCols = schema.NewColCollection(
// QueryCatalogIdCol is the name of the primary key column of the query catalog table
schema.NewColumn(doltdb.QueryCatalogIdCol, schema.QueryCatalogIdTag, types.StringKind, true, schema.NotNullConstraint{}),
// QueryCatalogOrderCol is the column containing the order of the queries in the catalog
schema.NewColumn(doltdb.QueryCatalogOrderCol, schema.QueryCatalogOrderTag, types.UintKind, false, schema.NotNullConstraint{}),
// QueryCatalogNameCol is the name of the column containing the name of a query in the catalog
// TODO: parser won't handle a reserved word here, but it should. Only an issue for create table statements.
schema.NewColumn(doltdb.QueryCatalogNameCol, schema.QueryCatalogNameTag, types.StringKind, false),
// QueryCatalogQueryCol is the name of the column containing the query of a catalog entry
schema.NewColumn(doltdb.QueryCatalogQueryCol, schema.QueryCatalogQueryTag, types.StringKind, false),
// QueryCatalogDescriptionCol is the name of the column containing the description of a query in the catalog
schema.NewColumn(doltdb.QueryCatalogDescriptionCol, schema.QueryCatalogDescriptionTag, types.StringKind, false),
)
var _ sql.Table = (*QueryCatalogTable)(nil)
var _ sql.UpdatableTable = (*QueryCatalogTable)(nil)
var _ sql.DeletableTable = (*QueryCatalogTable)(nil)
var _ sql.InsertableTable = (*QueryCatalogTable)(nil)
var _ sql.ReplaceableTable = (*QueryCatalogTable)(nil)
var ErrQueryNotFound = errors.NewKind("Query '%s' not found")
type SavedQuery struct {
ID string
Name string
Query string
Description string
Order uint64
// QueryCatalogTable is the system table that stores saved queries.
type QueryCatalogTable struct {
backingTable VersionableTable
}
func savedQueryFromKVProlly(id string, value val.Tuple) (SavedQuery, error) {
orderVal, ok := catalogVd.GetUint64(0, value)
if !ok {
orderVal = 0
}
nameVal, ok := catalogVd.GetString(1, value)
if !ok {
nameVal = ""
}
queryVal, ok := catalogVd.GetString(2, value)
if !ok {
nameVal = ""
}
descVal, ok := catalogVd.GetString(3, value)
if !ok {
descVal = ""
}
return SavedQuery{
ID: id,
Name: nameVal,
Query: queryVal,
Description: descVal,
Order: orderVal,
}, nil
func (i *QueryCatalogTable) Name() string {
return doltdb.DoltQueryCatalogTableName
}
func savedQueryFromKVNoms(id string, valTuple types.Tuple) (SavedQuery, error) {
tv, err := row.ParseTaggedValues(valTuple)
if err != nil {
return SavedQuery{}, err
}
nameVal := tv.GetWithDefault(schema.QueryCatalogNameTag, types.String(""))
queryVal := tv.GetWithDefault(schema.QueryCatalogQueryTag, types.String(""))
descVal := tv.GetWithDefault(schema.QueryCatalogDescriptionTag, types.String(""))
orderVal := tv.GetWithDefault(schema.QueryCatalogOrderTag, types.Uint(0))
return SavedQuery{
ID: id,
Name: string(nameVal.(types.String)),
Query: string(queryVal.(types.String)),
Description: string(descVal.(types.String)),
Order: uint64(orderVal.(types.Uint)),
}, nil
func (i *QueryCatalogTable) String() string {
return doltdb.DoltQueryCatalogTableName
}
func (sq SavedQuery) asRow(nbf *types.NomsBinFormat) (row.Row, error) {
taggedVals := make(row.TaggedValues)
taggedVals[schema.QueryCatalogIdTag] = types.String(sq.ID)
taggedVals[schema.QueryCatalogOrderTag] = types.Uint(sq.Order)
taggedVals[schema.QueryCatalogNameTag] = types.String(sq.Name)
taggedVals[schema.QueryCatalogQueryTag] = types.String(sq.Query)
taggedVals[schema.QueryCatalogDescriptionTag] = types.String(sq.Description)
return row.New(nbf, DoltQueryCatalogSchema, taggedVals)
}
var DoltQueryCatalogSchema = schema.MustSchemaFromCols(queryCatalogCols)
// system tables do not contain addressable columns, and do not require nodestore access.
var catalogKd = DoltQueryCatalogSchema.GetKeyDescriptor(nil)
var catalogVd = DoltQueryCatalogSchema.GetValueDescriptor(nil)
// Creates the query catalog table if it doesn't exist.
func createQueryCatalogIfNotExists(ctx context.Context, root doltdb.RootValue) (doltdb.RootValue, error) {
_, ok, err := root.GetTable(ctx, doltdb.TableName{Name: doltdb.DoltQueryCatalogTableName})
if err != nil {
return nil, err
}
if !ok {
return doltdb.CreateEmptyTable(ctx, root, doltdb.TableName{Name: doltdb.DoltQueryCatalogTableName}, DoltQueryCatalogSchema)
}
return root, nil
}
// NewQueryCatalogEntryWithRandID saves a new entry in the query catalog table and returns the new root value. An ID will be
// chosen automatically.
func NewQueryCatalogEntryWithRandID(ctx context.Context, root doltdb.RootValue, name, query, description string) (SavedQuery, doltdb.RootValue, error) {
uid, err := uuid.NewRandom()
if err != nil {
return SavedQuery{}, nil, err
}
// Use the last 12 hex digits of the uuid for the ID.
uidStr := uid.String()
id := uidStr[len(uidStr)-12:]
return newQueryCatalogEntry(ctx, root, id, name, query, description)
}
// NewQueryCatalogEntryWithNameAsID saves an entry in the query catalog table and returns the new root value. If an
// entry with the given name is already present, it will be overwritten.
func NewQueryCatalogEntryWithNameAsID(ctx context.Context, root doltdb.RootValue, name, query, description string) (SavedQuery, doltdb.RootValue, error) {
return newQueryCatalogEntry(ctx, root, name, name, query, description)
}
func newQueryCatalogEntry(ctx context.Context, root doltdb.RootValue, id, name, query, description string) (SavedQuery, doltdb.RootValue, error) {
root, err := createQueryCatalogIfNotExists(ctx, root)
if err != nil {
return SavedQuery{}, nil, err
}
tbl, _, err := root.GetTable(ctx, doltdb.TableName{Name: doltdb.DoltQueryCatalogTableName})
if err != nil {
return SavedQuery{}, nil, err
}
var sq SavedQuery
var newTable *doltdb.Table
if types.IsFormat_DOLT(tbl.Format()) {
sq, newTable, err = newQueryCatalogEntryProlly(ctx, tbl, id, name, query, description)
} else {
sq, newTable, err = newQueryCatalogEntryNoms(ctx, tbl, id, name, query, description)
}
if err != nil {
return SavedQuery{}, nil, err
}
root, err = root.PutTable(ctx, doltdb.TableName{Name: doltdb.DoltQueryCatalogTableName}, newTable)
if err != nil {
return SavedQuery{}, nil, err
}
return sq, root, err
}
func newQueryCatalogEntryNoms(ctx context.Context, tbl *doltdb.Table, id, name, query, description string) (SavedQuery, *doltdb.Table, error) {
data, err := tbl.GetNomsRowData(ctx)
if err != nil {
return SavedQuery{}, nil, err
}
order := getMaxQueryOrderNoms(data, ctx) + 1
existingSQ, err := retrieveFromQueryCatalogNoms(ctx, tbl, id)
if err != nil {
if !ErrQueryNotFound.Is(err) {
return SavedQuery{}, nil, err
}
} else {
order = existingSQ.Order
}
sq := SavedQuery{
ID: id,
Name: name,
Query: query,
Description: description,
Order: order,
}
r, err := sq.asRow(tbl.Format())
if err != nil {
return SavedQuery{}, nil, err
}
me := data.Edit()
me.Set(r.NomsMapKey(DoltQueryCatalogSchema), r.NomsMapValue(DoltQueryCatalogSchema))
updatedTable, err := me.Map(ctx)
if err != nil {
return SavedQuery{}, nil, err
}
newTable, err := tbl.UpdateNomsRows(ctx, updatedTable)
if err != nil {
return SavedQuery{}, nil, err
}
return sq, newTable, nil
}
func newQueryCatalogEntryProlly(ctx context.Context, tbl *doltdb.Table, id, name, query, description string) (SavedQuery, *doltdb.Table, error) {
idx, err := tbl.GetRowData(ctx)
if err != nil {
return SavedQuery{}, nil, err
}
m, err := durable.ProllyMapFromIndex(idx)
if err != nil {
return SavedQuery{}, nil, err
}
existingSQ, err := retrieveFromQueryCatalogProlly(ctx, tbl, id)
if err != nil && !ErrQueryNotFound.Is(err) {
return SavedQuery{}, nil, err
}
var order uint64
if ErrQueryNotFound.Is(err) {
order, err = getMaxQueryOrderProlly(ctx, m)
if err != nil {
return SavedQuery{}, nil, err
}
order++
} else {
order = existingSQ.Order
}
kb := val.NewTupleBuilder(catalogKd, m.NodeStore())
vb := val.NewTupleBuilder(catalogVd, m.NodeStore())
kb.PutString(0, id)
k, err := kb.Build(m.Pool())
if err != nil {
return SavedQuery{}, nil, err
}
vb.PutUint64(0, order)
vb.PutString(1, name)
vb.PutString(2, query)
vb.PutString(3, description)
v, err := vb.Build(m.Pool())
if err != nil {
return SavedQuery{}, nil, err
}
mut := m.Mutate()
err = mut.Put(ctx, k, v)
if err != nil {
return SavedQuery{}, nil, err
}
m, err = mut.Map(ctx)
if err != nil {
return SavedQuery{}, nil, err
}
idx = durable.IndexFromProllyMap(m)
tbl, err = tbl.UpdateRows(ctx, idx)
if err != nil {
return SavedQuery{}, nil, err
}
return SavedQuery{
ID: id,
Name: name,
Query: query,
Description: description,
Order: order,
}, tbl, nil
}
func RetrieveFromQueryCatalog(ctx context.Context, root doltdb.RootValue, id string) (SavedQuery, error) {
tbl, ok, err := root.GetTable(ctx, doltdb.TableName{Name: doltdb.DoltQueryCatalogTableName})
if err != nil {
return SavedQuery{}, err
} else if !ok {
return SavedQuery{}, doltdb.ErrTableNotFound
}
if types.IsFormat_DOLT(tbl.Format()) {
return retrieveFromQueryCatalogProlly(ctx, tbl, id)
}
return retrieveFromQueryCatalogNoms(ctx, tbl, id)
}
func retrieveFromQueryCatalogProlly(ctx context.Context, tbl *doltdb.Table, id string) (SavedQuery, error) {
idx, err := tbl.GetRowData(ctx)
if err != nil {
return SavedQuery{}, err
}
m, err := durable.ProllyMapFromIndex(idx)
if err != nil {
return SavedQuery{}, err
}
kb := val.NewTupleBuilder(catalogKd, m.NodeStore())
kb.PutString(0, id)
k, err := kb.Build(m.Pool())
if err != nil {
return SavedQuery{}, err
}
var value val.Tuple
_ = m.Get(ctx, k, func(_, v val.Tuple) error {
value = v
return nil
})
if value == nil {
return SavedQuery{}, ErrQueryNotFound.New(id)
}
return savedQueryFromKVProlly(id, value)
}
func retrieveFromQueryCatalogNoms(ctx context.Context, tbl *doltdb.Table, id string) (SavedQuery, error) {
m, err := tbl.GetNomsRowData(ctx)
if err != nil {
return SavedQuery{}, err
}
k, err := types.NewTuple(tbl.Format(), types.Uint(schema.QueryCatalogIdTag), types.String(id))
if err != nil {
return SavedQuery{}, err
}
val, ok, err := m.MaybeGet(ctx, k)
if err != nil {
return SavedQuery{}, err
} else if !ok {
return SavedQuery{}, ErrQueryNotFound.New(id)
}
return savedQueryFromKVNoms(id, val.(types.Tuple))
}
// Returns the largest order entry in the catalog
func getMaxQueryOrderNoms(data types.Map, ctx context.Context) uint64 {
maxOrder := uint64(0)
data.IterAll(ctx, func(key, value types.Value) error {
r, _ := row.FromNoms(DoltQueryCatalogSchema, key.(types.Tuple), value.(types.Tuple))
orderVal, ok := r.GetColVal(schema.QueryCatalogOrderTag)
if ok {
order := uint64(orderVal.(types.Uint))
if order > maxOrder {
maxOrder = order
}
}
return nil
})
return maxOrder
}
func getMaxQueryOrderProlly(ctx context.Context, data prolly.Map) (uint64, error) {
itr, err := data.IterAll(ctx)
if err != nil {
return 0, err
}
maxOrder := uint64(0)
for {
_, v, err := itr.Next(ctx)
if err != nil && err != io.EOF {
return 0, err
}
if err == io.EOF {
return maxOrder, nil
}
order, ok := catalogVd.GetUint64(0, v)
if ok {
if order > maxOrder {
maxOrder = order
}
}
func doltQueryCatalogSchema() sql.Schema {
return []*sql.Column{
{Name: doltdb.QueryCatalogIdCol, Type: sqlTypes.LongText, Source: doltdb.GetQueryCatalogTableName(), PrimaryKey: true},
{Name: doltdb.QueryCatalogOrderCol, Type: sqlTypes.Int32, Source: doltdb.GetQueryCatalogTableName(), Nullable: false},
{Name: doltdb.QueryCatalogNameCol, Type: sqlTypes.Text, Source: doltdb.GetQueryCatalogTableName(), Nullable: false},
{Name: doltdb.QueryCatalogQueryCol, Type: sqlTypes.Text, Source: doltdb.GetQueryCatalogTableName(), Nullable: false},
{Name: doltdb.QueryCatalogDescriptionCol, Type: sqlTypes.Text, Source: doltdb.GetQueryCatalogTableName()},
}
}
var GetDoltQueryCatalogSchema = doltQueryCatalogSchema
func (i *QueryCatalogTable) Schema() sql.Schema {
return GetDoltQueryCatalogSchema()
}
func (i *QueryCatalogTable) Collation() sql.CollationID {
return sql.Collation_Default
}
func (i *QueryCatalogTable) Partitions(context *sql.Context) (sql.PartitionIter, error) {
if i.backingTable == nil {
// no backing table; return an empty iter.
return index.SinglePartitionIterFromNomsMap(nil), nil
}
return i.backingTable.Partitions(context)
}
func (i *QueryCatalogTable) PartitionRows(context *sql.Context, partition sql.Partition) (sql.RowIter, error) {
if i.backingTable == nil {
// no backing table; return an empty iter.
return sql.RowsToRowIter(), nil
}
return i.backingTable.PartitionRows(context, partition)
}
// NewQueryCatalogTable creates a QueryCatalogTable
func NewQueryCatalogTable(_ *sql.Context, backingTable VersionableTable) sql.Table {
return &QueryCatalogTable{backingTable: backingTable}
}
// NewEmptyQueryCatalogTable creates an QueryCatalogTable
func NewEmptyQueryCatalogTable(_ *sql.Context) sql.Table {
return &QueryCatalogTable{}
}
func (qt *QueryCatalogTable) Replacer(_ *sql.Context) sql.RowReplacer {
return newQueryCatalogWriter(qt)
}
// Updater returns a RowUpdater for this table. The RowUpdater will have Update called once for each row to be
// updated, followed by a call to Close() when all rows have been processed.
func (qt *QueryCatalogTable) Updater(_ *sql.Context) sql.RowUpdater {
return newQueryCatalogWriter(qt)
}
// Inserter returns an Inserter for this table. The Inserter will get one call to Insert() for each row to be
// inserted, and will end with a call to Close() to finalize the insert operation.
func (qt *QueryCatalogTable) Inserter(*sql.Context) sql.RowInserter {
return newQueryCatalogWriter(qt)
}
// Deleter returns a RowDeleter for this table. The RowDeleter will get one call to Delete for each row to be deleted,
// and will end with a call to Close() to finalize the delete operation.
func (qt *QueryCatalogTable) Deleter(*sql.Context) sql.RowDeleter {
return newQueryCatalogWriter(qt)
}
var _ sql.RowReplacer = (*queryCatalogWriter)(nil)
var _ sql.RowUpdater = (*queryCatalogWriter)(nil)
var _ sql.RowInserter = (*queryCatalogWriter)(nil)
var _ sql.RowDeleter = (*queryCatalogWriter)(nil)
type queryCatalogWriter struct {
qt *QueryCatalogTable
errDuringStatementBegin error
prevHash *hash.Hash
tableWriter dsess.TableWriter
}
func newQueryCatalogWriter(qt *QueryCatalogTable) *queryCatalogWriter {
return &queryCatalogWriter{qt, nil, nil, nil}
}
// Insert inserts the row given, returning an error if it cannot. Insert will be called once for each row to process
// for the insert operation, which may involve many rows. After all rows in an operation have been processed, Close
// is called.
func (qw *queryCatalogWriter) Insert(ctx *sql.Context, r sql.Row) error {
if err := qw.errDuringStatementBegin; err != nil {
return err
}
return qw.tableWriter.Insert(ctx, r)
}
// Update the given row. Provides both the old and new rows.
func (qw *queryCatalogWriter) Update(ctx *sql.Context, old sql.Row, new sql.Row) error {
if err := qw.errDuringStatementBegin; err != nil {
return err
}
return qw.tableWriter.Update(ctx, old, new)
}
// Delete deletes the given row. Returns ErrDeleteRowNotFound if the row was not found. Delete will be called once for
// each row to process for the delete operation, which may involve many rows. After all rows have been processed,
// Close is called.
func (qw *queryCatalogWriter) Delete(ctx *sql.Context, r sql.Row) error {
if err := qw.errDuringStatementBegin; err != nil {
return err
}
return qw.tableWriter.Delete(ctx, r)
}
// StatementBegin is called before the first operation of a statement. Integrators should mark the state of the data
// in some way that it may be returned to in the case of an error.
func (qw *queryCatalogWriter) StatementBegin(ctx *sql.Context) {
name := getDoltQueryCatalogTableName()
prevHash, tableWriter, err := createWriteableSystemTable(ctx, name, qw.qt.Schema())
if err != nil {
qw.errDuringStatementBegin = err
return
}
qw.prevHash = prevHash
qw.tableWriter = tableWriter
}
func getDoltQueryCatalogTableName() doltdb.TableName {
if resolve.UseSearchPath {
return doltdb.TableName{Schema: doltdb.DoltNamespace, Name: doltdb.GetQueryCatalogTableName()}
}
return doltdb.TableName{Name: doltdb.GetQueryCatalogTableName()}
}
// DiscardChanges is called if a statement encounters an error, and all current changes since the statement beginning
// should be discarded.
func (qw *queryCatalogWriter) DiscardChanges(ctx *sql.Context, errorEncountered error) error {
if qw.tableWriter != nil {
return qw.tableWriter.DiscardChanges(ctx, errorEncountered)
}
return nil
}
// StatementComplete is called after the last operation of the statement, indicating that it has successfully completed.
// The mark set in StatementBegin may be removed, and a new one should be created on the next StatementBegin.
func (qw *queryCatalogWriter) StatementComplete(ctx *sql.Context) error {
if qw.tableWriter != nil {
return qw.tableWriter.StatementComplete(ctx)
}
return nil
}
// Close finalizes the delete operation, persisting the result.
func (qw *queryCatalogWriter) Close(ctx *sql.Context) error {
if qw.tableWriter != nil {
return qw.tableWriter.Close(ctx)
}
return nil
}

View File

@@ -1,108 +0,0 @@
// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dtables_test
import (
"context"
"testing"
"github.com/dolthub/go-mysql-server/sql"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
)
func TestInsertIntoQueryCatalogTable(t *testing.T) {
ctx := context.Background()
dEnv := dtestutils.CreateTestEnv()
defer dEnv.DoltDB(ctx).Close()
root, _ := dEnv.WorkingRoot(ctx)
_, ok, err := root.GetTable(ctx, doltdb.TableName{Name: doltdb.DoltQueryCatalogTableName})
require.NoError(t, err)
require.False(t, ok)
queryStr := "select 1 from dual"
sq, root, err := dtables.NewQueryCatalogEntryWithRandID(ctx, root, "name", queryStr, "description")
require.NoError(t, err)
require.True(t, sq.ID != "")
assert.Equal(t, queryStr, sq.Query)
assert.Equal(t, "name", sq.Name)
assert.Equal(t, "description", sq.Description)
retrieved, err := dtables.RetrieveFromQueryCatalog(ctx, root, sq.ID)
require.NoError(t, err)
assert.Equal(t, sq, retrieved)
_, ok, err = root.GetTable(ctx, doltdb.TableName{Name: doltdb.DoltQueryCatalogTableName})
require.NoError(t, err)
require.True(t, ok)
err = dEnv.UpdateWorkingRoot(ctx, root)
require.NoError(t, err)
rows, err := sqle.ExecuteSelect(ctx, dEnv, root, "select display_order, query, name, description from "+doltdb.DoltQueryCatalogTableName)
require.NoError(t, err)
expectedRows := []sql.Row{
{uint64(1), "select 1 from dual", "name", "description"},
}
assert.Equal(t, expectedRows, rows)
queryStr2 := "select 2 from dual"
sq2, root, err := dtables.NewQueryCatalogEntryWithNameAsID(ctx, root, "name2", queryStr2, "description2")
require.NoError(t, err)
assert.Equal(t, "name2", sq2.ID)
assert.Equal(t, "name2", sq2.Name)
assert.Equal(t, queryStr2, sq2.Query)
assert.Equal(t, "description2", sq2.Description)
retrieved2, err := dtables.RetrieveFromQueryCatalog(ctx, root, sq2.ID)
require.NoError(t, err)
assert.Equal(t, sq2, retrieved2)
err = dEnv.UpdateWorkingRoot(ctx, root)
require.NoError(t, err)
rows, err = sqle.ExecuteSelect(ctx, dEnv, root, "select display_order, query, name, description from "+doltdb.DoltQueryCatalogTableName+" order by display_order")
require.NoError(t, err)
expectedRows = []sql.Row{
{uint64(1), "select 1 from dual", "name", "description"},
{uint64(2), "select 2 from dual", "name2", "description2"},
}
assert.Equal(t, expectedRows, rows)
rows, err = sqle.ExecuteSelect(ctx, dEnv, root, "select id from "+doltdb.DoltQueryCatalogTableName)
require.NoError(t, err)
for _, r := range rows {
assert.NotEmpty(t, r)
assert.NotEmpty(t, r[0])
}
queryStr3 := "select 3 from dual"
sq3, root, err := dtables.NewQueryCatalogEntryWithNameAsID(ctx, root, "name2", queryStr3, "description3")
require.NoError(t, err)
assert.Equal(t, "name2", sq3.ID)
assert.Equal(t, "name2", sq3.Name)
assert.Equal(t, queryStr3, sq3.Query)
assert.Equal(t, "description3", sq3.Description)
assert.Equal(t, sq2.Order, sq3.Order)
}

View File

@@ -2147,3 +2147,9 @@ func TestTimeQueries(t *testing.T) {
defer harness.Close()
enginetest.TestTimeQueries(t, harness)
}
func TestDoltQueryCatalogSystemTable(t *testing.T) {
harness := newDoltEnginetestHarness(t)
defer harness.Close()
RunDoltQueryCatalogTests(t, harness)
}

View File

@@ -2054,3 +2054,15 @@ func RunDoltRmTests(t *testing.T, h DoltEnginetestHarness) {
enginetest.TestScriptWithEngine(t, readOnlyEngine, h, script)
}
}
func RunDoltQueryCatalogTests(t *testing.T, harness DoltEnginetestHarness) {
dtables.DoltCommand = doltcmd.DoltCommand
for _, script := range DoltQueryCatalogScripts {
t.Run(script.Name, func(t *testing.T) {
harness = harness.NewHarness(t)
defer harness.Close()
enginetest.TestScript(t, harness, script)
})
}
}

View File

@@ -0,0 +1,118 @@
// Copyright 2025 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package enginetest
import (
"github.com/dolthub/go-mysql-server/enginetest/queries"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/types"
)
var DoltQueryCatalogScripts = []queries.ScriptTest{
{
Name: "can insert into dolt query catalog",
Assertions: []queries.ScriptTestAssertion{
{
Query: "insert into dolt_query_catalog values ('show', 1, 'show', 'show tables;', '')",
Expected: []sql.Row{
{types.OkResult{RowsAffected: 1}},
},
},
},
},
{
Name: "can drop dolt query catalog, cannot drop twice",
SetUpScript: []string{
"INSERT INTO dolt_query_catalog VALUES ('show', 1, 'show', 'show tables;', '')",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "drop table dolt_query_catalog",
Expected: []sql.Row{{types.NewOkResult(0)}},
},
{
Query: "drop table dolt_query_catalog",
ExpectedErrStr: "table not found: dolt_query_catalog",
},
},
},
{
Name: "can run delete from on dolt query catalog",
Assertions: []queries.ScriptTestAssertion{
{
Query: "DELETE FROM dolt_query_catalog",
Expected: []sql.Row{
{types.OkResult{RowsAffected: 0}},
},
},
},
},
{
Name: "select from dolt_query_catalog",
SetUpScript: []string{
"INSERT INTO dolt_query_catalog VALUES ('show', 1, 'show', 'show tables;', 'my message')",
"INSERT INTO dolt_query_catalog VALUES ('get commits', 2, 'get commits', 'select * from dolt_commits;', '')",
"INSERT INTO dolt_query_catalog VALUES ('get branches', 3, 'get branches', 'select * from dolt_branches;', '')",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * FROM dolt_query_catalog",
Expected: []sql.Row{
{"show", 1, "show", "show tables;", "my message"},
{"get commits", 2, "get commits", "select * from dolt_commits;", ""},
{"get branches", 3, "get branches", "select * from dolt_branches;", ""},
},
},
{
Query: "SELECT * FROM dolt_query_catalog where display_order = 2",
Expected: []sql.Row{
{"get commits", 2, "get commits", "select * from dolt_commits;", ""},
},
},
},
},
{
Name: "can replace row in dolt_query_catalog",
SetUpScript: []string{
"INSERT INTO dolt_query_catalog VALUES ('test', 1, 'test', 'show tables;', '')",
"INSERT INTO dolt_query_catalog VALUES ('test2', 2, 'test2', 'select * from dolt_commits;', '')",
"REPLACE INTO dolt_query_catalog VALUES ('test', 1, 'new name', 'describe dolt_query_catalog;', 'a new message')",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * FROM dolt_query_catalog",
Expected: []sql.Row{
{"test", 1, "new name", "describe dolt_query_catalog;", "a new message"},
{"test2", 2, "test2", "select * from dolt_commits;", ""},
},
},
},
},
{
Name: "can update dolt query catalog",
SetUpScript: []string{
"INSERT INTO dolt_query_catalog VALUES ('show', 1, 'show', 'show tables;', '')",
"UPDATE dolt_query_catalog SET display_order = display_order + 1",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * FROM dolt_query_catalog",
Expected: []sql.Row{
{"show", 2, "show", "show tables;", ""},
},
},
},
},
}

View File

@@ -863,6 +863,33 @@ var SchemaChangeTestsCollations = []MergeScriptTest{
}
var SchemaChangeTestsConstraints = []MergeScriptTest{
{
// Regression test for a bug where rows weren't being deleted in
// a secondary index because the incorrect/non-matching tuple was
// used to update the index, and foreign key violations were
// incorrectly identified.
Name: "updating fk index when ancestor schema has changed",
AncSetUpScript: []string{
"CREATE TABLE parent(pk int primary key, c1 varchar(100));",
"CREATE TABLE child(pk int primary key, remove_me int, parent_id int, KEY `fk_idx1` (`parent_id`), foreign key fk1 (parent_id) references parent(pk));",
"INSERT INTO parent VALUES (100, 'one hundred'), (200, 'two hundred');",
"INSERT INTO child VALUES (1, -1, 100), (2, -1, 200);",
},
RightSetUpScript: []string{
"DELETE FROM child;",
"DELETE FROM parent;",
"ALTER TABLE child drop column remove_me;",
},
LeftSetUpScript: []string{
"ALTER TABLE child drop column remove_me;",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{doltCommit, 0, 0, "merge successful"}},
},
},
},
{
Name: "removing a not-null constraint",
AncSetUpScript: []string{

View File

@@ -35,7 +35,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/writer"
"github.com/dolthub/dolt/go/store/types"
@@ -819,11 +818,10 @@ func TestAlterSystemTables(t *testing.T) {
CreateTestTable(t, dEnv, "dolt_docs", doltdb.DocsSchema,
"INSERT INTO dolt_docs VALUES ('LICENSE.md','A license')")
CreateTestTable(t, dEnv, doltdb.DoltQueryCatalogTableName, dtables.DoltQueryCatalogSchema,
"INSERT INTO dolt_query_catalog VALUES ('abc123', 1, 'example', 'select 2+2 from dual', 'description')")
ExecuteSetupSQL(context.Background(), `
CREATE VIEW name as select 2+2 from dual;
CREATE PROCEDURE simple_proc2() SELECT 1+1;
INSERT INTO dolt_query_catalog VALUES ('test', 1, 'test', 'show tables;', '');
INSERT INTO dolt_ignore VALUES ('test', 1);`)(t, dEnv)
}

View File

@@ -24,7 +24,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
)
// Set to the name of a single test to run just that test, useful for debugging
@@ -196,15 +195,6 @@ var systemTableDeleteTests = []DeleteTest{
ExpectedRows: []sql.Row{},
ExpectedSchema: CompressSchema(doltdb.DocsSchema),
},
{
Name: "delete dolt_query_catalog",
AdditionalSetup: CreateTableFn(doltdb.DoltQueryCatalogTableName, dtables.DoltQueryCatalogSchema,
"INSERT INTO dolt_query_catalog VALUES ('abc123', 1, 'example', 'create view example as select 2+2 from dual', 'description')"),
DeleteQuery: "delete from dolt_query_catalog",
SelectQuery: "select * from dolt_query_catalog",
ExpectedRows: ToSqlRows(dtables.DoltQueryCatalogSchema),
ExpectedSchema: CompressSchema(dtables.DoltQueryCatalogSchema),
},
{
Name: "delete dolt_schemas",
AdditionalSetup: CreateTableFn(doltdb.SchemasTableName, SchemaTableSchema(),

View File

@@ -25,7 +25,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
"github.com/dolthub/dolt/go/store/types"
)
@@ -383,18 +382,6 @@ var systemTableInsertTests = []InsertTest{
ExpectedRows: []sql.Row{{"README.md", "Some text"}},
ExpectedSchema: CompressSchema(doltdb.DocsSchema),
},
{
Name: "insert into dolt_query_catalog",
AdditionalSetup: CreateTableFn(doltdb.DoltQueryCatalogTableName, dtables.DoltQueryCatalogSchema,
"INSERT INTO dolt_query_catalog VALUES ('existingEntry', 2, 'example', 'select 2+2 from dual', 'description')"),
InsertQuery: "insert into dolt_query_catalog (id, display_order, name, query, description) values ('abc123', 1, 'example', 'select 1+1 from dual', 'description')",
SelectQuery: "select * from dolt_query_catalog ORDER BY id",
ExpectedRows: ToSqlRows(CompressSchema(dtables.DoltQueryCatalogSchema),
NewRow(types.String("abc123"), types.Uint(1), types.String("example"), types.String("select 1+1 from dual"), types.String("description")),
NewRow(types.String("existingEntry"), types.Uint(2), types.String("example"), types.String("select 2+2 from dual"), types.String("description")),
),
ExpectedSchema: CompressSchema(dtables.DoltQueryCatalogSchema),
},
{
Name: "insert into dolt_schemas",
AdditionalSetup: CreateTableFn(doltdb.SchemasTableName, SchemaTableSchema(), ""),

View File

@@ -25,7 +25,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
"github.com/dolthub/dolt/go/store/types"
)
@@ -259,17 +258,6 @@ var systemTableReplaceTests = []ReplaceTest{
ExpectedRows: []sql.Row{{"LICENSE.md", "Some text"}},
ExpectedSchema: CompressSchema(doltdb.DocsSchema),
},
{
Name: "replace into dolt_query_catalog",
AdditionalSetup: CreateTableFn(doltdb.DoltQueryCatalogTableName, dtables.DoltQueryCatalogSchema,
"INSERT INTO dolt_query_catalog VALUES ('existingEntry', 1, 'example', 'select 2+2 from dual', 'description')"),
ReplaceQuery: "replace into dolt_query_catalog (id, display_order, name, query, description) values ('existingEntry', 1, 'example', 'select 1+1 from dual', 'description')",
SelectQuery: "select * from dolt_query_catalog",
ExpectedRows: ToSqlRows(CompressSchema(dtables.DoltQueryCatalogSchema),
NewRow(types.String("existingEntry"), types.Uint(1), types.String("example"), types.String("select 1+1 from dual"), types.String("description")),
),
ExpectedSchema: CompressSchema(dtables.DoltQueryCatalogSchema),
},
{
Name: "replace into dolt_schemas",
AdditionalSetup: CreateTableFn(doltdb.SchemasTableName, SchemaTableSchema(),

View File

@@ -32,7 +32,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/types"
)
@@ -1299,16 +1298,6 @@ var systemTableSelectTests = []SelectTest{
ExpectedRows: []sql.Row{{"LICENSE.md", "A license"}},
ExpectedSchema: CompressSchema(doltdb.DocsSchema),
},
{
Name: "select from dolt_query_catalog",
AdditionalSetup: CreateTableFn(doltdb.DoltQueryCatalogTableName, dtables.DoltQueryCatalogSchema,
"INSERT INTO dolt_query_catalog VALUES ('existingEntry', 2, 'example', 'select 2+2 from dual', 'description')"),
Query: "select * from dolt_query_catalog",
ExpectedRows: ToSqlRows(CompressSchema(dtables.DoltQueryCatalogSchema),
NewRow(types.String("existingEntry"), types.Uint(2), types.String("example"), types.String("select 2+2 from dual"), types.String("description")),
),
ExpectedSchema: CompressSchema(dtables.DoltQueryCatalogSchema),
},
{
Name: "select from dolt_schemas",
AdditionalSetup: CreateTableFn(doltdb.SchemasTableName, SchemaTableSchema(),

View File

@@ -26,7 +26,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/json"
)
@@ -367,15 +366,6 @@ var systemTableUpdateTests = []UpdateTest{
ExpectedRows: []sql.Row{{"LICENSE.md", "Some text"}},
ExpectedSchema: CompressSchema(doltdb.DocsSchema),
},
{
Name: "update dolt_query_catalog",
AdditionalSetup: CreateTableFn(doltdb.DoltQueryCatalogTableName, dtables.DoltQueryCatalogSchema,
"INSERT INTO dolt_query_catalog VALUES ('abc123', 1, 'example', 'select 2+2 from dual', 'description')"),
UpdateQuery: "update dolt_query_catalog set display_order = display_order + 1",
SelectQuery: "select * from dolt_query_catalog",
ExpectedRows: []sql.Row{{"abc123", uint64(2), "example", "select 2+2 from dual", "description"}},
ExpectedSchema: CompressSchema(dtables.DoltQueryCatalogSchema),
},
}
// Tests the given query on a freshly created dataset, asserting that the result has the given schema and rows. If

View File

@@ -47,6 +47,9 @@ func (db *UserSpaceDatabase) Schema() string {
return ""
}
func (db *UserSpaceDatabase) Close() {
}
func (db *UserSpaceDatabase) GetTableInsensitive(ctx *sql.Context, tableName string) (sql.Table, bool, error) {
tname := doltdb.TableName{Name: tableName}
if doltdb.IsReadOnlySystemTable(tname) {

View File

@@ -78,8 +78,7 @@ RUN go install .
WORKDIR /
# install dotnet
RUN curl -LO https://download.visualstudio.microsoft.com/download/pr/13b9d84c-a35b-4ffe-8f62-447a01403d64/1f9ae31daa0f7d98513e7551246899f2/dotnet-sdk-5.0.400-linux-x64.tar.gz && \
tar -C /usr/local/bin -xzf dotnet-sdk-5.0.400-linux-x64.tar.gz && \
RUN curl -sSL https://dot.net/v1/dotnet-install.sh | bash /dev/stdin --version 8.0.412 --install-dir /usr/local/bin --no-path && \
dotnet --version
# install pip for python3.9

View File

@@ -41,7 +41,6 @@ SKIP_SERVER_TESTS=$(cat <<-EOM
~sql-create-tables.bats~
~sql-multi-db.bats~
~blame-system-view.bats~
~query-catalog.bats~
~import-mysqldump.bats~
~1pksupportedtypes.bats~
~init.bats~

View File

@@ -36,109 +36,14 @@ teardown() {
teardown_common
}
@test "query-catalog: save query" {
run dolt sql -q "desc dolt_query_catalog"
[ "$status" -eq 1 ]
run dolt sql -q "select pk,pk1,pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1" -s "my name" -m "my message"
@test "query-catalog: can describe dolt_query_catalog" {
run dolt sql -q "desc dolt_query_catalog" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 8 ]
run dolt sql -q "desc dolt_query_catalog"
[ "$status" -eq 0 ]
run dolt sql -q "select * from dolt_query_catalog" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "id,display_order,name,query,description" ]] || false
[[ "$output" =~ "my message" ]] || false
[[ "$output" =~ "my name" ]] || false
[[ "$output" =~ "select pk,pk1,pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1" ]] || false
run dolt status
[ "$status" -eq 0 ]
[[ "$output" =~ "dolt_query_catalog" ]] || false
run dolt add dolt_query_catalog
[ "$status" -eq 0 ]
run dolt commit -m "Added query catalog"
[ "$status" -eq 0 ]
run dolt status
[ "$status" -eq 0 ]
! [[ "$output" =~ "dolt_query_catalog" ]] || false
run dolt sql -q "select * from dolt_query_catalog" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "id" ]] || false
[[ "$output" =~ "query" ]] || false
}
@test "query-catalog: empty directory" {
mkdir empty && cd empty
run dolt sql -q "show databases" --save name
[ "$status" -ne 0 ]
[[ ! "$output" =~ panic ]] || false
[[ "$output" =~ "--save must be used in a dolt database directory" ]] || false
run dolt sql --list-saved
[ "$status" -ne 0 ]
[[ ! "$output" =~ panic ]] || false
[[ "$output" =~ "--list-saved must be used in a dolt database directory" ]] || false
run dolt sql --execute name
[ "$status" -ne 0 ]
[[ ! "$output" =~ panic ]] || false
[[ "$output" =~ "--execute must be used in a dolt database directory" ]] || false
}
@test "query-catalog: conflict" {
dolt sql -q "select pk,pk1,pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1" -s "name1" -m "my message"
dolt add .
dolt commit -m 'Added a test query'
dolt checkout -b edit_a
dolt sql -q "update dolt_query_catalog set name='name_a'"
dolt add .
dolt commit -m 'Changed name to edit_a'
dolt checkout main
dolt checkout -b edit_b
dolt sql -q "update dolt_query_catalog set name='name_b'"
dolt add .
dolt commit -m 'Changed name to edit_b'
dolt checkout main
dolt merge edit_a -m "merge edit_a"
run dolt merge edit_b -m "merge edit_b"
[ "$status" -eq 1 ]
[[ "$output" =~ "Merge conflict in dolt_query_catalog" ]] || false
run dolt conflicts cat .
[ "$status" -eq 0 ]
[[ "$output" =~ "name_a" ]] || false
[[ "$output" =~ "name_b" ]] || false
}
@test "query-catalog: executed saved" {
Q1="select pk, pk1, pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1 order by 1"
Q2="select pk from one_pk order by pk"
dolt sql -q "$Q1" -s name1
dolt sql -q "$Q2" -s name2
# save Q1 and verify output
EXPECTED=$(cat <<'EOF'
pk,pk1,pk2
0,0,0
1,0,1
2,1,0
3,1,1
EOF
)
run dolt sql -r csv -x name1
[ "$status" -eq 0 ]
[[ "$output" =~ "$EXPECTED" ]] || false
# save Q2 and verify output
@test "query-catalog: save query without message" {
EXPECTED=$(cat <<'EOF'
pk
0
@@ -147,14 +52,25 @@ pk
3
EOF
)
run dolt sql -r csv -x name2
run dolt sql -q "select pk from one_pk" -s "select" -r csv
[ "$status" -eq 0 ]
[[ "$output" =~ "$EXPECTED" ]] || false
}
@test "query-catalog: save query with message" {
dolt sql -q "select pk from one_pk" -s "select" -m "my message"
run dolt sql -l -r csv
[ "$status" -eq 0 ]
[[ "$output" =~ "select" ]] || false
[[ "$output" =~ "my message" ]] || false
}
@test "query-catalog: can list saved queries" {
Q1="select pk, pk1, pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1 order by 1"
Q2="select pk from one_pk order by pk"
dolt sql -q "$Q1" -s name1
dolt sql -q "$Q2" -s name2
# execute list-saved and verify output. The query on the second
# line isn't quoted in the CSV output because it contains no commas,
# so the writer emits it bare (see the Go sketch after this file's diff).
EXPECTED=$(cat <<'EOF'
id,display_order,name,query,description
name1,1,name1,"select pk, pk1, pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1 order by 1",""
@@ -165,33 +81,53 @@ EOF
run dolt sql --list-saved -r csv
[ "$status" -eq 0 ]
[[ "$output" =~ "$EXPECTED" ]] || false
}
# update an existing query, and verify query catalog is updated
Q1_UPDATED="select pk, pk1, pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1 and pk < 3 order by 1 desc"
dolt sql -q "$Q1_UPDATED" -s name1
@test "query-catalog: can execute saved queries" {
Q1="select pk, pk1, pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1 order by 1"
dolt sql -q "$Q1" -s name1
EXPECTED=$(cat <<'EOF'
pk,pk1,pk2
0,0,0
1,0,1
2,1,0
3,1,1
EOF
)
run dolt sql -x name1 -r csv
[ "$status" -eq 0 ]
[[ "$output" =~ "$EXPECTED" ]] || false
}
@test "query-catalog: can update saved query with --save" {
Q1="select pk, pk1, pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1 order by 1"
Q2="select pk from one_pk order by pk"
dolt sql -q "$Q1" -s name1
# execute list-saved and verify output
EXPECTED=$(cat <<'EOF'
id,display_order,name,query,description
name1,1,name1,"select pk, pk1, pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1 and pk < 3 order by 1 desc",""
name2,2,name2,select pk from one_pk order by pk,""
name1,1,name1,"select pk, pk1, pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1 order by 1",""
EOF
)
run dolt sql --list-saved -r csv
[ "$status" -eq 0 ]
[[ "$output" =~ "$EXPECTED" ]] || false
dolt sql -q "$Q2" -s name1
# execute list-saved and verify output. The remaining query isn't
# quoted because it contains no commas, so the CSV writer emits it bare.
EXPECTED=$(cat <<'EOF'
pk,pk1,pk2
2,1,0
1,0,1
0,0,0
id,display_order,name,query,description
name1,1,name1,select pk from one_pk order by pk,""
EOF
)
# Execute the updated saved query and verify its output
run dolt sql -r csv -x name1
run dolt sql --list-saved -r csv
[ "$status" -eq 0 ]
[[ "$output" =~ "$EXPECTED" ]] || false
}
}
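The quoting difference in the expected CSV above is worth a note: only fields containing the delimiter are wrapped in quotes. Below is a minimal sketch, assuming the -r csv writer follows Go's encoding/csv minimal-quoting behavior (an assumption; the actual writer is not part of this diff):

// Sketch only: demonstrates encoding/csv minimal quoting, which is
// assumed to match the behavior seen in the list-saved expectations.
package main

import (
	"encoding/csv"
	"os"
)

func main() {
	w := csv.NewWriter(os.Stdout)
	// Contains commas, so encoding/csv emits it quoted.
	w.Write([]string{"name1", "select pk, pk1, pk2 from one_pk,two_pk where one_pk.c1=two_pk.c1 order by 1"})
	// No commas, so it is emitted bare.
	w.Write([]string{"name2", "select pk from one_pk order by pk"})
	w.Flush()
}

Running this prints name1's query quoted and name2's bare, matching the quoted and unquoted rows the tests above expect from dolt sql --list-saved -r csv.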

View File

@@ -2197,3 +2197,23 @@ EOF
run grep -F "Dropping persisted '__dolt_local_user__@localhost' because this account name is reserved for Dolt" server_log.txt
[ $status -eq 0 ]
}
@test "sql-server: can create and use saved queries with --host and --use-db" {
cd repo1
dolt sql -q "create table test (i int)"
start_sql_server_with_args --host 0.0.0.0
cd ../repo2
run dolt --host 0.0.0.0 --no-tls --port $PORT --use-db repo1 sql -q "show tables" --save "show"
[ "$status" -eq 0 ]
[[ "$output" =~ "test" ]] || false
run dolt --host 0.0.0.0 --no-tls --port $PORT --use-db repo1 sql -l -r csv
[ "$status" -eq 0 ]
[[ "$output" =~ "show,1,show,show tables,\"\"" ]] || false
run dolt --host 0.0.0.0 --no-tls --port $PORT --use-db repo1 sql -x "show"
[ "$status" -eq 0 ]
[[ "$output" =~ "test" ]] || false
}

View File

@@ -1,12 +1,12 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net5.0</TargetFramework>
</PropertyGroup>
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net8.0</TargetFramework>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="MySql.Data" Version="8.0.21" />
</ItemGroup>
</Project>
</ItemGroup>
</Project>

View File

@@ -1,12 +1,11 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net5.0</TargetFramework>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="MySqlConnector" Version="1.3.0-beta.1" />
</ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net8.0</TargetFramework>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="MySqlConnector" Version="2.4.0" />
</ItemGroup>
</Project>