Merge branch 'master' into fix/1852

This commit is contained in:
wenyh
2022-10-17 13:29:11 +08:00
committed by GitHub
21 changed files with 762 additions and 41 deletions
+4 -4
View File
@@ -11,10 +11,10 @@
primary="true" readWeight="1" id="xx1">
</dbInstance>
<dbInstance name="instanceS1" url="172.18.0.3:3306" user="root" password="123456" maxCon="1000" minCon="10" readWeight="2">
<property name="testOnCreate">false</property>
<property name="testWhileIdle">false</property>
</dbInstance>
<dbInstance name="instanceS2" url="172.18.0.4:3306" user="root" password="123456" maxCon="1000" minCon="10" readWeight="2">
<property name="testOnCreate">false</property>
<property name="testWhileIdle">false</property>
</dbInstance>
</dbGroup>
@@ -24,10 +24,10 @@
primary="true">
</dbInstance>
<dbInstance name="instanceS3" url="172.18.0.6:3306" user="root" password="123456" maxCon="1000" minCon="10" readWeight="2">
<property name="testOnCreate">false</property>
<property name="testWhileIdle">false</property>
</dbInstance>
<dbInstance name="instanceS4" url="172.18.0.7:3306" user="root" password="123456" maxCon="1000" minCon="10" readWeight="2">
<property name="testOnCreate">false</property>
<property name="testWhileIdle">false</property>
</dbInstance>
</dbGroup>
</dble:db>
@@ -16,6 +16,7 @@ import com.actiontech.dble.config.model.ClusterConfig;
import com.actiontech.dble.config.model.SystemConfig;
import com.actiontech.dble.config.util.ConfigUtil;
import com.actiontech.dble.log.general.GeneralLogProcessor;
import com.actiontech.dble.log.sqldump.SqlDumpLogHelper;
import com.actiontech.dble.log.transaction.TxnLogProcessor;
import com.actiontech.dble.meta.ProxyMetaManager;
import com.actiontech.dble.net.IOProcessor;
@@ -242,6 +243,7 @@ public final class DbleServer {
if (SystemConfig.getInstance().getEnableGeneralLog() == 1) {
generalLogProcessor.start();
}
SqlDumpLogHelper.init();
SequenceManager.init(ClusterConfig.getInstance().getSequenceHandlerType());
LOGGER.info("===================================Sequence manager init finish===================================");
@@ -8,7 +8,6 @@ package com.actiontech.dble;
import com.actiontech.dble.cluster.ClusterController;
import com.actiontech.dble.config.Versions;
import com.actiontech.dble.config.loader.SystemConfigLoader;
import com.actiontech.dble.config.model.SystemConfig;
import com.actiontech.dble.config.util.StartProblemReporter;
import com.actiontech.dble.singleton.CustomMySQLHa;
import com.actiontech.dble.singleton.OnlineStatus;
@@ -22,15 +21,10 @@ public final class DbleStartup {
try {
CheckConfigurationUtil.checkConfiguration();
ClusterController.loadClusterProperties();
//lod system properties
// load system properties
SystemConfigLoader.initSystemConfig();
if (SystemConfig.getInstance().getInstanceName() == null) {
StartProblemReporter.getInstance().addError("You must config instanceName in bootstrap.cnf and make sure it is an unique key for cluster");
}
String home = SystemConfig.getInstance().getHomePath();
if (home == null) {
StartProblemReporter.getInstance().addError("homePath is not set.");
}
// load system other properties
SystemConfigLoader.verifyOtherParam();
if (StartProblemReporter.getInstance().getErrorConfigs().size() > 0) {
for (String errInfo : StartProblemReporter.getInstance().getErrorConfigs()) {
System.out.println(errInfo);
@@ -12,6 +12,7 @@ import com.actiontech.dble.config.model.SystemConfig;
import com.actiontech.dble.config.util.ParameterMapping;
import com.actiontech.dble.config.util.StartProblemReporter;
import com.actiontech.dble.memory.unsafe.Platform;
import com.actiontech.dble.server.status.SqlDumpLog;
import com.actiontech.dble.services.manager.handler.WriteDynamicBootstrap;
import com.actiontech.dble.util.ResourceUtil;
import com.actiontech.dble.util.StringUtil;
@@ -21,7 +22,10 @@ import org.slf4j.LoggerFactory;
import java.io.*;
import java.lang.reflect.InvocationTargetException;
import java.util.*;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Properties;
import java.util.Set;
public final class SystemConfigLoader {
private static final Logger LOGGER = LoggerFactory.getLogger(SystemConfigLoader.class);
@@ -145,7 +149,6 @@ public final class SystemConfigLoader {
/**
* do postCheck and postSetting here
*
*/
private static void postSet(SystemConfig systemConfig) {
final StartProblemReporter problemReporter = StartProblemReporter.getInstance();
@@ -205,4 +208,17 @@ public final class SystemConfigLoader {
}
public static void verifyOtherParam() {
// other
SqlDumpLog.getInstance().verify();
// base
if (SystemConfig.getInstance().getInstanceName() == null) {
StartProblemReporter.getInstance().addError("You must config instanceName in bootstrap.cnf and make sure it is an unique key for cluster");
}
String home = SystemConfig.getInstance().getHomePath();
if (home == null) {
StartProblemReporter.getInstance().addError("homePath is not set.");
}
}
}
@@ -180,6 +180,17 @@ public final class SystemConfig {
private int generalLogFileSize = 16; //mb
private int generalLogQueueSize = 4096;
//sqldump log
private int enableSqlDumpLog = 0;
private String sqlDumpLogBasePath = "sqldump";
private String sqlDumpLogFileName = "sqldump.log";
private String sqlDumpLogCompressFilePattern = "${date:yyyy-MM}/sqldump-%d{MM-dd}-%i.log.gz";
private int sqlDumpLogOnStartupRotate = 1; // 1-on, 0-off
private String sqlDumpLogSizeBasedRotate = "50 MB";
private int sqlDumpLogTimeBasedRotate = 1; // interval 1 day
private String sqlDumpLogDeleteFileAge = "90d"; // expiration time daydefault 90 day
private String sqlDumpLogCompressFilePath = "*/sqldump-*.log.gz"; // log.gz path
//alert switch
private int enableAlert = 1;
//load data
@@ -507,6 +518,103 @@ public final class SystemConfig {
}
// --- sqldump-log accessors -------------------------------------------------
// Setters below are presumably invoked reflectively from bootstrap.cnf via
// ParameterMapping (see SystemConfigLoader) — TODO confirm; that is why the
// "unused" warning is suppressed on each of them.

// Returns the sqldump-log switch: 1 = enabled, 0 = disabled (default 0).
public int getEnableSqlDumpLog() {
return enableSqlDumpLog;
}
@SuppressWarnings("unused")
// Accepts only 0 or 1; any other value is rejected and reported as a warning,
// keeping the previous (default) value.
public void setEnableSqlDumpLog(int enableSqlDumpLog) {
if (enableSqlDumpLog >= 0 && enableSqlDumpLog <= 1) {
this.enableSqlDumpLog = enableSqlDumpLog;
} else {
problemReporter.warn(String.format(WARNING_FORMAT, "enableSqlDumpLog", enableSqlDumpLog, this.enableSqlDumpLog));
}
}
// Base directory for sqldump log files (default "sqldump").
public String getSqlDumpLogBasePath() {
return sqlDumpLogBasePath;
}
@SuppressWarnings("unused")
// Blank values are silently ignored so the default path is preserved.
public void setSqlDumpLogBasePath(String sqlDumpLogBasePath) {
if (!StringUtil.isBlank(sqlDumpLogBasePath)) {
this.sqlDumpLogBasePath = sqlDumpLogBasePath;
}
}
// Active log file name (default "sqldump.log").
public String getSqlDumpLogFileName() {
return sqlDumpLogFileName;
}
@SuppressWarnings("unused")
public void setSqlDumpLogFileName(String sqlDumpLogFileName) {
if (!StringUtil.isBlank(sqlDumpLogFileName)) {
this.sqlDumpLogFileName = sqlDumpLogFileName;
}
}
// log4j filePattern fragment used for rolled/compressed files,
// e.g. "${date:yyyy-MM}/sqldump-%d{MM-dd}-%i.log.gz".
public String getSqlDumpLogCompressFilePattern() {
return sqlDumpLogCompressFilePattern;
}
@SuppressWarnings("unused")
public void setSqlDumpLogCompressFilePattern(String sqlDumpLogCompressFilePattern) {
if (!StringUtil.isBlank(sqlDumpLogCompressFilePattern)) {
this.sqlDumpLogCompressFilePattern = sqlDumpLogCompressFilePattern;
}
}
// Glob used by the delete action to match compressed files eligible for
// age-based cleanup (default "*/sqldump-*.log.gz").
public String getSqlDumpLogCompressFilePath() {
return sqlDumpLogCompressFilePath;
}
@SuppressWarnings("unused")
public void setSqlDumpLogCompressFilePath(String sqlDumpLogCompressFilePath) {
if (!StringUtil.isBlank(sqlDumpLogCompressFilePath)) {
this.sqlDumpLogCompressFilePath = sqlDumpLogCompressFilePath;
}
}
// Rotate-on-startup switch: 1 = on, 0 = off.
public int getSqlDumpLogOnStartupRotate() {
return sqlDumpLogOnStartupRotate;
}
@SuppressWarnings("unused")
// NOTE(review): unlike setEnableSqlDumpLog, this setter performs no range
// check — out-of-range values are normalized later (SqlDumpLog maps
// non-1 to 0); confirm that is intentional.
public void setSqlDumpLogOnStartupRotate(int sqlDumpLogOnStartupRotate) {
this.sqlDumpLogOnStartupRotate = sqlDumpLogOnStartupRotate;
}
// Size threshold for size-based rotation, log4j FileSize syntax
// (default "50 MB"); "-1" means not configured.
public String getSqlDumpLogSizeBasedRotate() {
return sqlDumpLogSizeBasedRotate;
}
@SuppressWarnings("unused")
public void setSqlDumpLogSizeBasedRotate(String sqlDumpLogSizeBasedRotate) {
if (!StringUtil.isBlank(sqlDumpLogSizeBasedRotate)) {
this.sqlDumpLogSizeBasedRotate = sqlDumpLogSizeBasedRotate;
}
}
// Interval for time-based rotation (default 1); values < 1 are treated
// as "disabled" downstream (see SqlDumpLog constructor).
public int getSqlDumpLogTimeBasedRotate() {
return sqlDumpLogTimeBasedRotate;
}
@SuppressWarnings("unused")
// NOTE(review): no validation here either — normalization happens in
// SqlDumpLog; confirm intentional.
public void setSqlDumpLogTimeBasedRotate(int sqlDumpLogTimeBasedRotate) {
this.sqlDumpLogTimeBasedRotate = sqlDumpLogTimeBasedRotate;
}
// Retention age for compressed files, log4j Duration syntax
// (default "90d"); "-1" means not configured.
public String getSqlDumpLogDeleteFileAge() {
return sqlDumpLogDeleteFileAge;
}
@SuppressWarnings("unused")
public void setSqlDumpLogDeleteFileAge(String sqlDumpLogDeleteFileAge) {
if (!StringUtil.isBlank(sqlDumpLogDeleteFileAge)) {
this.sqlDumpLogDeleteFileAge = sqlDumpLogDeleteFileAge;
}
}
// Rotation size for the transaction log (pre-existing accessor, unrelated
// to the sqldump group above).
public int getTransactionRotateSize() {
return transactionRotateSize;
}
@@ -1851,6 +1959,15 @@ public final class SystemConfig {
", releaseTimeout=" + releaseTimeout +
", enableAsyncRelease=" + enableAsyncRelease +
", xaIdCheckPeriod=" + xaIdCheckPeriod +
", enableSqlDumpLog=" + enableSqlDumpLog +
", sqlDumpLogBasePath='" + sqlDumpLogBasePath + '\'' +
", sqlDumpLogFileName='" + sqlDumpLogFileName + '\'' +
", sqlDumpLogCompressFilePattern='" + sqlDumpLogCompressFilePattern + '\'' +
", sqlDumpLogCompressFilePath='" + sqlDumpLogCompressFilePath + '\'' +
", sqlDumpLogOnStartupRotate=" + sqlDumpLogOnStartupRotate +
", sqlDumpLogSizeBasedRotate='" + sqlDumpLogSizeBasedRotate + '\'' +
", sqlDumpLogTimeBasedRotate=" + sqlDumpLogTimeBasedRotate +
", sqlDumpLogDeleteFileAge='" + sqlDumpLogDeleteFileAge + '\'' +
"]";
}
@@ -1858,6 +1975,7 @@ public final class SystemConfig {
return closeHeartBeatRecord;
}
@SuppressWarnings("unused")
public void setCloseHeartBeatRecord(boolean closeHeartBeatRecord) {
this.closeHeartBeatRecord = closeHeartBeatRecord;
}
@@ -48,7 +48,7 @@ public class GeneralLogEntry extends LogEntry {
private String toPackag() {
switch (entryType) {
case 1:
String[] arr = GeneralLogHandler.packagLog(data, charset);
String[] arr = GeneralLogHandler.packageLog(data, charset);
command = arr[0];
content = arr[1];
return toLogString();
@@ -14,7 +14,8 @@ public final class GeneralLogHandler {
private GeneralLogHandler() {
}
public static String[] packagLog(byte[] data, String charset) {
public static String[] packageLog(byte[] data, String charset) {
String type, content = null;
String[] arr = new String[2];
arr[0] = (type = MySQLPacket.TO_STRING.get(data[4])) == null ? "UNKNOWN" : type;
@@ -0,0 +1,341 @@
package com.actiontech.dble.log.sqldump;
import com.actiontech.dble.backend.mysql.MySQLMessage;
import com.actiontech.dble.net.mysql.MySQLPacket;
import com.actiontech.dble.route.parser.util.ParseUtil;
import com.actiontech.dble.rwsplit.RWSplitNonBlockingSession;
import com.actiontech.dble.server.parser.RwSplitServerParse;
import com.actiontech.dble.server.parser.ServerParseFactory;
import com.actiontech.dble.server.status.SqlDumpLog;
import com.actiontech.dble.services.mysqlsharding.MySQLResponseService;
import com.actiontech.dble.services.rwsplit.RWSplitService;
import com.actiontech.dble.util.StringUtil;
import com.alibaba.druid.DbType;
import com.alibaba.druid.sql.visitor.ParameterizedOutputVisitorUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.Appender;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.appender.RollingFileAppender;
import org.apache.logging.log4j.core.appender.rolling.*;
import org.apache.logging.log4j.core.appender.rolling.action.*;
import org.apache.logging.log4j.core.config.AppenderRef;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.config.LoggerConfig;
import org.apache.logging.log4j.core.layout.PatternLayout;
import org.apache.logging.log4j.spi.ExtendedLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
 * Helper that owns the lifecycle of the "sqldump" SQL audit log: it builds a
 * dedicated log4j2 RollingFileAppender/Logger programmatically from the
 * SqlDumpLog config snapshot, can be switched on/off at runtime (manager
 * command enable/disable @@SQLDUMP_SQL — see ManagerParseOnOff.SQLDUMP_SQL),
 * and formats one log line per executed rw-split statement.
 *
 * Thread-safety: on/off transitions take LOCK.writeLock(); the hot logging
 * path (info0) takes LOCK.readLock() so it never observes a half-torn-down
 * logger.
 */
public final class SqlDumpLogHelper {
private static final Logger LOGGER = LoggerFactory.getLogger(SqlDumpLogHelper.class);
private static final SqlDumpLogHelper INSTANCE = new SqlDumpLogHelper();
// current switch state; volatile because it is read outside the write lock
private volatile boolean isOpen = false;
// the dedicated log4j logger; null whenever the feature is off
private volatile ExtendedLogger logger;
private static final ReentrantReadWriteLock LOCK = new ReentrantReadWriteLock();
// rw-split SQL parser used to classify statements (SELECT/INSERT/...)
private static final RwSplitServerParse PARSER = ServerParseFactory.getRwSplitParser();
private SqlDumpLogHelper() {
}
/**
 * Startup entry point (called from DbleServer): applies the configured
 * enableSqlDumpLog switch via onOff() and logs the effective parameters,
 * or a failure banner if logger construction failed.
 */
public static void init() {
boolean isOpen0 = SqlDumpLog.getInstance().getEnableSqlDumpLog() == 1;
String errMsg = onOff(isOpen0);
if (isOpen0) {
if (StringUtil.isEmpty(errMsg)) {
LOGGER.info("SqlDumpLog's param: enableSqlDumpLog[{}], sqlDumpLogBasePath[{}], sqlDumpLogFileName[{}]," +
" sqlDumpLogCompressFilePattern[{}], sqlDumpLogCompressFilePath[{}], sqlDumpLogOnStartupRotate[{}]," +
" sqlDumpLogSizeBasedRotate[{}], sqlDumpLogTimeBasedRotate[{}], sqlDumpLogDeleteFileAge[{}]",
SqlDumpLog.getInstance().getEnableSqlDumpLog(), SqlDumpLog.getInstance().getSqlDumpLogBasePath(),
SqlDumpLog.getInstance().getSqlDumpLogFileName(), SqlDumpLog.getInstance().getSqlDumpLogCompressFilePattern(),
SqlDumpLog.getInstance().getSqlDumpLogCompressFilePath(), SqlDumpLog.getInstance().getSqlDumpLogOnStartupRotate(),
SqlDumpLog.getInstance().getSqlDumpLogSizeBasedRotate(), SqlDumpLog.getInstance().getSqlDumpLogTimeBasedRotate(),
SqlDumpLog.getInstance().getSqlDumpLogDeleteFileAge());
LOGGER.info("===========================================Init SqlDumpLog Success=================================");
} else {
LOGGER.info("===========================================Init SqlDumpLog Fail=================================");
}
}
}
/**
 * Switches the sqldump log on or off under the write lock.
 *
 * @param isOpen0 desired state
 * @return null on success (or no-op when already in the desired state);
 *         otherwise a human-readable error message for the manager client.
 *         On enable failure the state is rolled back to "off".
 */
public static String onOff(boolean isOpen0) {
// NOTE(review): LOCK is a static field accessed via INSTANCE — works, but
// plain SqlDumpLogHelper.LOCK would be clearer.
final ReentrantReadWriteLock lock = INSTANCE.LOCK;
lock.writeLock().lock();
try {
if (isOpen0 == INSTANCE.isOpen)
return null;
if (isOpen0) {
try {
// flip state first, then build the logger; any failure triggers rollback
INSTANCE.isOpen = true;
SqlDumpLog.getInstance().setEnableSqlDumpLog(1);
INSTANCE.logger = SqlDumpLoggerUtil.getLogger(
SqlDumpLog.getInstance().getSqlDumpLogBasePath(), SqlDumpLog.getInstance().getSqlDumpLogFileName(),
SqlDumpLog.getInstance().getSqlDumpLogCompressFilePattern(),
SqlDumpLog.getInstance().getSqlDumpLogOnStartupRotate(), SqlDumpLog.getInstance().getSqlDumpLogSizeBasedRotate(), SqlDumpLog.getInstance().getSqlDumpLogTimeBasedRotate(),
SqlDumpLog.getInstance().getSqlDumpLogDeleteFileAge(), SqlDumpLog.getInstance().getSqlDumpLogCompressFilePath());
} catch (Exception ei) { // rollback
try {
INSTANCE.isOpen = false;
SqlDumpLog.getInstance().setEnableSqlDumpLog(0);
INSTANCE.logger = null;
SqlDumpLoggerUtil.clearLogger();
} catch (Exception eii) {
// NOTE(review): passing the exception as a {} argument logs its
// toString(), not the stack trace; LOGGER.warn("msg", eii) would
// keep the trace.
LOGGER.warn("enable sqlDumpLog failed, rollback exception: {}", eii);
return "enable sqlDumpLog failed exception: " + ei.getMessage() + ", and rollback exception" + eii.getMessage();
}
LOGGER.warn("enable sqlDumpLog failed exception: {}", ei);
return "enable sqlDumpLog failed exception: " + ei.getMessage();
}
} else {
try {
// disable: tear the log4j logger down first, then clear local state
SqlDumpLoggerUtil.clearLogger();
INSTANCE.isOpen = false;
SqlDumpLog.getInstance().setEnableSqlDumpLog(0);
INSTANCE.logger = null;
} catch (Exception ei) {
LOGGER.warn("disable sqlDumpLog failed exception: {}", ei);
return "disable sqlDumpLog failed exception: " + ei.getMessage();
}
}
return null;
} finally {
lock.writeLock().unlock();
}
}
/**
 * Records one executed statement. The SQL text is taken from the raw packet,
 * the hint SQL, or the service's execute-SQL bytes (in that priority), then
 * parameterized via druid into a digest and written through info0().
 *
 * @param originPacket    raw MySQL packet, may be null
 * @param isHint          true when the SQL came from a dble hint
 * @param rwSplitService  frontend service (user, charset, txn counter)
 * @param responseService backend service (host/port, read/write timestamps)
 * @param affectRows      affected-row count reported by the backend
 */
public static void info(byte[] originPacket, boolean isHint, RWSplitService rwSplitService, MySQLResponseService responseService, long affectRows) {
String[] arr = null;
if (originPacket != null) {
arr = packageLog(rwSplitService, originPacket, rwSplitService.getCharset().getResults());
} else if (isHint) {
arr = packageLog(rwSplitService.getSession2(), rwSplitService.getExecuteSql());
} else {
arr = packageLog(rwSplitService, rwSplitService.getExecuteSqlBytes(), rwSplitService.getCharset().getResults());
}
if (arr == null)
return;
// flush
String sqlDigest;
if (arr[1].equalsIgnoreCase("begin")) {
// druid cannot parameterize a bare "begin"; keep it literal
sqlDigest = "begin";
} else {
// normalize literals to '?' and collapse whitespace so identical
// statement shapes share one digest
sqlDigest = ParameterizedOutputVisitorUtils.parameterize(arr[1], DbType.mysql).replaceAll("[\\t\\n\\r]", " ");
}
String digestHash = Integer.toHexString(sqlDigest.hashCode()); // hashcode convert hex
// duration approximated as backend lastRead - lastWrite
long dura = responseService.getConnection().getLastReadTime() - responseService.getConnection().getLastWriteTime();
info0(digestHash, arr[0], rwSplitService.getTransactionsCounter() + "", affectRows, rwSplitService.getUser().getFullName(),
rwSplitService.getConnection().getHost(), rwSplitService.getConnection().getLocalPort(),
responseService.getConnection().getHost(), responseService.getConnection().getPort(), dura, sqlDigest);
}
/**
 * Writes one formatted line under the read lock; silently no-ops when the
 * feature is off (logger == null). The digest is truncated to 100 chars.
 * Any exception is swallowed with a warning so logging can never break
 * query execution.
 */
private static void info0(String digestHash, String sqlType, String transactionId, long affectRows, String userName,
String clientHost, int clientPort,
String backHost, int backPort, long dura, String sqlDigest) {
try {
final ReentrantReadWriteLock lock = INSTANCE.LOCK;
lock.readLock().lock();
try {
final ExtendedLogger log = INSTANCE.logger;
if (log != null) {
sqlDigest = sqlDigest.length() > 100 ? sqlDigest.substring(0, 100) : sqlDigest;
log.info("[{}][{}][{}][{}][{}][{}:{}][{}:{}][{}] {}",
digestHash, sqlType, transactionId, affectRows, userName,
clientHost, clientPort, backHost, backPort, dura, sqlDigest);
}
} finally {
lock.readLock().unlock();
}
} catch (Exception e) {
LOGGER.warn("SqlDumpLogHelper.info() happen exception: {}", e.getMessage());
}
}
/**
 * Extracts the SQL text from a raw COM_QUERY / COM_STMT_PREPARE packet
 * (payload starts at byte 5) and delegates to the session-aware overload.
 * Returns null for any other command type or on a charset decoding error.
 */
private static String[] packageLog(RWSplitService rwSplitService, byte[] data, String charset) {
try {
switch (data[4]) {
case MySQLPacket.COM_QUERY:
case MySQLPacket.COM_STMT_PREPARE:
MySQLMessage mm = new MySQLMessage(data);
mm.position(5);
String originSql = mm.readString(charset);
return packageLog(rwSplitService.getSession2(), originSql);
default:
return null;
}
} catch (UnsupportedEncodingException e) {
LOGGER.warn("SqlDumpLogHelper.packageLog() happen exception: {}", e.getMessage());
}
return null;
}
/**
 * Multi-statement handling: if the session carries leftover SQL from a
 * previous multi-statement packet, consume that first; otherwise split the
 * incoming SQL at the next statement break and stash the remainder on the
 * session. A trailing ';' on a single statement is stripped.
 * NOTE(review): "RemingSql" looks like a typo for "RemainingSql" — matches
 * the field on RWSplitNonBlockingSession, so renaming would touch both.
 */
private static String[] packageLog(RWSplitNonBlockingSession session2, String originSql) {
String sql = originSql;
if (session2.getRemingSql() != null)
sql = session2.getRemingSql();
int index = ParseUtil.findNextBreak(sql);
boolean isMultiStatement = index + 1 < sql.length() && !ParseUtil.isEOF(sql, index);
if (isMultiStatement) {
session2.setRemingSql(sql.substring(index + 1));
// NOTE(review): findNextBreak is recomputed here; reusing `index`
// would be equivalent and cheaper — confirm.
sql = sql.substring(0, ParseUtil.findNextBreak(sql));
} else {
session2.setRemingSql(null);
if (sql.endsWith(";"))
sql = sql.substring(0, sql.length() - 1);
}
return packageLog(sql.trim());
}
/**
 * Classifies a single statement with the rw-split parser.
 *
 * @return arr[0] = coarse type label (SELECT/INSERT/DELETE/UPDATE/DDL/OTHER),
 *         arr[1] = the original SQL text
 */
private static String[] packageLog(String originSql) {
String[] arr = new String[2];
int rs = PARSER.parse(originSql);
// low byte of the parse result carries the statement type
int sqlType = rs & 0xff;
switch (sqlType) {
case RwSplitServerParse.SELECT:
arr[0] = "SELECT";
break;
case RwSplitServerParse.INSERT:
arr[0] = "INSERT";
break;
case RwSplitServerParse.DELETE:
arr[0] = "DELETE";
break;
case RwSplitServerParse.UPDATE:
arr[0] = "UPDATE";
break;
case RwSplitServerParse.DDL:
arr[0] = "DDL";
break;
default:
arr[0] = "OTHER";
break;
}
arr[1] = originSql;
return arr;
}
/**
 * Programmatic log4j2 configuration for the sqldump appender/logger —
 * equivalent to the XML snippets quoted in the comments below.
 */
static class SqlDumpLoggerUtil {
static final String LOG_NAME = "SqlDumpLog";
static final String LOG_PATTERN = "[%d{yyyy-MM-dd HH:mm:ss.SSS}]%m%n";
// DefaultRolloverStrategy max index for %i in the file pattern
static final String ROLLOVER_MAX = "100";
/**
 * Builds, registers and starts the rolling-file appender plus its logger
 * in the current LoggerContext. Parameters valued "-1" (strings) or -1
 * (ints) mean "feature not configured" — see participate().
 */
public static ExtendedLogger getLogger(String basePath, String fileName,
String compressFilePattern,
int onStartupRotate, String sizeBasedRotate, int timeBasedRotate,
String deleteFileAge, String compressFilePath) throws Exception {
final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
final Configuration config = ctx.getConfiguration();
/**
 * <PatternLayout>
 * <Pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %5p [%t] (%l) - %m%n</Pattern>
 * </PatternLayout>
 */
final PatternLayout layout = PatternLayout.newBuilder().withPattern(LOG_PATTERN).withConfiguration(config).build();
/**
 * <Policies>
 * <OnStartupTriggeringPolicy/>
 * <SizeBasedTriggeringPolicy size="250 MB"/>
 * <TimeBasedTriggeringPolicy/>
 * </Policies>
 */
List<TriggeringPolicy> policies = new ArrayList<>();
if (participate(onStartupRotate)) {
OnStartupTriggeringPolicy onStartupTriggeringPolicy = OnStartupTriggeringPolicy.createPolicy(1);
policies.add(onStartupTriggeringPolicy);
}
if (participate(sizeBasedRotate)) {
SizeBasedTriggeringPolicy sizeBasedTriggeringPolicy = SizeBasedTriggeringPolicy.createPolicy(sizeBasedRotate);
policies.add(sizeBasedTriggeringPolicy);
}
if (participate(timeBasedRotate)) {
TimeBasedTriggeringPolicy timeBasedTriggeringPolicy = TimeBasedTriggeringPolicy.createPolicy(timeBasedRotate + "", "false");
policies.add(timeBasedTriggeringPolicy);
}
CompositeTriggeringPolicy triggeringPolicy = CompositeTriggeringPolicy.createPolicy(policies.toArray(new TriggeringPolicy[policies.size()]));
// <DefaultRolloverStrategy max="100">
// <Delete basePath="sqldump" maxDepth="5">
// <IfFileName glob="*/sqldump-*.log.gz">
// <IfLastModified age="2d"/>
// </IfFileName>
// </Delete>
// </DefaultRolloverStrategy>
DeleteAction deleteAction = null;
if (participate(deleteFileAge)) {
IfLastModified ifLastModified = IfLastModified.createAgeCondition(Duration.parse(deleteFileAge), new PathCondition[0]);
IfFileName ifFileName = IfFileName.createNameCondition(compressFilePath, null, new PathCondition[]{ifLastModified});
deleteAction = DeleteAction.createDeleteAction(basePath, false, 5, false, null, new PathCondition[]{ifFileName}, null, config);
}
// NOTE(review): when deleteFileAge == "-1", deleteAction stays null and a
// {null} array is passed to withCustomActions — confirm log4j tolerates a
// null Action element, otherwise guard this.
DefaultRolloverStrategy strategy = DefaultRolloverStrategy.newBuilder().withMax(ROLLOVER_MAX).withConfig(config).withCustomActions(new Action[]{deleteAction}).build();
/**
 * <RollingFile name="SqlDumpLog" fileName="sqldump/sqldump.log"
 * filePattern="sqldump/$${date:yyyy-MM}/sqldump-%d{MM-dd}-%i.log.gz">
 * ...
 * </RollingFile>
 */
RollingFileAppender appender = RollingFileAppender.newBuilder()
.withName(LOG_NAME)
.withFileName(basePath + File.separator + fileName)
.withFilePattern(basePath + File.separator + compressFilePattern)
.withLayout(layout)
.withPolicy(triggeringPolicy)
.withStrategy(strategy)
.withAppend(true)
.withBufferedIo(true)
.withImmediateFlush(true)
.withConfiguration(config)
.build();
appender.start();
config.addAppender(appender);
/**
 * <AsyncLogger name="SqlDumpLog" additivity="false" includeLocation="false">
 * <AppenderRef ref="SqlDumpLog"/>
 * </AsyncLogger>
 */
LoggerConfig loggerConfig = LoggerConfig.createLogger("false", null, LOG_NAME, "false",
new AppenderRef[]{AppenderRef.createAppenderRef(LOG_NAME, null, null)},
null, config, null);
loggerConfig.addAppender(appender, null, null);
config.addLogger(LOG_NAME, loggerConfig);
// update config
ctx.updateLoggers();
ExtendedLogger logger = ctx.getLogger(LOG_NAME);
return logger;
}
/**
 * Stops and removes the sqldump appender and logger from the current
 * configuration (inverse of getLogger()).
 */
public static void clearLogger() {
final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
final Configuration config = ctx.getConfiguration();
config.getAppender(LOG_NAME).stop();
Appender appender = config.getAppenders().remove(LOG_NAME);
if (appender != null)
appender.stop();
config.removeLogger(LOG_NAME);
}
/**
 * Convention check: a String "-1" or an int -1 means "parameter not
 * configured"; everything else enables the corresponding feature.
 */
private static boolean participate(Object value) {
if (value instanceof String) {
if (value.equals("-1"))
return false;
} else {
if ((int) value == -1)
return false;
}
return true;
}
}
}
@@ -46,33 +46,38 @@ public class ItemFuncJsonExtract extends ItemStrFunc {
public String valStr() {
final Item arg1 = args.get(0);
if (arg1.isNull()) {
LOGGER.debug("use inner json_extract() ,arg null");
this.nullValue = true;
return null;
return EMPTY;
}
String inputStr = arg1.valStr();
List<String> patterns = new ArrayList<>();
for (int i = 1; i < args.size(); i++) {
patterns.add(args.get(i).valStr());
}
final String result = jsonExtract(inputStr, patterns);
LOGGER.debug("use inner json_extract() , use arg {},{}", inputStr, patterns);
final String result = jsonExtract(inputStr, patterns, false);
if (result == null) {
this.nullValue = true;
return EMPTY;
}
this.nullValue = false;
return result;
}
private static String jsonExtract(String inputStr, List<String> args) {
private static String jsonExtract(String inputStr, List<String> args, boolean unquote) {
if (inputStr == null) {
return null;
}
Queue<JsonElement> results = new LinkedList<>();
//could return multi match in one results.
boolean couldReturnMultipleMatches = args.size() > 1;
for (int i = 0; i < args.size(); i++) {
final String arg = args.get(i);
//parse the query.
List<PathLeg> pathLegs = new JsonPath(arg).parsePathLegs();
final JsonSeeker seeker = new JsonSeeker();
//find the result
seeker.seek(inputStr, pathLegs);
results.addAll(seeker.getResults());
couldReturnMultipleMatches |= seeker.isCouldReturnMultipleMatches();
@@ -81,7 +86,13 @@ public class ItemFuncJsonExtract extends ItemStrFunc {
if (results.isEmpty()) {
outputResult = null;
} else if (!couldReturnMultipleMatches) {
outputResult = (results.peek().toString());
final JsonElement result = results.peek();
if (unquote && result.isJsonPrimitive()) {
outputResult = (result.getAsString());
} else {
outputResult = (result.toString());
}
} else {
outputResult = (results.toString());
}
@@ -98,7 +109,7 @@ public class ItemFuncJsonExtract extends ItemStrFunc {
couldReturnMultipleMatches = false;
results = new Stack<>();
{
JsonElement result = new JsonParser().parse(inputStr);
JsonElement result = JsonParser.parseString(inputStr);
results.push(result);
}
Stack<JsonElement> nextResults;
@@ -195,6 +206,7 @@ public class ItemFuncJsonExtract extends ItemStrFunc {
break;
case JPL_ELLIPSIS:
//process ** recursive lookup
nextResults = processEllipsis(pathLegIt);
break;
default:
@@ -296,6 +308,10 @@ public class ItemFuncJsonExtract extends ItemStrFunc {
return pathLeg;
}
/**
* for **
* @return
*/
private PathLeg parseEllipsisLeg() {
index++;
@@ -336,7 +352,7 @@ public class ItemFuncJsonExtract extends ItemStrFunc {
StringBuilder sb = new StringBuilder();
tmpS = sb.append(DOUBLE_QUOTE).append(pattern, beginIndex, endIndex - beginIndex).append(DOUBLE_QUOTE).toString();
}
tmpS = new JsonParser().parse(tmpS).getAsString();
tmpS = JsonParser.parseString(tmpS).getAsString();
leg = PathLeg.ofMemberProperty(tmpS);
}
return leg;
@@ -12,6 +12,7 @@ import java.util.List;
* Create Date: 2022-01-24
*/
public class ItemFuncJsonUnQuote extends ItemStrFunc {
public static final char QUOTE = '"';
public ItemFuncJsonUnQuote(List<Item> args, int charsetIndex) {
super(args, charsetIndex);
@@ -37,22 +38,33 @@ public class ItemFuncJsonUnQuote extends ItemStrFunc {
final Item arg1 = args.get(0);
if (arg1.isNull()) {
this.nullValue = true;
return null;
LOGGER.debug("use inner json_unquote() , use arg null");
return EMPTY;
}
if (args.size() != 1) {
throw new IllegalStateException("illegal argument count for json_unquote");
}
String inputStr = arg1.valStr();
LOGGER.debug("use inner json_unquote() , use arg {}", inputStr);
if (inputStr == null) {
this.nullValue = true;
return null;
return EMPTY;
}
final JsonElement parse = new JsonParser().parse(inputStr);
//exclude if not string
if (inputStr.length() < 2 || inputStr.charAt(0) != QUOTE || inputStr.charAt(inputStr.length() - 1) != QUOTE) {
this.nullValue = false;
return inputStr;
}
final JsonElement parse = JsonParser.parseString(inputStr);
if (parse.isJsonPrimitive() && parse.getAsJsonPrimitive().isString()) {
inputStr = parse.getAsString();
}
if (inputStr == null) {
this.nullValue = true;
return null;
return EMPTY;
}
this.nullValue = false;
return inputStr;
@@ -21,6 +21,7 @@ public final class ManagerParseOnOff {
public static final int GENERAL_LOG = 5;
public static final int STATISTIC = 6;
public static final int LOAD_DATA_BATCH = 7;
public static final int SQLDUMP_SQL = 8;
public static int parse(String stmt, int offset) {
@@ -89,16 +90,41 @@ public final class ManagerParseOnOff {
return OTHER;
}
// enable/disable @@SLOW_QUERY_LOG
private static int sCheck(String stmt, int offset) {
if (stmt.length() > offset + 13) {
if (stmt.length() > ++offset) {
switch (stmt.charAt(offset)) {
case 'L':
case 'l':
return slCheck(stmt, offset);
case 'T':
case 't':
return stCheck(stmt, offset);
case 'Q':
case 'q':
return sqCheck(stmt, offset);
default:
return OTHER;
}
}
return OTHER;
}
// enable/disable @@SLOW_QUERY_LOG
private static int slCheck(String stmt, int offset) {
if (stmt.length() > offset + 12) {
String prefix = stmt.substring(offset).toUpperCase();
if (prefix.startsWith("SLOW_QUERY_LOG") && (stmt.length() == offset + 14 || ParseUtil.isEOF(stmt, offset + 14))) {
if (prefix.startsWith("LOW_QUERY_LOG") && (stmt.length() == offset + 13 || ParseUtil.isEOF(stmt, offset + 13))) {
return SLOW_QUERY_LOG;
}
} else if (stmt.length() > offset + 8) {
}
return OTHER;
}
// enable/disable @@STATISTIC
private static int stCheck(String stmt, int offset) {
if (stmt.length() > offset + 7) {
String prefix = stmt.substring(offset).toUpperCase();
if (prefix.startsWith("STATISTIC") && (stmt.length() == offset + 9 || ParseUtil.isEOF(stmt, offset + 9))) {
if (prefix.startsWith("TATISTIC") && (stmt.length() == offset + 8 || ParseUtil.isEOF(stmt, offset + 8))) {
return STATISTIC;
}
@@ -106,6 +132,17 @@ public final class ManagerParseOnOff {
return OTHER;
}
// enable/disable @@SQLDUMP_SQL
private static int sqCheck(String stmt, int offset) {
    // offset points at the char right after the leading 'S'; the remaining
    // keyword "QLDUMP_SQL" is 10 chars long.
    final int end = offset + 10;
    if (stmt.length() <= offset + 9) {
        return OTHER;
    }
    boolean keywordMatched = stmt.substring(offset).toUpperCase().startsWith("QLDUMP_SQL");
    if (keywordMatched && (stmt.length() == end || ParseUtil.isEOF(stmt, end))) {
        return SQLDUMP_SQL;
    }
    return OTHER;
}
private static int gCheck(String stmt, int offset) {
if (stmt.length() > offset + 10) {
String prefix = stmt.substring(offset).toUpperCase();
@@ -44,6 +44,15 @@ public class RWSplitNonBlockingSession extends Session {
private volatile boolean preSendIsWrite = false; // Has the previous SQL been delivered to the write node?
private volatile long preWriteResponseTime = 0; // Response time of the previous write node
private int reSelectNum;
private volatile String remingSql = null;
public String getRemingSql() {
return remingSql;
}
public void setRemingSql(String remingSql) {
this.remingSql = remingSql;
}
public RWSplitNonBlockingSession(RWSplitService service) {
this.rwSplitService = service;
@@ -0,0 +1,103 @@
package com.actiontech.dble.server.status;
import com.actiontech.dble.config.model.SystemConfig;
import com.actiontech.dble.config.util.StartProblemReporter;
import org.apache.logging.log4j.core.appender.rolling.FileSize;
import org.apache.logging.log4j.core.appender.rolling.action.Duration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SqlDumpLog {
private static final Logger LOGGER = LoggerFactory.getLogger(SqlDumpLog.class);
private static final SqlDumpLog INSTANCE = new SqlDumpLog();
private volatile int enableSqlDumpLog = 0;
private String sqlDumpLogBasePath;
private String sqlDumpLogFileName;
private String sqlDumpLogCompressFilePattern;
private int sqlDumpLogOnStartupRotate;
private String sqlDumpLogSizeBasedRotate;
private int sqlDumpLogTimeBasedRotate;
private String sqlDumpLogDeleteFileAge;
private String sqlDumpLogCompressFilePath;
public SqlDumpLog() {
// switch
this.enableSqlDumpLog = SystemConfig.getInstance().getEnableSqlDumpLog();
// base param
this.sqlDumpLogBasePath = SystemConfig.getInstance().getSqlDumpLogBasePath();
this.sqlDumpLogFileName = SystemConfig.getInstance().getSqlDumpLogFileName();
this.sqlDumpLogCompressFilePattern = SystemConfig.getInstance().getSqlDumpLogCompressFilePattern();
// policies param (optional)
this.sqlDumpLogOnStartupRotate = SystemConfig.getInstance().getSqlDumpLogOnStartupRotate() == 1 ? 1 : 0;
this.sqlDumpLogSizeBasedRotate = SystemConfig.getInstance().getSqlDumpLogSizeBasedRotate();
this.sqlDumpLogTimeBasedRotate = SystemConfig.getInstance().getSqlDumpLogTimeBasedRotate() < 1 ? -1 : SystemConfig.getInstance().getSqlDumpLogTimeBasedRotate();
// rollover param (optional)
this.sqlDumpLogDeleteFileAge = SystemConfig.getInstance().getSqlDumpLogDeleteFileAge();
this.sqlDumpLogCompressFilePath = SystemConfig.getInstance().getSqlDumpLogCompressFilePath();
}
public void verify() {
// '-1' means that it is not configured
if (!sqlDumpLogSizeBasedRotate.equals("-1")) {
// default: 50 MB
sqlDumpLogSizeBasedRotate = FileSize.parse(sqlDumpLogSizeBasedRotate, 52428800L) + "";
}
if (!sqlDumpLogDeleteFileAge.equals("-1")) {
try {
Duration.parse(sqlDumpLogDeleteFileAge);
} catch (Exception e) {
StartProblemReporter.getInstance().warn("parse [sqlDumpLogDeleteFileAge] failed: " + e.getMessage());
}
if (sqlDumpLogCompressFilePath.equals("-1")) {
StartProblemReporter.getInstance().warn("[sqlDumpLogCompressFilePath] can't be null");
}
}
}
/** @return the process-wide singleton holding the sqldump-log settings */
public static SqlDumpLog getInstance() {
return INSTANCE;
}
/** Toggles the sqldump-log switch at runtime (1 = on, 0 = off); field is volatile. */
public void setEnableSqlDumpLog(int enableSqlDumpLog) {
this.enableSqlDumpLog = enableSqlDumpLog;
}
/** @return 1 when sqldump logging is enabled, 0 when disabled */
public int getEnableSqlDumpLog() {
return enableSqlDumpLog;
}
/** @return base directory for sqldump log files */
public String getSqlDumpLogBasePath() {
return sqlDumpLogBasePath;
}
/** @return the sqldump log file name */
public String getSqlDumpLogFileName() {
return sqlDumpLogFileName;
}
/** @return file-name pattern used for compressed (rolled-over) log files */
public String getSqlDumpLogCompressFilePattern() {
return sqlDumpLogCompressFilePattern;
}
/** @return glob/path used to locate compressed log files for deletion */
public String getSqlDumpLogCompressFilePath() {
return sqlDumpLogCompressFilePath;
}
/** @return 1 to rotate the log on startup, otherwise 0 */
public int getSqlDumpLogOnStartupRotate() {
return sqlDumpLogOnStartupRotate;
}
/** @return size threshold for rotation (byte count after verify(); "-1" = not configured) */
public String getSqlDumpLogSizeBasedRotate() {
return sqlDumpLogSizeBasedRotate;
}
/** @return time-based rotation interval (-1 = not configured) */
public int getSqlDumpLogTimeBasedRotate() {
return sqlDumpLogTimeBasedRotate;
}
/** @return age after which rolled files are deleted (e.g. "90d"; "-1" = not configured) */
public String getSqlDumpLogDeleteFileAge() {
return sqlDumpLogDeleteFileAge;
}
}
@@ -38,6 +38,9 @@ public final class DisableHandler {
case ManagerParseOnOff.LOAD_DATA_BATCH:
OnOffLoadDataBatch.execute(service, false);
break;
case ManagerParseOnOff.SQLDUMP_SQL:
SqlDumpLog.OnOff.execute(service, false);
break;
default:
service.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement");
}
@@ -38,6 +38,9 @@ public final class EnableHandler {
case ManagerParseOnOff.LOAD_DATA_BATCH:
OnOffLoadDataBatch.execute(service, true);
break;
case ManagerParseOnOff.SQLDUMP_SQL:
SqlDumpLog.OnOff.execute(service, true);
break;
default:
service.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement");
}
@@ -226,11 +226,15 @@ public final class ShowHelp {
HELPS.put("enable @@cap_client_found_rows", "Turn on the clientFoundRows capabilities");
HELPS.put("disable @@cap_client_found_rows", "Turn off the clientFoundRows capabilities");
// general log
HELPS.put("show @@general_log", "Show the general log information");
HELPS.put("enable @@general_log", "Turn on the general log");
HELPS.put("disable @@general_log", "Turn off the general log");
HELPS.put("reload @@general_log_file='?'", "Reset file path of general log");
// sqldump log
HELPS.put("enable @@sqldump_sql", "Turn on the sqldump log");
HELPS.put("disable @@sqldump_sql", "Turn off the sqldump log");
HELPS.put("show @@statistic", "Turn off statistic information");
HELPS.put("enable @@statistic", "Turn on statistic sql");
@@ -0,0 +1,38 @@
package com.actiontech.dble.services.manager.response;
import com.actiontech.dble.config.ErrorCode;
import com.actiontech.dble.log.sqldump.SqlDumpLogHelper;
import com.actiontech.dble.net.mysql.OkPacket;
import com.actiontech.dble.services.manager.ManagerService;
import com.actiontech.dble.services.manager.handler.WriteDynamicBootstrap;
import com.actiontech.dble.util.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Manager command backing "enable @@sqldump_sql" / "disable @@sqldump_sql".
 */
public class SqlDumpLog {
    private static final Logger LOGGER = LoggerFactory.getLogger(SqlDumpLog.class);

    /**
     * Turns sqldump logging on or off: first persists the switch into the
     * dynamic bootstrap config, then applies it via {@link SqlDumpLogHelper}.
     */
    public static class OnOff {
        public static void execute(ManagerService service, boolean isOn) {
            String onOffStatus = isOn ? "enable" : "disable";
            try {
                // persist first so the setting survives a restart
                WriteDynamicBootstrap.getInstance().changeValue("enableSqlDumpLog", isOn ? "1" : "0");
            } catch (Exception ex) {
                LOGGER.warn("enable/disable SqlDumpLog failed, exception", ex);
                service.writeErrMessage(ErrorCode.ER_YES, onOffStatus + " SqlDumpLog failed");
                return;
            }
            String errMsg = SqlDumpLogHelper.onOff(isOn);
            if (!StringUtil.isEmpty(errMsg)) {
                // helper rejected the change — report its message verbatim
                service.writeErrMessage(ErrorCode.ER_YES, errMsg);
                return;
            }
            OkPacket response = new OkPacket();
            response.setPacketId(1);
            response.setAffectedRows(1);
            response.setServerStatus(2);
            response.write(service.getConnection());
        }
    }
}
@@ -9,6 +9,7 @@ import com.actiontech.dble.backend.mysql.nio.handler.LoadDataResponseHandler;
import com.actiontech.dble.backend.mysql.nio.handler.PreparedResponseHandler;
import com.actiontech.dble.backend.mysql.nio.handler.ResponseHandler;
import com.actiontech.dble.config.ErrorCode;
import com.actiontech.dble.log.sqldump.SqlDumpLogHelper;
import com.actiontech.dble.net.connection.AbstractConnection;
import com.actiontech.dble.net.connection.BackendConnection;
import com.actiontech.dble.net.connection.FrontendConnection;
@@ -90,8 +91,9 @@ public class RWSplitHandler implements ResponseHandler, LoadDataResponseHandler,
@Override
public void errorResponse(byte[] data, @NotNull AbstractService service) {
StatisticListener.getInstance().record(rwSplitService, r -> r.onBackendSqlError(data));
MySQLResponseService mysqlService = (MySQLResponseService) service;
SqlDumpLogHelper.info(originPacket, isHint, rwSplitService, mysqlService, 0);
StatisticListener.getInstance().record(rwSplitService, r -> r.onBackendSqlError(data));
final boolean syncFinished = mysqlService.syncAndExecute();
loadDataClean();
initDbClean();
@@ -122,11 +124,13 @@ public class RWSplitHandler implements ResponseHandler, LoadDataResponseHandler,
@Override
public void okResponse(byte[] data, @NotNull AbstractService service) {
MySQLResponseService mysqlService = (MySQLResponseService) service;
boolean executeResponse = mysqlService.syncAndExecute();
if (executeResponse) {
final OkPacket packet = new OkPacket();
packet.read(data);
loadDataClean();
SqlDumpLogHelper.info(originPacket, isHint, rwSplitService, mysqlService, packet.getAffectedRows());
StatisticListener.getInstance().record(rwSplitService, r -> r.onBackendSqlSetRowsAndEnd(packet.getAffectedRows()));
rwSplitService.getSession2().recordLastSqlResponseTime();
if ((packet.getServerStatus() & StatusFlags.SERVER_MORE_RESULTS_EXISTS) == 0) {
@@ -185,6 +189,7 @@ public class RWSplitHandler implements ResponseHandler, LoadDataResponseHandler,
@Override
public void rowEofResponse(byte[] eof, boolean isLeft, @NotNull AbstractService service) {
synchronized (this) {
SqlDumpLogHelper.info(originPacket, isHint, rwSplitService, (MySQLResponseService) service, selectRows);
StatisticListener.getInstance().record(rwSplitService, r -> r.onBackendSqlSetRowsAndEnd(selectRows));
selectRows = 0;
if (!write2Client) {
@@ -18,6 +18,7 @@ import com.actiontech.dble.net.ssl.SSLWrapperRegistry;
import com.actiontech.dble.server.status.GeneralLog;
import com.actiontech.dble.server.status.LoadDataBatch;
import com.actiontech.dble.server.status.SlowQueryLog;
import com.actiontech.dble.server.status.SqlDumpLog;
import com.actiontech.dble.statistic.sql.StatisticManager;
import com.actiontech.dble.statistic.stat.FrontActiveRatioStat;
@@ -163,7 +164,14 @@ public final class SystemParams {
readOnlyParams.add(new ParamInfo("enableAsyncRelease", sysConfig.getEnableAsyncRelease() + "", "Whether enable async release . default value is 0(off)."));
readOnlyParams.add(new ParamInfo("releaseTimeout", sysConfig.getReleaseTimeout() + "", "time wait for release ,unit is ms, default value is 10mins"));
readOnlyParams.add(new ParamInfo("sqlDumpLogBasePath", SqlDumpLog.getInstance().getSqlDumpLogBasePath() + "", "The base path of sqldump log, the default value is 'sqldump'"));
readOnlyParams.add(new ParamInfo("sqlDumpLogFileName", SqlDumpLog.getInstance().getSqlDumpLogFileName() + "", "The sqldump log file name, the default value is 'sqldump.log'"));
readOnlyParams.add(new ParamInfo("sqlDumpLogCompressFilePattern", SqlDumpLog.getInstance().getSqlDumpLogCompressFilePattern() + "", "The compression of sqldump log file, the default value is '${date:yyyy-MM}/sqldump-%d{MM-dd}-%i.log.gz'"));
readOnlyParams.add(new ParamInfo("sqlDumpLogOnStartupRotate", SqlDumpLog.getInstance().getSqlDumpLogOnStartupRotate() + "", "The onStartup of rotate policy, the default value is 1"));
readOnlyParams.add(new ParamInfo("sqlDumpLogSizeBasedRotate", SqlDumpLog.getInstance().getSqlDumpLogSizeBasedRotate() + "(byte)", "The sizeBased of rotate policy, the default value is '50 MB'"));
readOnlyParams.add(new ParamInfo("sqlDumpLogTimeBasedRotate", SqlDumpLog.getInstance().getSqlDumpLogTimeBasedRotate() + "", "The timeBased of rotate policy, the default value is 1"));
readOnlyParams.add(new ParamInfo("sqlDumpLogDeleteFileAge", SqlDumpLog.getInstance().getSqlDumpLogDeleteFileAge() + "", "The expiration time deletion strategy, the default value is '90d'"));
readOnlyParams.add(new ParamInfo("sqlDumpLogCompressFilePath", SqlDumpLog.getInstance().getSqlDumpLogCompressFilePath() + "", "The compression of sqldump log file path, the default value is '*/sqldump-*.log.gz'"));
}
public List<ParamInfo> getVolatileParams() {
@@ -188,6 +196,7 @@ public final class SystemParams {
params.add(new ParamInfo("sqlLogTableSize", StatisticManager.getInstance().getSqlLogSize() + "", "SqlLog table size, the default is 1024"));
params.add(new ParamInfo("samplingRate", StatisticManager.getInstance().getSamplingRate() + "", "Sampling rate, the default is 0, it is a percentage"));
params.add(new ParamInfo("xaIdCheckPeriod", XaCheckHandler.getXaIdCheckPeriod() + "s", "The period for check xaId, the default is 300 second"));
params.add(new ParamInfo("enableSqlDumpLog", SqlDumpLog.getInstance().getEnableSqlDumpLog() + "", "Whether enable sqlDumpLog, the default value is 0(off)"));
return params;
}
}
+10
View File
@@ -223,3 +223,13 @@
-DenableSessionActiveRatioStat=1
-DenableConnectionAssociateThread=1
-DxaIdCheckPeriod=300
-DenableSqlDumpLog=0
-DsqlDumpLogBasePath=sqldump
-DsqlDumpLogFileName=sqldump.log
-DsqlDumpLogCompressFilePattern=${date:yyyy-MM}/sqldump-%d{MM-dd}-%i.log.gz
-DsqlDumpLogOnStartupRotate=1
-DsqlDumpLogSizeBasedRotate=50MB
-DsqlDumpLogTimeBasedRotate=1
-DsqlDumpLogDeleteFileAge=90d
-DsqlDumpLogCompressFilePath=*/sqldump-*.log.gz
+6 -6
View File
@@ -9,10 +9,10 @@
<heartbeat>show slave status</heartbeat>
<dbInstance name="instanceM1" url="ip1:3306" user="your_user" password="your_psw" maxCon="1000" minCon="10"
primary="true" readWeight="1" id="xx1">
<property name="testOnCreate">true</property>
<property name="testWhileIdle">true</property>
</dbInstance>
<!--<dbInstance name="instanceS1" url="ip3:3306" user="your_user" password="your_psw" maxCon="1000" minCon="10" readWeight="2" disabled="true">-->
<!--<property name="testOnCreate">false</property>-->
<!--<property name="testWhileIdle">false</property>-->
<!--</dbInstance>-->
</dbGroup>
@@ -20,12 +20,12 @@
<heartbeat errorRetryCount="1" timeout="10" keepAlive="60">show slave status</heartbeat>
<dbInstance name="instanceM2" url="ip2:3306" user="your_user" password="your_psw" maxCon="1000" minCon="10"
primary="true">
<property name="testOnCreate">true</property>
<property name="testWhileIdle">true</property>
</dbInstance>
<!-- can have multi read instances -->
<!--<dbInstance name="instanceS2" url="ip4:3306" user="your_user" password="your_psw" maxCon="1000" minCon="10" usingDecrypt="true">-->
<!--<property name="testOnCreate">true</property>-->
<!--<property name="testWhileIdle">true</property>-->
<!--</dbInstance>-->
</dbGroup>
<!--for clickhouse-->
@@ -33,11 +33,11 @@
<heartbeat errorRetryCount="1" timeout="10" keepAlive="60">show databases</heartbeat>
<dbInstance name="instanceM2" url="ip2:9004" user="your_user" password="your_psw" maxCon="1000" minCon="10" databaseType="clickhouse"
primary="true">
<property name="testOnCreate">true</property>
<property name="testWhileIdle">true</property>
</dbInstance>
<!-- can have multi read instances -->
<!--<dbInstance name="instanceS2" url="ip4:9004" user="your_user" password="your_psw" maxCon="1000" minCon="10" usingDecrypt="true" databaseType="clickhouse">-->
<!--<property name="testOnCreate">true</property>-->
<!--<property name="testWhileIdle">true</property>-->
<!--</dbInstance>-->
</dbGroup>
</dble:db>