connection pool refactor (#1875)

* connection pool refactor

* fix connection pool

* fix connection pool

* make get connection sync

* fix conflict

* add copyResources.sh for linux
This commit is contained in:
Collapsar
2020-06-19 16:43:13 +08:00
committed by GitHub
parent 75ac07efce
commit 8937e965d9
84 changed files with 2132 additions and 2068 deletions
+1
View File
@@ -0,0 +1 @@
cp src/main/resources/* target/classes/
+1
View File
@@ -16,6 +16,7 @@
<suppress checks=".*" files="AlertBlockQueue.java"/>
<suppress checks=".*" files="UshardInterface.java"/>
<suppress checks=".*" files="DbleClusterGrpc.java"/>
<suppress checks=".*" files="EvictionTimer.java"/>
<suppress checks="Indentation" files="MyTime.java"/>
<suppress checks="CyclomaticComplexity" files="MyTime.java"/>
<suppress checks="CyclomaticComplexity" files="TimSort.java"/>
+1 -1
View File
@@ -23,7 +23,7 @@
<project.build.sourceEncoding>
UTF-8
</project.build.sourceEncoding>
<grpc.version>1.4.0</grpc.version><!-- CURRENT_GRPC_VERSION -->
<grpc.version>1.5.0</grpc.version><!-- CURRENT_GRPC_VERSION -->
</properties>
<repositories>
<repository>
@@ -292,13 +292,11 @@ public final class DbleServer {
}
}
private void initDbGroup() {
Map<String, PhysicalDbGroup> dbGroups = this.getConfig().getDbGroups();
LOGGER.info("Initialize dbGroup ...");
for (PhysicalDbGroup node : dbGroups.values()) {
node.init();
node.startHeartbeat();
}
}
@@ -306,7 +304,6 @@ public final class DbleServer {
systemVariables = sys;
}
public NIOProcessor nextFrontProcessor() {
int i = ++nextFrontProcessor;
if (i >= frontProcessors.length) {
@@ -395,6 +392,7 @@ public final class DbleServer {
}
}
}
private void pullVarAndMeta() throws IOException {
ProxyMetaManager tmManager = new ProxyMetaManager();
ProxyMeta.getInstance().setTmManager(tmManager);
@@ -481,8 +479,8 @@ public final class DbleServer {
for (BaseTableConfig table : schema.getTables().values()) {
for (String shardingNode : table.getShardingNodes()) {
ShardingNode dn = DbleServer.getInstance().getConfig().getShardingNodes().get(shardingNode);
if (participantLogEntry.compareAddress(dn.getDbGroup().getWriteSource().getConfig().getIp(), dn.getDbGroup().getWriteSource().getConfig().getPort(), dn.getDatabase())) {
xaCmd.append(coordinatorLogEntry.getId().substring(0, coordinatorLogEntry.getId().length() - 1));
if (participantLogEntry.compareAddress(dn.getDbGroup().getWriteDbInstance().getConfig().getIp(), dn.getDbGroup().getWriteDbInstance().getConfig().getPort(), dn.getDatabase())) {
xaCmd.append(coordinatorLogEntry.getId(), 0, coordinatorLogEntry.getId().length() - 1);
xaCmd.append(".");
xaCmd.append(dn.getDatabase());
if (participantLogEntry.getExpires() != 0) {
@@ -491,7 +489,7 @@ public final class DbleServer {
}
xaCmd.append("'");
XARecoverHandler handler = new XARecoverHandler(needCommit, participantLogEntry);
handler.execute(xaCmd.toString(), dn.getDatabase(), dn.getDbGroup().getWriteSource());
handler.execute(xaCmd.toString(), dn.getDatabase(), dn.getDbGroup().getWriteDbInstance());
if (!handler.isSuccess()) {
throw new RuntimeException("Fail to recover xa when dble start, please check backend mysql.");
}
@@ -6,6 +6,7 @@
package com.actiontech.dble.backend;
import com.actiontech.dble.backend.mysql.nio.handler.ResponseHandler;
import com.actiontech.dble.backend.pool.PooledEntry;
import com.actiontech.dble.net.ClosableConnection;
import com.actiontech.dble.route.RouteResultsetNode;
import com.actiontech.dble.server.NonBlockingSession;
@@ -13,7 +14,8 @@ import com.actiontech.dble.server.ServerConnection;
import java.io.UnsupportedEncodingException;
public interface BackendConnection extends ClosableConnection {
public interface BackendConnection extends ClosableConnection, PooledEntry {
boolean isDDL();
boolean isFromSlaveDB();
@@ -26,9 +28,7 @@ public interface BackendConnection extends ClosableConnection {
void setAttachment(Object attachment);
void setLastTime(long currentTimeMillis);
void release();
void ping();
boolean setResponseHandler(ResponseHandler commandHandler);
@@ -42,9 +42,6 @@ public interface BackendConnection extends ClosableConnection {
Object getAttachment();
// long getThreadId();
void execute(RouteResultsetNode node, ServerConnection source,
boolean autocommit);
@@ -52,10 +49,6 @@ public interface BackendConnection extends ClosableConnection {
void rollback();
boolean isBorrowed();
void setBorrowed(boolean borrowed);
int getTxIsolation();
boolean isAutocommit();
@@ -1,114 +0,0 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.backend;
import com.actiontech.dble.DbleServer;
import com.actiontech.dble.backend.datasource.PhysicalDbInstance;
import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.net.NIOProcessor;
import com.actiontech.dble.util.StringUtil;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
public class ConMap {
// key--sharding
private final ConcurrentMap<String, ConQueue> items = new ConcurrentHashMap<>();
private static final String KEY_STRING_FOR_NULL_DATABASE = "KEY FOR NULL";
public ConQueue createAndGetSchemaConQueue(String schema) {
ConQueue queue = items.get(schema == null ? KEY_STRING_FOR_NULL_DATABASE : schema);
if (queue == null) {
ConQueue newQueue = new ConQueue();
queue = items.putIfAbsent(schema == null ? KEY_STRING_FOR_NULL_DATABASE : schema, newQueue);
return (queue == null) ? newQueue : queue;
}
return queue;
}
public ConQueue getSchemaConQueue(String schema) {
return items.get(schema == null ? KEY_STRING_FOR_NULL_DATABASE : schema);
}
public BackendConnection tryTakeCon(final String schema, boolean autoCommit) {
final ConQueue queue = items.get(schema == null ? KEY_STRING_FOR_NULL_DATABASE : schema);
BackendConnection con = null;
if (queue != null) {
con = tryTakeCon(queue, autoCommit);
}
if (con != null) {
return con;
} else {
for (ConQueue queue2 : items.values()) {
if (queue != queue2) {
con = tryTakeCon(queue2, autoCommit);
if (con != null) {
return con;
}
}
}
}
return null;
}
private BackendConnection tryTakeCon(ConQueue queue, boolean autoCommit) {
BackendConnection con;
if (queue != null && ((con = queue.takeIdleCon(autoCommit)) != null)) {
return con;
} else {
return null;
}
}
public Collection<ConQueue> getAllConQueue() {
return items.values();
}
public int getActiveCountForSchema(String schema, PhysicalDbInstance dbInstance) {
int total = 0;
for (NIOProcessor processor : DbleServer.getInstance().getBackendProcessors()) {
for (BackendConnection con : processor.getBackends().values()) {
if (con instanceof MySQLConnection) {
MySQLConnection mysqlCon = (MySQLConnection) con;
if (StringUtil.equals(mysqlCon.getSchema(), schema) &&
mysqlCon.getPool() == dbInstance &&
mysqlCon.isBorrowed()) {
total++;
}
}
}
}
return total;
}
public void clearConnections(String reason, PhysicalDbInstance dbInstance) {
for (NIOProcessor processor : DbleServer.getInstance().getBackendProcessors()) {
ConcurrentMap<Long, BackendConnection> map = processor.getBackends();
Iterator<Entry<Long, BackendConnection>> iterator = map.entrySet().iterator();
while (iterator.hasNext()) {
Entry<Long, BackendConnection> entry = iterator.next();
BackendConnection con = entry.getValue();
if (con instanceof MySQLConnection) {
if (((MySQLConnection) con).getPool() == dbInstance) {
con.close(reason);
iterator.remove();
}
}
}
}
items.clear();
}
}
@@ -1,100 +0,0 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.backend;
import java.util.ArrayList;
import java.util.concurrent.ConcurrentLinkedQueue;
public class ConQueue {
private final ConcurrentLinkedQueue<BackendConnection> autoCommitCons = new ConcurrentLinkedQueue<>();
private final ConcurrentLinkedQueue<BackendConnection> manCommitCons = new ConcurrentLinkedQueue<>();
private long executeCount;
public BackendConnection takeIdleCon(boolean autoCommit) {
ConcurrentLinkedQueue<BackendConnection> f1 = autoCommitCons;
ConcurrentLinkedQueue<BackendConnection> f2 = manCommitCons;
if (!autoCommit) {
f1 = manCommitCons;
f2 = autoCommitCons;
}
BackendConnection con = f1.poll();
if (con == null || con.isClosed()) {
con = f2.poll();
}
if (con == null || con.isClosed()) {
return null;
} else {
return con;
}
}
public long getExecuteCount() {
return executeCount;
}
public void incExecuteCount() {
this.executeCount++;
}
public void removeCon(BackendConnection con) {
if (!autoCommitCons.remove(con)) {
manCommitCons.remove(con);
}
}
public ConcurrentLinkedQueue<BackendConnection> getAutoCommitCons() {
return autoCommitCons;
}
public ConcurrentLinkedQueue<BackendConnection> getManCommitCons() {
return manCommitCons;
}
public ArrayList<BackendConnection> getIdleConsToClose(int count) {
ArrayList<BackendConnection> readyCloseCons = new ArrayList<>(count);
while (!manCommitCons.isEmpty() && readyCloseCons.size() < count) {
BackendConnection theCon = manCommitCons.poll();
if (theCon != null) {
readyCloseCons.add(theCon);
}
}
while (!autoCommitCons.isEmpty() && readyCloseCons.size() < count) {
BackendConnection theCon = autoCommitCons.poll();
if (theCon != null) {
readyCloseCons.add(theCon);
}
}
return readyCloseCons;
}
public ArrayList<BackendConnection> getIdleConsToClose() {
ArrayList<BackendConnection> readyCloseCons = new ArrayList<>(
autoCommitCons.size() + manCommitCons.size());
while (!manCommitCons.isEmpty()) {
BackendConnection theCon = manCommitCons.poll();
if (theCon != null) {
readyCloseCons.add(theCon);
}
}
while (!autoCommitCons.isEmpty()) {
BackendConnection theCon = autoCommitCons.poll();
if (theCon != null) {
readyCloseCons.add(theCon);
}
}
return readyCloseCons;
}
}
@@ -10,10 +10,8 @@ import com.actiontech.dble.alarm.AlarmCode;
import com.actiontech.dble.alarm.Alert;
import com.actiontech.dble.alarm.AlertUtil;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.heartbeat.MySQLHeartbeat;
import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.backend.mysql.nio.MySQLInstance;
import com.actiontech.dble.backend.mysql.nio.handler.GetConnectionHandler;
import com.actiontech.dble.backend.mysql.nio.handler.ResponseHandler;
import com.actiontech.dble.cluster.zkprocess.parse.JsonProcessBase;
import com.actiontech.dble.cluster.zkprocess.zookeeper.process.DbInstanceStatus;
@@ -31,7 +29,6 @@ import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.lang.reflect.Type;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -40,8 +37,6 @@ public class PhysicalDbGroup {
public static final String JSON_NAME = "dbGroup";
public static final String JSON_LIST = "dbInstance";
private volatile PhysicalDbInstance writeSource;
private Map<String, PhysicalDbInstance> allSourceMap = new HashMap<>();
public static final int RW_SPLIT_OFF = 0;
private static final int RW_SPLIT_ALL_SLAVES = 1;
private static final int RW_SPLIT_ALL = 2;
@@ -49,26 +44,29 @@ public class PhysicalDbGroup {
public static final int WEIGHT = 0;
private final String groupName;
private final int rwSplitMode;
private final DbGroupConfig dbGroupConfig;
private final ThreadLocalRandom random = ThreadLocalRandom.current();
private volatile boolean initSuccess = false;
private volatile PhysicalDbInstance writeDbInstance;
private Map<String, PhysicalDbInstance> allSourceMap = new HashMap<>();
private final int rwSplitMode;
protected String[] schemas;
private final ThreadLocalRandom random = ThreadLocalRandom.current();
private final ReentrantReadWriteLock adjustLock = new ReentrantReadWriteLock();
public PhysicalDbGroup(String name, DbGroupConfig config, PhysicalDbInstance writeSource, PhysicalDbInstance[] readSources, int rwSplitMode) {
public PhysicalDbGroup(String name, DbGroupConfig config, PhysicalDbInstance writeDbInstances, PhysicalDbInstance[] readDbInstances, int rwSplitMode) {
this.groupName = name;
this.rwSplitMode = rwSplitMode;
this.dbGroupConfig = config;
this.writeSource = writeSource;
allSourceMap.put(writeSource.getName(), writeSource);
for (PhysicalDbInstance s : readSources) {
allSourceMap.put(s.getName(), s);
writeDbInstances.setDbGroup(this);
this.writeDbInstance = writeDbInstances;
allSourceMap.put(writeDbInstances.getName(), writeDbInstances);
for (PhysicalDbInstance readDbInstance : readDbInstances) {
readDbInstance.setDbGroup(this);
allSourceMap.put(readDbInstance.getName(), readDbInstance);
}
setDbInstanceProps();
}
public PhysicalDbGroup(PhysicalDbGroup org) {
@@ -79,18 +77,12 @@ public class PhysicalDbGroup {
for (Map.Entry<String, PhysicalDbInstance> entry : org.allSourceMap.entrySet()) {
MySQLInstance newSource = new MySQLInstance((MySQLInstance) entry.getValue());
allSourceMap.put(entry.getKey(), newSource);
if (entry.getValue() == org.writeSource) {
writeSource = newSource;
if (entry.getValue() == org.writeDbInstance) {
writeDbInstance = newSource;
}
}
}
public boolean isInitSuccess() {
return initSuccess;
}
public String[] getSchemas() {
return schemas;
}
@@ -114,8 +106,9 @@ public class PhysicalDbGroup {
PhysicalDbInstance findDbInstance(BackendConnection exitsCon) {
MySQLConnection con = (MySQLConnection) exitsCon;
PhysicalDbInstance source = allSourceMap.get(con.getPool().getName());
if (source != null && source == con.getPool()) {
PhysicalDbInstance source = con.getDbInstance();
PhysicalDbInstance target = allSourceMap.get(source.getName());
if (source == target) {
return source;
}
LOGGER.info("can't find connection in pool " + this.groupName + " con:" + exitsCon);
@@ -123,72 +116,31 @@ public class PhysicalDbGroup {
}
boolean isSlave(PhysicalDbInstance ds) {
return !(writeSource == ds);
return !(writeDbInstance == ds);
}
public PhysicalDbInstance getWriteSource() {
return writeSource;
public PhysicalDbInstance getWriteDbInstance() {
return writeDbInstance;
}
public boolean init() {
if (rwSplitMode != 0) {
for (Map.Entry<String, PhysicalDbInstance> entry : allSourceMap.entrySet()) {
if (initSource(entry.getValue())) {
initSuccess = true;
LOGGER.info(groupName + " " + entry.getKey() + " init success");
}
}
} else {
if (initSource(writeSource)) {
initSuccess = true;
LOGGER.info(groupName + " " + writeSource.getName() + " init success");
}
public void init() {
if (rwSplitMode == 0) {
writeDbInstance.init();
return;
}
if (!initSuccess) {
LOGGER.warn(groupName + " init failure");
}
return initSuccess;
}
public void doHeartbeat() {
for (PhysicalDbInstance source : allSourceMap.values()) {
if (source != null) {
source.doHeartbeat();
} else {
LOGGER.warn(groupName + " current dbInstance is null!");
}
for (Map.Entry<String, PhysicalDbInstance> entry : allSourceMap.entrySet()) {
entry.getValue().init();
}
}
public void heartbeatCheck(long ildCheckPeriod) {
for (PhysicalDbInstance ds : allSourceMap.values()) {
// only read node or all write node
// and current write node will check
if (ds != null && (ds.getHeartbeat().getStatus() == MySQLHeartbeat.OK_STATUS) &&
(ds.isReadInstance() || ds == this.getWriteSource())) {
ds.connectionHeatBeatCheck(ildCheckPeriod);
}
}
public void stop(String reason) {
stop(reason, false);
}
public void startHeartbeat() {
for (PhysicalDbInstance source : allSourceMap.values()) {
source.startHeartbeat();
}
}
public void stopHeartbeat() {
for (PhysicalDbInstance source : allSourceMap.values()) {
source.stopHeartbeat();
}
}
public void clearDbInstances(String reason) {
for (PhysicalDbInstance source : allSourceMap.values()) {
LOGGER.info("clear dbInstance of pool " + this.groupName + " ds:" + source.getConfig());
source.clearCons(reason);
source.stopHeartbeat();
public void stop(String reason, boolean closeFront) {
for (PhysicalDbInstance dbInstance : allSourceMap.values()) {
dbInstance.stop(reason, closeFront);
}
}
@@ -196,22 +148,20 @@ public class PhysicalDbGroup {
if (this.dbGroupConfig.getRwSplitMode() != RW_SPLIT_OFF) {
return allSourceMap.values();
} else {
return Collections.singletonList(writeSource);
return Collections.singletonList(writeDbInstance);
}
}
public Collection<PhysicalDbInstance> getAllDbInstances() {
return new LinkedList<>(allSourceMap.values());
}
public Map<String, PhysicalDbInstance> getAllDbInstanceMap() {
return allSourceMap;
}
void getRWSplistCon(String schema, boolean autocommit, ResponseHandler handler, Object attachment) throws Exception {
PhysicalDbInstance theNode = getRWSplistNode();
void getRWSplitCon(String schema, ResponseHandler handler, Object attachment) throws Exception {
PhysicalDbInstance theNode = getRWSplitNode();
if (theNode.isDisabled() || theNode.isFakeNode()) {
if (this.getAllActiveDbInstances().size() > 0) {
theNode = this.getAllActiveDbInstances().iterator().next();
@@ -235,10 +185,11 @@ public class PhysicalDbGroup {
AlertUtil.alert(AlarmCode.DB_INSTANCE_CAN_NOT_REACH, Alert.AlertLevel.WARN, heartbeatError, "mysql", theNode.getConfig().getId(), labels);
throw new IOException(heartbeatError);
}
theNode.getConnection(schema, autocommit, handler, attachment, false);
theNode.getConnection(schema, handler, attachment, false);
}
PhysicalDbInstance getRWSplistNode() {
PhysicalDbInstance getRWSplitNode() {
PhysicalDbInstance theNode;
ArrayList<PhysicalDbInstance> okSources;
switch (rwSplitMode) {
@@ -255,17 +206,17 @@ public class PhysicalDbGroup {
case RW_SPLIT_OFF:
default:
// return default primary dbInstance
theNode = this.getWriteSource();
theNode = this.writeDbInstance;
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("select read dbInstance " + theNode.getName() + " for dbGroup:" + this.getGroupName());
}
theNode.setReadCount();
theNode.incrementReadCount();
return theNode;
}
PhysicalDbInstance getRandomAliveReadNode() throws Exception {
PhysicalDbInstance getRandomAliveReadNode() {
if (rwSplitMode == RW_SPLIT_OFF) {
return null;
} else {
@@ -273,23 +224,22 @@ public class PhysicalDbGroup {
}
}
boolean getReadCon(String schema, boolean autocommit, ResponseHandler handler, Object attachment) throws
Exception {
boolean getReadCon(String schema, ResponseHandler handler, Object attachment) throws Exception {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("!readSources.isEmpty() " + (allSourceMap.values().size() > 1));
}
if (allSourceMap.values().size() > 1) {
PhysicalDbInstance theNode = getRandomAliveReadNode();
if (theNode != null) {
theNode.setReadCount();
theNode.getConnection(schema, autocommit, handler, attachment, false);
theNode.incrementReadCount();
theNode.getConnection(schema, handler, attachment, false);
return true;
} else {
LOGGER.info("read host is not available.");
return false;
}
} else {
LOGGER.info("read host is empty, readSources is empty.");
LOGGER.info("read host is empty, read dbInstance is empty.");
return false;
}
}
@@ -298,7 +248,7 @@ public class PhysicalDbGroup {
PhysicalDbInstance[] readSources = new PhysicalDbInstance[allSourceMap.size() - 1];
int i = 0;
for (PhysicalDbInstance source : allSourceMap.values()) {
if (source.getName().equals(writeSource.getName())) {
if (source.getName().equals(writeDbInstance.getName())) {
continue;
}
readSources[i++] = source;
@@ -306,87 +256,16 @@ public class PhysicalDbGroup {
return readSources;
}
private void setDbInstanceProps() {
for (PhysicalDbInstance ds : this.allSourceMap.values()) {
ds.setDbGroup(this);
}
}
private boolean initSource(PhysicalDbInstance ds) {
if (ds.getConfig().isDisabled() || ds.isFakeNode()) {
LOGGER.info(ds.getConfig().getInstanceName() + " is disabled or fakeNode, skipped");
return true;
}
int initSize = ds.getConfig().getMinCon();
if (initSize < this.schemas.length + 1) {
initSize = this.schemas.length + 1;
LOGGER.warn("minCon size is less than (the count of schema +1), so dble will create at least 1 conn for every schema and an empty schema conn, " +
"minCon size before:{}, now:{}", ds.getConfig().getMinCon(), initSize);
ds.getConfig().setMinCon(initSize);
}
if (ds.getConfig().getMaxCon() < initSize) {
ds.getConfig().setMaxCon(initSize);
ds.setSize(initSize);
LOGGER.warn("maxCon is less than the initSize of dbInstance:" + initSize + " change the maxCon into " + initSize);
}
LOGGER.info("init backend mysql source ,create connections total " + initSize + " for " + ds.getName());
CopyOnWriteArrayList<BackendConnection> list = new CopyOnWriteArrayList<>();
GetConnectionHandler getConHandler = new GetConnectionHandler(list, initSize);
// long start = System.currentTimeMillis();
// long timeOut = start + 5000 * 1000L;
boolean hasConnectionInPool = false;
try {
if (ds.getTotalConCount() <= 0) {
ds.initMinConnection(null, true, getConHandler, null);
} else {
LOGGER.info("connection with null schema has been created,because we tested the connection of the dbInstance at first");
getConHandler.initIncrement();
hasConnectionInPool = true;
}
} catch (Exception e) {
LOGGER.warn("init connection with schema null error", e);
}
for (int i = 0; i < initSize - 1; i++) {
try {
ds.initMinConnection(this.schemas[i % schemas.length], true, getConHandler, null);
} catch (Exception e) {
LOGGER.warn(ds.getName() + " init connection error.", e);
}
}
long timeOut = System.currentTimeMillis() + 60 * 1000;
// waiting for finish
while (!getConHandler.finished() && (System.currentTimeMillis() < timeOut)) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
/*
* hardly triggered no error is needed
*/
LOGGER.info("initError", e);
}
}
LOGGER.info("init result :" + getConHandler.getStatusInfo());
return !list.isEmpty() || hasConnectionInPool;
}
private ArrayList<PhysicalDbInstance> getAllActiveRWSources(boolean includeWriteNode, boolean filterWithDelayThreshold) {
ArrayList<PhysicalDbInstance> okSources = new ArrayList<>(allSourceMap.values().size());
if (writeSource.isAlive() && includeWriteNode) {
okSources.add(writeSource);
if (writeDbInstance.isAlive() && includeWriteNode) {
okSources.add(writeDbInstance);
}
for (PhysicalDbInstance ds : allSourceMap.values()) {
if (ds == writeSource) {
if (ds == writeDbInstance) {
continue;
}
if (ds.isAlive() && (!filterWithDelayThreshold || canSelectAsReadNode(ds))) {
if (ds.isAlive() && (!filterWithDelayThreshold || ds.canSelectAsReadNode())) {
okSources.add(ds);
}
}
@@ -401,21 +280,11 @@ public class PhysicalDbGroup {
lock.readLock().lock();
adjustLock.writeLock().lock();
try {
HaConfigManager.getInstance().updateDbGroupConf(createDisableSnapshot(this, nameList), syncWriteConf);
for (String dsName : nameList) {
PhysicalDbInstance dbInstance = allSourceMap.get(dsName);
if (dbInstance.setDisabled(true)) {
//clear old resource
dbInstance.clearCons("ha command disable dbInstance");
dbInstance.stopHeartbeat();
}
allSourceMap.get(dsName).disable("ha command disable dbInstance");
}
return this.getClusterHaJson();
} catch (Exception e) {
e.printStackTrace();
throw e;
} finally {
lock.readLock().unlock();
adjustLock.writeLock().unlock();
@@ -442,15 +311,9 @@ public class PhysicalDbGroup {
HaConfigManager.getInstance().updateDbGroupConf(createEnableSnapshot(this, nameList), syncWriteConf);
for (String dsName : nameList) {
PhysicalDbInstance dbInstance = allSourceMap.get(dsName);
if (dbInstance.setDisabled(false)) {
dbInstance.startHeartbeat();
}
allSourceMap.get(dsName).enable();
}
return this.getClusterHaJson();
} catch (Exception e) {
LOGGER.warn("enableHosts Exception ", e);
throw e;
} finally {
lock.readLock().unlock();
adjustLock.writeLock().unlock();
@@ -474,9 +337,9 @@ public class PhysicalDbGroup {
HaConfigManager.getInstance().updateDbGroupConf(createSwitchSnapshot(writeHost), syncWriteConf);
PhysicalDbInstance newWriteHost = allSourceMap.get(writeHost);
writeSource.setReadInstance(true);
writeDbInstance.setReadInstance(true);
//close all old master connection ,so that new write query would not put into the old writeHost
writeSource.clearCons("ha command switch dbInstance");
writeDbInstance.closeAllConnection("ha command switch dbInstance");
if (!newWriteHost.isDisabled()) {
GetAndSyncDbInstanceKeyVariables task = new GetAndSyncDbInstanceKeyVariables(newWriteHost, true);
KeyVariables variables = task.call();
@@ -488,7 +351,7 @@ public class PhysicalDbGroup {
}
}
newWriteHost.setReadInstance(false);
writeSource = newWriteHost;
writeDbInstance = newWriteHost;
return this.getClusterHaJson();
} catch (Exception e) {
LOGGER.warn("switchMaster Exception ", e);
@@ -502,9 +365,9 @@ public class PhysicalDbGroup {
private PhysicalDbGroup createSwitchSnapshot(String writeHost) {
PhysicalDbGroup snapshot = new PhysicalDbGroup(this);
PhysicalDbInstance newWriteHost = snapshot.allSourceMap.get(writeHost);
snapshot.writeSource.setReadInstance(true);
snapshot.writeDbInstance.setReadInstance(true);
newWriteHost.setReadInstance(false);
snapshot.writeSource = newWriteHost;
snapshot.writeDbInstance = newWriteHost;
return snapshot;
}
@@ -520,24 +383,20 @@ public class PhysicalDbGroup {
}.getType();
List<DbInstanceStatus> list = base.toBeanformJson(jsonObj.get(JSON_LIST).toString(), parseType);
for (DbInstanceStatus status : list) {
PhysicalDbInstance phys = allSourceMap.get(status.getName());
if (phys != null) {
if (phys.setDisabled(status.isDisable())) {
if (status.isDisable()) {
//clear old resource
phys.clearCons("ha command disable dbInstance");
phys.stopHeartbeat();
} else {
//change dbInstance from disable to enable ,start heartbeat
phys.startHeartbeat();
}
PhysicalDbInstance dbInstance = allSourceMap.get(status.getName());
if (dbInstance != null) {
if (status.isDisable()) {
//clear old resource
dbInstance.disable("ha command disable dbInstance");
} else {
//change dbInstance from disable to enable ,start heartbeat
dbInstance.enable();
}
if (status.isPrimary() &&
phys != writeSource) {
writeSource.setReadInstance(true);
writeSource.clearCons("ha command switch dbInstance");
phys.setReadInstance(false);
writeSource = phys;
if (status.isPrimary() && dbInstance != writeDbInstance) {
writeDbInstance.setReadInstance(true);
writeDbInstance.closeAllConnection("ha command switch dbInstance");
dbInstance.setReadInstance(false);
writeDbInstance = dbInstance;
}
} else {
LOGGER.warn("Can match dbInstance " + status.getName() + ".Check for the config file please");
@@ -591,7 +450,7 @@ public class PhysicalDbGroup {
public PhysicalDbInstance randomSelect(ArrayList<PhysicalDbInstance> okSources, boolean useWriteWhenEmpty) {
if (okSources.isEmpty()) {
if (useWriteWhenEmpty) {
return this.getWriteSource();
return writeDbInstance;
} else {
return null;
}
@@ -621,27 +480,14 @@ public class PhysicalDbGroup {
}
}
private boolean checkSlaveSynStatus() {
return (dbGroupConfig.getDelayThreshold() != -1) &&
(dbGroupConfig.isShowSlaveSql());
}
private boolean canSelectAsReadNode(PhysicalDbInstance theSource) {
Integer slaveBehindMaster = theSource.getHeartbeat().getSlaveBehindMaster();
int dbSynStatus = theSource.getHeartbeat().getDbSynStatus();
if (slaveBehindMaster == null || dbSynStatus == MySQLHeartbeat.DB_SYN_ERROR) {
return false;
}
boolean isSync = dbSynStatus == MySQLHeartbeat.DB_SYN_NORMAL;
boolean isNotDelay = slaveBehindMaster < this.dbGroupConfig.getDelayThreshold();
return isSync && isNotDelay;
}
boolean equalsBaseInfo(PhysicalDbGroup pool) {
return pool.getDbGroupConfig().getName().equals(this.dbGroupConfig.getName()) &&
pool.getDbGroupConfig().getHearbeatSQL().equals(this.dbGroupConfig.getHearbeatSQL()) &&
pool.getDbGroupConfig().getHeartbeatSQL().equals(this.dbGroupConfig.getHeartbeatSQL()) &&
pool.getDbGroupConfig().getHeartbeatTimeout() == this.dbGroupConfig.getHeartbeatTimeout() &&
pool.getDbGroupConfig().getErrorRetryCount() == this.dbGroupConfig.getErrorRetryCount() &&
pool.getDbGroupConfig().getRwSplitMode() == this.dbGroupConfig.getRwSplitMode() &&
@@ -19,14 +19,8 @@ public class PhysicalDbGroupDiff {
public static final String CHANGE_TYPE_NO = "NO_CHANGE";
private String changeType = null;
private PhysicalDbGroup orgPool = null;
private PhysicalDbGroup newPool = null;
//private Set<BaseInfoDiff> baseDiff = null;
private PhysicalDbGroup orgPool;
private PhysicalDbGroup newPool;
public PhysicalDbGroupDiff(PhysicalDbGroup newPool, PhysicalDbGroup orgPool) {
this.orgPool = orgPool;
@@ -49,15 +43,14 @@ public class PhysicalDbGroupDiff {
}
}
private Set<PhysicalDbInstanceDiff> createHostChangeSet(PhysicalDbGroup newDbGroup, PhysicalDbGroup orgDbGroup) {
Set<PhysicalDbInstanceDiff> hostDiff = new HashSet<>();
//add or not change
PhysicalDbInstance newWriteHost = newDbGroup.getWriteSource();
PhysicalDbInstance newWriteHost = newDbGroup.getWriteDbInstance();
PhysicalDbInstance[] newReadHost = newDbGroup.getReadSources();
PhysicalDbInstance oldHost = orgDbGroup.getWriteSource();
PhysicalDbInstance oldHost = orgDbGroup.getWriteDbInstance();
PhysicalDbInstance[] oldRHost = orgDbGroup.getReadSources();
boolean sameFlag = false;
@@ -78,7 +71,6 @@ public class PhysicalDbGroupDiff {
return hostDiff;
}
private boolean calculateForDbInstances(PhysicalDbInstance[] olds, PhysicalDbInstance[] news) {
if (olds != null) {
for (int k = 0; k < olds.length; k++) {
@@ -92,7 +84,6 @@ public class PhysicalDbGroupDiff {
return true;
}
public String getChangeType() {
return changeType;
}
@@ -6,79 +6,193 @@
package com.actiontech.dble.backend.datasource;
import com.actiontech.dble.DbleServer;
import com.actiontech.dble.alarm.AlarmCode;
import com.actiontech.dble.alarm.Alert;
import com.actiontech.dble.alarm.AlertUtil;
import com.actiontech.dble.alarm.ToResolveContainer;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.ConMap;
import com.actiontech.dble.backend.ConQueue;
import com.actiontech.dble.backend.heartbeat.MySQLHeartbeat;
import com.actiontech.dble.backend.mysql.nio.handler.ConnectionHeartBeatHandler;
import com.actiontech.dble.backend.mysql.nio.handler.DelegateResponseHandler;
import com.actiontech.dble.backend.mysql.nio.handler.NewConnectionRespHandler;
import com.actiontech.dble.backend.mysql.nio.handler.ResponseHandler;
import com.actiontech.dble.backend.pool.ConnectionPool;
import com.actiontech.dble.backend.pool.PooledEntry;
import com.actiontech.dble.config.model.SystemConfig;
import com.actiontech.dble.config.model.db.DbGroupConfig;
import com.actiontech.dble.config.model.db.DbInstanceConfig;
import com.actiontech.dble.singleton.Scheduler;
import com.actiontech.dble.util.StringUtil;
import com.actiontech.dble.util.TimeUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
public abstract class PhysicalDbInstance {
private static final Logger LOGGER = LoggerFactory.getLogger(PhysicalDbInstance.class);

// --- identity & configuration (fixed at construction) ---
private final String name;
private final DbInstanceConfig config;
private final DbGroupConfig dbGroupConfig;
private PhysicalDbGroup dbGroup;

// snapshot of maxCon; kept in sync with config via setSize()
private int size;

// legacy per-schema bookkeeping, superseded by connectionPool but still used
// by the older getConnection/returnCon paths below
private final ConMap conMap = new ConMap();
private final AtomicInteger connectionCount;

// --- runtime state flags ---
private volatile boolean readInstance;
private final AtomicBoolean disabled;
private volatile boolean autocommitSynced = false;
private volatile boolean isolationSynced = false;
private volatile boolean testConnSuccess = false;
private volatile boolean readOnly = false;
private volatile boolean fakeNode = false;
// backend version string; may be null until set -- TODO confirm where it is probed
private String dsVersion;

// routing statistics; LongAdder scales better than AtomicLong under write contention
private final LongAdder readCount = new LongAdder();
private final LongAdder writeCount = new LongAdder();

// guards init() so initialization runs exactly once
private final AtomicBoolean isInitial = new AtomicBoolean(false);

// connection pool
private ConnectionPool connectionPool;

// protected so concrete subclasses can reach the heartbeat directly
protected MySQLHeartbeat heartbeat;
// heartbeats are suppressed while now < heartbeatRecoveryTime
private volatile long heartbeatRecoveryTime;
/**
 * Builds an instance from its configuration.
 *
 * @param config        per-instance configuration (url, user, pool sizes, ...)
 * @param dbGroupConfig configuration of the group this instance belongs to
 * @param isReadNode    true when this instance only serves reads
 */
public PhysicalDbInstance(DbInstanceConfig config, DbGroupConfig dbGroupConfig, boolean isReadNode) {
    this.size = config.getMaxCon();
    this.config = config;
    this.name = config.getInstanceName();
    this.dbGroupConfig = dbGroupConfig;
    // single heartbeat assignment; the duplicate createHeartBeat() call was merge residue
    this.heartbeat = new MySQLHeartbeat(this);
    this.readInstance = isReadNode;
    this.connectionCount = new AtomicInteger();
    this.disabled = new AtomicBoolean(config.isDisabled());
    this.connectionPool = new ConnectionPool(config, this);
}
/**
 * Copy constructor (presumably used on config reload -- TODO confirm caller).
 * Copies identity/config and the disabled flag value, but NOT the heartbeat
 * or connection pool; note the connection counter is SHARED with the original.
 */
public PhysicalDbInstance(PhysicalDbInstance org) {
    this.size = org.size;
    this.config = org.config;
    this.name = org.name;
    this.dbGroupConfig = org.dbGroupConfig;
    this.readInstance = org.readInstance;
    // shares the counter instance with org -- NOTE(review): verify this is intended
    this.connectionCount = org.connectionCount;
    this.disabled = new AtomicBoolean(org.disabled.get());
}
/**
 * One-shot initialization: clamps min/max pool sizes so every schema (plus the
 * empty schema) can hold at least one connection, then starts the pool evictor
 * and the heartbeat. Skipped for disabled/fake instances and on repeat calls.
 */
public void init() {
    if (disabled.get() || fakeNode) {
        LOGGER.info("{} is disabled or a fakeNode, skip initialization", name);
        return;
    }
    // only the first caller wins; later calls are no-ops
    if (!isInitial.compareAndSet(false, true)) {
        LOGGER.info("{} has been initialized, skip", name);
        return;
    }
    // renamed from "size" to avoid shadowing the field of the same name
    int minCon = config.getMinCon();
    String[] physicalSchemas = dbGroup.getSchemas();
    // one connection per schema plus one for the empty schema
    int initSize = physicalSchemas.length + 1;
    if (minCon < initSize) {
        LOGGER.warn("For db instance[{}], minIdle is less than (the count of schema +1), so dble will create at least 1 conn for every schema and empty schema, " +
                "minCon size before:{}, now:{}", name, minCon, initSize);
        config.setMinCon(initSize);
    }
    int maxCon = config.getMaxCon();
    if (maxCon < initSize) {
        LOGGER.warn("For db instance[{}], maxTotal[{}] is less than the initSize of dataHost,change the maxCon into {}", name, maxCon, initSize);
        config.setMaxCon(initSize);
    }
    this.connectionPool.startEvictor();
    startHeartbeat();
}
/** Creates a brand-new backend connection for {@code schema}, bypassing the pool. */
public void createConnectionSkipPool(String schema, ResponseHandler handler) {
    connectionPool.newConnection(schema, handler);
}
/**
 * Asynchronously borrows a pooled connection and delivers it (or the failure)
 * to {@code handler}. A mustWrite request is rejected up front on a read instance.
 *
 * @throws IOException when the primary instance has been switched away
 */
public void getConnection(String schema, final ResponseHandler handler,
                          final Object attachment, boolean mustWrite) throws IOException {
    if (mustWrite && readInstance) {
        throw new IOException("primary dbInstance switched");
    }
    // hand the (possibly blocking) borrow off to the complex-query executor
    DbleServer.getInstance().getComplexQueryExecutor().execute(new Runnable() {
        @Override
        public void run() {
            BackendConnection acquired;
            try {
                acquired = getConnection(schema, config.getPoolConfig().getConnectionTimeout());
            } catch (IOException e) {
                // borrow failed or timed out; nothing was acquired
                handler.connectionError(e, null);
                return;
            }
            acquired.setAttachment(attachment);
            handler.connectionAcquired(acquired);
        }
    });
}
// execute in complex executor guard by business executor
/**
 * Synchronously borrows a pooled connection and attaches {@code attachment}.
 *
 * @throws IOException when the pool times out or the instance is disabled
 */
public BackendConnection getConnection(String schema, final Object attachment) throws IOException {
    BackendConnection con = getConnection(schema, config.getPoolConfig().getConnectionTimeout());
    con.setAttachment(attachment);
    return con;
}
/**
 * Borrows a connection from the pool, retrying within {@code hardTimeout} ms.
 * With testOnBorrow enabled, each candidate is pinged first; dead candidates
 * are discarded and the borrow is retried with the remaining time budget.
 *
 * @param schema      schema the caller intends to use (set on the connection if different)
 * @param hardTimeout total acquisition budget in milliseconds
 * @throws IOException on disabled instance, uninitialized pool, timeout, or interrupt
 */
public BackendConnection getConnection(final String schema, final long hardTimeout) throws IOException {
    if (this.connectionPool == null) {
        // fixed typo: "initalized" -> "initialized"
        throw new IOException("connection pool isn't initialized");
    }
    if (disabled.get()) {
        throw new IOException("the dbInstance[" + name + "] is disabled.");
    }
    final long startTime = System.currentTimeMillis();
    try {
        long timeout = hardTimeout;
        do {
            final BackendConnection conn = this.connectionPool.borrow(schema, timeout, MILLISECONDS);
            if (conn == null) {
                break; // We timed out... break and throw exception
            }
            final long now = System.currentTimeMillis();
            if (config.getPoolConfig().getTestOnBorrow()) {
                // validate the borrowed connection before handing it out
                ConnectionHeartBeatHandler heartBeatHandler = new ConnectionHeartBeatHandler(conn, true, connectionPool);
                boolean isFinished = heartBeatHandler.ping(config.getPoolConfig().getConnectionHeartbeatTimeout());
                if (!isFinished) {
                    conn.close("connection test fail after create"); // Throw away the dead connection (passed max age or failed alive test)
                    // shrink the budget by the time already spent and retry
                    timeout = hardTimeout - (now - startTime);
                    continue;
                }
            }
            if (!StringUtil.equals(conn.getSchema(), schema)) {
                // need do sharding syn in before sql send
                conn.setSchema(schema);
            }
            return conn;
        } while (timeout > 0L);
    } catch (InterruptedException e) {
        throw new IOException(name + " - Interrupted during connection acquisition", e);
    }
    throw new IOException(name + " - Connection is not available, request timed out after " + (System.currentTimeMillis() - startTime) + "ms.");
}
/** Returns {@code connection} to the pool for reuse. */
public void release(BackendConnection connection) {
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("release {}", connection);
    }
    connectionPool.release(connection);
}

/** Permanently removes {@code connection} from the pool. */
public void close(BackendConnection connection) {
    connectionPool.close(connection);
}

// set by the initial connectivity test; read by isAlive()
public void setTestConnSuccess(boolean testConnSuccess) {
    this.testConnSuccess = testConnSuccess;
}
@@ -95,20 +209,27 @@ public abstract class PhysicalDbInstance {
this.readOnly = readOnly;
}
public long getReadCount() {
return readCount.get();
public long getHeartbeatRecoveryTime() {
return heartbeatRecoveryTime;
}
void setReadCount() {
readCount.addAndGet(1);
public void setHeartbeatRecoveryTime(long heartbeatRecoveryTime) {
this.heartbeatRecoveryTime = heartbeatRecoveryTime;
}
public long getWriteCount() {
return writeCount.get();
public long getCount(boolean isRead) {
if (isRead) {
return readCount.longValue();
}
return writeCount.longValue();
}
void setWriteCount() {
writeCount.addAndGet(1);
public void incrementReadCount() {
readCount.increment();
}
public void incrementWriteCount() {
writeCount.increment();
}
public DbGroupConfig getDbGroupConfig() {
@@ -132,10 +253,6 @@ public abstract class PhysicalDbInstance {
this.readInstance = value;
}
/** @return the cached maxCon value for this instance */
public int getSize() {
    return size;
}

public void setDbGroup(PhysicalDbGroup dbGroup) {
    this.dbGroup = dbGroup;
}
@@ -144,8 +261,6 @@ public abstract class PhysicalDbInstance {
return dbGroup;
}
// factory hook for subclass-specific heartbeat implementations
public abstract MySQLHeartbeat createHeartBeat();

public boolean isAutocommitSynced() {
    return autocommitSynced;
}
@@ -162,10 +277,6 @@ public abstract class PhysicalDbInstance {
this.isolationSynced = isolationSynced;
}
// updates the cached maxCon snapshot (see getSize)
public void setSize(int size) {
    this.size = size;
}

/** @return the recorded database version string; may be null */
public String getDsVersion() {
    return dsVersion;
}
@@ -178,46 +289,10 @@ public abstract class PhysicalDbInstance {
return name;
}
/** @return total execute count summed over every schema queue (legacy conMap path) */
public long getExecuteCount() {
    long executeCount = 0;
    for (ConQueue queue : conMap.getAllConQueue()) {
        executeCount += queue.getExecuteCount();
    }
    return executeCount;
}

/** @return execute count for one schema's queue, 0 when the schema has no queue */
public long getExecuteCountForSchema(String schema) {
    ConQueue queue = conMap.getSchemaConQueue(schema);
    return queue == null ? 0 : queue.getExecuteCount();
}

public int getActiveCountForSchema(String schema) {
    return conMap.getActiveCountForSchema(schema, this);
}

/** @return idle (auto-commit + manual-commit) connections queued for {@code schema} */
public int getIdleCountForSchema(String schema) {
    ConQueue queue = conMap.getSchemaConQueue(schema);
    if (queue == null) {
        return 0;
    } else {
        return queue.getAutoCommitCons().size() + queue.getManCommitCons().size();
    }
}

public MySQLHeartbeat getHeartbeat() {
    return heartbeat;
}

/** @return idle connections summed over every schema queue */
public int getIdleCount() {
    int total = 0;
    for (ConQueue queue : conMap.getAllConQueue()) {
        total += queue.getAutoCommitCons().size() + queue.getManCommitCons().size();
    }
    return total;
}
public boolean isSalveOrRead() {
if (dbGroup != null) {
return dbGroup.isSlave(this) || this.readInstance;
@@ -226,414 +301,127 @@ public abstract class PhysicalDbInstance {
}
}
/**
 * Periodic maintenance pass (legacy conMap pool): pings connections that have
 * been idle longer than {@code conHeartBeatPeriod}, then rebalances the idle
 * pool toward minCon -- creating a third of the deficit per pass, or closing
 * the surplus above minCon.
 */
void connectionHeatBeatCheck(long conHeartBeatPeriod) {
    // connections last used before this timestamp get a liveness ping
    long hearBeatTime = TimeUtil.currentTimeMillis() - conHeartBeatPeriod;
    for (ConQueue queue : conMap.getAllConQueue()) {
        longIdleHeartBeat(queue.getAutoCommitCons(), hearBeatTime);
        longIdleHeartBeat(queue.getManCommitCons(), hearBeatTime);
    }
    //the following is about the idle connection number control
    int idleCons = getIdleCount();
    int totalCount = this.getTotalConCount();
    // grow gradually: one third of the shortfall per pass
    int createCount = (config.getMinCon() - idleCons) / 3;
    // create if idle too little
    if ((createCount > 0) && totalCount < size) {
        createByIdleLittle(idleCons, createCount);
    } else if (idleCons > config.getMinCon()) {
        closeByIdleMany(idleCons - config.getMinCon(), idleCons);
    }
}

/**
 * check if the connection is not be used for a while & do connection heart beat
 * <p>
 * Polls from the head (oldest) of the idle queue; queue order means once a
 * recently-used connection is found, the rest are newer too, so the scan stops
 * and that connection is put back.
 *
 * @param linkedQueue  idle-connection queue to scan
 * @param hearBeatTime cutoff timestamp; older connections get pinged
 */
private void longIdleHeartBeat(ConcurrentLinkedQueue<BackendConnection> linkedQueue, long hearBeatTime) {
    long length = linkedQueue.size();
    for (int i = 0; i < length; i++) {
        BackendConnection con = linkedQueue.poll();
        if (con == null) {
            break;
        } else if (con.isClosed()) {
            // drop closed connections silently (already polled off the queue)
            continue;
        } else if (con.getLastTime() < hearBeatTime) { //if the connection is idle for a long time
            // mark borrowed so nobody else grabs it during the ping
            con.setBorrowed(true);
            new ConnectionHeartBeatHandler().doHeartBeat(con);
        } else {
            // newer than the cutoff: put it back and stop scanning
            linkedQueue.offer(con);
            break;
        }
    }
}
/**
 * Closes {@code idleCloseCount} surplus idle connections, spread across the
 * schema queues proportionally to each queue's share of the idle total.
 *
 * @param idleCloseCount how many idle connections to close
 * @param idleCons       current total idle connections (proportioning base)
 */
private void closeByIdleMany(int idleCloseCount, int idleCons) {
    // fixed log typos: "ilde" -> "idle", "datasouce" -> "datasource"
    LOGGER.info("too many idle cons, close some for datasource " + name + " want close :" + idleCloseCount + " total idle " + idleCons);
    List<BackendConnection> readyCloseCons = new ArrayList<BackendConnection>(idleCloseCount);
    for (ConQueue queue : conMap.getAllConQueue()) {
        // each queue contributes in proportion to its share of the idle pool
        int closeNumber = (queue.getManCommitCons().size() + queue.getAutoCommitCons().size()) * idleCloseCount / idleCons;
        readyCloseCons.addAll(queue.getIdleConsToClose(closeNumber));
    }
    for (BackendConnection idleCon : readyCloseCons) {
        if (idleCon.isBorrowed()) {
            LOGGER.info("find idle con is using " + idleCon);
        }
        idleCon.close("too many idle con");
    }
}
/**
 * Creates up to {@code createCount} new connections (round-robin over the
 * group's schemas) because the idle pool fell below minCon. Each successful
 * create also resolves a previously raised CREATE_CONN_FAIL alert; failures
 * raise/record the alert. Stops early when disabled or maxCon is reached.
 */
private void createByIdleLittle(int idleCons, int createCount) {
    LOGGER.info("create connections ,because idle connection not enough ,cur is " +
            idleCons + ", minCon is " + this.getConfig().getMinCon() + " for " + name);
    final String[] schemas = dbGroup.getSchemas();
    for (int i = 0; i < createCount; i++) {
        NewConnectionRespHandler simpleHandler = new NewConnectionRespHandler();
        try {
            // createNewCount() reserves a slot in the connection budget
            if (!disabled.get() && this.createNewCount()) {
                // creat new connection
                this.createNewConnection(simpleHandler, null, schemas[i % schemas.length], false);
                // getBackConn() blocks until the connection is built; release it back as idle
                simpleHandler.getBackConn().release();
            } else {
                break;
            }
            if (ToResolveContainer.CREATE_CONN_FAIL.contains(this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName())) {
                Map<String, String> labels = AlertUtil.genSingleLabel("dbInstance", this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
                AlertUtil.alertResolve(AlarmCode.CREATE_CONN_FAIL, Alert.AlertLevel.WARN, "mysql", this.getConfig().getId(),
                        labels, ToResolveContainer.CREATE_CONN_FAIL, this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
            }
        } catch (IOException e) {
            String errMsg = "create connection err:";
            LOGGER.warn(errMsg, e);
            Map<String, String> labels = AlertUtil.genSingleLabel("dbInstance", this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
            AlertUtil.alert(AlarmCode.CREATE_CONN_FAIL, Alert.AlertLevel.WARN, errMsg + e.getMessage(), "mysql", this.getConfig().getId(), labels);
            ToResolveContainer.CREATE_CONN_FAIL.add(this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
        }
    }
}

/** @return current total connection count tracked by the legacy counter */
public int getTotalConCount() {
    return this.connectionCount.get();
}
/**
 * Optimistically reserves one slot in the connection budget: increments the
 * counter first and rolls back when that would exceed maxCon.
 *
 * @return true when a new connection may be created
 */
private boolean createNewCount() {
    final int afterIncrement = this.connectionCount.incrementAndGet();
    final boolean withinLimit = afterIncrement <= size;
    if (!withinLimit) {
        // overshoot: undo the reservation
        this.connectionCount.decrementAndGet();
    }
    return withinLimit;
}
/** Closes every connection tracked by the legacy conMap, citing {@code reason}. */
public void clearCons(String reason) {
    this.conMap.clearConnections(reason, this);
}

// NOTE(review): a second startHeartbeat() with the same signature appears later
// in this class (scheduler-based) -- merge residue; only one can survive.
void startHeartbeat() {
    if (!this.isDisabled() && !this.isFakeNode()) {
        heartbeat.start();
        heartbeat.heartbeat();
    }
}

void stopHeartbeat() {
    heartbeat.stop();
}

/** Fires one heartbeat unless still inside the recovery quiet period or disabled/fake. */
void doHeartbeat() {
    if (TimeUtil.currentTimeMillis() < heartbeatRecoveryTime) {
        return;
    }
    if (!this.isDisabled() && !this.isFakeNode()) {
        heartbeat.heartbeat();
    }
}
/**
 * Marks {@code conn} borrowed, points it at {@code schema} (schema sync happens
 * lazily before the next SQL is sent), bumps the schema's execute counter and
 * refreshes the last-used time so the eviction job leaves it alone.
 */
private BackendConnection takeCon(BackendConnection conn, String schema) {
    conn.setBorrowed(true);
    if (!StringUtil.equals(conn.getSchema(), schema)) {
        // need do sharding syn in before sql send
        conn.setSchema(schema);
    }
    if (schema != null) {
        ConQueue queue = conMap.createAndGetSchemaConQueue(schema);
        queue.incExecuteCount();
    }
    // update last time, the schedule job will not close it
    conn.setLastTime(System.currentTimeMillis());
    return conn;
}

/**
 * Hand-out variant: resolves any standing CREATE_CONN_FAIL alert for this
 * instance, then borrows the connection and delivers it to {@code handler}.
 */
private void takeCon(BackendConnection conn,
                     final ResponseHandler handler, final Object attachment,
                     String schema) {
    if (ToResolveContainer.CREATE_CONN_FAIL.contains(this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName())) {
        Map<String, String> labels = AlertUtil.genSingleLabel("dbInstance", this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
        AlertUtil.alertResolve(AlarmCode.CREATE_CONN_FAIL, Alert.AlertLevel.WARN, "mysql", this.getConfig().getId(), labels,
                ToResolveContainer.CREATE_CONN_FAIL, this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
    }
    takeCon(conn, schema);
    conn.setAttachment(attachment);
    handler.connectionAcquired(conn);
}
/**
 * Creates a new backend connection asynchronously on the complex-query
 * executor. Success is re-checked against the disabled flag and (for mustWrite
 * requests) the read-instance flag before the connection is handed out;
 * failures raise a CREATE_CONN_FAIL alert before being forwarded.
 */
private void createNewConnection(final ResponseHandler handler, final Object attachment,
                                 final String schema, final boolean mustWrite) {
    // async create connection
    DbleServer.getInstance().getComplexQueryExecutor().execute(new Runnable() {
        public void run() {
            try {
                createNewConnection(new DelegateResponseHandler(handler) {
                    @Override
                    public void connectionError(Throwable e, BackendConnection conn) {
                        // record the failure as an alert, then forward to the caller
                        Map<String, String> labels = AlertUtil.genSingleLabel("dbInstance", dbGroupConfig.getName() + "-" + config.getInstanceName());
                        AlertUtil.alert(AlarmCode.CREATE_CONN_FAIL, Alert.AlertLevel.WARN, "createNewConn Error" + e.getMessage(), "mysql", config.getId(), labels);
                        ToResolveContainer.CREATE_CONN_FAIL.add(dbGroupConfig.getName() + "-" + config.getInstanceName());
                        handler.connectionError(e, conn);
                    }

                    @Override
                    public void connectionAcquired(BackendConnection conn) {
                        // state may have changed while the connection was being built
                        if (disabled.get()) {
                            handler.connectionError(new IOException("dbInstance disabled"), conn);
                            conn.close("disabled dbInstance");
                        } else if (mustWrite && isReadInstance()) {
                            handler.connectionError(new IOException("primary dbInstance switched"), conn);
                        } else {
                            takeCon(conn, handler, attachment, schema);
                        }
                    }
                }, schema);
            } catch (IOException e) {
                handler.connectionError(e, null);
            }
        }
    });
}
protected abstract void createNewConnection(ResponseHandler handler, String schema) throws IOException;
/**
 * Creates a connection when the pool has no idle one. Honors the maxCon budget
 * unless {@code forceCreate} is set (important tasks may exceed it); raises a
 * REACH_MAX_CON alert when the budget blocks the request, and resolves a
 * standing one when creation proceeds.
 *
 * @throws IOException when the instance is disabled or maxCon is reached
 */
public void getNewConnection(String schema, final ResponseHandler handler,
                             final Object attachment, boolean mustWrite, boolean forceCreate) throws IOException {
    if (disabled.get()) {
        throw new IOException("the dbInstance is disabled [" + this.name + "]");
    } else if (!this.createNewCount()) {
        if (forceCreate) {
            // deliberately exceed maxCon for this request
            this.connectionCount.incrementAndGet();
            LOGGER.warn("connection pool [" + dbGroupConfig.getName() + "." + this.name + "] has reached maxCon, but we still try to create new connection for important task");
            createNewConnection(handler, attachment, schema, mustWrite);
        } else {
            String maxConError = "the max active Connections size can not be max than maxCon for dbInstance[" + this.getDbGroupConfig().getName() + "." + this.getName() + "]";
            LOGGER.warn(maxConError);
            Map<String, String> labels = AlertUtil.genSingleLabel("dbInstance", this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
            AlertUtil.alert(AlarmCode.REACH_MAX_CON, Alert.AlertLevel.WARN, maxConError, "dble", this.getConfig().getId(), labels);
            ToResolveContainer.REACH_MAX_CON.add(this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
            throw new IOException(maxConError);
        }
    } else { // create connection
        if (ToResolveContainer.REACH_MAX_CON.contains(this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName())) {
            Map<String, String> labels = AlertUtil.genSingleLabel("dbInstance", this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
            AlertUtil.alertResolve(AlarmCode.REACH_MAX_CON, Alert.AlertLevel.WARN, "dble", this.getConfig().getId(), labels,
                    ToResolveContainer.REACH_MAX_CON, this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
        }
        LOGGER.info("no idle connection in pool [" + dbGroupConfig.getName() + "." + this.name + "],create new connection for schema: " + schema);
        createNewConnection(handler, attachment, schema, mustWrite);
    }
}
/**
 * Legacy async acquire: reuse an idle connection from the conMap when one
 * matches schema/autocommit, otherwise fall through to creating a new one.
 */
public void getConnection(String schema, boolean autocommit, final ResponseHandler handler,
                          final Object attachment, boolean mustWrite) throws IOException {
    BackendConnection con = this.conMap.tryTakeCon(schema, autocommit);
    if (con != null) {
        takeCon(con, handler, attachment, schema);
    } else {
        getNewConnection(schema, handler, attachment, mustWrite, false);
    }
}

/**
 * Legacy sync acquire: reuse an idle conMap connection or build a new one,
 * enforcing the maxCon budget with REACH_MAX_CON alerting.
 *
 * @throws IOException when disabled, maxCon reached, or creation fails
 */
public BackendConnection getConnection(String schema, boolean autocommit, final Object attachment) throws IOException {
    BackendConnection con = this.conMap.tryTakeCon(schema, autocommit);
    if (con == null) {
        if (disabled.get()) {
            throw new IOException("the dbInstance is disabled [" + this.name + "]");
        } else if (!this.createNewCount()) {
            String maxConError = "the max active Connections size can not be max than maxCon for dbInstance[" + this.getDbGroupConfig().getName() + "." + this.getName() + "]";
            LOGGER.warn(maxConError);
            Map<String, String> labels = AlertUtil.genSingleLabel("dbInstance", this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
            AlertUtil.alert(AlarmCode.REACH_MAX_CON, Alert.AlertLevel.WARN, maxConError, "dble", this.getConfig().getId(), labels);
            ToResolveContainer.REACH_MAX_CON.add(this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
            throw new IOException(maxConError);
        } else { // create connection
            if (ToResolveContainer.REACH_MAX_CON.contains(this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName())) {
                Map<String, String> labels = AlertUtil.genSingleLabel("dbInstance", this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
                AlertUtil.alertResolve(AlarmCode.REACH_MAX_CON, Alert.AlertLevel.WARN, "dble", this.getConfig().getId(), labels,
                        ToResolveContainer.REACH_MAX_CON, this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
            }
            LOGGER.info("no ilde connection in pool,create new connection for " + this.name + " of schema " + schema);
            con = createNewBackendConnection(schema);
        }
    }
    con = takeCon(con, schema);
    con.setAttachment(attachment);
    return con;
}
/**
 * Builds a dedicated connection for heartbeat use. When maxCon is already
 * reached, an idle connection is evicted (or a warning logged when none is
 * idle) so the heartbeat can still get one. Returns null when disabled.
 */
public BackendConnection getConnectionForHeartbeat(String schema) throws IOException {
    BackendConnection con;
    if (!disabled.get()) {
        if (!this.createNewCount()) {
            // budget exhausted: make room by closing one idle connection
            ConQueue queue = conMap.getSchemaConQueue(null);
            BackendConnection conIdle = queue.takeIdleCon(true);
            this.connectionCount.incrementAndGet();
            if (conIdle != null) {
                conIdle.close("create new connection for heartbeat, so close an old idle con");
            } else {
                LOGGER.warn("now connection in pool and reached maxCon, but still try to create new connection for heartbeat ");
            }
            con = createNewBackendConnection(schema);
        } else { // create connection
            LOGGER.info("create new connection for heartbeat ");
            con = createNewBackendConnection(schema);
        }
    } else {
        return null;
    }
    con = takeCon(con, schema);
    con.setAttachment(null);
    return con;
}
/**
 * Synchronously builds one backend connection for {@code schema}. Success
 * resolves any standing CREATE_CONN_FAIL alert; failure raises one and rethrows.
 */
private BackendConnection createNewBackendConnection(String schema) throws IOException {
    BackendConnection con;
    try {
        NewConnectionRespHandler simpleHandler = new NewConnectionRespHandler();
        this.createNewConnection(simpleHandler, schema);
        // blocks until the connection handshake completes
        con = simpleHandler.getBackConn();
        if (ToResolveContainer.CREATE_CONN_FAIL.contains(this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName())) {
            Map<String, String> labels = AlertUtil.genSingleLabel("dbInstance", this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
            AlertUtil.alertResolve(AlarmCode.CREATE_CONN_FAIL, Alert.AlertLevel.WARN, "mysql", this.getConfig().getId(), labels,
                    ToResolveContainer.CREATE_CONN_FAIL, this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
        }
    } catch (IOException e) {
        Map<String, String> labels = AlertUtil.genSingleLabel("dbInstance", this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
        AlertUtil.alert(AlarmCode.CREATE_CONN_FAIL, Alert.AlertLevel.WARN, "createNewConn Error" + e.getMessage(), "mysql", this.getConfig().getId(), labels);
        ToResolveContainer.CREATE_CONN_FAIL.add(this.getDbGroupConfig().getName() + "-" + this.getConfig().getInstanceName());
        throw e;
    }
    return con;
}
/**
 * Creates one warm-up connection for {@code schema} if the connection budget
 * still has room; silently does nothing when maxCon is already reached.
 */
void initMinConnection(String schema, boolean autocommit, final ResponseHandler handler,
                       final Object attachment) throws IOException {
    LOGGER.info("create new connection for " +
            this.name + " of schema " + schema);
    if (this.createNewCount()) {
        createNewConnection(handler, attachment, schema, false);
    }
}
/**
 * Puts a finished connection back into its schema's idle queue (auto-commit or
 * manual-commit, matching the connection's mode); closes it when the queue
 * refuses it. Already-closed connections are dropped silently.
 */
private void returnCon(BackendConnection c) {
    if (c.isClosed()) {
        return;
    }
    // clear per-borrow state before the connection becomes visible to others
    c.setAttachment(null);
    c.setBorrowed(false);
    c.setLastTime(TimeUtil.currentTimeMillis());

    ConQueue queue = this.conMap.createAndGetSchemaConQueue(c.getSchema());
    boolean accepted = c.isAutocommit() ?
            queue.getAutoCommitCons().offer(c) :
            queue.getManCommitCons().offer(c);
    if (!accepted) {
        String errMsg = "can't return to pool ,so close con " + c;
        LOGGER.info(errMsg);
        c.close(errMsg);
    }
}
/** Legacy release path: hands the connection back to its schema queue. */
public void releaseChannel(BackendConnection c) {
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("release channel " + c);
    }
    // release connection
    returnCon(c);
}

/** Bookkeeping callback when a backend connection dies. */
public void connectionClosed(BackendConnection conn) {
    //only used in mysqlConneciton synchronized function
    this.connectionCount.decrementAndGet();
    ConQueue queue = this.conMap.getSchemaConQueue(conn.getSchema());
    if (queue != null) {
        queue.removeCon(conn);
    }
}
/**
 * used for init or reload
 *
 * @return true when a test connection could be established
 * @throws IOException when the connectivity check fails hard
 */
public abstract boolean testConnection() throws IOException;

/** @return timestamp (ms) before which heartbeats are suppressed */
public long getHeartbeatRecoveryTime() {
    return heartbeatRecoveryTime;
}

public void setHeartbeatRecoveryTime(long heartbeatRecoveryTime) {
    this.heartbeatRecoveryTime = heartbeatRecoveryTime;
}

public DbInstanceConfig getConfig() {
    return config;
}
/**
 * Whether this instance is healthy enough to serve read traffic: replication
 * lag is known, replication is in the normal state, and the lag is below the
 * group's configured delay threshold.
 */
boolean canSelectAsReadNode() {
    final Integer behind = heartbeat.getSlaveBehindMaster();
    final int synStatus = heartbeat.getDbSynStatus();
    // unknown lag or broken replication: never readable
    if (behind == null || synStatus == MySQLHeartbeat.DB_SYN_ERROR) {
        return false;
    }
    return synStatus == MySQLHeartbeat.DB_SYN_NORMAL &&
            behind < this.dbGroupConfig.getDelayThreshold();
}
/**
 * Starts the heartbeat and schedules it at the configured sharding-node
 * period. Each tick is skipped while the server config is incomplete or the
 * instance is inside its post-recovery quiet period.
 */
void startHeartbeat() {
    if (this.isDisabled() || this.isFakeNode()) {
        LOGGER.info("the instance[{}] is disabled or fake node, skip to start heartbeat.", name);
        return;
    }

    heartbeat.start();
    heartbeat.setScheduledFuture(Scheduler.getInstance().getScheduledExecutor().scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            if (DbleServer.getInstance().getConfig().isFullyConfigured()) {
                // quiet period after a recovery: suppress heartbeats
                if (TimeUtil.currentTimeMillis() < heartbeatRecoveryTime) {
                    return;
                }

                heartbeat.heartbeat();
            }
        }
    }, 0L, SystemConfig.getInstance().getShardingNodeHeartbeatPeriod(), TimeUnit.MILLISECONDS));
}
void stopHeartbeat(String reason) {
    heartbeat.stop(reason);
}

/** Stops heartbeat and connection pool; closeFront also kills dependent front connections. */
public void stop(String reason, boolean closeFront) {
    heartbeat.stop(reason);
    connectionPool.stop(reason, closeFront);
}

public void closeAllConnection(String reason) {
    this.connectionPool.closeAllConnections(reason);
}

/**
 * Alive = enabled, real node, and either heartbeat OK or (heartbeat still in
 * INIT state but the initial connectivity test succeeded).
 */
public boolean isAlive() {
    return !disabled.get() && !isFakeNode() && ((heartbeat.getStatus() == MySQLHeartbeat.INIT_STATUS && testConnSuccess) || heartbeat.isHeartBeatOK());
}

// NOTE(review): this OVERLOADS rather than overrides equals(Object); callers
// holding an Object reference will hit the identity-based version below.
public boolean equals(PhysicalDbInstance dbInstance) {
    return dbInstance.getConfig().getUser().equals(this.getConfig().getUser()) && dbInstance.getConfig().getUrl().equals(this.getConfig().getUrl()) &&
            dbInstance.getConfig().getPassword().equals(this.getConfig().getPassword()) && dbInstance.getConfig().getInstanceName().equals(this.getConfig().getInstanceName()) &&
            dbInstance.isDisabled() == this.isDisabled() && dbInstance.getConfig().getReadWeight() == this.getConfig().getReadWeight();
}

// identity semantics, consistent with hashCode()
public boolean equals(Object obj) {
    return super.equals(obj);
}
/** Identity hash, consistent with the identity-based {@code equals(Object)}. */
public int hashCode() {
    // the unreachable boolean 'return' that followed here was merge residue
    return super.hashCode();
}
public boolean isDisabled() {
    return disabled.get();
}
boolean setDisabled(boolean value) {
if (value) {
return disabled.compareAndSet(false, true);
} else {
return disabled.compareAndSet(true, false);
public void setDisabled(boolean isDisabled) {
disabled.set(isDisabled);
}
/**
 * Atomically disables the instance: stops the heartbeat and closes every
 * pooled connection. Returns false when it was already disabled.
 */
public boolean disable(String reason) {
    if (disabled.compareAndSet(false, true)) {
        stopHeartbeat(reason);
        connectionPool.closeAllConnections(reason);
        return true;
    }
    return false;
}

/**
 * Atomically re-enables the instance and restarts its heartbeat.
 * Returns false when it was not disabled.
 */
public boolean enable() {
    if (disabled.compareAndSet(true, false)) {
        startHeartbeat();
        return true;
    }
    return false;
}
/** @return connections currently borrowed from the pool (all schemas) */
public final int getActiveConnections() {
    return connectionPool.getCount(PooledEntry.STATE_IN_USE);
}

/** @return connections currently borrowed for one schema */
public final int getActiveConnections(String schema) {
    return connectionPool.getCount(schema, PooledEntry.STATE_IN_USE);
}

/** @return idle pooled connections (all schemas) */
public final int getIdleConnections() {
    return connectionPool.getCount(PooledEntry.STATE_NOT_IN_USE);
}

/** @return idle pooled connections for one schema */
public final int getIdleConnections(String schema) {
    return connectionPool.getCount(schema, PooledEntry.STATE_NOT_IN_USE);
}

/** @return live pool entries (total minus entries already marked removed) */
public final int getTotalConnections() {
    return connectionPool.size() - connectionPool.getCount(PooledEntry.STATE_REMOVED);
}

/**
 * Config-based equality: same user/url/password/name, same disabled state and
 * read weight. NOTE(review): hashCode() below stays identity-based, so equal
 * instances may hash differently -- unsafe as hash-map keys.
 */
@Override
public boolean equals(Object other) {
    if (this == other) {
        return true;
    }

    if (!(other instanceof PhysicalDbInstance)) {
        return false;
    }

    PhysicalDbInstance dbInstance = (PhysicalDbInstance) other;
    return dbInstance.getConfig().getUser().equals(this.getConfig().getUser()) && dbInstance.getConfig().getUrl().equals(this.getConfig().getUrl()) &&
            dbInstance.getConfig().getPassword().equals(this.getConfig().getPassword()) && dbInstance.getConfig().getInstanceName().equals(this.getConfig().getInstanceName()) &&
            dbInstance.isDisabled() == this.isDisabled() && dbInstance.getConfig().getReadWeight() == this.getConfig().getReadWeight();
}

@Override
public int hashCode() {
    return super.hashCode();
}
@Override
@@ -641,7 +429,7 @@ public abstract class PhysicalDbInstance {
return "dbInstance[name=" + name +
",disabled=" +
disabled.toString() + ",maxCon=" +
size + "]";
config.getMaxCon() + "]";
}
}
@@ -67,17 +67,15 @@ public class ShardingNode {
/**
* get connection from the same dbInstance
*
*/
public void getConnectionFromSameSource(String schema, boolean autocommit,
BackendConnection exitsCon, ResponseHandler handler,
public void getConnectionFromSameSource(String schema, BackendConnection exitsCon, ResponseHandler handler,
Object attachment) throws Exception {
PhysicalDbInstance ds = this.dbGroup.findDbInstance(exitsCon);
if (ds == null) {
throw new RuntimeException("can't find exits connection, maybe finished " + exitsCon);
} else {
ds.getConnection(schema, autocommit, handler, attachment, false);
ds.getConnection(schema, handler, attachment, false);
}
}
@@ -86,39 +84,35 @@ public class ShardingNode {
throw new RuntimeException("invalid param ,connection request db is :" + schema +
" and schema db is " + this.database);
}
if (!dbGroup.isInitSuccess() && !dbGroup.init()) {
throw new RuntimeException("dbGroup[" + dbGroup.getGroupName() + "]'s init error, please check it can be connected. " +
"The current Node is {dbGroup[" + dbGroup.getWriteSource().getConfig().getUrl() + ",Schema[" + schema + "]}");
}
}
public void getConnection(String schema, boolean isMustWrite, boolean autoCommit, RouteResultsetNode rrs,
ResponseHandler handler, Object attachment) throws Exception {
if (isMustWrite) {
getWriteNodeConnection(schema, autoCommit, handler, attachment);
getWriteNodeConnection(schema, handler, attachment);
return;
}
if (rrs.getRunOnSlave() == null) {
if (rrs.canRunINReadDB(autoCommit)) {
dbGroup.getRWSplistCon(schema, autoCommit, handler, attachment);
dbGroup.getRWSplitCon(schema, handler, attachment);
} else {
getWriteNodeConnection(schema, autoCommit, handler, attachment);
getWriteNodeConnection(schema, handler, attachment);
}
} else {
if (rrs.getRunOnSlave()) {
if (!dbGroup.getReadCon(schema, autoCommit, handler, attachment)) {
if (!dbGroup.getReadCon(schema, handler, attachment)) {
throw new IllegalArgumentException("no valid read dbInstance in dbGroup:" + dbGroup.getGroupName());
}
} else {
rrs.setCanRunInReadDB(false);
getWriteNodeConnection(schema, autoCommit, handler, attachment);
getWriteNodeConnection(schema, handler, attachment);
}
}
}
public BackendConnection getConnection(String schema, boolean autoCommit, Boolean runOnSlave, Object attachment) throws Exception {
public BackendConnection getConnection(String schema, Boolean runOnSlave, Object attachment) throws Exception {
if (runOnSlave == null) {
PhysicalDbInstance readSource = dbGroup.getRWSplistNode();
PhysicalDbInstance readSource = dbGroup.getRWSplitNode();
if (!readSource.isAlive()) {
String heartbeatError = "the dbInstance[" + readSource.getConfig().getUrl() + "] can't reach. Please check the dbInstance status";
if (dbGroup.getDbGroupConfig().isShowSlaveSql()) {
@@ -129,44 +123,36 @@ public class ShardingNode {
AlertUtil.alert(AlarmCode.DB_INSTANCE_CAN_NOT_REACH, Alert.AlertLevel.WARN, heartbeatError, "mysql", readSource.getConfig().getId(), labels);
throw new IOException(heartbeatError);
}
return readSource.getConnection(schema, autoCommit, attachment);
return readSource.getConnection(schema, attachment);
} else if (runOnSlave) {
PhysicalDbInstance source = dbGroup.getRandomAliveReadNode();
if (source == null) {
throw new IllegalArgumentException("no valid dbInstance in dbGroup:" + dbGroup.getGroupName());
}
return source.getConnection(schema, autoCommit, attachment);
return source.getConnection(schema, attachment);
} else {
checkRequest(schema);
if (dbGroup.isInitSuccess()) {
PhysicalDbInstance writeSource = dbGroup.getWriteSource();
if (writeSource.isReadOnly()) {
throw new IllegalArgumentException("The dbInstance[" + writeSource.getConfig().getUrl() + "] is running with the --read-only option so it cannot execute this statement");
}
writeSource.setWriteCount();
return writeSource.getConnection(schema, autoCommit, attachment);
} else {
throw new IllegalArgumentException("Invalid dbGroup:" + dbGroup.getGroupName());
}
}
}
private void getWriteNodeConnection(String schema, boolean autoCommit, ResponseHandler handler, Object attachment) throws IOException {
checkRequest(schema);
if (dbGroup.isInitSuccess()) {
PhysicalDbInstance writeSource = dbGroup.getWriteSource();
if (writeSource.isDisabled()) {
throw new IllegalArgumentException("[" + writeSource.getDbGroupConfig().getName() + "." + writeSource.getConfig().getInstanceName() + "] is disabled");
} else if (writeSource.isFakeNode()) {
throw new IllegalArgumentException("[" + writeSource.getDbGroupConfig().getName() + "." + writeSource.getConfig().getInstanceName() + "] is fake node");
}
PhysicalDbInstance writeSource = dbGroup.getWriteDbInstance();
if (writeSource.isReadOnly()) {
throw new IllegalArgumentException("The dbInstance[" + writeSource.getConfig().getUrl() + "] is running with the --read-only option so it cannot execute this statement");
}
writeSource.setWriteCount();
writeSource.getConnection(schema, autoCommit, handler, attachment, true);
} else {
throw new IllegalArgumentException("Invalid dbGroup:" + dbGroup.getGroupName());
writeSource.incrementWriteCount();
return writeSource.getConnection(schema, attachment);
}
}
/**
 * Acquires a connection on the group's write instance after validating that it
 * is enabled, real, and not read-only; bumps the write counter on success.
 *
 * @throws IOException              when acquisition fails
 * @throws IllegalArgumentException when the write instance cannot serve writes
 */
private void getWriteNodeConnection(String schema, ResponseHandler handler, Object attachment) throws IOException {
    checkRequest(schema);
    PhysicalDbInstance writeSource = dbGroup.getWriteDbInstance();
    if (writeSource.isDisabled()) {
        throw new IllegalArgumentException("[" + writeSource.getDbGroupConfig().getName() + "." + writeSource.getConfig().getInstanceName() + "] is disabled");
    } else if (writeSource.isFakeNode()) {
        throw new IllegalArgumentException("[" + writeSource.getDbGroupConfig().getName() + "." + writeSource.getConfig().getInstanceName() + "] is fake node");
    }

    if (writeSource.isReadOnly()) {
        throw new IllegalArgumentException("The dbInstance[" + writeSource.getConfig().getUrl() + "] is running with the --read-only option so it cannot execute this statement");
    }
    writeSource.incrementWriteCount();
    writeSource.getConnection(schema, handler, attachment, true);
}
}
@@ -3,15 +3,15 @@
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.sqlengine;
package com.actiontech.dble.backend.heartbeat;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.heartbeat.MySQLHeartbeat;
import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.backend.mysql.nio.handler.ResponseHandler;
import com.actiontech.dble.net.mysql.ErrorPacket;
import com.actiontech.dble.net.mysql.FieldPacket;
import com.actiontech.dble.net.mysql.RowDataPacket;
import com.actiontech.dble.sqlengine.SQLJobHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -28,10 +28,9 @@ public class HeartbeatSQLJob implements ResponseHandler {
private AtomicBoolean finished = new AtomicBoolean(false);
private MySQLHeartbeat heartbeat;
public HeartbeatSQLJob(MySQLHeartbeat heartbeat, final BackendConnection conn, SQLJobHandler jobHandler) {
public HeartbeatSQLJob(MySQLHeartbeat heartbeat, SQLJobHandler jobHandler) {
super();
this.sql = heartbeat.getHeartbeatSQL();
this.connection = conn;
this.jobHandler = jobHandler;
this.heartbeat = heartbeat;
}
@@ -46,13 +45,26 @@ public class HeartbeatSQLJob implements ResponseHandler {
@Override
public void connectionAcquired(final BackendConnection conn) {
LOGGER.warn("should be not reach here");
this.connection = conn;
conn.setResponseHandler(this);
((MySQLConnection) conn).setComplexQuery(true);
try {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("do heartbeat,conn is " + conn);
}
conn.query(sql);
} catch (Exception e) { // (UnsupportedEncodingException e) {
doFinished(true);
}
}
public void execute() {
connection.setResponseHandler(this);
((MySQLConnection) connection).setComplexQuery(true);
// reset
finished.set(false);
try {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("do heartbeat,conn is " + connection);
}
connection.query(sql);
} catch (Exception e) { // (UnsupportedEncodingException e) {
doFinished(true);
@@ -116,9 +128,11 @@ public class HeartbeatSQLJob implements ResponseHandler {
@Override
public void connectionClose(BackendConnection conn, String reason) {
LOGGER.warn("heartbeat conn for sql[" + sql + "] is closed, due to " + reason);
heartbeat.setErrorResult("heartbeat conn is closed, due to " + reason);
doFinished(true);
LOGGER.warn("heartbeat conn for sql[" + sql + "] is closed, due to " + reason + ", we will try immedia");
if (heartbeat.isChecking()) {
doFinished(false);
}
heartbeat.getSource().createConnectionSkipPool(null, this);
}
@Override
@@ -10,20 +10,16 @@ import com.actiontech.dble.alarm.AlarmCode;
import com.actiontech.dble.alarm.Alert;
import com.actiontech.dble.alarm.AlertUtil;
import com.actiontech.dble.alarm.ToResolveContainer;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.datasource.PhysicalDbInstance;
import com.actiontech.dble.backend.mysql.nio.MySQLInstance;
import com.actiontech.dble.config.helper.GetAndSyncDbInstanceKeyVariables;
import com.actiontech.dble.config.helper.KeyVariables;
import com.actiontech.dble.config.model.SystemConfig;
import com.actiontech.dble.sqlengine.HeartbeatSQLJob;
import com.actiontech.dble.sqlengine.OneRawSQLQueryResultHandler;
import com.actiontech.dble.sqlengine.SQLQueryResult;
import com.actiontech.dble.sqlengine.SQLQueryResultListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -49,18 +45,10 @@ public class MySQLDetector implements SQLQueryResultListener<SQLQueryResult<Map<
private volatile long lastSendQryTime;
private volatile long lastReceivedQryTime;
private volatile HeartbeatSQLJob sqlJob;
private BackendConnection con;
public MySQLDetector(MySQLHeartbeat heartbeat) {
this.heartbeat = heartbeat;
this.isQuit = new AtomicBoolean(false);
con = null;
try {
MySQLInstance ds = heartbeat.getSource();
con = ds.getConnectionForHeartbeat(null);
} catch (IOException e) {
LOGGER.warn("create heartbeat conn error", e);
}
}
boolean isHeartbeatTimeout() {
@@ -72,28 +60,21 @@ public class MySQLDetector implements SQLQueryResultListener<SQLQueryResult<Map<
}
public void heartbeat() {
if (con == null) {
heartbeat.setErrorResult("can't create conn for heartbeat");
return;
} else if (con.isClosed()) {
heartbeat.setErrorResult("conn for heartbeat is closed");
return;
}
lastSendQryTime = System.currentTimeMillis();
String[] fetchCols = {};
if (heartbeat.getSource().getDbGroupConfig().isShowSlaveSql()) {
fetchCols = MYSQL_SLAVE_STATUS_COLS;
} else if (heartbeat.getSource().getDbGroupConfig().isSelectReadOnlySql()) {
fetchCols = MYSQL_READ_ONLY_COLS;
}
if (sqlJob == null) {
String[] fetchCols = {};
if (heartbeat.getSource().getDbGroupConfig().isShowSlaveSql()) {
fetchCols = MYSQL_SLAVE_STATUS_COLS;
} else if (heartbeat.getSource().getDbGroupConfig().isSelectReadOnlySql()) {
fetchCols = MYSQL_READ_ONLY_COLS;
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("do heartbeat,conn is " + con);
OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler(fetchCols, this);
sqlJob = new HeartbeatSQLJob(heartbeat, resultHandler);
heartbeat.getSource().createConnectionSkipPool(null, sqlJob);
} else {
sqlJob.execute();
}
OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler(fetchCols, this);
sqlJob = new HeartbeatSQLJob(heartbeat, con, resultHandler);
sqlJob.execute();
}
public void quit() {
@@ -128,7 +109,9 @@ public class MySQLDetector implements SQLQueryResultListener<SQLQueryResult<Map<
heartbeat.setResult(MySQLHeartbeat.OK_STATUS);
}
/** if recover failed, return true*/
/**
* if recover failed, return true
*/
private boolean checkRecoverFail(PhysicalDbInstance source) {
if (heartbeat.isStop()) {
return true;
@@ -158,7 +141,7 @@ public class MySQLDetector implements SQLQueryResultListener<SQLQueryResult<Map<
if (variables == null ||
variables.isLowerCase() != DbleServer.getInstance().getSystemVariables().isLowerCaseTableNames() ||
variables.getMaxPacketSize() < SystemConfig.getInstance().getMaxPacketSize()) {
String url = con.getHost() + ":" + con.getPort();
String url = heartbeat.getSource().getConfig().getUrl();
Map<String, String> labels = AlertUtil.genSingleLabel("dbInstance", url);
String errMsg;
if (variables == null) {
@@ -171,12 +154,12 @@ public class MySQLDetector implements SQLQueryResultListener<SQLQueryResult<Map<
LOGGER.warn(errMsg + ", set heartbeat Error");
if (variables != null) {
AlertUtil.alert(AlarmCode.DB_INSTANCE_LOWER_CASE_ERROR, Alert.AlertLevel.WARN, errMsg, "mysql", this.heartbeat.getSource().getConfig().getId(), labels);
ToResolveContainer.DB_INSTANCE_LOWER_CASE_ERROR.add(con.getHost() + ":" + con.getPort());
ToResolveContainer.DB_INSTANCE_LOWER_CASE_ERROR.add(url);
}
heartbeat.setErrorResult(errMsg);
return true;
} else {
String url = con.getHost() + ":" + con.getPort();
String url = heartbeat.getSource().getConfig().getUrl();
if (ToResolveContainer.DB_INSTANCE_LOWER_CASE_ERROR.contains(url)) {
Map<String, String> labels = AlertUtil.genSingleLabel("dbInstance", url);
AlertUtil.alertResolve(AlarmCode.DB_INSTANCE_LOWER_CASE_ERROR, Alert.AlertLevel.WARN, "mysql", this.heartbeat.getSource().getConfig().getId(), labels,
@@ -231,7 +214,6 @@ public class MySQLDetector implements SQLQueryResultListener<SQLQueryResult<Map<
heartbeat.setResult(MySQLHeartbeat.OK_STATUS);
}
public void close() {
HeartbeatSQLJob curJob = sqlJob;
if (curJob != null) {
@@ -8,7 +8,7 @@ package com.actiontech.dble.backend.heartbeat;
import com.actiontech.dble.alarm.AlarmCode;
import com.actiontech.dble.alarm.Alert;
import com.actiontech.dble.alarm.AlertUtil;
import com.actiontech.dble.backend.mysql.nio.MySQLInstance;
import com.actiontech.dble.backend.datasource.PhysicalDbInstance;
import com.actiontech.dble.statistic.DbInstanceSyncRecorder;
import com.actiontech.dble.statistic.HeartbeatRecorder;
import org.slf4j.Logger;
@@ -17,6 +17,7 @@ import org.slf4j.LoggerFactory;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Map;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
@@ -37,7 +38,7 @@ public class MySQLHeartbeat {
private final AtomicBoolean isChecking = new AtomicBoolean(false);
private final HeartbeatRecorder recorder = new HeartbeatRecorder();
private final DbInstanceSyncRecorder asyncRecorder = new DbInstanceSyncRecorder();
private final MySQLInstance source;
private final PhysicalDbInstance source;
protected volatile int status;
private String heartbeatSQL;
private long heartbeatTimeout; // during the time, heart failed will ignore
@@ -48,23 +49,28 @@ public class MySQLHeartbeat {
private volatile Integer slaveBehindMaster;
private MySQLDetector detector;
private volatile String message;
private volatile ScheduledFuture scheduledFuture;
public MySQLHeartbeat(MySQLInstance source) {
this.source = source;
public MySQLHeartbeat(PhysicalDbInstance dbInstance) {
this.source = dbInstance;
this.status = INIT_STATUS;
this.errorRetryCount = source.getDbGroupConfig().getErrorRetryCount();
this.heartbeatTimeout = source.getDbGroupConfig().getHeartbeatTimeout();
this.heartbeatSQL = source.getDbGroupConfig().getHearbeatSQL();
this.errorRetryCount = dbInstance.getDbGroupConfig().getErrorRetryCount();
this.heartbeatTimeout = dbInstance.getDbGroupConfig().getHeartbeatTimeout();
this.heartbeatSQL = dbInstance.getDbGroupConfig().getHeartbeatSQL();
}
public String getMessage() {
return message;
}
public MySQLInstance getSource() {
public PhysicalDbInstance getSource() {
return source;
}
public void setScheduledFuture(ScheduledFuture scheduledFuture) {
this.scheduledFuture = scheduledFuture;
}
public String getLastActiveTime() {
if (detector == null) {
return null;
@@ -78,8 +84,13 @@ public class MySQLHeartbeat {
isStop = false;
}
public void stop() {
public void stop(String reason) {
if (isStop) {
return;
}
LOGGER.info("stop heartbeat of instance[{}], due to {}", source.getName(), reason);
isStop = true;
scheduledFuture.cancel(false);
this.status = INIT_STATUS;
if (detector != null && !detector.isQuit()) {
detector.quit();
@@ -105,9 +116,6 @@ public class MySQLHeartbeat {
}
}
}
if (isStop) {
stop();
}
}
public void setErrorResult(String errMsg) {
@@ -231,10 +239,8 @@ public class MySQLHeartbeat {
}
public boolean isHeartBeatOK() {
if (status == OK_STATUS) {
if (status == OK_STATUS || status == INIT_STATUS) {
return true;
} else if (status == INIT_STATUS) { // init or timeout->ok
return false;
} else if (status == ERROR_STATUS) {
long timeDiff = System.currentTimeMillis() - this.startErrorTime.longValue();
if (timeDiff >= heartbeatTimeout) {
@@ -249,7 +255,7 @@ public class MySQLHeartbeat {
}
}
public String getHeartbeatSQL() {
String getHeartbeatSQL() {
return heartbeatSQL;
}
@@ -7,6 +7,7 @@ package com.actiontech.dble.backend.mysql.nio;
import com.actiontech.dble.DbleServer;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.datasource.PhysicalDbInstance;
import com.actiontech.dble.backend.mysql.CharsetUtil;
import com.actiontech.dble.backend.mysql.nio.handler.ResponseHandler;
import com.actiontech.dble.backend.mysql.xa.TxState;
@@ -14,6 +15,7 @@ import com.actiontech.dble.btrace.provider.XaDelayProvider;
import com.actiontech.dble.config.Capabilities;
import com.actiontech.dble.config.Isolations;
import com.actiontech.dble.config.model.SystemConfig;
import com.actiontech.dble.config.model.db.DbInstanceConfig;
import com.actiontech.dble.net.AbstractConnection;
import com.actiontech.dble.net.NIOProcessor;
import com.actiontech.dble.net.handler.BackEndCleaner;
@@ -42,15 +44,54 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
/**
* @author mycat
* @author mycatc
*/
public class MySQLConnection extends AbstractConnection implements
BackendConnection {
public class MySQLConnection extends AbstractConnection implements BackendConnection {
private static final Logger LOGGER = LoggerFactory.getLogger(MySQLConnection.class);
public static final Comparator<BackendConnection> LAST_ACCESS_COMPARABLE;
private static final CommandPacket COMMIT = new CommandPacket();
private static final CommandPacket ROLLBACK = new CommandPacket();
static {
COMMIT.setPacketId(0);
COMMIT.setCommand(MySQLPacket.COM_QUERY);
COMMIT.setArg("commit".getBytes());
ROLLBACK.setPacketId(0);
ROLLBACK.setCommand(MySQLPacket.COM_QUERY);
ROLLBACK.setArg("rollback".getBytes());
}
static {
LAST_ACCESS_COMPARABLE = new Comparator<BackendConnection>() {
@Override
public int compare(final BackendConnection entryOne, final BackendConnection entryTwo) {
return Long.compare(entryOne.getLastTime(), entryTwo.getLastTime());
}
};
}
private AtomicInteger state = new AtomicInteger(INITIAL);
@Override
public boolean compareAndSet(int expect, int update) {
return state.compareAndSet(expect, update);
}
@Override
public void lazySet(int update) {
state.lazySet(update);
}
@Override
public int getState() {
return state.get();
}
private volatile long lastTime;
private volatile String schema = null;
private volatile String oldSchema;
private volatile boolean borrowed = false;
private volatile boolean isDDL = false;
private volatile boolean isRowDataFlowing = false;
private volatile boolean isExecuting = false;
@@ -94,20 +135,7 @@ public class MySQLConnection extends AbstractConnection implements
return flag;
}
private static final CommandPacket COMMIT = new CommandPacket();
private static final CommandPacket ROLLBACK = new CommandPacket();
static {
COMMIT.setPacketId(0);
COMMIT.setCommand(MySQLPacket.COM_QUERY);
COMMIT.setArg("commit".getBytes());
ROLLBACK.setPacketId(0);
ROLLBACK.setCommand(MySQLPacket.COM_QUERY);
ROLLBACK.setArg("rollback".getBytes());
}
private MySQLInstance pool;
private volatile PhysicalDbInstance dbInstance;
private boolean fromSlaveDB;
private long threadId;
private HandshakeV10Packet handshake;
@@ -119,9 +147,15 @@ public class MySQLConnection extends AbstractConnection implements
private boolean isolationSynced;
private volatile ResponseHandler respHandler;
public MySQLConnection(NetworkChannel channel, boolean fromSlaveDB, boolean autocommitSynced, boolean isolationSynced) {
public MySQLConnection(NetworkChannel channel, DbInstanceConfig config, boolean fromSlaveDB, boolean autocommitSynced, boolean isolationSynced) {
super(channel);
this.host = config.getIp();
this.port = config.getPort();
this.user = config.getUser();
this.password = config.getPassword();
this.fromSlaveDB = !config.isPrimary();
this.lastTime = TimeUtil.currentTimeMillis();
this.autocommitSynced = autocommitSynced;
boolean sysAutocommit = SystemConfig.getInstance().getAutocommit() == 1;
this.autocommit = sysAutocommit == autocommitSynced; // T + T-> T, T + F-> F, F +T ->F, F + F->T
@@ -201,12 +235,12 @@ public class MySQLConnection extends AbstractConnection implements
}
}
public MySQLInstance getPool() {
return pool;
public PhysicalDbInstance getDbInstance() {
return dbInstance;
}
public void setPool(MySQLInstance pool) {
this.pool = pool;
public void setDbInstance(PhysicalDbInstance instance) {
this.dbInstance = instance;
}
public void setUser(String user) {
@@ -322,6 +356,11 @@ public class MySQLConnection extends AbstractConnection implements
}
@Override
public void ping() {
write(PingPacket.PING);
}
private WriteToBackendTask sendQueryCmdTask(String query, CharsetNames clientCharset) {
CommandPacket packet = new CommandPacket();
packet.setPacketId(0);
@@ -594,17 +633,21 @@ public class MySQLConnection extends AbstractConnection implements
}
@Override
public long getLastTime() {
return lastTime;
}
public void setLastTime(long lastTime) {
this.lastTime = lastTime;
public void close() {
close("normal", false);
}
public void close() {
close("normal");
public void close(String reason, boolean closeFrontConn) {
if (closeFrontConn) {
session.getSource().close(reason);
} else {
close("normal");
}
}
/**
@@ -681,7 +724,7 @@ public class MySQLConnection extends AbstractConnection implements
handler.connectionClose(conn, reason);
respHandler = null;
} catch (Throwable e) {
LOGGER.warn("get error close mysqlconnection ", e);
LOGGER.warn("get error close mysql connection ", e);
}
}
});
@@ -713,7 +756,10 @@ public class MySQLConnection extends AbstractConnection implements
private synchronized void innerTerminate(String reason) {
if (!isClosed()) {
super.close(reason);
pool.connectionClosed(this);
// heartbeat conn is null
if (dbInstance != null) {
dbInstance.close(this);
}
}
}
@@ -761,7 +807,7 @@ public class MySQLConnection extends AbstractConnection implements
setResponseHandler(null);
setSession(null);
logResponse.set(false);
pool.releaseChannel(this);
dbInstance.release(this);
}
@@ -820,17 +866,6 @@ public class MySQLConnection extends AbstractConnection implements
return fromSlaveDB;
}
@Override
public boolean isBorrowed() {
return borrowed;
}
@Override
public void setBorrowed(boolean borrowed) {
this.lastTime = TimeUtil.currentTimeMillis();
this.borrowed = borrowed;
}
@Override
public String toString() {
StringBuilder result = new StringBuilder();
@@ -844,8 +879,6 @@ public class MySQLConnection extends AbstractConnection implements
result.append(schema);
result.append(", old schema=");
result.append(oldSchema);
result.append(", borrowed=");
result.append(borrowed);
result.append(", fromSlaveDB=");
result.append(fromSlaveDB);
result.append(", mysqlId=");
@@ -891,7 +924,6 @@ public class MySQLConnection extends AbstractConnection implements
return "MySQLConnection host=" + host + ", port=" + port + ", schema=" + schema;
}
@Override
public boolean isDDL() {
return isDDL;
@@ -1,12 +1,11 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.backend.mysql.nio;
import com.actiontech.dble.backend.mysql.SecurityUtil;
import com.actiontech.dble.backend.mysql.nio.handler.ResponseHandler;
import com.actiontech.dble.config.Capabilities;
import com.actiontech.dble.config.model.SystemConfig;
import com.actiontech.dble.net.ConnectionException;
@@ -26,21 +25,19 @@ import java.util.Arrays;
public class MySQLConnectionAuthenticator implements NIOHandler {
private static final Logger LOGGER = LoggerFactory.getLogger(MySQLConnectionAuthenticator.class);
private final MySQLConnection source;
private final ResponseHandler listener;
private byte[] publicKey = null;
private final MySQLConnectionListener listener;
private String authPluginName = null;
private byte[] authPluginData = null;
public MySQLConnectionAuthenticator(MySQLConnection source,
ResponseHandler listener) {
public MySQLConnectionAuthenticator(MySQLConnection source, MySQLConnectionListener listener) {
this.source = source;
this.listener = listener;
}
public void connectionError(MySQLConnection c, Throwable e) {
if (listener != null) {
listener.connectionError(e, c);
listener.onCreateFail(c, e);
}
}
@@ -49,7 +46,7 @@ public class MySQLConnectionAuthenticator implements NIOHandler {
try {
BinaryPacket bin2 = new BinaryPacket();
if (checkPubicKey(data)) {
publicKey = bin2.readKey(data);
byte[] publicKey = bin2.readKey(data);
if (Arrays.equals(source.getHandshake().getAuthPluginName(), HandshakeV10Packet.CACHING_SHA2_PASSWORD_PLUGIN)) {
PasswordAuthPlugin.sendEnPasswordWithPublicKey(authPluginData, PasswordAuthPlugin.GETPUBLICKEY, publicKey, source);
} else if (Arrays.equals(source.getHandshake().getAuthPluginName(), HandshakeV10Packet.NATIVE_PASSWORD_PLUGIN)) {
@@ -76,7 +73,7 @@ public class MySQLConnectionAuthenticator implements NIOHandler {
source.setSupportCompress(true);
}
if (listener != null) {
listener.connectionAcquired(source);
listener.onCreateSuccess(source);
}
break;
case ErrorPacket.FIELD_COUNT:
@@ -125,8 +122,7 @@ public class MySQLConnectionAuthenticator implements NIOHandler {
} catch (Exception e) {
LOGGER.warn(e.getMessage());
if (listener != null) {
listener.connectionError(e, source);
return;
listener.onCreateFail(source, e);
}
}
}
@@ -1,51 +0,0 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.backend.mysql.nio;
import com.actiontech.dble.DbleServer;
import com.actiontech.dble.backend.mysql.nio.handler.ResponseHandler;
import com.actiontech.dble.config.model.db.DbInstanceConfig;
import com.actiontech.dble.net.NIOConnector;
import com.actiontech.dble.net.factory.BackendConnectionFactory;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.AsynchronousSocketChannel;
import java.nio.channels.CompletionHandler;
import java.nio.channels.NetworkChannel;
/**
* @author mycat
*/
public class MySQLConnectionFactory extends BackendConnectionFactory {
@SuppressWarnings({"unchecked", "rawtypes"})
public MySQLConnection make(MySQLInstance pool, ResponseHandler handler,
String schema) throws IOException {
DbInstanceConfig dsc = pool.getConfig();
NetworkChannel channel = openSocketChannel(DbleServer.getInstance().isAIO());
MySQLConnection c = new MySQLConnection(channel, pool.isReadInstance(), pool.isAutocommitSynced(), pool.isIsolationSynced());
c.setSocketParams(false);
c.setHost(dsc.getIp());
c.setPort(dsc.getPort());
c.setUser(dsc.getUser());
c.setPassword(dsc.getPassword());
c.setSchema(schema);
c.setHandler(new MySQLConnectionAuthenticator(c, handler));
c.setPool(pool);
c.setIdleTimeout(pool.getConfig().getIdleTimeout());
if (channel instanceof AsynchronousSocketChannel) {
((AsynchronousSocketChannel) channel).connect(
new InetSocketAddress(dsc.getIp(), dsc.getPort()), c,
(CompletionHandler) DbleServer.getInstance().getConnector());
} else {
((NIOConnector) DbleServer.getInstance().getConnector()).postConnect(c);
}
return c;
}
}
@@ -0,0 +1,20 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.backend.mysql.nio;
import com.actiontech.dble.backend.BackendConnection;
/**
* @author collapsar
*/
public interface MySQLConnectionListener {
void onCreateSuccess(BackendConnection conn);
void onCreateFail(BackendConnection conn, Throwable e);
void onHeartbeatSuccess(BackendConnection conn);
}
@@ -1,13 +1,12 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.backend.mysql.nio;
import com.actiontech.dble.backend.datasource.PhysicalDbInstance;
import com.actiontech.dble.backend.heartbeat.MySQLHeartbeat;
import com.actiontech.dble.backend.mysql.nio.handler.ResponseHandler;
import com.actiontech.dble.config.Capabilities;
import com.actiontech.dble.config.model.db.DbGroupConfig;
import com.actiontech.dble.config.model.db.DbInstanceConfig;
@@ -26,61 +25,20 @@ import java.nio.charset.StandardCharsets;
public class MySQLInstance extends PhysicalDbInstance {
private static final Logger LOGGER = LoggerFactory.getLogger(MySQLInstance.class);
private final MySQLConnectionFactory factory;
public MySQLInstance(DbInstanceConfig config, DbGroupConfig hostConfig,
boolean isReadNode) {
super(config, hostConfig, isReadNode);
this.factory = new MySQLConnectionFactory();
this.heartbeat = new MySQLHeartbeat(this);
}
public MySQLInstance(MySQLInstance org) {
super(org);
this.factory = new MySQLConnectionFactory();
}
@Override
public void createNewConnection(ResponseHandler handler, String schema) throws IOException {
factory.make(this, handler, schema);
}
private long getClientFlags(boolean isConnectWithDB) {
int flag = 0;
flag |= Capabilities.CLIENT_LONG_PASSWORD;
flag |= Capabilities.CLIENT_FOUND_ROWS;
flag |= Capabilities.CLIENT_LONG_FLAG;
if (isConnectWithDB) {
flag |= Capabilities.CLIENT_CONNECT_WITH_DB;
}
// flag |= Capabilities.CLIENT_NO_SCHEMA;
// flag |= Capabilities.CLIENT_COMPRESS;
flag |= Capabilities.CLIENT_ODBC;
// flag |= Capabilities.CLIENT_LOCAL_FILES;
flag |= Capabilities.CLIENT_IGNORE_SPACE;
flag |= Capabilities.CLIENT_PROTOCOL_41;
flag |= Capabilities.CLIENT_INTERACTIVE;
// flag |= Capabilities.CLIENT_SSL;
flag |= Capabilities.CLIENT_IGNORE_SIGPIPE;
flag |= Capabilities.CLIENT_TRANSACTIONS;
// flag |= Capabilities.CLIENT_RESERVED;
flag |= Capabilities.CLIENT_SECURE_CONNECTION;
// client extension
// flag |= Capabilities.CLIENT_MULTI_STATEMENTS;
// flag |= Capabilities.CLIENT_MULTI_RESULTS;
return flag;
}
private long getClientFlagSha(boolean isConnectWithDB) {
int flag = 0;
flag |= getClientFlags(isConnectWithDB);
flag |= Capabilities.CLIENT_PLUGIN_AUTH;
flag |= Capabilities.CLIENT_MULTIPLE_STATEMENTS;
return flag;
}
@Override
public boolean testConnection() throws IOException {
public boolean testConnection() {
boolean isConnected = true;
Socket socket = null;
@@ -233,12 +191,41 @@ public class MySQLInstance extends PhysicalDbInstance {
return isConnected;
}
@Override
public MySQLHeartbeat createHeartBeat() {
return new MySQLHeartbeat(this);
private long getClientFlags(boolean isConnectWithDB) {
int flag = 0;
flag |= Capabilities.CLIENT_LONG_PASSWORD;
flag |= Capabilities.CLIENT_FOUND_ROWS;
flag |= Capabilities.CLIENT_LONG_FLAG;
if (isConnectWithDB) {
flag |= Capabilities.CLIENT_CONNECT_WITH_DB;
}
// flag |= Capabilities.CLIENT_NO_SCHEMA;
// flag |= Capabilities.CLIENT_COMPRESS;
flag |= Capabilities.CLIENT_ODBC;
// flag |= Capabilities.CLIENT_LOCAL_FILES;
flag |= Capabilities.CLIENT_IGNORE_SPACE;
flag |= Capabilities.CLIENT_PROTOCOL_41;
flag |= Capabilities.CLIENT_INTERACTIVE;
// flag |= Capabilities.CLIENT_SSL;
flag |= Capabilities.CLIENT_IGNORE_SIGPIPE;
flag |= Capabilities.CLIENT_TRANSACTIONS;
// flag |= Capabilities.CLIENT_RESERVED;
flag |= Capabilities.CLIENT_SECURE_CONNECTION;
// client extension
// flag |= Capabilities.CLIENT_MULTI_STATEMENTS;
// flag |= Capabilities.CLIENT_MULTI_RESULTS;
return flag;
}
private long getClientFlagSha(boolean isConnectWithDB) {
int flag = 0;
flag |= getClientFlags(isConnectWithDB);
flag |= Capabilities.CLIENT_PLUGIN_AUTH;
flag |= Capabilities.CLIENT_MULTIPLE_STATEMENTS;
return flag;
}
public void startAuthPacket(OutputStream out, HandshakeV10Packet handshake, byte[] passwordSented, String authPluginName) {
AuthPacket authPacket = new AuthPacket();
authPacket.setPacketId(1);
@@ -6,17 +6,15 @@
package com.actiontech.dble.backend.mysql.nio.handler;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.backend.mysql.nio.MySQLConnectionListener;
import com.actiontech.dble.backend.pool.util.TimerHolder;
import com.actiontech.dble.net.mysql.FieldPacket;
import com.actiontech.dble.net.mysql.PingPacket;
import com.actiontech.dble.net.mysql.RowDataPacket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.netty.util.Timeout;
import io.netty.util.TimerTask;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
/**
* heartbeat check for mysql connections
@@ -24,50 +22,65 @@ import java.util.concurrent.locks.ReentrantLock;
* @author wuzhih
*/
public class ConnectionHeartBeatHandler implements ResponseHandler {
private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionHeartBeatHandler.class);
protected final ReentrantLock lock = new ReentrantLock();
final Condition condition = lock.newCondition();
private final Object heartbeatLock;
private volatile Timeout heartbeatTimeout;
private final BackendConnection conn;
private final MySQLConnectionListener listener;
private boolean finished = false;
public void doHeartBeat(BackendConnection conn) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("do heartbeat for con " + conn);
}
lock.lock();
try {
conn.setResponseHandler(this);
MySQLConnection mCon = (MySQLConnection) conn;
mCon.write(mCon.writeToBuffer(PingPacket.PING, mCon.allocate()));
long validateTime = 2;
if (!condition.await(validateTime, TimeUnit.SECONDS)) {
//if the thread be waked up by timer than close the connection
conn.close("heartbeat timeout ");
}
} catch (Exception e) {
executeException(conn, e);
} finally {
lock.unlock();
public ConnectionHeartBeatHandler(BackendConnection conn, boolean isBlock, MySQLConnectionListener listener) {
conn.setResponseHandler(this);
this.conn = conn;
this.listener = listener;
if (isBlock) {
this.heartbeatLock = new Object();
} else {
this.heartbeatLock = null;
}
}
public boolean ping(long timeout) {
conn.ping();
if (heartbeatLock != null) {
synchronized (heartbeatLock) {
try {
heartbeatLock.wait(timeout);
} catch (InterruptedException e) {
finished = false;
}
}
return finished;
} else {
heartbeatTimeout = TimerHolder.getTimer().newTimeout(new TimerTask() {
@Override
public void run(Timeout timeout) throws Exception {
conn.closeWithoutRsp("conn heart timeout");
}
}, timeout, TimeUnit.MILLISECONDS);
return true;
}
}
/**
* if the query returns ok than just release the connection
* and go on check the next one
*
* @param ok
* @param conn
* @param con
*/
@Override
public void okResponse(byte[] ok, BackendConnection conn) {
lock.lock();
try {
condition.signal();
conn.release();
} finally {
lock.unlock();
public void okResponse(byte[] ok, BackendConnection con) {
if (heartbeatLock != null) {
synchronized (heartbeatLock) {
finished = true;
heartbeatLock.notifyAll();
}
return;
}
heartbeatTimeout.cancel();
listener.onHeartbeatSuccess(con);
}
/**
@@ -75,84 +88,54 @@ public class ConnectionHeartBeatHandler implements ResponseHandler {
* start the next one
*
* @param data
* @param conn
* @param con
*/
@Override
public void errorResponse(byte[] data, BackendConnection conn) {
lock.lock();
try {
condition.signal();
conn.close("heatbeat return error");
} finally {
lock.unlock();
}
}
/**
* if the heartbeat throws the Exception than close the connection
*
* @param c
* @param e
*/
private void executeException(BackendConnection c, Throwable e) {
lock.lock();
try {
condition.signal();
c.close("heatbeat exception:" + e);
LOGGER.info("executeException ", e);
} finally {
lock.unlock();
}
public void errorResponse(byte[] data, BackendConnection con) {
}
/**
* if when the query going on the conneciton be closed
* than just do nothing and go on for next one
*
* @param conn
* @param con
* @param reason
*/
@Override
public void connectionClose(BackendConnection conn, String reason) {
lock.lock();
try {
condition.signal();
LOGGER.info("connection closed " + conn + " reason:" + reason);
} finally {
lock.unlock();
}
public void connectionClose(BackendConnection con, String reason) {
}
/**
* @param eof
* @param isLeft
* @param conn
* @param con
*/
@Override
public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection con) {
// not called
}
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
boolean isLeft, BackendConnection conn) {
boolean isLeft, BackendConnection con) {
// not called
}
@Override
public boolean rowResponse(byte[] rowNull, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
public boolean rowResponse(byte[] rowNull, RowDataPacket rowPacket, boolean isLeft, BackendConnection con) {
// not called
return false;
}
@Override
public void connectionAcquired(BackendConnection conn) {
public void connectionAcquired(BackendConnection con) {
// not called
}
@Override
public void connectionError(Throwable e, BackendConnection conn) {
public void connectionError(Throwable e, BackendConnection con) {
// not called
}
}
@@ -1,68 +0,0 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.backend.mysql.nio.handler;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.net.mysql.FieldPacket;
import com.actiontech.dble.net.mysql.RowDataPacket;
import java.util.List;
/**
 * A {@link ResponseHandler} that simply forwards every callback to a wrapped
 * delegate handler. Useful for intercepting or decorating response handling.
 *
 * @author mycat
 */
public class DelegateResponseHandler implements ResponseHandler {
    private final ResponseHandler delegate;

    public DelegateResponseHandler(ResponseHandler target) {
        if (target == null) {
            throw new IllegalArgumentException("delegate is null!");
        }
        this.delegate = target;
    }

    @Override
    public void connectionAcquired(BackendConnection con) {
        delegate.connectionAcquired(con);
    }

    @Override
    public void connectionError(Throwable e, BackendConnection con) {
        delegate.connectionError(e, con);
    }

    @Override
    public void okResponse(byte[] ok, BackendConnection con) {
        delegate.okResponse(ok, con);
    }

    @Override
    public void errorResponse(byte[] err, BackendConnection con) {
        delegate.errorResponse(err, con);
    }

    @Override
    public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
                                 boolean isLeft, BackendConnection con) {
        delegate.fieldEofResponse(header, fields, fieldPackets, eof, isLeft, con);
    }

    @Override
    public boolean rowResponse(byte[] row, RowDataPacket rowPacket, boolean isLeft, BackendConnection con) {
        return delegate.rowResponse(row, rowPacket, isLeft, con);
    }

    @Override
    public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection con) {
        delegate.rowEofResponse(eof, isLeft, con);
    }

    @Override
    public void connectionClose(BackendConnection con, String reason) {
        delegate.connectionClose(con, reason);
    }
}
@@ -1,96 +0,0 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.backend.mysql.nio.handler;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.net.mysql.FieldPacket;
import com.actiontech.dble.net.mysql.RowDataPacket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Response handler used when pre-creating a batch of backend connections.
 * Successful connections are collected into the caller-supplied list; both
 * successes and failures bump {@link #finishedCount} so {@link #finished()}
 * can tell when all attempts have completed.
 *
 * wuzh
 *
 * @author mycat
 */
public class GetConnectionHandler implements ResponseHandler {
    private static final Logger LOGGER = LoggerFactory.getLogger(GetConnectionHandler.class);
    // successfully created connections are handed back to the caller via this list
    private final CopyOnWriteArrayList<BackendConnection> successCons;
    // number of finished attempts, successful and failed alike
    private final AtomicInteger finishedCount = new AtomicInteger(0);
    // total number of connections requested
    private final int total;

    public GetConnectionHandler(
            CopyOnWriteArrayList<BackendConnection> consToStore,
            int totalNumber) {
        super();
        this.successCons = consToStore;
        this.total = totalNumber;
    }

    /** @return a human-readable progress summary for monitoring/logging */
    public String getStatusInfo() {
        return "finished " + finishedCount.get() + " success " + successCons.size() + " target count:" + this.total;
    }

    /** @return true once every requested attempt has either succeeded or failed */
    public boolean finished() {
        return finishedCount.get() >= total;
    }

    /** Externally count one attempt as finished (e.g. when no attempt was started). */
    public void initIncrement() {
        finishedCount.incrementAndGet();
    }

    @Override
    public void connectionAcquired(BackendConnection conn) {
        successCons.add(conn);
        finishedCount.incrementAndGet();
        LOGGER.info("connected successfully " + conn);
        conn.release();
    }

    @Override
    public void connectionError(Throwable e, BackendConnection conn) {
        finishedCount.incrementAndGet();
        // pass the throwable as the last argument so the stack trace is logged
        LOGGER.info("connect error " + conn, e);
        // conn may be null: PoolBase reports connect IOExceptions with a null connection
        if (conn != null) {
            conn.close("connectionError");
        }
    }

    @Override
    public void errorResponse(byte[] err, BackendConnection conn) {
        LOGGER.info("caught error resp: " + conn + " " + new String(err));
        conn.release();
    }

    @Override
    public void okResponse(byte[] ok, BackendConnection conn) {
        LOGGER.info("received ok resp: " + conn + " " + new String(ok));
    }

    @Override
    public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
                                 boolean isLeft, BackendConnection conn) {
        // not used for connection pre-creation
    }

    @Override
    public boolean rowResponse(byte[] row, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
        // not used for connection pre-creation
        return false;
    }

    @Override
    public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
        // not used for connection pre-creation
    }

    @Override
    public void connectionClose(BackendConnection conn, String reason) {
        // not used for connection pre-creation
    }
}
@@ -1,116 +0,0 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.backend.mysql.nio.handler;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.net.mysql.FieldPacket;
import com.actiontech.dble.net.mysql.RowDataPacket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
/**
 * Synchronous bridge for asynchronous backend-connection creation: the NIO
 * layer invokes the {@link ResponseHandler} callbacks, while a caller thread
 * blocks in {@link #getBackConn()} until either a connection or an error
 * message has been delivered.
 */
public class NewConnectionRespHandler implements ResponseHandler {
    private static final Logger LOGGER = LoggerFactory.getLogger(NewConnectionRespHandler.class);
    // the acquired connection; written under lock by connectionAcquired()
    private BackendConnection backConn;
    private ReentrantLock lock = new ReentrantLock();
    // signalled once either backConn or errMsg has been set
    private Condition initiated = lock.newCondition();
    // failure description; written under lock by connectionError()/connectionClose()
    private String errMsg;

    /**
     * Blocks until the backend connection has been created or creation failed.
     *
     * @return the acquired backend connection
     * @throws IOException if the connect attempt failed, the wait was
     *                     interrupted, or the dbInstance was disabled meanwhile
     */
    public BackendConnection getBackConn() throws IOException {
        lock.lock();
        try {
            // await() must be called in a loop: Condition is subject to
            // spurious wakeups, so re-check the completion state each time
            while (errMsg == null && backConn == null) {
                initiated.await();
            }
            if (backConn == null) {
                throw new IOException(errMsg);
            } else if (((MySQLConnection) backConn).getPool().isDisabled()) {
                backConn.close("dbInstance turned into disabled");
                throw new IOException("dbInstance " + ((MySQLConnection) backConn).getPool().toString() + " is disabled");
            }
            return backConn;
        } catch (InterruptedException e) {
            LOGGER.info("getBackConn " + e);
            // restore the interrupt status and preserve the original cause
            Thread.currentThread().interrupt();
            throw new IOException(e.getMessage(), e);
        } finally {
            lock.unlock();
        }
    }

    @Override
    public void connectionError(Throwable e, BackendConnection conn) {
        LOGGER.info(conn + " connectionError " + e);
        lock.lock();
        try {
            errMsg = "Backend connect Error, Connection{dbInstance[" + conn.getHost() + ":" + conn.getPort() + "],Schema[" + conn.getSchema() + "]} refused";
            initiated.signal();
        } finally {
            lock.unlock();
        }
    }

    @Override
    public void connectionAcquired(BackendConnection conn) {
        lock.lock();
        try {
            backConn = conn;
            initiated.signal();
        } finally {
            lock.unlock();
        }
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("connectionAcquired " + conn);
        }
    }

    @Override
    public void errorResponse(byte[] err, BackendConnection conn) {
        LOGGER.info("caught error resp: " + conn + " " + new String(err));
    }

    @Override
    public void okResponse(byte[] ok, BackendConnection conn) {
        LOGGER.info("okResponse: " + conn);
    }

    @Override
    public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
                                 boolean isLeft, BackendConnection conn) {
        LOGGER.info("fieldEofResponse: " + conn);
    }

    @Override
    public boolean rowResponse(byte[] row, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
        LOGGER.info("rowResponse: " + conn);
        return false;
    }

    @Override
    public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
        LOGGER.info("rowEofResponse: " + conn);
    }

    @Override
    public void connectionClose(BackendConnection conn, String reason) {
        lock.lock();
        try {
            errMsg = "Backend connect connectionClose, Connection{dbInstance[" + conn.getHost() + ":" + conn.getPort() + "],Schema[" + conn.getSchema() + "]}";
            initiated.signal();
        } finally {
            lock.unlock();
        }
        LOGGER.info("connectionClose " + conn);
    }
}
@@ -56,7 +56,7 @@ public class BaseSelectHandler extends BaseDMLHandler {
} else {
ShardingNode dn = DbleServer.getInstance().getConfig().getShardingNodes().get(rrss.getName());
//autocommit is session.getWriteSource().isAutocommit() && !session.getWriteSource().isTxStart()
final BackendConnection newConn = dn.getConnection(dn.getDatabase(), autocommit, rrss.getRunOnSlave(), rrss);
final BackendConnection newConn = dn.getConnection(dn.getDatabase(), rrss.getRunOnSlave(), rrss);
session.bindConnection(rrss, newConn);
newConn.setResponseHandler(this);
((MySQLConnection) newConn).setRowDataFlowing(true);
@@ -5,6 +5,7 @@
package com.actiontech.dble.backend.mysql.nio.handler.query.impl;
import com.actiontech.dble.DbleServer;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.net.mysql.FieldPacket;
@@ -31,17 +32,35 @@ public class MultiNodeEasyMergeHandler extends MultiNodeMergeHandler {
}
@Override
public void execute() throws Exception {
public void execute() {
synchronized (exeHandlers) {
if (terminate.get())
return;
for (BaseSelectHandler exeHandler : exeHandlers) {
session.setHandlerStart(exeHandler); //base start execute
MySQLConnection exeConn = exeHandler.initConnection();
if (exeConn != null) {
exeConn.setComplexQuery(true);
exeHandler.execute(exeConn);
}
if (Thread.currentThread().getName().contains("complexQueryExecutor")) {
doExecute();
} else {
DbleServer.getInstance().getComplexQueryExecutor().execute(new Runnable() {
@Override
public void run() {
doExecute();
}
});
}
}
}
private void doExecute() {
for (BaseSelectHandler exeHandler : exeHandlers) {
session.setHandlerStart(exeHandler); //base start execute
MySQLConnection exeConn = null;
try {
exeConn = exeHandler.initConnection();
exeConn.setComplexQuery(true);
exeHandler.execute(exeConn);
} catch (Exception e) {
exeHandler.connectionError(e, exeConn);
return;
}
}
}
@@ -5,6 +5,7 @@
package com.actiontech.dble.backend.mysql.nio.handler.query.impl;
import com.actiontech.dble.DbleServer;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.backend.mysql.nio.handler.util.ArrayMinHeap;
@@ -51,18 +52,36 @@ public class MultiNodeMergeAndOrderHandler extends MultiNodeMergeHandler {
}
@Override
public void execute() throws Exception {
public void execute() {
synchronized (exeHandlers) {
if (terminate.get())
return;
for (BaseSelectHandler exeHandler : exeHandlers) {
session.setHandlerStart(exeHandler); //base start execute
MySQLConnection exeConn = exeHandler.initConnection();
if (exeConn != null) {
exeConn.setComplexQuery(true);
queues.put(exeConn, new LinkedBlockingQueue<>(queueSize));
exeHandler.execute(exeConn);
}
if (Thread.currentThread().getName().contains("complexQueryExecutor")) {
doExecute();
} else {
DbleServer.getInstance().getComplexQueryExecutor().execute(new Runnable() {
@Override
public void run() {
doExecute();
}
});
}
}
}
private void doExecute() {
for (BaseSelectHandler exeHandler : exeHandlers) {
session.setHandlerStart(exeHandler); //base start execute
MySQLConnection exeConn = null;
try {
exeConn = exeHandler.initConnection();
exeConn.setComplexQuery(true);
queues.put(exeConn, new LinkedBlockingQueue<>(queueSize));
exeHandler.execute(exeConn);
} catch (Exception e) {
exeHandler.connectionError(e, exeConn);
return;
}
}
}
@@ -110,7 +110,7 @@ public class XACommitFailStage extends XACommitStage {
if (errNo == ErrorCode.ER_XAER_NOTA) {
RouteResultsetNode rrn = (RouteResultsetNode) conn.getAttachment();
String xid = conn.getConnXID(session.getSessionXaID(), rrn.getMultiplexNum().longValue());
XACheckHandler handler = new XACheckHandler(xid, conn.getSchema(), rrn.getName(), conn.getPool().getDbGroup().getWriteSource());
XACheckHandler handler = new XACheckHandler(xid, conn.getSchema(), rrn.getName(), conn.getDbInstance().getDbGroup().getWriteDbInstance());
// if mysql connection holding xa transaction wasn't released, may result in ER_XAER_NOTA.
// so we need check xid here
handler.checkXid();
@@ -97,7 +97,7 @@ public class XARollbackStage extends XAStage {
if (errNo == ErrorCode.ER_XAER_NOTA) {
RouteResultsetNode rrn = (RouteResultsetNode) conn.getAttachment();
String xid = conn.getConnXID(session.getSessionXaID(), rrn.getMultiplexNum().longValue());
XACheckHandler handler = new XACheckHandler(xid, conn.getSchema(), rrn.getName(), conn.getPool().getDbGroup().getWriteSource());
XACheckHandler handler = new XACheckHandler(xid, conn.getSchema(), rrn.getName(), conn.getDbInstance().getDbGroup().getWriteDbInstance());
// if mysql connection holding xa transaction wasn't released, may result in ER_XAER_NOTA.
// so we need check xid here
handler.killXaThread(xaOldThreadIds.get(rrn));
@@ -0,0 +1,420 @@
package com.actiontech.dble.backend.pool;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.datasource.PhysicalDbInstance;
import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.backend.mysql.nio.MySQLConnectionListener;
import com.actiontech.dble.backend.mysql.nio.handler.ConnectionHeartBeatHandler;
import com.actiontech.dble.config.model.db.DbInstanceConfig;
import com.actiontech.dble.net.NIOProcessor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import static com.actiontech.dble.backend.mysql.nio.MySQLConnection.*;
/**
 * Connection pool for a single backend db instance. Live connections are kept
 * in {@link #allConnections}; borrowing is coordinated lock-free with a
 * per-connection state CAS plus a {@link QueuedSequenceSynchronizer} that
 * wakes waiters whenever a connection becomes idle. A periodic {@link Evictor}
 * task closes idle connections beyond minCon and refills the pool up to minCon.
 */
public class ConnectionPool extends PoolBase implements MySQLConnectionListener {
    private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionPool.class);

    // wakes up threads blocked in borrow() when a connection is released/created
    private final QueuedSequenceSynchronizer synchronizer;
    // number of threads currently inside borrow()
    private final AtomicInteger waiters;
    // every connection belonging to this pool, whatever its state
    private final CopyOnWriteArrayList<BackendConnection> allConnections;
    // connections created and not yet closed (bounded by config.getMaxCon())
    private final AtomicInteger totalConnections = new AtomicInteger();

    // evictor
    private final WeakReference<ClassLoader> factoryClassLoader;
    private volatile ConnectionPool.Evictor evictor = null;
    private final AtomicBoolean isClosed = new AtomicBoolean();

    public ConnectionPool(final DbInstanceConfig config, final PhysicalDbInstance instance) {
        super(config, instance);

        // save the current TCCL (if any) to be used later by the evictor Thread
        final ClassLoader cl = Thread.currentThread().getContextClassLoader();
        if (cl == null) {
            factoryClassLoader = null;
        } else {
            factoryClassLoader = new WeakReference<>(cl);
        }

        this.synchronizer = new QueuedSequenceSynchronizer();
        this.waiters = new AtomicInteger();
        this.allConnections = new CopyOnWriteArrayList<>();
    }

    /**
     * Borrow an idle connection, waiting up to the given timeout.
     *
     * @param schema   schema the connection should be attached to
     * @param timeout  maximum time to wait
     * @param timeUnit unit of {@code timeout}
     * @return an idle connection marked in-use, or null on timeout/pool closed
     * @throws InterruptedException if interrupted while waiting
     */
    public BackendConnection borrow(final String schema, long timeout, final TimeUnit timeUnit) throws InterruptedException {
        timeout = timeUnit.toNanos(timeout);
        final long startScan = System.nanoTime();
        final long originTimeout = timeout;
        BackendConnection createEntry = null;
        long startSeq;
        waiters.incrementAndGet();
        try {
            do {
                // scan until the synchronizer sequence stops advancing,
                // i.e. no connection was released while we were scanning
                do {
                    startSeq = synchronizer.currentSequence();
                    for (BackendConnection entry : allConnections) {
                        if (entry.compareAndSet(STATE_NOT_IN_USE, STATE_IN_USE)) {
                            // if we might have stolen another thread's new connection, restart the add...
                            if (waiters.get() > 1 && createEntry == null) {
                                newPooledEntry(schema);
                            }
                            return entry;
                        }
                    }
                } while (startSeq < synchronizer.currentSequence());

                // nothing idle: trigger an async create (at most one pending per borrower)
                if (createEntry == null || createEntry.getState() != INITIAL) {
                    createEntry = newPooledEntry(schema);
                }
                timeout = originTimeout - (System.nanoTime() - startScan);
            } while (timeout > 10_000L && synchronizer.waitUntilSequenceExceeded(startSeq, timeout));
        } finally {
            waiters.decrementAndGet();
        }
        return null;
    }

    /**
     * Create a new pooled object.
     *
     * @param schema Key associated with new pooled object
     * @return The new, wrapped pooled object, or null when the pool is
     *         disabled/closed or maxCon would be exceeded
     */
    private BackendConnection newPooledEntry(final String schema) {
        if (instance.isDisabled() || isClosed.get()) {
            return null;
        }

        if (totalConnections.incrementAndGet() <= config.getMaxCon()) {
            final BackendConnection conn = newConnection(schema, ConnectionPool.this);
            if (conn != null) {
                return conn;
            }
        }

        // creation refused or failed: undo the optimistic increment
        totalConnections.decrementAndGet();
        return null;
    }

    /**
     * Return a connection to the pool. With testOnReturn enabled the
     * connection is pinged first; onHeartbeatSuccess() marks it idle.
     */
    public void release(final BackendConnection conn) {
        if (getTestOnReturn()) {
            ConnectionHeartBeatHandler heartBeatHandler = new ConnectionHeartBeatHandler(conn, false, this);
            heartBeatHandler.ping(getConnectionHeartbeatTimeout());
            return;
        }

        conn.lazySet(STATE_NOT_IN_USE);
        synchronizer.signal();
    }

    /** Asynchronously create connections until minCon idle ones are available. */
    private void fillPool() {
        final int idleCount = getCount(STATE_NOT_IN_USE, STATE_HEARTBEAT);
        final int connectionsToAdd = Math.min(config.getMaxCon() - totalConnections.get(), config.getMinCon() - idleCount) -
                (totalConnections.get() - idleCount);
        if (LOGGER.isDebugEnabled() && connectionsToAdd > 0) {
            LOGGER.debug("need add {}", connectionsToAdd);
        }

        for (int i = 0; i < connectionsToAdd; i++) {
            newPooledEntry(null);
        }
    }

    /**
     * Calculate the number of objects to test in a run of the idle object
     * evictor.
     *
     * @return The number of objects to test for validity
     */
    private int getNumTests() {
        final int totalIdle = getCount(STATE_NOT_IN_USE);
        final int numTests = getNumTestsPerEvictionRun();
        if (numTests >= 0) {
            return Math.min(numTests, totalIdle);
        }
        // a negative value means "test ceil(idle / |numTests|) per run"
        return (int) (Math.ceil(totalIdle / Math.abs((double) numTests)));
    }

    @Override
    public void onCreateSuccess(BackendConnection conn) {
        allConnections.add(conn);
        if (getTestOnCreate()) {
            ConnectionHeartBeatHandler heartBeatHandler = new ConnectionHeartBeatHandler(conn, false, this);
            heartBeatHandler.ping(getConnectionHeartbeatTimeout());
            return;
        }

        conn.lazySet(STATE_NOT_IN_USE);
        // wake a waiting borrower, if any
        synchronizer.signal();
    }

    @Override
    public void onCreateFail(BackendConnection conn, Throwable e) {
        LOGGER.warn("create connection fail " + e.getMessage());
        // undo the increment done in newPooledEntry()
        totalConnections.decrementAndGet();
    }

    @Override
    public void onHeartbeatSuccess(BackendConnection conn) {
        conn.lazySet(STATE_NOT_IN_USE);
        synchronizer.signal();
    }

    /**
     * Count the connections whose state matches ANY of the given states.
     * Connection states are mutually exclusive, so requiring a match on all
     * given states (as a conjunction) would always yield 0 whenever more than
     * one distinct state is passed — e.g. fillPool()'s idle count.
     */
    public int getCount(final int... states) {
        int count = 0;
        for (final BackendConnection conn : allConnections) {
            final int connState = conn.getState();
            for (int state : states) {
                if (connState == state) {
                    count++;
                    break;
                }
            }
        }
        return count;
    }

    /** Same as {@link #getCount(int...)} but restricted to one schema. */
    public int getCount(String schema, final int... states) {
        int count = 0;
        for (final BackendConnection conn : allConnections) {
            if (!schema.equals(conn.getSchema())) {
                continue;
            }
            final int connState = conn.getState();
            for (int state : states) {
                if (connState == state) {
                    count++;
                    break;
                }
            }
        }
        return count;
    }

    public int size() {
        return allConnections.size();
    }

    /** Remove a closed connection from the pool and adjust the total count. */
    public void close(final BackendConnection conn) {
        if (remove(conn)) {
            final int tc = totalConnections.decrementAndGet();
            if (tc < 0) {
                LOGGER.warn("{} - Unexpected value of totalConnections={}", config.getInstanceName(), tc);
            }
        }
    }

    private boolean remove(final BackendConnection pooledEntry) {
        final boolean removed = allConnections.remove(pooledEntry);
        if (!removed) {
            LOGGER.warn("Attempt to remove an object from the bag that does not exist: {}", pooledEntry);
        }
        return removed;
    }

    /**
     * Closes the keyed object pool. Once the pool is closed
     */
    public void closeAllConnections(final String closureReason) {
        closeAllConnections(closureReason, false);
    }

    /**
     * Close every connection of the pool. In-use connections are either
     * force-closed along with their front connection (closeFrontConn) or
     * queued on NIOProcessor.BACKENDS_OLD for deferred closing.
     * NOTE(review): this spins until totalConnections drops to 0 — confirm
     * that deferred closes always decrement the counter, or this may busy-loop.
     */
    public void closeAllConnections(final String closureReason, final boolean closeFrontConn) {
        while (totalConnections.get() > 0) {
            for (BackendConnection conn : allConnections) {
                if (conn.getState() == STATE_IN_USE) {
                    if (closeFrontConn) {
                        ((MySQLConnection) conn).close(closureReason, true);
                    } else {
                        conn.setOldTimestamp(System.currentTimeMillis());
                        NIOProcessor.BACKENDS_OLD.add(conn);
                    }
                } else {
                    conn.close(closureReason);
                }
            }
        }
    }

    /**
     * Closes the keyed object pool. Once the pool is closed
     */
    public void stop(final String closureReason) {
        stop(closureReason, false);
    }

    /** Idempotently stop the evictor and close all connections. */
    public void stop(final String closureReason, boolean closeFront) {
        if (isClosed.getAndSet(true)) {
            return;
        }

        stopEvictor();
        closeAllConnections(closureReason, closeFront);
    }

    /**
     * One eviction pass: close idle connections past idleTimeout while more
     * than minCon remain, and (with testWhileIdle) ping the surviving idles.
     */
    private void evict() {
        final ArrayList<BackendConnection> idleList = new ArrayList<>(allConnections.size());
        for (final BackendConnection entry : allConnections) {
            if (entry.getState() == STATE_NOT_IN_USE) {
                idleList.add(entry);
            }
        }

        int removable = idleList.size() - config.getMinCon();

        // Sort pool entries on lastAccessed
        idleList.sort(LAST_ACCESS_COMPARABLE);

        logPoolState("before cleanup ");
        for (BackendConnection conn : idleList) {
            if (removable > 0 && System.currentTimeMillis() - conn.getLastTime() > getIdleTimeout() &&
                    conn.compareAndSet(STATE_NOT_IN_USE, STATE_RESERVED)) {
                conn.close("connection has passed idleTimeout");
                removable--;
            } else if (getTestWhileIdle() && conn.compareAndSet(STATE_NOT_IN_USE, STATE_HEARTBEAT)) {
                ConnectionHeartBeatHandler heartBeatHandler = new ConnectionHeartBeatHandler(conn, false, this);
                heartBeatHandler.ping(getConnectionHeartbeatTimeout());
            }
        }
    }

    public final int getThreadsAwaitingConnection() {
        return synchronizer.getQueueLength();
    }

    /**
     * <p>Starts the evictor with the given delay. If there is an evictor
     * running when this method is called, it is stopped and replaced with a
     * new evictor with the specified delay.</p>
     */
    public void startEvictor() {
        if (evictor != null) {
            EvictionTimer.cancel(evictor, getEvictorShutdownTimeoutMillis(), TimeUnit.MILLISECONDS);
        }
        evictor = new Evictor();
        EvictionTimer.schedule(evictor, 0, getTimeBetweenEvictionRunsMillis());
    }

    /**
     * Stops the evictor.
     */
    public void stopEvictor() {
        EvictionTimer.cancel(evictor, getEvictorShutdownTimeoutMillis(), TimeUnit.MILLISECONDS);
        evictor = null;
    }

    /**
     * Log the current pool state at debug level.
     *
     * @param prefix an optional prefix to prepend the log message
     */
    private void logPoolState(String... prefix) {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("{} db instance[{}] stats (total={}, active={}, idle={}, idleTest={} waiting={})", new Object[]{(prefix.length > 0 ? prefix[0] : ""), config.getInstanceName(),
                    allConnections.size() - getCount(STATE_REMOVED), getCount(STATE_IN_USE), getCount(STATE_NOT_IN_USE), getCount(STATE_HEARTBEAT), getThreadsAwaitingConnection()});
        }
    }

    /**
     * The idle object evictor.
     */
    class Evictor implements Runnable {
        private ScheduledFuture<?> scheduledFuture;

        /**
         * Run pool maintenance. Evict objects qualifying for eviction and then
         * ensure that the minimum number of idle instances are available.
         * Since the Timer that invokes Evictors is shared for all Pools but
         * pools may exist in different class loaders, the Evictor ensures that
         * any actions taken are under the class loader of the factory
         * associated with the pool.
         */
        @Override
        public void run() {
            if (!instance.isAlive()) {
                return;
            }
            final ClassLoader savedClassLoader =
                    Thread.currentThread().getContextClassLoader();
            try {
                if (factoryClassLoader != null) {
                    // Set the class loader for the factory
                    final ClassLoader cl = factoryClassLoader.get();
                    if (cl == null) {
                        // The pool has been dereferenced and the class loader
                        // GC'd. Cancel this timer so the pool can be GC'd as
                        // well.
                        cancel();
                        return;
                    }
                    Thread.currentThread().setContextClassLoader(cl);
                }

                // Evict from the pool
                evict();

                // Try to maintain minimum connections
                fillPool();
            } finally {
                // Restore the previous CCL
                Thread.currentThread().setContextClassLoader(savedClassLoader);
            }
        }

        /**
         * Sets the scheduled future.
         *
         * @param scheduledFuture the scheduled future.
         */
        void setScheduledFuture(final ScheduledFuture<?> scheduledFuture) {
            this.scheduledFuture = scheduledFuture;
        }

        /**
         * Cancels the scheduled future.
         */
        void cancel() {
            scheduledFuture.cancel(false);
        }
    }
}
@@ -0,0 +1,101 @@
package com.actiontech.dble.backend.pool;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
/**
 * Shared, lazily created scheduler that runs the idle-connection evictors of
 * every {@link ConnectionPool} on a single daemon thread. The executor is
 * created on the first {@link #schedule} call and torn down again once the
 * last task has been cancelled, so no thread stays alive while no pool runs.
 * All access to the executor is guarded by the class monitor.
 */
final class EvictionTimer {

    /** Executor instance */
    private static ScheduledThreadPoolExecutor executor; //@GuardedBy("EvictionTimer.class")

    /** Prevents instantiation */
    private EvictionTimer() {
        // static utility class - never instantiated
    }

    /**
     * @since 2.4.3
     */
    @Override
    public String toString() {
        return "EvictionTimer []";
    }

    /**
     * Adds the specified eviction task to the timer. Tasks that are added with a
     * call to this method *must* call {@link #cancel} to cancel the
     * task to prevent memory and/or thread leaks in application server
     * environments.
     *
     * @param task   Task to be scheduled.
     * @param delay  Delay in milliseconds before task is executed.
     * @param period Time in milliseconds between executions.
     */
    static synchronized void schedule(
            final ConnectionPool.Evictor task, final long delay, final long period) {
        if (executor == null) {
            // first task: spin up the shared single-threaded scheduler
            executor = new ScheduledThreadPoolExecutor(1, new EvictorThreadFactory());
            executor.setRemoveOnCancelPolicy(true);
        }
        final ScheduledFuture<?> future =
                executor.scheduleWithFixedDelay(task, delay, period, TimeUnit.MILLISECONDS);
        task.setScheduledFuture(future);
    }

    /**
     * Removes the specified eviction task from the timer.
     *
     * @param evictor Task to be cancelled.
     * @param timeout If the associated executor is no longer required, how
     *                long should this thread wait for the executor to
     *                terminate?
     * @param unit    The units for the specified timeout.
     */
    static synchronized void cancel(
            final ConnectionPool.Evictor evictor, final long timeout, final TimeUnit unit) {
        if (evictor != null) {
            evictor.cancel();
        }
        if (executor == null || !executor.getQueue().isEmpty()) {
            // other tasks still pending - keep the shared executor alive
            return;
        }
        executor.shutdown();
        try {
            executor.awaitTermination(timeout, unit);
        } catch (final InterruptedException ignored) {
            // Swallowed on purpose: propagating it would require an API change
        }
        executor.setCorePoolSize(0);
        executor = null;
    }

    /**
     * Thread factory that creates a daemon thread, with the context class loader from this class.
     */
    private static class EvictorThreadFactory implements ThreadFactory {

        @Override
        public Thread newThread(final Runnable runnable) {
            final Thread worker = new Thread(null, runnable, "connection-pool-evictor-thread");
            // daemon so the timer never blocks JVM shutdown (POOL-363)
            worker.setDaemon(true);
            AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
                worker.setContextClassLoader(EvictorThreadFactory.class.getClassLoader());
                return null;
            });
            return worker;
        }
    }
}
@@ -0,0 +1,177 @@
package com.actiontech.dble.backend.pool;
import com.actiontech.dble.DbleServer;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.datasource.PhysicalDbInstance;
import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.backend.mysql.nio.MySQLConnectionAuthenticator;
import com.actiontech.dble.backend.mysql.nio.MySQLConnectionListener;
import com.actiontech.dble.backend.mysql.nio.handler.ResponseHandler;
import com.actiontech.dble.config.model.db.DbInstanceConfig;
import com.actiontech.dble.net.NIOConnector;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.AsynchronousSocketChannel;
import java.nio.channels.CompletionHandler;
import java.nio.channels.NetworkChannel;
import java.nio.channels.SocketChannel;
public class PoolBase {
protected final DbInstanceConfig config;
protected final PhysicalDbInstance instance;
private final long connectionTimeout;
private final long connectionHeartbeatTimeout;
private final boolean testOnCreate;
private final boolean testOnBorrow;
private final boolean testOnReturn;
private final boolean testWhileIdle;
private final long timeBetweenEvictionRunsMillis;
private final int numTestsPerEvictionRun;
private final long evictorShutdownTimeoutMillis;
private final long idleTimeout;
public PoolBase(DbInstanceConfig dbConfig, PhysicalDbInstance instance) {
this.config = dbConfig;
this.instance = instance;
PoolConfig poolConfig = dbConfig.getPoolConfig();
this.testOnBorrow = poolConfig.getTestOnBorrow();
this.testOnCreate = poolConfig.getTestOnCreate();
this.testOnReturn = poolConfig.getTestOnReturn();
this.testWhileIdle = poolConfig.getTestWhileIdle();
this.connectionHeartbeatTimeout = poolConfig.getConnectionHeartbeatTimeout();
this.connectionTimeout = poolConfig.getConnectionTimeout();
this.timeBetweenEvictionRunsMillis = poolConfig.getTimeBetweenEvictionRunsMillis();
this.numTestsPerEvictionRun = poolConfig.getNumTestsPerEvictionRun();
this.evictorShutdownTimeoutMillis = poolConfig.getEvictorShutdownTimeoutMillis();
this.idleTimeout = poolConfig.getIdleTimeout();
}
/**
* only for heartbeat
*
* @param handler
* @return
*/
public void newConnection(String schema, ResponseHandler handler) {
try {
NetworkChannel channel = openSocketChannel();
MySQLConnection conn = new MySQLConnection(channel, config, instance.isReadInstance(), instance.isAutocommitSynced(), instance.isIsolationSynced());
conn.setSocketParams(false);
conn.setSchema(schema);
conn.setHandler(new MySQLConnectionAuthenticator(conn, new MySQLConnectionListener() {
@Override
public void onCreateSuccess(BackendConnection conn) {
handler.connectionAcquired(conn);
}
@Override
public void onCreateFail(BackendConnection conn, Throwable e) {
handler.connectionError(e, conn);
}
@Override
public void onHeartbeatSuccess(BackendConnection conn) {
}
}));
if (channel instanceof AsynchronousSocketChannel) {
((AsynchronousSocketChannel) channel).connect(
new InetSocketAddress(config.getIp(), config.getPort()), conn,
(CompletionHandler) DbleServer.getInstance().getConnector());
} else {
((NIOConnector) DbleServer.getInstance().getConnector()).postConnect(conn);
}
} catch (IOException ioe) {
handler.connectionError(ioe, null);
}
}
BackendConnection newConnection(String schema, MySQLConnectionListener listener) {
try {
NetworkChannel channel = openSocketChannel();
MySQLConnection conn = new MySQLConnection(channel, config, instance.isReadInstance(), instance.isAutocommitSynced(), instance.isIsolationSynced());
conn.setSocketParams(false);
conn.setSchema(schema);
conn.setHandler(new MySQLConnectionAuthenticator(conn, listener));
conn.setDbInstance(instance);
if (channel instanceof AsynchronousSocketChannel) {
((AsynchronousSocketChannel) channel).connect(
new InetSocketAddress(config.getIp(), config.getPort()), conn,
(CompletionHandler) DbleServer.getInstance().getConnector());
} else {
((NIOConnector) DbleServer.getInstance().getConnector()).postConnect(conn);
}
return conn;
} catch (IOException ioe) {
listener.onCreateFail(null, ioe);
return null;
}
}
private NetworkChannel openSocketChannel() throws IOException {
NetworkChannel channel;
if (DbleServer.getInstance().isAIO()) {
channel = AsynchronousSocketChannel.open(DbleServer.getInstance().getNextAsyncChannelGroup());
} else {
channel = SocketChannel.open();
((SocketChannel) channel).configureBlocking(false);
}
return channel;
}
public final boolean getTestOnCreate() {
return testOnCreate;
}
public final boolean getTestOnBorrow() {
return testOnBorrow;
}
public final boolean getTestOnReturn() {
return testOnReturn;
}
public final boolean getTestWhileIdle() {
return testWhileIdle;
}
public final long getTimeBetweenEvictionRunsMillis() {
return timeBetweenEvictionRunsMillis;
}
public final int getNumTestsPerEvictionRun() {
return numTestsPerEvictionRun;
}
/**
 * Gets the timeout that will be used when waiting for the Evictor to
 * shutdown if this pool is closed and it is the only pool still using
 * the value for the Evictor.
 *
 * @return The timeout in milliseconds that will be used while waiting for
 * the Evictor to shut down.
 */
public final long getEvictorShutdownTimeoutMillis() {
    return evictorShutdownTimeoutMillis;
}
/**
 * @return maximum time to wait for a connection, presumably in milliseconds (matches PoolConfig naming) -- confirm units at the field definition
 */
public long getConnectionTimeout() {
    return connectionTimeout;
}
/**
 * @return timeout for a connection heartbeat check, presumably in milliseconds -- confirm units at the field definition
 */
public long getConnectionHeartbeatTimeout() {
    return connectionHeartbeatTimeout;
}
/**
 * @return how long a connection may stay idle before becoming eligible for eviction, presumably in milliseconds
 */
public long getIdleTimeout() {
    return idleTimeout;
}
}
@@ -0,0 +1,241 @@
package com.actiontech.dble.backend.pool;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
/**
 * Mutable configuration bean for the backend connection pool.  All properties are
 * {@code volatile} so runtime reconfiguration is visible to pool worker threads
 * without extra locking.  Validation of pooled connections is performed by the
 * factory's {@code validateObject()} at the points selected by the
 * {@code testOnCreate} / {@code testOnBorrow} / {@code testOnReturn} /
 * {@code testWhileIdle} flags.
 */
public class PoolConfig {

    /** Default maximum wait for a connection: 30 seconds. */
    private static final long CONNECTION_TIMEOUT = SECONDS.toMillis(30);
    // NOTE(review): MILLISECONDS.toMillis(20) is just 20 ms -- confirm this is the
    // intended heartbeat timeout and not a typo for SECONDS.toMillis(20).
    private static final long CON_HEARTBEAT_TIMEOUT = MILLISECONDS.toMillis(20);
    /** Default idle timeout before eviction eligibility: 30 minutes. */
    private static final long DEFAULT_IDLE_TIMEOUT = 30 * 60 * 1000L;
    /** Default interval between housekeeping (evictor) runs: 30 seconds. */
    private static final long HOUSEKEEPING_PERIOD_MS = SECONDS.toMillis(30);

    private volatile long connectionTimeout = CONNECTION_TIMEOUT;
    private volatile long connectionHeartbeatTimeout = CON_HEARTBEAT_TIMEOUT;
    private volatile boolean testOnCreate = false;
    private volatile boolean testOnBorrow = false;
    private volatile boolean testOnReturn = false;
    private volatile boolean testWhileIdle = false;
    private volatile long timeBetweenEvictionRunsMillis = HOUSEKEEPING_PERIOD_MS;
    private volatile int numTestsPerEvictionRun = 3;
    private volatile long evictorShutdownTimeoutMillis = 10000L;
    private volatile long idleTimeout = DEFAULT_IDLE_TIMEOUT;

    /** Creates a configuration populated with the defaults above. */
    public PoolConfig() {
    }

    /**
     * Whether newly created connections are validated before being handed out.
     * A connection that fails validation makes the borrow fail.
     *
     * @return true if new connections are validated on creation
     * @see #setTestOnCreate
     */
    public final boolean getTestOnCreate() {
        return testOnCreate;
    }

    /**
     * Sets whether newly created connections are validated before being handed out.
     *
     * @param testOnCreate true to validate connections on creation
     * @see #getTestOnCreate
     */
    public final void setTestOnCreate(final boolean testOnCreate) {
        this.testOnCreate = testOnCreate;
    }

    /**
     * Whether connections are validated when borrowed from the pool.  A connection
     * that fails validation is destroyed and another borrow attempt is made.
     *
     * @return true if connections are validated on borrow
     * @see #setTestOnBorrow
     */
    public final boolean getTestOnBorrow() {
        return testOnBorrow;
    }

    /**
     * Sets whether connections are validated when borrowed from the pool.
     *
     * @param testOnBorrow true to validate connections on borrow
     * @see #getTestOnBorrow
     */
    public final void setTestOnBorrow(final boolean testOnBorrow) {
        this.testOnBorrow = testOnBorrow;
    }

    /**
     * Whether connections are validated when returned to the pool.  A returned
     * connection that fails validation is destroyed rather than re-pooled.
     *
     * @return true if connections are validated on return
     * @see #setTestOnReturn
     */
    public final boolean getTestOnReturn() {
        return testOnReturn;
    }

    /**
     * Sets whether connections are validated when returned to the pool.
     *
     * @param testOnReturn true to validate connections on return
     * @see #getTestOnReturn
     */
    public final void setTestOnReturn(final boolean testOnReturn) {
        this.testOnReturn = testOnReturn;
    }

    /**
     * Whether idle connections are validated by the evictor thread; failures are
     * removed from the pool and destroyed.
     *
     * @return true if idle connections are validated by the evictor
     * @see #setTestWhileIdle
     * @see #setTimeBetweenEvictionRunsMillis
     */
    public final boolean getTestWhileIdle() {
        return testWhileIdle;
    }

    /**
     * Sets whether idle connections are validated by the evictor thread.  Has no
     * effect unless the evictor is enabled via a positive
     * {@code timeBetweenEvictionRunsMillis}.
     *
     * @param testWhileIdle true to validate idle connections
     * @see #getTestWhileIdle
     * @see #setTimeBetweenEvictionRunsMillis
     */
    public final void setTestWhileIdle(final boolean testWhileIdle) {
        this.testWhileIdle = testWhileIdle;
    }

    /**
     * Milliseconds between runs of the idle-connection evictor.  Non-positive
     * means the evictor does not run.
     *
     * @return milliseconds between evictor runs
     * @see #setTimeBetweenEvictionRunsMillis
     */
    public final long getTimeBetweenEvictionRunsMillis() {
        return timeBetweenEvictionRunsMillis;
    }

    /**
     * Sets the milliseconds between runs of the idle-connection evictor.
     * Positive starts the evictor; non-positive disables it.
     *
     * @param timeBetweenEvictionRunsMillis milliseconds between evictor runs
     * @see #getTimeBetweenEvictionRunsMillis
     */
    public final void setTimeBetweenEvictionRunsMillis(final long timeBetweenEvictionRunsMillis) {
        this.timeBetweenEvictionRunsMillis = timeBetweenEvictionRunsMillis;
    }

    /**
     * @return number of idle connections examined during each evictor run
     */
    public final int getNumTestsPerEvictionRun() {
        return numTestsPerEvictionRun;
    }

    /**
     * @param numTestsPerEvictionRun number of idle connections to examine per evictor run
     */
    public final void setNumTestsPerEvictionRun(final int numTestsPerEvictionRun) {
        this.numTestsPerEvictionRun = numTestsPerEvictionRun;
    }

    /**
     * Timeout used when waiting for the evictor to shut down after the pool is
     * closed (when this pool is the last user of the evictor).
     *
     * @return evictor shutdown wait, in milliseconds
     */
    public final long getEvictorShutdownTimeoutMillis() {
        return evictorShutdownTimeoutMillis;
    }

    /**
     * Sets the timeout used when waiting for the evictor to shut down after the
     * pool is closed.
     *
     * @param evictorShutdownTimeoutMillis evictor shutdown wait, in milliseconds
     */
    public final void setEvictorShutdownTimeoutMillis(final long evictorShutdownTimeoutMillis) {
        this.evictorShutdownTimeoutMillis = evictorShutdownTimeoutMillis;
    }

    /**
     * @return maximum time to wait for a connection, in milliseconds
     */
    public long getConnectionTimeout() {
        return connectionTimeout;
    }

    /**
     * @param connectionTimeout maximum connection wait, in milliseconds
     */
    public void setConnectionTimeout(long connectionTimeout) {
        this.connectionTimeout = connectionTimeout;
    }

    /**
     * @return heartbeat-check timeout, in milliseconds
     */
    public long getConnectionHeartbeatTimeout() {
        return connectionHeartbeatTimeout;
    }

    /**
     * @param connectionHeartbeatTimeout heartbeat-check timeout, in milliseconds
     */
    public void setConnectionHeartbeatTimeout(long connectionHeartbeatTimeout) {
        this.connectionHeartbeatTimeout = connectionHeartbeatTimeout;
    }

    /**
     * @return idle time before a connection is eligible for eviction, in milliseconds
     */
    public long getIdleTimeout() {
        return idleTimeout;
    }

    /**
     * @param idleTimeout idle time before eviction eligibility, in milliseconds
     */
    public void setIdleTimeout(long idleTimeout) {
        this.idleTimeout = idleTimeout;
    }
}
@@ -0,0 +1,20 @@
package com.actiontech.dble.backend.pool;
/**
 * State-machine contract for an entry managed by the connection pool.  State is a
 * single {@code int} driven by CAS transitions ({@link #compareAndSet}), so a thread
 * that wins the transition owns the entry for that phase.
 */
public interface PooledEntry {
    // Entry has been removed from the pool and must never be handed out again.
    int STATE_REMOVED = -4;
    // Entry is currently held for a heartbeat/validation check.
    int STATE_HEARTBEAT = -3;
    // Entry is reserved -- presumably set aside (e.g. pending eviction); confirm at implementations.
    int STATE_RESERVED = -2;
    // Entry has been borrowed and is in use by a caller.
    int STATE_IN_USE = -1;
    // Freshly created entry, not yet published to the pool.
    int INITIAL = 0;
    // Entry is idle in the pool and available for borrowing.
    int STATE_NOT_IN_USE = 1;

    /** Atomically transitions from {@code expect} to {@code update}; returns true on success. */
    boolean compareAndSet(int expect, int update);

    /** Eventually-visible state write (no immediate fence) -- for transitions no reader races on. */
    void lazySet(int update);

    /** @return the current state, one of the constants above. */
    int getState();

    /** Returns the entry to the pool -- implementation-defined; presumably marks it not-in-use. */
    void release();
}
@@ -0,0 +1,92 @@
package com.actiontech.dble.backend.pool;
import com.actiontech.dble.backend.pool.util.Java8Sequence;
import com.actiontech.dble.backend.pool.util.Sequence;
import java.util.concurrent.locks.AbstractQueuedLongSynchronizer;
/**
 * A sequence-based synchronizer: a waiting thread blocks until a shared, monotonically
 * increasing sequence strictly exceeds the threshold it captured earlier, instead of
 * waiting on an explicit predicate.  {@link #signal()} bumps the sequence and releases
 * queued waiters, who then re-check their threshold.
 */
public class QueuedSequenceSynchronizer {
    // Monotonically increasing counter shared between signalers and waiters.
    private final Sequence sequence;
    // AQS (shared mode) synchronizer that parks/unparks the waiting threads.
    private final Synchronizer synchronizer;

    /**
     * Default constructor
     */
    public QueuedSequenceSynchronizer() {
        this.synchronizer = new Synchronizer();
        this.sequence = new Java8Sequence();
    }

    /**
     * Signal any waiting threads: increments the sequence and propagates a shared release.
     */
    public void signal() {
        synchronizer.releaseShared(1);
    }

    /**
     * Get the current sequence.
     *
     * @return the current sequence
     */
    public long currentSequence() {
        return sequence.get();
    }

    /**
     * Block the current thread until the current sequence exceeds the specified threshold, or
     * until the specified timeout is reached.
     *
     * @param seq the threshold the sequence must reach before this thread becomes unblocked
     * @param nanosTimeout a nanosecond timeout specifying the maximum time to wait
     * @return true if the threshold was reached, false if the wait timed out
     * @throws InterruptedException if the thread is interrupted while waiting
     */
    public boolean waitUntilSequenceExceeded(long seq, long nanosTimeout) throws InterruptedException {
        return synchronizer.tryAcquireSharedNanos(seq, nanosTimeout);
    }

    /**
     * Queries whether any threads are waiting for the sequence to reach a particular threshold.
     *
     * @return true if there may be other threads waiting for a sequence threshold to be reached
     */
    public boolean hasQueuedThreads() {
        return synchronizer.hasQueuedThreads();
    }

    /**
     * Returns an estimate of the number of threads waiting for a sequence threshold to be reached. The
     * value is only an estimate because the number of threads may change dynamically while this method
     * traverses internal data structures. This method is designed for use in monitoring system state,
     * not for synchronization control.
     *
     * @return the estimated number of threads waiting for a sequence threshold to be reached
     */
    public int getQueueLength() {
        return synchronizer.getQueueLength();
    }

    private final class Synchronizer extends AbstractQueuedLongSynchronizer {
        private static final long serialVersionUID = 104753538004341218L;

        /**
         * Acquire succeeds (result >= 0) exactly when {@code sequence.get() > seq},
         * i.e. the published sequence has strictly passed the caller's threshold.
         */
        @Override
        protected long tryAcquireShared(final long seq) {
            return sequence.get() - (seq + 1);
        }

        /**
         * Bumps the sequence; returning true propagates the release so all queued
         * waiters re-run {@code tryAcquireShared} against the new value.
         */
        @Override
        protected boolean tryReleaseShared(final long unused) {
            sequence.increment();
            return true;
        }
    }
}
@@ -0,0 +1,18 @@
package com.actiontech.dble.backend.pool.util;
import java.util.concurrent.atomic.LongAdder;
/**
 * A monotonically increasing long sequence backed by {@link LongAdder}:
 * {@code increment()} is inherited from {@code LongAdder}, and {@link #get()}
 * sums the per-thread cells, so concurrent increments scale without contention.
 *
 * @author brettw
 */
@SuppressWarnings("serial")
public class Java8Sequence extends LongAdder implements Sequence {

    /** @return the current sequence value, via {@link LongAdder#sum()}. */
    @Override
    public long get() {
        return this.sum();
    }
}
@@ -0,0 +1,22 @@
package com.actiontech.dble.backend.pool.util;
/**
 * A monotonically increasing long sequence: {@link #increment()} only ever moves the
 * value forward, and {@link #get()} reads the current value.
 *
 * @author brettw
 */
// NOTE(review): @SuppressWarnings("serial") on an interface is unusual -- presumably
// carried over from the original source; confirm it is still needed.
@SuppressWarnings("serial")
public interface Sequence {
    /**
     * Increments the current sequence by one.
     */
    void increment();

    /**
     * Get the current sequence.
     *
     * @return the current sequence.
     */
    long get();
}
@@ -0,0 +1,31 @@
package com.actiontech.dble.backend.pool.util;
import io.netty.util.HashedWheelTimer;
import io.netty.util.Timer;
import net.sf.ehcache.util.NamedThreadFactory;
import java.util.concurrent.TimeUnit;
/**
 * Process-wide singleton holder for a shared {@link Timer} (a netty
 * {@link HashedWheelTimer}), used to schedule coarse-grained timeout tasks.
 */
public final class TimerHolder {
    /** Tick duration of the shared wheel timer, in milliseconds. */
    private static final long DEFAULT_TICK_DURATION = 10;

    private TimerHolder() {
        // static holder -- never instantiated
    }

    /**
     * Get a singleton instance of {@link Timer}. <br>
     * The tick duration is {@link #DEFAULT_TICK_DURATION}.
     *
     * @return Timer
     */
    public static Timer getTimer() {
        return LazyHolder.INSTANCE;
    }

    /** Initialization-on-demand holder: the timer thread starts only on first use. */
    private static class LazyHolder {
        static final Timer INSTANCE = new HashedWheelTimer(
                new NamedThreadFactory("DefaultTimer" + DEFAULT_TICK_DURATION),
                DEFAULT_TICK_DURATION, TimeUnit.MILLISECONDS);
    }
}
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.config;
import com.actiontech.dble.backend.datasource.PhysicalDbGroup;
@@ -85,22 +85,21 @@ public class ConfigInitializer implements ProblemReporter {
private void checkWriteHost() {
if (this.dbGroups.isEmpty()) {
return;
} else {
//Mark all dbInstance whether they are fake or not
for (PhysicalDbGroup dbGroup : this.dbGroups.values()) {
for (PhysicalDbInstance source : dbGroup.getAllDbInstances()) {
if (checkSourceFake(source)) {
source.setFakeNode(true);
} else if (!source.isDisabled()) {
this.fullyConfigured = true;
}
}
//Mark all dbInstance whether they are fake or not
for (PhysicalDbGroup dbGroup : this.dbGroups.values()) {
for (PhysicalDbInstance source : dbGroup.getAllDbInstances()) {
if (checkSourceFake(source)) {
source.setFakeNode(true);
} else if (!source.isDisabled()) {
this.fullyConfigured = true;
}
}
// if there are dbGroups exists. no empty shardingNodes allowed
for (ShardingNode shardingNode : this.shardingNodes.values()) {
if (shardingNode.getDbGroup() == null) {
throw new ConfigException("dbGroup not exists " + shardingNode.getDbGroupName());
}
}
// if there are dbGroups exists. no empty shardingNodes allowed
for (ShardingNode shardingNode : this.shardingNodes.values()) {
if (shardingNode.getDbGroup() == null) {
throw new ConfigException("dbGroup not exists " + shardingNode.getDbGroupName());
}
}
}
@@ -238,7 +237,7 @@ public class ConfigInitializer implements ProblemReporter {
private void testDbInstance(Set<String> errNodeKeys, Set<String> errSourceKeys, BoolPtr isConnectivity,
BoolPtr isAllDbInstanceConnected, List<Pair<String, String>> nodeList, PhysicalDbGroup pool, PhysicalDbInstance ds) {
boolean isMaster = ds == pool.getWriteSource();
boolean isMaster = ds == pool.getWriteDbInstance();
String dbInstanceName = "dbInstance[" + ds.getDbGroupConfig().getName() + "." + ds.getName() + "]";
try {
BoolPtr isDSConnectedPtr = new BoolPtr(false);
@@ -321,8 +320,6 @@ public class ConfigInitializer implements ProblemReporter {
return erRelations;
}
private Map<String, ShardingNode> initShardingNodes(Map<String, ShardingNodeConfig> nodeConf) {
Map<String, ShardingNode> nodes = new HashMap<>(nodeConf.size());
for (ShardingNodeConfig conf : nodeConf.values()) {
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.config;
import com.actiontech.dble.DbleServer;
@@ -20,6 +20,7 @@ import com.actiontech.dble.config.model.user.UserConfig;
import com.actiontech.dble.config.model.user.UserName;
import com.actiontech.dble.config.util.ConfigException;
import com.actiontech.dble.config.util.ConfigUtil;
import com.actiontech.dble.meta.ReloadLogHelper;
import com.actiontech.dble.route.parser.ManagerParseConfig;
import com.actiontech.dble.route.parser.util.Pair;
import com.actiontech.dble.server.variables.SystemVariables;
@@ -364,7 +365,8 @@ public class ServerConfig {
if (recycleDbGroups != null) {
for (PhysicalDbGroup oldDbGroup : recycleDbGroups.values()) {
if (oldDbGroup != null) {
oldDbGroup.stopHeartbeat();
ReloadLogHelper.info("reload config, recycle old group. old active backend conn will be close", LOGGER);
oldDbGroup.stop("reload config, recycle old group", ((loadAllMode & ManagerParseConfig.OPTF_MODE) != 0));
}
}
}
@@ -378,13 +380,13 @@ public class ServerConfig {
// 1 start heartbeat
// 2 apply the configure
//---------------------------------------------------
if (changeOrAddDbGroups != null) {
for (PhysicalDbGroup newDbGroup : changeOrAddDbGroups.values()) {
if (newDbGroup != null && isFullyConfigured) {
newDbGroup.startHeartbeat();
}
}
}
// if (changeOrAddDbGroups != null) {
// for (PhysicalDbGroup newDbGroup : changeOrAddDbGroups.values()) {
// if (newDbGroup != null && isFullyConfigured) {
// newDbGroup.startHeartbeat();
// }
// }
// }
this.shardingNodes = newShardingNodes;
this.dbGroups = newDbGroups;
this.fullyConfigured = isFullyConfigured;
@@ -104,12 +104,8 @@ public class GetAndSyncDbInstanceKeyVariables implements Callable<KeyVariables>
break;
}
keyVariables.setTargetIsolation(SystemConfig.getInstance().getTxIsolation());
keyVariables.setMaxPacketSize(Integer.parseInt(result.getResult().get(COLUMN_MAX_PACKET)));
keyVariables.setTargetMaxPacketSize(SystemConfig.getInstance().getMaxPacketSize() + KeyVariables.MARGIN_PACKET_SIZE);
keyVariables.setReadOnly(result.getResult().get(COLUMN_READONLY).equals("1"));
if (needSync) {
@@ -8,25 +8,25 @@ package com.actiontech.dble.config.loader.xml;
import com.actiontech.dble.backend.datasource.PhysicalDbGroup;
import com.actiontech.dble.backend.datasource.PhysicalDbInstance;
import com.actiontech.dble.backend.mysql.nio.MySQLInstance;
import com.actiontech.dble.backend.pool.PoolConfig;
import com.actiontech.dble.config.ProblemReporter;
import com.actiontech.dble.config.Versions;
import com.actiontech.dble.config.model.SystemConfig;
import com.actiontech.dble.config.model.db.DbGroupConfig;
import com.actiontech.dble.config.model.db.DbInstanceConfig;
import com.actiontech.dble.config.util.ConfigException;
import com.actiontech.dble.config.util.ConfigUtil;
import com.actiontech.dble.config.util.ParameterMapping;
import com.actiontech.dble.manager.handler.DbGroupHAHandler;
import com.actiontech.dble.util.DecryptUtil;
import com.actiontech.dble.util.ResourceUtil;
import com.actiontech.dble.util.StringUtil;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.lang.reflect.InvocationTargetException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -99,7 +99,7 @@ public class XMLDbLoader {
}
}
private void loadDbGroups(Element root) {
private void loadDbGroups(Element root) throws InvocationTargetException, IllegalAccessException {
NodeList list = root.getElementsByTagName("dbGroup");
for (int i = 0, n = list.getLength(); i < n; ++i) {
Set<String> instanceNames = new HashSet<>();
@@ -159,14 +159,14 @@ public class XMLDbLoader {
DbGroupConfig dbGroupConf = new DbGroupConfig(name, writeDbConf, readDbConfList, delayThreshold);
dbGroupConf.setRwSplitMode(rwSplitMode);
dbGroupConf.setHearbeatSQL(heartbeatSQL);
dbGroupConf.setHeartbeatSQL(heartbeatSQL);
dbGroupConf.setHeartbeatTimeout(Integer.parseInt(strHBTimeout) * 1000);
dbGroupConf.setErrorRetryCount(Integer.parseInt(strHBErrorRetryCount));
dbGroupConfigs.put(dbGroupConf.getName(), dbGroupConf);
}
}
private DbInstanceConfig createDbInstanceConf(String dbGroup, Element node) {
private DbInstanceConfig createDbInstanceConf(String dbGroup, Element node) throws InvocationTargetException, IllegalAccessException {
String name = node.getAttribute("name");
String nodeUrl = node.getAttribute("url");
@@ -176,7 +176,7 @@ public class XMLDbLoader {
if (!nameMatcher.matches()) {
throw new ConfigException("dbInstance name " + name + " show be use " + DbGroupHAHandler.DB_NAME_FORMAT + "!");
}
if (empty(name) || empty(nodeUrl) || empty(user)) {
if (StringUtil.isEmpty(name) || StringUtil.isEmpty(nodeUrl) || StringUtil.isEmpty(user)) {
throw new ConfigException(
"dbGroup " + dbGroup +
" define error,some attributes of this element is empty: " +
@@ -190,29 +190,34 @@ public class XMLDbLoader {
password = DecryptUtil.dbHostDecrypt(usingDecrypt, name, user, password);
String disabledStr = ConfigUtil.checkAndGetAttribute(node, "disabled", "false", problemReporter);
boolean disabled = Boolean.parseBoolean(disabledStr);
String readWeightStr = ConfigUtil.checkAndGetAttribute(node, "readWeight", String.valueOf(PhysicalDbGroup.WEIGHT), problemReporter);
int readWeight = Integer.parseInt(readWeightStr);
int maxCon = Integer.parseInt(node.getAttribute("maxCon"));
int minCon = Integer.parseInt(node.getAttribute("minCon"));
String primaryStr = ConfigUtil.checkAndGetAttribute(node, "primary", "false", problemReporter);
boolean primary = Boolean.parseBoolean(primaryStr);
DbInstanceConfig conf = new DbInstanceConfig(name, ip, port, nodeUrl, user, password, disabled, primary);
String readWeightStr = ConfigUtil.checkAndGetAttribute(node, "readWeight", String.valueOf(PhysicalDbGroup.WEIGHT), problemReporter);
int readWeight = Integer.parseInt(readWeightStr);
int maxCon = Integer.parseInt(node.getAttribute("maxCon"));
int minCon = Integer.parseInt(node.getAttribute("minCon"));
conf.setMaxCon(maxCon);
conf.setMinCon(minCon);
conf.setReadWeight(readWeight);
String id = node.getAttribute("id");
if (!"".equals(id)) {
conf.setId(id);
} else {
if (StringUtil.isEmpty(id)) {
conf.setId(name);
} else {
conf.setId(id);
}
return conf;
}
// init properties of connection pool
PoolConfig poolConfig = new PoolConfig();
Properties poolProperties = ConfigUtil.loadElements(node);
ParameterMapping.mapping(poolConfig, poolProperties, problemReporter);
if (poolProperties.size() > 0) {
throw new ConfigException("These properties of system are not recognized: " + StringUtil.join(poolProperties.stringPropertyNames(), ","));
}
conf.setPoolConfig(poolConfig);
private boolean empty(String dnName) {
return dnName == null || dnName.length() == 0;
return conf;
}
private Map<String, PhysicalDbGroup> initDbGroups(Map<String, DbGroupConfig> nodeConf) {
@@ -225,9 +230,7 @@ public class XMLDbLoader {
return nodes;
}
private PhysicalDbInstance createDbInstance(DbGroupConfig conf, DbInstanceConfig node,
boolean isRead) {
node.setIdleTimeout(SystemConfig.getInstance().getIdleTimeout());
private PhysicalDbInstance createDbInstance(DbGroupConfig conf, DbInstanceConfig node, boolean isRead) {
return new MySQLInstance(node, conf, isRead);
}
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.config.model.db;
@@ -17,7 +18,7 @@ public class DbGroupConfig {
private int rwSplitMode = PhysicalDbGroup.RW_SPLIT_OFF;
private final DbInstanceConfig writeInstanceConfig;
private final DbInstanceConfig[] readInstanceConfigs;
private String hearbeatSQL;
private String heartbeatSQL;
private boolean isShowSlaveSql = false;
private boolean isSelectReadOnlySql = false;
private int delayThreshold = -1;
@@ -66,13 +67,12 @@ public class DbGroupConfig {
return readInstanceConfigs;
}
public String getHearbeatSQL() {
return hearbeatSQL;
public String getHeartbeatSQL() {
return heartbeatSQL;
}
public void setHearbeatSQL(String heartbeatSQL) {
this.hearbeatSQL = heartbeatSQL;
public void setHeartbeatSQL(String heartbeatSQL) {
this.heartbeatSQL = heartbeatSQL;
Matcher matcher = HP_PATTERN_SHOW_SLAVE_STATUS.matcher(heartbeatSQL);
if (matcher.find()) {
isShowSlaveSql = true;
@@ -1,28 +1,27 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.config.model.db;
import com.actiontech.dble.config.model.SystemConfig;
import com.actiontech.dble.backend.pool.PoolConfig;
public class DbInstanceConfig {
private long idleTimeout = SystemConfig.DEFAULT_IDLE_TIMEOUT;
private final String instanceName;
private final String ip;
private final int port;
private final String url;
private final String user;
private final String password;
private int maxCon;
private int minCon;
private int readWeight;
private String id;
private boolean disabled = false;
private boolean primary = false;
private boolean disabled;
private boolean primary;
private volatile int maxCon = -1;
private volatile int minCon = -1;
private volatile PoolConfig poolConfig;
public DbInstanceConfig(String instanceName, String ip, int port, String url,
String user, String password, boolean disabled, boolean primary) {
@@ -36,30 +35,6 @@ public class DbInstanceConfig {
this.primary = primary;
}
public long getIdleTimeout() {
return idleTimeout;
}
public void setIdleTimeout(long idleTimeout) {
this.idleTimeout = idleTimeout;
}
public int getMaxCon() {
return maxCon;
}
public void setMaxCon(int maxCon) {
this.maxCon = maxCon;
}
public int getMinCon() {
return minCon;
}
public void setMinCon(int minCon) {
this.minCon = minCon;
}
public String getInstanceName() {
return instanceName;
}
@@ -104,7 +79,6 @@ public class DbInstanceConfig {
this.id = id;
}
public boolean isPrimary() {
return primary;
}
@@ -113,6 +87,30 @@ public class DbInstanceConfig {
this.primary = primary;
}
public int getMaxCon() {
return maxCon;
}
public void setMaxCon(int maxCon) {
this.maxCon = maxCon;
}
public int getMinCon() {
return minCon;
}
public void setMinCon(int minCon) {
this.minCon = minCon;
}
public PoolConfig getPoolConfig() {
return poolConfig;
}
public void setPoolConfig(PoolConfig poolConfig) {
this.poolConfig = poolConfig;
}
@Override
public String toString() {
return "DbInstanceConfig [hostName=" + instanceName + ", url=" + url + "]";
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.manager;
import com.actiontech.dble.backend.BackendConnection;
@@ -22,6 +22,7 @@ public class ManagerConnection extends FrontendConnection {
private static final long AUTH_TIMEOUT = 15 * 1000L;
private volatile boolean skipIdleCheck = false;
private ManagerUserConfig userConfig;
public ManagerConnection(NetworkChannel channel) throws IOException {
super(channel);
this.handler = new ManagerAuthenticator(this);
@@ -34,6 +35,7 @@ public class ManagerConnection extends FrontendConnection {
public void setUserConfig(ManagerUserConfig userConfig) {
this.userConfig = userConfig;
}
@Override
public void handlerQuery(String sql) {
// execute
@@ -44,15 +46,15 @@ public class ManagerConnection extends FrontendConnection {
writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "Query unsupported!");
}
}
@Override
public boolean isIdleTimeout() {
if (skipIdleCheck) {
return false;
} else if (isAuthenticated) {
return super.isIdleTimeout();
return TimeUtil.currentTimeMillis() > Math.max(lastWriteTime, lastReadTime) + idleTimeout;
} else {
return TimeUtil.currentTimeMillis() > Math.max(lastWriteTime,
lastReadTime) + AUTH_TIMEOUT;
return TimeUtil.currentTimeMillis() > Math.max(lastWriteTime, lastReadTime) + AUTH_TIMEOUT;
}
}
@@ -72,7 +72,7 @@ public final class DatabaseHandler {
final AtomicInteger numberCount = new AtomicInteger(shardingNodes.size());
for (final String shardingNode : shardingNodes) {
ShardingNode dn = allShardingNodes.get(shardingNode);
final PhysicalDbInstance ds = dn.getDbGroup().getWriteSource();
final PhysicalDbInstance ds = dn.getDbGroup().getWriteDbInstance();
final String schema = dn.getDatabase();
OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler(new String[0], new SQLQueryResultListener<SQLQueryResult<Map<String, String>>>() {
@Override
@@ -60,7 +60,7 @@ public final class SelectHandler {
Iterator<PhysicalDbGroup> iterator = DbleServer.getInstance().getConfig().getDbGroups().values().iterator();
if (iterator.hasNext()) {
PhysicalDbGroup pool = iterator.next();
final PhysicalDbInstance source = pool.getWriteSource();
final PhysicalDbInstance source = pool.getWriteDbInstance();
TransformSQLJob sqlJob = new TransformSQLJob(stmt, null, source, c);
sqlJob.run();
} else {
@@ -253,7 +253,7 @@ public final class ShowHandler {
Iterator<PhysicalDbGroup> iterator = DbleServer.getInstance().getConfig().getDbGroups().values().iterator();
if (iterator.hasNext()) {
PhysicalDbGroup pool = iterator.next();
final PhysicalDbInstance source = pool.getWriteSource();
final PhysicalDbInstance source = pool.getWriteDbInstance();
TransformSQLJob sqlJob = new TransformSQLJob(stmt, null, source, c);
sqlJob.run();
} else {
@@ -41,7 +41,7 @@ public class ShowProcesslistHandler {
public void execute() {
String sbSql = SQL.replace("{0}", StringUtils.join(threadIds, ','));
ShardingNode dn = DbleServer.getInstance().getConfig().getShardingNodes().get(shardingNode);
PhysicalDbInstance ds = dn.getDbGroup().getWriteSource();
PhysicalDbInstance ds = dn.getDbGroup().getWriteDbInstance();
if (ds.isAlive()) {
MultiRowSQLQueryResultHandler resultHandler = new MultiRowSQLQueryResultHandler(MYSQL_SHOW_PROCESSLIST_COLS, new MySQLShowProcesslistListener());
SQLJob sqlJob = new SQLJob(sbSql, dn.getDatabase(), resultHandler, ds);
@@ -124,8 +124,8 @@ public final class DryRun {
}
}
if (handler.getUsedDataource() != null) {
handler.getUsedDataource().clearCons("dry run end");
if (handler.getUsedDbInstance() != null) {
handler.getUsedDbInstance().closeAllConnection("dry run end");
}
userCheck(list, serverConfig);
@@ -44,7 +44,7 @@ public final class FlowControlList {
FIELDS[i++].setPacketId(++packetId);
FIELDS[i] = PacketUtil.getField("WRITE_QUEUE_SIZE", Fields.FIELD_TYPE_LONGLONG);
FIELDS[i++].setPacketId(++packetId);
FIELDS[i].setPacketId(++packetId);
EOF.setPacketId(++packetId);
}
@@ -73,7 +73,7 @@ public final class FlowControlList {
//find all server connection
packetId = findAllServerConnection(buffer, c, packetId);
//find all mysql connection
packetId = findAllMySQLConeection(buffer, c, packetId);
packetId = findAllMySQLConnection(buffer, c, packetId);
}
// write last eof
@@ -94,7 +94,7 @@ public final class FlowControlList {
RowDataPacket row = new RowDataPacket(FIELD_COUNT);
row.add(StringUtil.encode("ServerConnection", c.getCharset().getResults()));
row.add(LongUtil.toBytes(fc.getId()));
row.add(StringUtil.encode(fc.getHost() + ":" + fc.getLocalPort() + "/" + ((ServerConnection) fc).getSchema() + " user = " + fc.getUser(), c.getCharset().getResults()));
row.add(StringUtil.encode(fc.getHost() + ":" + fc.getLocalPort() + "/" + fc.getSchema() + " user = " + fc.getUser(), c.getCharset().getResults()));
row.add(LongUtil.toBytes(fc.getWriteQueue().size()));
row.setPacketId(++packetId);
buffer = row.write(buffer, c, true);
@@ -104,7 +104,7 @@ public final class FlowControlList {
return packetId;
}
private static byte findAllMySQLConeection(ByteBuffer buffer, ManagerConnection c, byte packetId) {
private static byte findAllMySQLConnection(ByteBuffer buffer, ManagerConnection c, byte packetId) {
NIOProcessor[] processors = DbleServer.getInstance().getBackendProcessors();
for (NIOProcessor p : processors) {
for (BackendConnection bc : p.getBackends().values()) {
@@ -113,7 +113,7 @@ public final class FlowControlList {
RowDataPacket row = new RowDataPacket(FIELD_COUNT);
row.add(StringUtil.encode("MySQLConnection", c.getCharset().getResults()));
row.add(LongUtil.toBytes(mc.getThreadId()));
row.add(StringUtil.encode(mc.getPool().getConfig().getUrl() + "/" + mc.getSchema() + " id = " + mc.getThreadId(), c.getCharset().getResults()));
row.add(StringUtil.encode(mc.getDbInstance().getConfig().getUrl() + "/" + mc.getSchema() + " id = " + mc.getThreadId(), c.getCharset().getResults()));
row.add(LongUtil.toBytes(mc.getWriteQueue().size()));
row.setPacketId(++packetId);
buffer = row.write(buffer, c, true);
@@ -1,17 +1,14 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.manager.response;
import com.actiontech.dble.DbleServer;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.datasource.PhysicalDbGroup;
import com.actiontech.dble.backend.datasource.PhysicalDbGroupDiff;
import com.actiontech.dble.backend.datasource.PhysicalDbInstance;
import com.actiontech.dble.backend.datasource.ShardingNode;
import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.btrace.provider.ClusterDelayProvider;
import com.actiontech.dble.cluster.ClusterHelper;
import com.actiontech.dble.cluster.ClusterPathUtil;
@@ -38,7 +35,6 @@ import com.actiontech.dble.meta.ReloadManager;
import com.actiontech.dble.net.FrontendConnection;
import com.actiontech.dble.net.NIOProcessor;
import com.actiontech.dble.net.mysql.OkPacket;
import com.actiontech.dble.route.RouteResultsetNode;
import com.actiontech.dble.route.parser.ManagerParseConfig;
import com.actiontech.dble.server.ServerConnection;
import com.actiontech.dble.server.variables.SystemVariables;
@@ -387,7 +383,6 @@ public final class ReloadConfig {
}
FrontendUserManager.getInstance().initForLatest(newUsers, SystemConfig.getInstance().getMaxCon());
ReloadLogHelper.info("reload config: apply new config end", LOGGER);
recycleOldBackendConnections(recycleHosts, ((loadAllMode & ManagerParseConfig.OPTF_MODE) != 0));
if (!loader.isFullyConfigured()) {
recycleServerConnections();
}
@@ -402,12 +397,11 @@ public final class ReloadConfig {
}
}
private static void initFailed(Map<String, PhysicalDbGroup> newDbGroups) throws Exception {
private static void initFailed(Map<String, PhysicalDbGroup> newDbGroups) {
// INIT FAILED
ReloadLogHelper.info("reload failed, clear previously created dbInstances ", LOGGER);
for (PhysicalDbGroup dbGroup : newDbGroups.values()) {
dbGroup.clearDbInstances("reload config");
dbGroup.stopHeartbeat();
dbGroup.stop("reload fail, stop");
}
}
@@ -450,7 +444,6 @@ public final class ReloadConfig {
}
FrontendUserManager.getInstance().initForLatest(newUsers, SystemConfig.getInstance().getMaxCon());
ReloadLogHelper.info("reload config: apply new config end", LOGGER);
recycleOldBackendConnections(config.getBackupDbGroups(), ((loadAllMode & ManagerParseConfig.OPTF_MODE) != 0));
if (!loader.isFullyConfigured()) {
recycleServerConnections();
}
@@ -492,30 +485,6 @@ public final class ReloadConfig {
return newSystemVariables;
}
private static void findAndcloseFrontCon(BackendConnection con) {
if (con instanceof MySQLConnection) {
MySQLConnection mcon1 = (MySQLConnection) con;
for (NIOProcessor processor : DbleServer.getInstance().getFrontProcessors()) {
for (FrontendConnection fcon : processor.getFrontends().values()) {
if (fcon instanceof ServerConnection) {
ServerConnection scon = (ServerConnection) fcon;
Map<RouteResultsetNode, BackendConnection> bons = scon.getSession2().getTargetMap();
for (BackendConnection bcon : bons.values()) {
if (bcon instanceof MySQLConnection) {
MySQLConnection mcon2 = (MySQLConnection) bcon;
if (mcon1 == mcon2) {
//frontEnd kill change to frontEnd close ,it's not necessary to use kill
scon.close("reload config all");
return;
}
}
}
}
}
}
}
}
private static void recycleServerConnections() {
for (NIOProcessor processor : DbleServer.getInstance().getFrontProcessors()) {
for (FrontendConnection fcon : processor.getFrontends().values()) {
@@ -527,50 +496,6 @@ public final class ReloadConfig {
}
}
private static void recycleOldBackendConnections(Map<String, PhysicalDbGroup> recycleMap, boolean closeFrontCon) {
for (PhysicalDbGroup dbGroup : recycleMap.values()) {
dbGroup.stopHeartbeat();
long oldTimestamp = System.currentTimeMillis();
for (PhysicalDbInstance ds : dbGroup.getAllActiveDbInstances()) {
for (NIOProcessor processor : DbleServer.getInstance().getBackendProcessors()) {
for (BackendConnection con : processor.getBackends().values()) {
if (con instanceof MySQLConnection) {
MySQLConnection mysqlCon = (MySQLConnection) con;
if (mysqlCon.getPool() == ds) {
if (con.isBorrowed()) {
if (closeFrontCon) {
ReloadLogHelper.info("old active backend conn will be forced closed by closing front conn, conn info:" + mysqlCon, LOGGER);
findAndcloseFrontCon(con);
} else {
ReloadLogHelper.info("old active backend conn will be added to old pool, conn info:" + mysqlCon, LOGGER);
con.setOldTimestamp(oldTimestamp);
NIOProcessor.BACKENDS_OLD.add(con);
}
} else {
ReloadLogHelper.info("old idle backend conn will be closed, conn info:" + mysqlCon, LOGGER);
con.close("old idle conn for reload merge");
}
}
}
}
}
}
}
if (closeFrontCon) {
for (NIOProcessor processor : DbleServer.getInstance().getBackendProcessors()) {
for (BackendConnection con : processor.getBackends().values()) {
if (con instanceof MySQLConnection) {
MySQLConnection mysqlCon = (MySQLConnection) con;
if (mysqlCon.getOldTimestamp() != 0) {
findAndcloseFrontCon(con);
}
}
}
}
}
}
private static void distinguishDbGroup(Map<String, PhysicalDbGroup> newDbGroups, Map<String, PhysicalDbGroup> oldDbGroups,
Map<String, PhysicalDbGroup> addOrChangeDbGroups, Map<String, PhysicalDbGroup> noChangeDbGroups,
Map<String, PhysicalDbGroup> recycleHosts) {
@@ -634,14 +559,10 @@ public final class ReloadConfig {
}
}
dbGroup.setSchemas(dnSchemas.toArray(new String[dnSchemas.size()]));
if (!dbGroup.isInitSuccess() && fullyConfigured) {
if (fullyConfigured) {
dbGroup.init();
if (!dbGroup.isInitSuccess()) {
reasonMsg = "Init dbGroup [" + dbGroup.getGroupName() + "] failed";
break;
}
} else {
LOGGER.info("dbGroup[" + hostName + "] already initiated, so doing nothing");
LOGGER.info("dbGroup[" + hostName + "] is not fullyConfigured, so doing nothing");
}
}
return reasonMsg;
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.manager.response;
import com.actiontech.dble.DbleServer;
@@ -256,24 +256,9 @@ public final class RollbackConfig {
Map<ERTable, Set<ERTable>> erRelations = conf.getBackupErRelations();
boolean backIsFullyConfiged = conf.backIsFullyConfiged();
if (conf.canRollbackAll()) {
boolean rollbackStatus = true;
String errorMsg = null;
if (conf.isFullyConfigured()) {
for (PhysicalDbGroup dn : dbGroups.values()) {
dn.init();
if (!dn.isInitSuccess()) {
rollbackStatus = false;
errorMsg = "dbGroup[" + dn.getGroupName() + "] inited failure";
break;
}
}
// INIT FAILED
if (!rollbackStatus) {
for (PhysicalDbGroup dn : dbGroups.values()) {
dn.clearDbInstances("rollbackup config");
dn.stopHeartbeat();
}
throw new Exception(errorMsg);
}
}
final Map<String, PhysicalDbGroup> cNodes = conf.getDbGroups();
@@ -281,8 +266,7 @@ public final class RollbackConfig {
boolean result = conf.rollback(users, schemas, shardingNodes, dbGroups, erRelations, backIsFullyConfiged);
// stop old resource heartbeat
for (PhysicalDbGroup dn : cNodes.values()) {
dn.clearDbInstances("clear old config ");
dn.stopHeartbeat();
dn.stop("initial failed, rollback up config");
}
if (!backIsFullyConfiged) {
for (NIOProcessor processor : DbleServer.getInstance().getFrontProcessors()) {
@@ -7,9 +7,11 @@ package com.actiontech.dble.manager.response;
import com.actiontech.dble.DbleServer;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.heartbeat.HeartbeatSQLJob;
import com.actiontech.dble.backend.mysql.PacketUtil;
import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.backend.mysql.nio.handler.ResponseHandler;
import com.actiontech.dble.backend.pool.PooledEntry;
import com.actiontech.dble.config.ErrorCode;
import com.actiontech.dble.config.Fields;
import com.actiontech.dble.manager.ManagerConnection;
@@ -19,7 +21,6 @@ import com.actiontech.dble.net.mysql.FieldPacket;
import com.actiontech.dble.net.mysql.ResultSetHeaderPacket;
import com.actiontech.dble.net.mysql.RowDataPacket;
import com.actiontech.dble.route.factory.RouteStrategyFactory;
import com.actiontech.dble.sqlengine.HeartbeatSQLJob;
import com.actiontech.dble.util.*;
import com.alibaba.druid.sql.ast.SQLExpr;
import com.alibaba.druid.sql.ast.SQLStatement;
@@ -67,7 +68,7 @@ public final class ShowBackend {
FIELDS[i++].setPacketId(++packetId);
// fields[i] = PacketUtil.getField("run", Fields.FIELD_TYPE_VAR_STRING);
// fields[i++].packetId = ++packetId;
FIELDS[i] = PacketUtil.getField("BORROWED", Fields.FIELD_TYPE_VAR_STRING);
FIELDS[i] = PacketUtil.getField("STATE", Fields.FIELD_TYPE_VAR_STRING);
FIELDS[i++].setPacketId(++packetId);
FIELDS[i] = PacketUtil.getField("SEND_QUEUE", Fields.FIELD_TYPE_LONG);
FIELDS[i++].setPacketId(++packetId);
@@ -198,6 +199,7 @@ public final class ShowBackend {
if (!(c instanceof MySQLConnection)) {
return null;
}
int state = c.getState();
MySQLConnection conn = (MySQLConnection) c;
row.add(conn.getProcessor().getName().getBytes());
row.add(LongUtil.toBytes(c.getId()));
@@ -209,7 +211,7 @@ public final class ShowBackend {
row.add(LongUtil.toBytes(c.getNetOutBytes()));
row.add(LongUtil.toBytes((TimeUtil.currentTimeMillis() - c.getStartupTime()) / 1000L));
row.add(c.isClosed() ? "true".getBytes() : "false".getBytes());
row.add(c.isBorrowed() ? "true".getBytes() : "false".getBytes());
row.add(stateStr(state).getBytes());
row.add(IntegerUtil.toBytes(conn.getWriteQueue().size()));
row.add((conn.getSchema() == null ? "NULL" : conn.getSchema()).getBytes());
row.add(conn.getCharset().getClient().getBytes());
@@ -221,12 +223,31 @@ public final class ShowBackend {
row.add(StringUtil.encode(conn.getStringOfUsrVariables(), charset));
row.add(StringUtil.encode(conn.getXaStatus().toString(), charset));
row.add(StringUtil.encode(FormatUtil.formatDate(conn.getOldTimestamp()), charset));
if (c.isBorrowed()) {
if (state == PooledEntry.INITIAL) {
ResponseHandler handler = ((MySQLConnection) c).getRespHandler();
row.add(handler != null && handler instanceof HeartbeatSQLJob ? "true".getBytes() : "false".getBytes());
row.add(handler instanceof HeartbeatSQLJob ? "true".getBytes() : "false".getBytes());
} else {
row.add("false".getBytes());
}
return row;
}
public static String stateStr(int state) {
switch (state) {
case PooledEntry.STATE_IN_USE:
return "IN USE";
case PooledEntry.STATE_NOT_IN_USE:
return "IDLE";
case PooledEntry.STATE_REMOVED:
return "REMOVED";
case PooledEntry.STATE_HEARTBEAT:
return "HEARTBEAT CHECK";
case PooledEntry.STATE_RESERVED:
return "EVICT";
case PooledEntry.INITIAL:
return "IN CREATION OR OUT OF POOL";
default:
return "UNKNOWN STATE";
}
}
}
@@ -8,6 +8,7 @@ package com.actiontech.dble.manager.response;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.mysql.PacketUtil;
import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.backend.pool.PooledEntry;
import com.actiontech.dble.config.Fields;
import com.actiontech.dble.manager.ManagerConnection;
import com.actiontech.dble.net.NIOProcessor;
@@ -101,7 +102,7 @@ public final class ShowBackendOld {
row.add(LongUtil.toBytes(c.getNetOutBytes()));
row.add(LongUtil.toBytes((TimeUtil.currentTimeMillis() - c.getStartupTime()) / 1000L));
row.add(LongUtil.toBytes(c.getLastTime()));
boolean isBorrowed = c.isBorrowed();
boolean isBorrowed = c.getState() == PooledEntry.STATE_IN_USE;
row.add(isBorrowed ? "true".getBytes() : "false".getBytes());
return row;
}
@@ -1,14 +1,15 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.manager.response;
import com.actiontech.dble.DbleServer;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.mysql.PacketUtil;
import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.backend.pool.PooledEntry;
import com.actiontech.dble.config.Fields;
import com.actiontech.dble.manager.ManagerConnection;
import com.actiontech.dble.net.NIOProcessor;
@@ -52,7 +53,7 @@ public final class ShowBackendStat {
FIELDS[i++].setPacketId(++packetId);
FIELDS[i] = PacketUtil.getField("TOTAL", Fields.FIELD_TYPE_LONGLONG);
FIELDS[i++].setPacketId(++packetId);
FIELDS[i].setPacketId(++packetId);
EOF.setPacketId(++packetId);
}
@@ -71,7 +72,7 @@ public final class ShowBackendStat {
HashMap<String, BackendStat> infos = stat();
byte packetId = EOF.getPacketId();
for (Map.Entry<String, BackendStat> entry: infos.entrySet()) {
for (Map.Entry<String, BackendStat> entry : infos.entrySet()) {
RowDataPacket row = getRow(entry.getValue(), c.getCharset().getResults());
row.setPacketId(++packetId);
buffer = row.write(buffer, c, true);
@@ -93,24 +94,24 @@ public final class ShowBackendStat {
}
private static HashMap<String, BackendStat> stat() {
HashMap<String, BackendStat> all = new HashMap<String, BackendStat>();
HashMap<String, BackendStat> all = new HashMap<>();
for (NIOProcessor p : DbleServer.getInstance().getBackendProcessors()) {
for (BackendConnection bc : p.getBackends().values()) {
if ((bc == null) || !(bc instanceof MySQLConnection)) {
if (!(bc instanceof MySQLConnection)) {
break;
}
MySQLConnection con = (MySQLConnection) bc;
String host = con.getHost();
long port = con.getPort();
BackendStat info = all.get(host + Long.toString(port));
BackendStat info = all.get(host + port);
if (info == null) {
info = new BackendStat(host, port);
all.put(host + Long.toString(port), info);
all.put(host + port, info);
}
if (con.isBorrowed()) {
if (con.getState() == PooledEntry.STATE_IN_USE) {
info.addActive();
}
info.addTotal();
@@ -132,21 +133,27 @@ public final class ShowBackendStat {
this.active = 0;
this.total = 0;
}
public String getHost() {
return this.host;
}
public long getPort() {
return this.port;
}
public void addActive() {
this.active++;
}
public void addTotal() {
this.total++;
}
public long getActive() {
return this.active;
}
public long getTotal() {
return this.total;
}
@@ -347,7 +347,7 @@ public final class ShowBinlogStatus {
rows = new CopyOnWriteArrayList<>();
for (PhysicalDbGroup pool : allPools) {
//if WRITE_RANDOM_NODE ,may the binlog is not ready.
final PhysicalDbInstance source = pool.getWriteSource();
final PhysicalDbInstance source = pool.getWriteDbInstance();
OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler(FIELDS,
new SQLQueryResultListener<SQLQueryResult<Map<String, String>>>() {
@Override
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.manager.response;
import com.actiontech.dble.DbleServer;
@@ -28,7 +28,7 @@ public final class ShowDbInstance {
private ShowDbInstance() {
}
private static final int FIELD_COUNT = 12;
private static final int FIELD_COUNT = 11;
private static final ResultSetHeaderPacket HEADER = PacketUtil.getHeader(FIELD_COUNT);
private static final FieldPacket[] FIELDS = new FieldPacket[FIELD_COUNT];
private static final EOFPacket EOF = new EOFPacket();
@@ -62,9 +62,6 @@ public final class ShowDbInstance {
FIELDS[i] = PacketUtil.getField("SIZE", Fields.FIELD_TYPE_LONG);
FIELDS[i++].setPacketId(++packetId);
FIELDS[i] = PacketUtil.getField("EXECUTE", Fields.FIELD_TYPE_LONGLONG);
FIELDS[i++].setPacketId(++packetId);
FIELDS[i] = PacketUtil.getField("READ_LOAD", Fields.FIELD_TYPE_LONG);
FIELDS[i++].setPacketId(++packetId);
@@ -127,18 +124,16 @@ public final class ShowDbInstance {
private static RowDataPacket getRow(String dbGroup, PhysicalDbInstance ds,
String charset) {
RowDataPacket row = new RowDataPacket(FIELD_COUNT);
int idleCount = ds.getIdleCount();
row.add(StringUtil.encode(dbGroup, charset));
row.add(StringUtil.encode(ds.getName(), charset));
row.add(StringUtil.encode(ds.getConfig().getIp(), charset));
row.add(IntegerUtil.toBytes(ds.getConfig().getPort()));
row.add(StringUtil.encode(ds.isReadInstance() ? "R" : "W", charset));
row.add(IntegerUtil.toBytes(ds.getTotalConCount() - idleCount));
row.add(IntegerUtil.toBytes(idleCount));
row.add(IntegerUtil.toBytes(ds.getSize()));
row.add(LongUtil.toBytes(ds.getExecuteCount()));
row.add(LongUtil.toBytes(ds.getReadCount()));
row.add(LongUtil.toBytes(ds.getWriteCount()));
row.add(IntegerUtil.toBytes(ds.getActiveConnections()));
row.add(IntegerUtil.toBytes(ds.getIdleConnections()));
row.add(LongUtil.toBytes(ds.getTotalConnections()));
row.add(LongUtil.toBytes(ds.getCount(true)));
row.add(LongUtil.toBytes(ds.getCount(false)));
row.add(StringUtil.encode(ds.isDisabled() ? "true" : "false", charset));
return row;
}
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.manager.response;
import com.actiontech.dble.DbleServer;
@@ -73,7 +73,7 @@ public final class ShowHeartbeat {
FIELDS[i++].setPacketId(++packetId);
FIELDS[i] = PacketUtil.getField("RS_MESSAGE ", Fields.FIELD_TYPE_VAR_STRING);
FIELDS[i++].setPacketId(++packetId);
FIELDS[i].setPacketId(++packetId);
EOF.setPacketId(++packetId);
}
@@ -28,7 +28,10 @@ import com.actiontech.dble.util.TimeUtil;
import java.nio.ByteBuffer;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.*;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
/**
* ShowShardingNode
@@ -107,7 +110,7 @@ public final class ShowShardingNode {
keys.addAll(sc.getAllShardingNodes());
}
}
Collections.sort(keys, new Comparator<String>() {
keys.sort(new Comparator<String>() {
@Override
public int compare(String o1, String o2) {
Pair<String, Integer> p1 = PairUtil.splitIndex(o1, '[', ']');
@@ -138,7 +141,7 @@ public final class ShowShardingNode {
private static RowDataPacket getRow(ShardingNode node, String charset) {
PhysicalDbGroup pool = node.getDbGroup();
PhysicalDbInstance ds = pool.getWriteSource();
PhysicalDbInstance ds = pool.getWriteDbInstance();
if (ds != null) {
RowDataPacket row = new RowDataPacket(FIELD_COUNT);
row.add(StringUtil.encode(node.getName(), charset));
@@ -146,13 +149,13 @@ public final class ShowShardingNode {
node.getDbGroup().getGroupName() + '/' + node.getDatabase(),
charset));
row.add(StringUtil.encode(node.isSchemaExists() ? "true" : "false", charset));
int active = ds.getActiveCountForSchema(node.getDatabase());
int idle = ds.getIdleCountForSchema(node.getDatabase());
int active = ds.getActiveConnections(node.getDatabase());
int idle = ds.getIdleConnections(node.getDatabase());
row.add(IntegerUtil.toBytes(active));
row.add(IntegerUtil.toBytes(idle));
row.add(IntegerUtil.toBytes(ds.getSize()));
row.add(LongUtil.toBytes(ds.getExecuteCountForSchema(node.getDatabase())));
long recoveryTime = pool.getWriteSource().getHeartbeatRecoveryTime() - TimeUtil.currentTimeMillis();
row.add(IntegerUtil.toBytes(ds.getConfig().getMaxCon()));
row.add(LongUtil.toBytes(0));
long recoveryTime = ds.getHeartbeatRecoveryTime() - TimeUtil.currentTimeMillis();
row.add(LongUtil.toBytes(recoveryTime > 0 ? recoveryTime / 1000L : -1L));
return row;
} else {
@@ -140,7 +140,7 @@ public final class ShowTableShardingNode {
int sequence = 0;
for (String shardingNode : shardingNodes) {
ShardingNode dn = DbleServer.getInstance().getConfig().getShardingNodes().get(shardingNode);
DbInstanceConfig dbConfig = dn.getDbGroup().getWriteSource().getConfig();
DbInstanceConfig dbConfig = dn.getDbGroup().getWriteDbInstance().getConfig();
RowDataPacket row = new RowDataPacket(FIELD_COUNT);
row.add(StringUtil.encode(dn.getName(), charset));
row.add(LongUtil.toBytes(sequence));
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.manager.response;
import com.actiontech.dble.DbleServer;
@@ -38,7 +38,7 @@ public final class StopHeartbeat {
for (String key : keys.getKey()) {
PhysicalDbGroup dn = dns.get(key);
if (dn != null) {
dn.getWriteSource().setHeartbeatRecoveryTime(TimeUtil.currentTimeMillis() + time);
dn.getWriteDbInstance().setHeartbeatRecoveryTime(TimeUtil.currentTimeMillis() + time);
++count;
StringBuilder s = new StringBuilder();
s.append(dn.getGroupName()).append(" stop heartbeat '");
@@ -57,7 +57,7 @@ public abstract class AbstractTableMetaHandler {
return;
}
ShardingNode dn = DbleServer.getInstance().getConfig().getShardingNodes().get(shardingNode);
PhysicalDbInstance ds = dn.getDbGroup().getWriteSource();
PhysicalDbInstance ds = dn.getDbGroup().getWriteDbInstance();
String sql = SQL_PREFIX + "`" + tableName + "`";
if (ds.isAlive()) {
OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler(MYSQL_SHOW_CREATE_TABLE_COLS, new MySQLTableStructureListener(shardingNode, System.currentTimeMillis(), ds));
@@ -43,7 +43,7 @@ public class DryRunGetNodeTablesHandler extends GetNodeTablesHandler {
public void execute() {
String mysqlShowTableCol = "Tables_in_" + phyShardingNode.getDatabase();
String[] mysqlShowTableCols = new String[]{mysqlShowTableCol};
PhysicalDbInstance tds = phyShardingNode.getDbGroup().getWriteSource();
PhysicalDbInstance tds = phyShardingNode.getDbGroup().getWriteDbInstance();
PhysicalDbInstance ds = null;
if (tds != null) {
if (tds.isTestConnSuccess()) {
@@ -50,7 +50,7 @@ public abstract class GetNodeTablesHandler {
ShardingNode dn = DbleServer.getInstance().getConfig().getShardingNodes().get(shardingNode);
String mysqlShowTableCol = "Tables_in_" + dn.getDatabase();
String[] mysqlShowTableCols = new String[]{mysqlShowTableCol, "Table_type"};
PhysicalDbInstance ds = dn.getDbGroup().getWriteSource();
PhysicalDbInstance ds = dn.getDbGroup().getWriteDbInstance();
if (ds.isAlive()) {
MultiRowSQLQueryResultHandler resultHandler = new MultiRowSQLQueryResultHandler(mysqlShowTableCols, new MySQLShowTablesListener(mysqlShowTableCol, dn.getDatabase(), ds));
SQLJob sqlJob = new SQLJob(sql, dn.getDatabase(), resultHandler, ds);
@@ -49,7 +49,7 @@ public abstract class GetTableMetaHandler {
sbSql.append(SQL_SHOW_CREATE_TABLE.replace("{0}", table));
}
ShardingNode dn = DbleServer.getInstance().getConfig().getShardingNodes().get(shardingNode);
PhysicalDbInstance ds = dn.getDbGroup().getWriteSource();
PhysicalDbInstance ds = dn.getDbGroup().getWriteDbInstance();
if (ds.isAlive()) {
logger.info("dbInstance is alive start sqljob for shardingNode:" + shardingNode);
MultiRowSQLQueryResultHandler resultHandler = new MultiRowSQLQueryResultHandler(MYSQL_SHOW_CREATE_TABLE_COLS, new TableStructureListener(shardingNode, tables, ds));
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.net;
import com.actiontech.dble.DbleServer;
@@ -68,8 +68,6 @@ public abstract class AbstractConnection implements NIOConnection {
protected volatile Map<String, String> usrVariables;
protected volatile Map<String, String> sysVariables;
private long idleTimeout;
private final SocketWR socketWR;
private byte[] rowData;
@@ -149,10 +147,6 @@ public abstract class AbstractConnection implements NIOConnection {
return socketWR;
}
public void setIdleTimeout(long idleTimeout) {
this.idleTimeout = idleTimeout;
}
public int getLocalPort() {
return localPort;
}
@@ -161,7 +155,6 @@ public abstract class AbstractConnection implements NIOConnection {
return host;
}
public int getPort() {
return port;
}
@@ -178,10 +171,6 @@ public abstract class AbstractConnection implements NIOConnection {
this.id = id;
}
public boolean isIdleTimeout() {
return TimeUtil.currentTimeMillis() > Math.max(lastWriteTime, lastReadTime) + idleTimeout;
}
public Map<String, String> getUsrVariables() {
return usrVariables;
}
@@ -194,7 +183,6 @@ public abstract class AbstractConnection implements NIOConnection {
return channel;
}
public void setReadBufferChunk(int readBufferChunk) {
this.readBufferChunk = readBufferChunk;
}
@@ -551,6 +539,7 @@ public abstract class AbstractConnection implements NIOConnection {
if (processor != null) {
processor.removeConnection(this);
}
this.cleanup();
isSupportCompress = false;
@@ -574,13 +563,6 @@ public abstract class AbstractConnection implements NIOConnection {
return isClosed;
}
public void idleCheck() {
if (isIdleTimeout()) {
LOGGER.info(toString() + " idle timeout");
close(" idle ");
}
}
protected synchronized void cleanup() {
if (readBuffer != null) {
@@ -677,7 +659,7 @@ public abstract class AbstractConnection implements NIOConnection {
}
public void onConnectFinish() {
LOGGER.debug("The backend conntinon has finished connecting");
LOGGER.debug("The backend connection has finished connecting");
}
public void setSocketParams(boolean isFrontChannel) throws IOException {
@@ -702,7 +684,6 @@ public abstract class AbstractConnection implements NIOConnection {
channel.setOption(StandardSocketOptions.SO_KEEPALIVE, true);
this.setMaxPacketSize(system.getMaxPacketSize());
this.setIdleTimeout(system.getIdleTimeout());
this.initCharacterSet(system.getCharset());
this.setReadBufferChunk(soRcvBuf);
}
@@ -756,8 +737,7 @@ public abstract class AbstractConnection implements NIOConnection {
}
/*
start flow control because of the write queue in this connection to long
* start flow control because of the write queue in this connection to long
*/
public abstract void startFlowControl(BackendConnection bcon);
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.net;
import com.actiontech.dble.net.mysql.CharsetNames;
@@ -17,8 +17,6 @@ public interface ClosableConnection {
boolean isClosed();
void idleCheck();
long getStartupTime();
String getHost();
@@ -47,6 +47,7 @@ public abstract class FrontendConnection extends AbstractConnection {
protected FrontendQueryHandler queryHandler;
protected String executeSql;
protected final long idleTimeout = SystemConfig.getInstance().getIdleTimeout();
public FrontendConnection(NetworkChannel channel) throws IOException {
super(channel);
@@ -9,6 +9,7 @@ import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.backend.mysql.nio.handler.transaction.xa.stage.XAStage;
import com.actiontech.dble.backend.mysql.xa.TxState;
import com.actiontech.dble.backend.pool.PooledEntry;
import com.actiontech.dble.buffer.BufferPool;
import com.actiontech.dble.config.model.SystemConfig;
import com.actiontech.dble.server.ServerConnection;
@@ -18,7 +19,6 @@ import com.actiontech.dble.util.TimeUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
@@ -46,7 +46,7 @@ public final class NIOProcessor {
private AtomicInteger frontEndsLength = new AtomicInteger(0);
public NIOProcessor(String name, BufferPool bufferPool) throws IOException {
public NIOProcessor(String name, BufferPool bufferPool) {
this.name = name;
this.bufferPool = bufferPool;
this.frontends = new ConcurrentHashMap<>();
@@ -145,19 +145,21 @@ public final class NIOProcessor {
} else {
// very important ,for some data maybe not sent
checkConSendQueue(c);
if (c instanceof ServerConnection && c.isIdleTimeout()) {
if (c instanceof ServerConnection) {
ServerConnection s = (ServerConnection) c;
String xaStage = s.getSession2().getTransactionManager().getXAStage();
if (xaStage != null) {
if (!xaStage.equals(XAStage.COMMIT_FAIL_STAGE) && !xaStage.equals(XAStage.ROLLBACK_FAIL_STAGE)) {
// Active/IDLE/PREPARED XA FrontendS will be rollbacked
s.close("Idle Timeout");
XASessionCheck.getInstance().addRollbackSession(s.getSession2());
if (s.isIdleTimeout()) {
String xaStage = s.getSession2().getTransactionManager().getXAStage();
if (xaStage != null) {
if (!xaStage.equals(XAStage.COMMIT_FAIL_STAGE) && !xaStage.equals(XAStage.ROLLBACK_FAIL_STAGE)) {
// Active/IDLE/PREPARED XA FrontendS will be rollbacked
s.close("Idle Timeout");
XASessionCheck.getInstance().addRollbackSession(s.getSession2());
}
} else {
s.close("idle timeout");
}
continue;
}
}
c.idleCheck();
}
}
}
@@ -193,7 +195,7 @@ public final class NIOProcessor {
}
}
// close the conn which executeTimeOut
if (!c.isDDL() && c.isBorrowed() && c.isExecuting() && c.getLastTime() < TimeUtil.currentTimeMillis() - sqlTimeout) {
if (!c.isDDL() && c.getState() == PooledEntry.STATE_IN_USE && c.isExecuting() && c.getLastTime() < TimeUtil.currentTimeMillis() - sqlTimeout) {
LOGGER.info("found backend connection SQL timeout ,close it " + c);
c.close("sql timeout");
}
@@ -206,7 +208,6 @@ public final class NIOProcessor {
if (c instanceof AbstractConnection) {
checkConSendQueue((AbstractConnection) c);
}
c.idleCheck();
}
}
}
@@ -9,8 +9,6 @@ import com.actiontech.dble.backend.mysql.nio.MySQLConnection;
import com.actiontech.dble.config.FlowControllerConfig;
import com.actiontech.dble.singleton.WriteQueueFlowController;
import com.actiontech.dble.util.TimeUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -21,7 +19,7 @@ import java.nio.channels.SocketChannel;
import java.util.concurrent.atomic.AtomicBoolean;
public class NIOSocketWR extends SocketWR {
private static final Logger LOGGER = LoggerFactory.getLogger(NIOSocketWR.class);
private SelectionKey processKey;
private static final int OP_NOT_READ = ~SelectionKey.OP_READ;
private static final int OP_NOT_WRITE = ~SelectionKey.OP_WRITE;
@@ -1,33 +0,0 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* based on code by MyCATCopyrightHolder Copyright (c) 2013, OpenCloudDB/MyCAT.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.net.factory;
import com.actiontech.dble.DbleServer;
import java.io.IOException;
import java.nio.channels.AsynchronousSocketChannel;
import java.nio.channels.NetworkChannel;
import java.nio.channels.SocketChannel;
/**
* @author mycat
*/
public abstract class BackendConnectionFactory {

    /**
     * Opens a network channel for a new backend connection.
     *
     * @param isAIO true to open an asynchronous channel bound to the server's
     *              shared async channel group; false for a non-blocking NIO channel
     * @return the newly opened channel
     * @throws IOException if the channel cannot be opened or configured
     */
    protected NetworkChannel openSocketChannel(boolean isAIO)
            throws IOException {
        if (isAIO) {
            return AsynchronousSocketChannel.open(DbleServer.getInstance().getNextAsyncChannelGroup());
        }
        SocketChannel channel = SocketChannel.open();
        try {
            channel.configureBlocking(false);
        } catch (IOException e) {
            // avoid leaking the file descriptor if configuration fails
            channel.close();
            throw e;
        }
        return channel;
    }
}
@@ -49,7 +49,7 @@ public class ServerUserAuthenticator extends FrontendAuthenticator {
sc.setSchema(auth.getDatabase());
sc.initCharsetIndex(auth.getCharsetIndex());
sc.setHandler(new ShardingUserCommandHandler(sc));
sc.setMultStatementAllow(auth.isMultStatementAllow());
sc.setMultiStatementAllow(auth.isMultStatementAllow());
sc.setClientFlags(auth.getClientFlags());
boolean clientCompress = Capabilities.CLIENT_COMPRESS == (Capabilities.CLIENT_COMPRESS & auth.getClientFlags());
boolean usingCompress = SystemConfig.getInstance().getUseCompression() == 1;
@@ -339,7 +339,7 @@ public final class RouterUtil {
ArrayList<String> x = new ArrayList<>(shardingNodes);
Map<String, ShardingNode> shardingNodeMap = DbleServer.getInstance().getConfig().getShardingNodes();
while (x.size() > 1) {
if (shardingNodeMap.get(x.get(index)).getDbGroup().getWriteSource().isAlive()) {
if (shardingNodeMap.get(x.get(index)).getDbGroup().getWriteDbInstance().isAlive()) {
return x.get(index);
}
x.remove(index);
@@ -758,22 +758,17 @@ public class NonBlockingSession implements Session {
public void releaseConnection(RouteResultsetNode rrn, boolean debug, final boolean needClose) {
if (rrn != null) {
BackendConnection c = target.remove(rrn);
if (c != null) {
if (debug) {
LOGGER.debug("release connection " + c);
if (c != null && !c.isClosed()) {
if (source.isFlowControlled()) {
releaseConnectionFromFlowCntrolled(c);
}
if (!c.isClosed()) {
if (source.isFlowControlled()) {
releaseConnectionFromFlowCntrolled(c);
}
if (c.isAutocommit()) {
c.release();
} else if (needClose) {
//c.rollback();
c.close("the need to be closed");
} else {
c.release();
}
if (c.isAutocommit()) {
c.release();
} else if (needClose) {
//c.rollback();
c.close("the need to be closed");
} else {
c.release();
}
}
}
@@ -786,13 +781,9 @@ public class NonBlockingSession implements Session {
if (theCon == con) {
iterator.remove();
con.release();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("release connection " + con);
}
break;
}
}
}
public void waitFinishConnection(RouteResultsetNode rrn) {
@@ -862,14 +853,11 @@ public class NonBlockingSession implements Session {
}
for (Entry<RouteResultsetNode, BackendConnection> en : toKilled.entrySet()) {
KillConnectionHandler kill = new KillConnectionHandler(
en.getValue(), this);
KillConnectionHandler kill = new KillConnectionHandler(en.getValue(), this);
ServerConfig conf = DbleServer.getInstance().getConfig();
ShardingNode dn = conf.getShardingNodes().get(
en.getKey().getName());
ShardingNode dn = conf.getShardingNodes().get(en.getKey().getName());
try {
dn.getConnectionFromSameSource(en.getValue().getSchema(), true, en.getValue(),
kill, en.getKey());
dn.getConnectionFromSameSource(en.getValue().getSchema(), en.getValue(), kill, en.getKey());
} catch (Exception e) {
LOGGER.info("get killer connection failed for " + en.getKey(), e);
kill.connectionError(e, null);
@@ -915,7 +903,7 @@ public class NonBlockingSession implements Session {
ServerConfig conf = DbleServer.getInstance().getConfig();
ShardingNode dn = conf.getShardingNodes().get(node.getName());
try {
MySQLConnection newConn = (MySQLConnection) dn.getConnection(dn.getDatabase(), errConn.isAutocommit(), false, errConn.getAttachment());
MySQLConnection newConn = (MySQLConnection) dn.getConnection(dn.getDatabase(), false, errConn.getAttachment());
newConn.setXaStatus(errConn.getXaStatus());
newConn.setSession(this);
if (!newConn.setResponseHandler(queryHandler)) {
@@ -79,11 +79,10 @@ public class ServerConnection extends FrontendConnection {
private FrontendPrepareHandler prepareHandler;
private LoadDataInfileHandler loadDataInfileHandler;
private boolean sessionReadOnly = false;
private volatile boolean multStatementAllow = false;
private volatile boolean multiStatementAllow = false;
private ServerUserConfig userConfig;
public ServerConnection(NetworkChannel channel)
throws IOException {
public ServerConnection(NetworkChannel channel) throws IOException {
super(channel);
this.handler = new ServerUserAuthenticator(this);
@@ -121,13 +120,11 @@ public class ServerConnection extends FrontendConnection {
this.userConfig = userConfig;
}
@Override
public boolean isIdleTimeout() {
if (isAuthenticated) {
return super.isIdleTimeout();
return TimeUtil.currentTimeMillis() > Math.max(lastWriteTime, lastReadTime) + idleTimeout;
} else {
return TimeUtil.currentTimeMillis() > Math.max(lastWriteTime,
lastReadTime) + AUTH_TIMEOUT;
return TimeUtil.currentTimeMillis() > Math.max(lastWriteTime, lastReadTime) + AUTH_TIMEOUT;
}
}
@@ -216,15 +213,14 @@ public class ServerConnection extends FrontendConnection {
this.loadDataInfileHandler = loadDataInfileHandler;
}
public boolean isMultStatementAllow() {
return multStatementAllow;
public boolean isMultiStatementAllow() {
return multiStatementAllow;
}
public void setMultStatementAllow(boolean multStatementAllow) {
this.multStatementAllow = multStatementAllow;
public void setMultiStatementAllow(boolean multiStatementAllow) {
this.multiStatementAllow = multiStatementAllow;
}
public void setPrepareHandler(FrontendPrepareHandler prepareHandler) {
this.prepareHandler = prepareHandler;
}
@@ -524,7 +520,6 @@ public class ServerConnection extends FrontendConnection {
}
public void stmtPrepare(byte[] data) {
if (prepareHandler != null) {
MySQLMessage mm = new MySQLMessage(data);
@@ -566,11 +561,11 @@ public class ServerConnection extends FrontendConnection {
mm.position(5);
int optCommand = mm.readUB2();
if (optCommand == 0) {
this.multStatementAllow = true;
this.multiStatementAllow = true;
write(writeToBuffer(EOFPacket.EOF, allocate()));
return;
} else if (optCommand == 1) {
this.multStatementAllow = false;
this.multiStatementAllow = false;
write(writeToBuffer(EOFPacket.EOF, allocate()));
return;
}
@@ -844,6 +839,9 @@ public class ServerConnection extends FrontendConnection {
@Override
public synchronized void close(String reason) {
if (isClosed) {
return;
}
super.close(reason);
if (session != null) {
TsQueriesCounter.getInstance().addToHistory(session);
@@ -56,7 +56,7 @@ public class ServerQueryHandler implements FrontendQueryHandler {
sql = source.getSession2().getRemingSql();
}
//Preliminary judgment of multi statement
if (source.isMultStatementAllow() && source.getSession2().generalNextStatement(sql)) {
if (source.isMultiStatementAllow() && source.getSession2().generalNextStatement(sql)) {
sql = sql.substring(0, ParseUtil.findNextBreak(sql));
}
source.setExecuteSql(sql);
@@ -28,7 +28,7 @@ public class VarsExtractorHandler {
private Condition done;
private Map<String, PhysicalDbGroup> dbGroups;
private volatile SystemVariables systemVariables = null;
private PhysicalDbInstance usedDataource = null;
private PhysicalDbInstance usedDbInstance = null;
public VarsExtractorHandler(Map<String, PhysicalDbGroup> dbGroups) {
this.dbGroups = dbGroups;
@@ -40,7 +40,7 @@ public class VarsExtractorHandler {
public SystemVariables execute() {
OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler(MYSQL_SHOW_VARIABLES_COLS, new MysqlVarsListener(this));
PhysicalDbInstance ds = getPhysicalDbInstance();
this.usedDataource = ds;
this.usedDbInstance = ds;
if (ds != null) {
OneTimeConnJob sqlJob = new OneTimeConnJob(MYSQL_SHOW_VARIABLES, null, resultHandler, ds);
sqlJob.run();
@@ -54,7 +54,7 @@ public class VarsExtractorHandler {
private PhysicalDbInstance getPhysicalDbInstance() {
PhysicalDbInstance ds = null;
for (PhysicalDbGroup dbGroup : dbGroups.values()) {
PhysicalDbInstance dsTest = dbGroup.getWriteSource();
PhysicalDbInstance dsTest = dbGroup.getWriteDbInstance();
if (dsTest.isTestConnSuccess()) {
ds = dsTest;
}
@@ -112,11 +112,11 @@ public class VarsExtractorHandler {
}
}
public PhysicalDbInstance getUsedDataource() {
return usedDataource;
public PhysicalDbInstance getUsedDbInstance() {
return usedDbInstance;
}
public void setUsedDataource(PhysicalDbInstance usedDataource) {
this.usedDataource = usedDataource;
public void setUsedDbInstance(PhysicalDbInstance usedDbInstance) {
this.usedDbInstance = usedDbInstance;
}
}
@@ -2,8 +2,8 @@ package com.actiontech.dble.singleton;
import com.actiontech.dble.DbleServer;
import com.actiontech.dble.backend.BackendConnection;
import com.actiontech.dble.backend.datasource.PhysicalDbGroup;
import com.actiontech.dble.backend.mysql.xa.XAStateLog;
import com.actiontech.dble.backend.pool.PooledEntry;
import com.actiontech.dble.buffer.BufferPool;
import com.actiontech.dble.config.model.SystemConfig;
import com.actiontech.dble.config.model.user.UserName;
@@ -36,26 +36,26 @@ public final class Scheduler {
private static final long DEFAULT_OLD_CONNECTION_CLEAR_PERIOD = 5 * 1000L;
private static final long DEFAULT_SQL_STAT_RECYCLE_PERIOD = 5 * 1000L;
private ExecutorService timerExecutor;
private ScheduledExecutorService scheduledExecutor;
    private Scheduler() {
        // single-threaded scheduler shared by all periodic jobs; the jobs
        // themselves are registered later in init()
        this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder().setNameFormat("TimerScheduler-%d").build());
    }
public void init(ExecutorService executor) {
this.timerExecutor = executor;
ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder().setNameFormat("TimerScheduler-%d").build());
long shardingNodeIdleCheckPeriod = SystemConfig.getInstance().getShardingNodeIdleCheckPeriod();
scheduler.scheduleAtFixedRate(updateTime(), 0L, TIME_UPDATE_PERIOD, TimeUnit.MILLISECONDS);
scheduler.scheduleWithFixedDelay(DbleServer.getInstance().processorCheck(), 0L, SystemConfig.getInstance().getProcessorCheckPeriod(), TimeUnit.MILLISECONDS);
scheduler.scheduleAtFixedRate(shardingNodeConHeartBeatCheck(shardingNodeIdleCheckPeriod), 0L, shardingNodeIdleCheckPeriod, TimeUnit.MILLISECONDS);
//dbGroup heartBeat will be influence by dbGroupWithoutWR
scheduler.scheduleAtFixedRate(dbInstanceHeartbeat(), 0L, SystemConfig.getInstance().getShardingNodeHeartbeatPeriod(), TimeUnit.MILLISECONDS);
scheduler.scheduleAtFixedRate(dbInstanceOldConsClear(), 0L, DEFAULT_OLD_CONNECTION_CLEAR_PERIOD, TimeUnit.MILLISECONDS);
scheduler.scheduleWithFixedDelay(xaSessionCheck(), 0L, SystemConfig.getInstance().getXaSessionCheckPeriod(), TimeUnit.MILLISECONDS);
scheduler.scheduleWithFixedDelay(xaLogClean(), 0L, SystemConfig.getInstance().getXaLogCleanPeriod(), TimeUnit.MILLISECONDS);
scheduler.scheduleWithFixedDelay(resultSetMapClear(), 0L, SystemConfig.getInstance().getClearBigSQLResultSetMapMs(), TimeUnit.MILLISECONDS);
scheduledExecutor.scheduleAtFixedRate(updateTime(), 0L, TIME_UPDATE_PERIOD, TimeUnit.MILLISECONDS);
scheduledExecutor.scheduleWithFixedDelay(DbleServer.getInstance().processorCheck(), 0L, SystemConfig.getInstance().getProcessorCheckPeriod(), TimeUnit.MILLISECONDS);
scheduledExecutor.scheduleAtFixedRate(dbInstanceOldConsClear(), 0L, DEFAULT_OLD_CONNECTION_CLEAR_PERIOD, TimeUnit.MILLISECONDS);
scheduledExecutor.scheduleWithFixedDelay(xaSessionCheck(), 0L, SystemConfig.getInstance().getXaSessionCheckPeriod(), TimeUnit.MILLISECONDS);
scheduledExecutor.scheduleWithFixedDelay(xaLogClean(), 0L, SystemConfig.getInstance().getXaLogCleanPeriod(), TimeUnit.MILLISECONDS);
scheduledExecutor.scheduleWithFixedDelay(resultSetMapClear(), 0L, SystemConfig.getInstance().getClearBigSQLResultSetMapMs(), TimeUnit.MILLISECONDS);
if (SystemConfig.getInstance().getUseSqlStat() == 1) {
//sql record detail timing clean
scheduler.scheduleWithFixedDelay(recycleSqlStat(), 0L, DEFAULT_SQL_STAT_RECYCLE_PERIOD, TimeUnit.MILLISECONDS);
scheduledExecutor.scheduleWithFixedDelay(recycleSqlStat(), 0L, DEFAULT_SQL_STAT_RECYCLE_PERIOD, TimeUnit.MILLISECONDS);
}
scheduler.scheduleAtFixedRate(threadStatRenew(), 0L, 1, TimeUnit.SECONDS);
scheduler.scheduleAtFixedRate(printLongTimeDDL(), 0L, DDL_EXECUTE_CHECK_PERIOD, TimeUnit.SECONDS);
scheduledExecutor.scheduleAtFixedRate(threadStatRenew(), 0L, 1, TimeUnit.SECONDS);
scheduledExecutor.scheduleAtFixedRate(printLongTimeDDL(), 0L, DDL_EXECUTE_CHECK_PERIOD, TimeUnit.SECONDS);
}
private Runnable printLongTimeDDL() {
@@ -76,45 +76,6 @@ public final class Scheduler {
};
}
private Runnable shardingNodeConHeartBeatCheck(final long heartPeriod) {
return new Runnable() {
@Override
public void run() {
timerExecutor.execute(new Runnable() {
@Override
public void run() {
Map<String, PhysicalDbGroup> nodes = DbleServer.getInstance().getConfig().getDbGroups();
for (PhysicalDbGroup node : nodes.values()) {
node.heartbeatCheck(heartPeriod);
}
}
});
}
};
}
// heartbeat for dbInstance
private Runnable dbInstanceHeartbeat() {
return new Runnable() {
@Override
public void run() {
timerExecutor.execute(new Runnable() {
@Override
public void run() {
if (DbleServer.getInstance().getConfig().isFullyConfigured()) {
Map<String, PhysicalDbGroup> hosts = DbleServer.getInstance().getConfig().getDbGroups();
for (PhysicalDbGroup host : hosts.values()) {
host.doHeartbeat();
}
}
}
});
}
};
}
/**
* after reload @@config_all ,clean old connection
*/
@@ -133,7 +94,7 @@ public final class Scheduler {
while (iterator.hasNext()) {
BackendConnection con = iterator.next();
long lastTime = con.getLastTime();
if (con.isClosed() || !con.isBorrowed() || currentTime - lastTime > sqlTimeout) {
if (con.isClosed() || con.getState() != PooledEntry.STATE_IN_USE || currentTime - lastTime > sqlTimeout) {
con.close("clear old backend connection ...");
iterator.remove();
}
@@ -239,6 +200,9 @@ public final class Scheduler {
return timerExecutor;
}
    /** @return the shared single-threaded executor used for all periodic tasks */
    public ScheduledExecutorService getScheduledExecutor() {
        return scheduledExecutor;
    }
public static Scheduler getInstance() {
return INSTANCE;
@@ -66,7 +66,7 @@ public class MultiTablesMetaJob implements ResponseHandler, Runnable {
ShardingNode dn = DbleServer.getInstance().getConfig().getShardingNodes().get(node.getName());
dn.getConnection(dn.getDatabase(), isMustWriteNode, true, node, this, node);
} else {
ds.getConnection(schema, true, this, null, false);
ds.getConnection(schema, this, null, false);
}
} catch (Exception e) {
logger.warn("can't get connection" + shardingNode, e);
@@ -34,7 +34,7 @@ public class OneTimeConnJob extends SQLJob {
public void run() {
try {
ds.getNewConnection(schema, this, null, false, true);
ds.createConnectionSkipPool(schema, this);
} catch (Exception e) {
this.connectionError(e, null);
}
@@ -69,7 +69,7 @@ public class SQLJob implements ResponseHandler, Runnable, Cloneable {
ShardingNode dn = DbleServer.getInstance().getConfig().getShardingNodes().get(node.getName());
dn.getConnection(dn.getDatabase(), isMustWriteNode, true, node, this, node);
} else {
ds.getConnection(schema, true, this, null, isMustWriteNode);
ds.getConnection(schema, this, null, isMustWriteNode);
}
} catch (Exception e) {
LOGGER.warn("can't get connection", e);
@@ -47,8 +47,8 @@ public class SetTestJob implements ResponseHandler, Runnable {
try {
Map<String, PhysicalDbGroup> dbGroups = DbleServer.getInstance().getConfig().getDbGroups();
for (PhysicalDbGroup dbGroup : dbGroups.values()) {
if (dbGroup.getWriteSource().isAlive()) {
dbGroup.getWriteSource().getConnection(databaseName, true, this, null, false);
if (dbGroup.getWriteDbInstance().isAlive()) {
dbGroup.getWriteDbInstance().getConnection(databaseName, this, null, false);
sendTest = true;
break;
}
@@ -53,7 +53,7 @@ public class SpecialSqlJob extends SQLJob {
@Override
public void run() {
try {
ds.getConnection(schema, true, sqlJob, null, false);
ds.getConnection(schema, sqlJob, null, false);
} catch (Exception e) {
sqlJob.connectionError(e, null);
}
@@ -49,7 +49,7 @@ public class TransformSQLJob implements ResponseHandler, Runnable {
ShardingNode dn = DbleServer.getInstance().getConfig().getShardingNodes().get(node.getName());
dn.getConnection(dn.getDatabase(), false, true, node, this, node);
} else {
ds.getConnection(databaseName, true, this, null, false);
ds.getConnection(databaseName, this, null, false);
}
} catch (Exception e) {
LOGGER.warn("can't get connection", e);