no compiler error, without er and global

This commit is contained in:
yanhuqing666
2017-02-17 09:33:04 +08:00
parent 630e864d59
commit 5d2bd95ed2
462 changed files with 44902 additions and 2127 deletions

View File

@@ -1,6 +1,5 @@
package io.mycat.backend;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import io.mycat.backend.mysql.nio.handler.ResponseHandler;
@@ -44,7 +43,7 @@ public interface BackendConnection extends ClosableConnection {
public void execute(RouteResultsetNode node, ServerConnection source,
boolean autocommit) throws IOException;
boolean autocommit);
public void recordSql(String host, String schema, String statement);

View File

@@ -23,31 +23,245 @@
*/
package io.mycat.backend.mysql;
import java.io.FileInputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.FileInputStream;
import java.util.*;
/**
* @author mycat
*/
public class CharsetUtil {
public static final Logger logger = LoggerFactory
.getLogger(CharsetUtil.class);
private static final Map<Integer,String> INDEX_TO_CHARSET = new HashMap<>();
private static final String[] INDEX_TO_CHARSET = new String[248];
private static final Map<String, Integer> CHARSET_TO_INDEX = new HashMap<>();
private static final Map<String, String> CHARSET_TO_JAVA = new HashMap<String, String>();
static {
// index_to_charset.properties
INDEX_TO_CHARSET.put(1,"big5");
INDEX_TO_CHARSET.put(8,"latin1");
INDEX_TO_CHARSET.put(9,"latin2");
INDEX_TO_CHARSET.put(14,"cp1251");
INDEX_TO_CHARSET.put(28,"gbk");
INDEX_TO_CHARSET.put(24,"gb2312");
INDEX_TO_CHARSET.put(33,"utf8");
INDEX_TO_CHARSET.put(45,"utf8mb4");
INDEX_TO_CHARSET[1] = "big5";
INDEX_TO_CHARSET[2] = "latin2";
INDEX_TO_CHARSET[3] = "dec8";
INDEX_TO_CHARSET[4] = "cp850";
INDEX_TO_CHARSET[5] = "latin1";
INDEX_TO_CHARSET[6] = "hp8";
INDEX_TO_CHARSET[7] = "koi8r";
INDEX_TO_CHARSET[8] = "latin1";
INDEX_TO_CHARSET[9] = "latin2";
INDEX_TO_CHARSET[10] = "swe7";
INDEX_TO_CHARSET[11] = "ascii";
INDEX_TO_CHARSET[12] = "ujis";
INDEX_TO_CHARSET[13] = "sjis";
INDEX_TO_CHARSET[14] = "cp1251";
INDEX_TO_CHARSET[15] = "latin1";
INDEX_TO_CHARSET[16] = "hebrew";
INDEX_TO_CHARSET[18] = "tis620";
INDEX_TO_CHARSET[19] = "euckr";
INDEX_TO_CHARSET[20] = "latin7";
INDEX_TO_CHARSET[21] = "latin2";
INDEX_TO_CHARSET[22] = "koi8u";
INDEX_TO_CHARSET[23] = "cp1251";
INDEX_TO_CHARSET[24] = "gb2312";
INDEX_TO_CHARSET[25] = "greek";
INDEX_TO_CHARSET[26] = "cp1250";
INDEX_TO_CHARSET[27] = "latin2";
INDEX_TO_CHARSET[28] = "gbk";
INDEX_TO_CHARSET[29] = "cp1257";
INDEX_TO_CHARSET[30] = "latin5";
INDEX_TO_CHARSET[31] = "latin1";
INDEX_TO_CHARSET[32] = "armscii8";
INDEX_TO_CHARSET[33] = "utf8";
INDEX_TO_CHARSET[34] = "cp1250";
INDEX_TO_CHARSET[35] = "ucs2";
INDEX_TO_CHARSET[36] = "cp866";
INDEX_TO_CHARSET[37] = "keybcs2";
INDEX_TO_CHARSET[38] = "macce";
INDEX_TO_CHARSET[39] = "macroman";
INDEX_TO_CHARSET[40] = "cp852";
INDEX_TO_CHARSET[41] = "latin7";
INDEX_TO_CHARSET[42] = "latin7";
INDEX_TO_CHARSET[43] = "macce";
INDEX_TO_CHARSET[44] = "cp1250";
INDEX_TO_CHARSET[45] = "utf8mb4";
INDEX_TO_CHARSET[46] = "utf8mb4";
INDEX_TO_CHARSET[47] = "latin1";
INDEX_TO_CHARSET[48] = "latin1";
INDEX_TO_CHARSET[49] = "latin1";
INDEX_TO_CHARSET[50] = "cp1251";
INDEX_TO_CHARSET[51] = "cp1251";
INDEX_TO_CHARSET[52] = "cp1251";
INDEX_TO_CHARSET[53] = "macroman";
INDEX_TO_CHARSET[54] = "utf16";
INDEX_TO_CHARSET[55] = "utf16";
INDEX_TO_CHARSET[56] = "utf16le";
INDEX_TO_CHARSET[57] = "cp1256";
INDEX_TO_CHARSET[58] = "cp1257";
INDEX_TO_CHARSET[59] = "cp1257";
INDEX_TO_CHARSET[60] = "utf32";
INDEX_TO_CHARSET[61] = "utf32";
INDEX_TO_CHARSET[62] = "utf16le";
INDEX_TO_CHARSET[63] = "binary";
INDEX_TO_CHARSET[64] = "armscii8";
INDEX_TO_CHARSET[65] = "ascii";
INDEX_TO_CHARSET[66] = "cp1250";
INDEX_TO_CHARSET[67] = "cp1256";
INDEX_TO_CHARSET[68] = "cp866";
INDEX_TO_CHARSET[69] = "dec8";
INDEX_TO_CHARSET[70] = "greek";
INDEX_TO_CHARSET[71] = "hebrew";
INDEX_TO_CHARSET[72] = "hp8";
INDEX_TO_CHARSET[73] = "keybcs2";
INDEX_TO_CHARSET[74] = "koi8r";
INDEX_TO_CHARSET[75] = "koi8u";
INDEX_TO_CHARSET[77] = "latin2";
INDEX_TO_CHARSET[78] = "latin5";
INDEX_TO_CHARSET[79] = "latin7";
INDEX_TO_CHARSET[80] = "cp850";
INDEX_TO_CHARSET[81] = "cp852";
INDEX_TO_CHARSET[82] = "swe7";
INDEX_TO_CHARSET[83] = "utf8";
INDEX_TO_CHARSET[84] = "big5";
INDEX_TO_CHARSET[85] = "euckr";
INDEX_TO_CHARSET[86] = "gb2312";
INDEX_TO_CHARSET[87] = "gbk";
INDEX_TO_CHARSET[88] = "sjis";
INDEX_TO_CHARSET[89] = "tis620";
INDEX_TO_CHARSET[90] = "ucs2";
INDEX_TO_CHARSET[91] = "ujis";
INDEX_TO_CHARSET[92] = "geostd8";
INDEX_TO_CHARSET[93] = "geostd8";
INDEX_TO_CHARSET[94] = "latin1";
INDEX_TO_CHARSET[95] = "cp932";
INDEX_TO_CHARSET[96] = "cp932";
INDEX_TO_CHARSET[97] = "eucjpms";
INDEX_TO_CHARSET[98] = "eucjpms";
INDEX_TO_CHARSET[99] = "cp1250";
INDEX_TO_CHARSET[101] = "utf16";
INDEX_TO_CHARSET[102] = "utf16";
INDEX_TO_CHARSET[103] = "utf16";
INDEX_TO_CHARSET[104] = "utf16";
INDEX_TO_CHARSET[105] = "utf16";
INDEX_TO_CHARSET[106] = "utf16";
INDEX_TO_CHARSET[107] = "utf16";
INDEX_TO_CHARSET[108] = "utf16";
INDEX_TO_CHARSET[109] = "utf16";
INDEX_TO_CHARSET[110] = "utf16";
INDEX_TO_CHARSET[111] = "utf16";
INDEX_TO_CHARSET[112] = "utf16";
INDEX_TO_CHARSET[113] = "utf16";
INDEX_TO_CHARSET[114] = "utf16";
INDEX_TO_CHARSET[115] = "utf16";
INDEX_TO_CHARSET[116] = "utf16";
INDEX_TO_CHARSET[117] = "utf16";
INDEX_TO_CHARSET[118] = "utf16";
INDEX_TO_CHARSET[119] = "utf16";
INDEX_TO_CHARSET[120] = "utf16";
INDEX_TO_CHARSET[121] = "utf16";
INDEX_TO_CHARSET[122] = "utf16";
INDEX_TO_CHARSET[123] = "utf16";
INDEX_TO_CHARSET[124] = "utf16";
INDEX_TO_CHARSET[128] = "ucs2";
INDEX_TO_CHARSET[129] = "ucs2";
INDEX_TO_CHARSET[130] = "ucs2";
INDEX_TO_CHARSET[131] = "ucs2";
INDEX_TO_CHARSET[132] = "ucs2";
INDEX_TO_CHARSET[133] = "ucs2";
INDEX_TO_CHARSET[134] = "ucs2";
INDEX_TO_CHARSET[135] = "ucs2";
INDEX_TO_CHARSET[136] = "ucs2";
INDEX_TO_CHARSET[137] = "ucs2";
INDEX_TO_CHARSET[138] = "ucs2";
INDEX_TO_CHARSET[139] = "ucs2";
INDEX_TO_CHARSET[140] = "ucs2";
INDEX_TO_CHARSET[141] = "ucs2";
INDEX_TO_CHARSET[142] = "ucs2";
INDEX_TO_CHARSET[143] = "ucs2";
INDEX_TO_CHARSET[144] = "ucs2";
INDEX_TO_CHARSET[145] = "ucs2";
INDEX_TO_CHARSET[146] = "ucs2";
INDEX_TO_CHARSET[147] = "ucs2";
INDEX_TO_CHARSET[148] = "ucs2";
INDEX_TO_CHARSET[149] = "ucs2";
INDEX_TO_CHARSET[150] = "ucs2";
INDEX_TO_CHARSET[151] = "ucs2";
INDEX_TO_CHARSET[159] = "ucs2";
INDEX_TO_CHARSET[160] = "utf32";
INDEX_TO_CHARSET[161] = "utf32";
INDEX_TO_CHARSET[162] = "utf32";
INDEX_TO_CHARSET[163] = "utf32";
INDEX_TO_CHARSET[164] = "utf32";
INDEX_TO_CHARSET[165] = "utf32";
INDEX_TO_CHARSET[166] = "utf32";
INDEX_TO_CHARSET[167] = "utf32";
INDEX_TO_CHARSET[168] = "utf32";
INDEX_TO_CHARSET[169] = "utf32";
INDEX_TO_CHARSET[170] = "utf32";
INDEX_TO_CHARSET[171] = "utf32";
INDEX_TO_CHARSET[172] = "utf32";
INDEX_TO_CHARSET[173] = "utf32";
INDEX_TO_CHARSET[174] = "utf32";
INDEX_TO_CHARSET[175] = "utf32";
INDEX_TO_CHARSET[176] = "utf32";
INDEX_TO_CHARSET[177] = "utf32";
INDEX_TO_CHARSET[178] = "utf32";
INDEX_TO_CHARSET[179] = "utf32";
INDEX_TO_CHARSET[180] = "utf32";
INDEX_TO_CHARSET[181] = "utf32";
INDEX_TO_CHARSET[182] = "utf32";
INDEX_TO_CHARSET[183] = "utf32";
INDEX_TO_CHARSET[192] = "utf8";
INDEX_TO_CHARSET[193] = "utf8";
INDEX_TO_CHARSET[194] = "utf8";
INDEX_TO_CHARSET[195] = "utf8";
INDEX_TO_CHARSET[196] = "utf8";
INDEX_TO_CHARSET[197] = "utf8";
INDEX_TO_CHARSET[198] = "utf8";
INDEX_TO_CHARSET[199] = "utf8";
INDEX_TO_CHARSET[200] = "utf8";
INDEX_TO_CHARSET[201] = "utf8";
INDEX_TO_CHARSET[202] = "utf8";
INDEX_TO_CHARSET[203] = "utf8";
INDEX_TO_CHARSET[204] = "utf8";
INDEX_TO_CHARSET[205] = "utf8";
INDEX_TO_CHARSET[206] = "utf8";
INDEX_TO_CHARSET[207] = "utf8";
INDEX_TO_CHARSET[208] = "utf8";
INDEX_TO_CHARSET[209] = "utf8";
INDEX_TO_CHARSET[210] = "utf8";
INDEX_TO_CHARSET[211] = "utf8";
INDEX_TO_CHARSET[212] = "utf8";
INDEX_TO_CHARSET[213] = "utf8";
INDEX_TO_CHARSET[214] = "utf8";
INDEX_TO_CHARSET[215] = "utf8";
INDEX_TO_CHARSET[223] = "utf8";
INDEX_TO_CHARSET[224] = "utf8mb4";
INDEX_TO_CHARSET[225] = "utf8mb4";
INDEX_TO_CHARSET[226] = "utf8mb4";
INDEX_TO_CHARSET[227] = "utf8mb4";
INDEX_TO_CHARSET[228] = "utf8mb4";
INDEX_TO_CHARSET[229] = "utf8mb4";
INDEX_TO_CHARSET[230] = "utf8mb4";
INDEX_TO_CHARSET[231] = "utf8mb4";
INDEX_TO_CHARSET[232] = "utf8mb4";
INDEX_TO_CHARSET[233] = "utf8mb4";
INDEX_TO_CHARSET[234] = "utf8mb4";
INDEX_TO_CHARSET[235] = "utf8mb4";
INDEX_TO_CHARSET[236] = "utf8mb4";
INDEX_TO_CHARSET[237] = "utf8mb4";
INDEX_TO_CHARSET[238] = "utf8mb4";
INDEX_TO_CHARSET[239] = "utf8mb4";
INDEX_TO_CHARSET[240] = "utf8mb4";
INDEX_TO_CHARSET[241] = "utf8mb4";
INDEX_TO_CHARSET[242] = "utf8mb4";
INDEX_TO_CHARSET[243] = "utf8mb4";
INDEX_TO_CHARSET[244] = "utf8mb4";
INDEX_TO_CHARSET[245] = "utf8mb4";
INDEX_TO_CHARSET[246] = "utf8mb4";
INDEX_TO_CHARSET[247] = "utf8mb4";
String filePath = Thread.currentThread().getContextClassLoader()
.getResource("").getPath().replaceAll("%20", " ")
@@ -56,27 +270,44 @@ public class CharsetUtil {
try {
prop.load(new FileInputStream(filePath));
for (Object index : prop.keySet()){
INDEX_TO_CHARSET.put(Integer.parseInt((String) index), prop.getProperty((String) index));
INDEX_TO_CHARSET[Integer.parseInt((String) index)] = prop.getProperty((String) index);
}
} catch (Exception e) {
logger.error("error:",e);
}
// charset --> index
for(Integer key : INDEX_TO_CHARSET.keySet()){
String charset = INDEX_TO_CHARSET.get(key);
for(int index =0;index<INDEX_TO_CHARSET.length;index++){
String charset = INDEX_TO_CHARSET[index];
if(charset != null && CHARSET_TO_INDEX.get(charset) == null){
CHARSET_TO_INDEX.put(charset, key);
CHARSET_TO_INDEX.put(charset, index);
}
}
CHARSET_TO_INDEX.put("iso-8859-1", 14);
CHARSET_TO_INDEX.put("iso_8859_1", 14);
CHARSET_TO_INDEX.put("utf-8", 33);
CHARSET_TO_JAVA.put("binary", "US-ASCII");
CHARSET_TO_JAVA.put("hp8", "ISO8859_1");
CHARSET_TO_JAVA.put("ucs2", "UnicodeBig");
CHARSET_TO_JAVA.put("macce", "MacCentralEurope");
CHARSET_TO_JAVA.put("latin7", "ISO8859_7");
CHARSET_TO_JAVA.put("dec8", "ISO8859_1");
CHARSET_TO_JAVA.put("ujis", "EUC_JP");
CHARSET_TO_JAVA.put("koi8r", "KOI8_R");
CHARSET_TO_JAVA.put("cp932", "Shift_JIS");
CHARSET_TO_JAVA.put("koi8u", "KOI8_U");
CHARSET_TO_JAVA.put("utf16le", "UTF-16");
CHARSET_TO_JAVA.put("utf8mb4", "MacCentralEurope");
CHARSET_TO_JAVA.put("keybcs2", "Cp895");
CHARSET_TO_JAVA.put("geostd8", "US-ASCII");
CHARSET_TO_JAVA.put("swe7", "ISO8859_1");
CHARSET_TO_JAVA.put("eucjpms", "EUC_JP");
CHARSET_TO_JAVA.put("armscii8", "ISO8859_1");
}
public static final String getCharset(int index) {
return INDEX_TO_CHARSET.get(index);
return INDEX_TO_CHARSET[index];
}
public static final int getIndex(String charset) {
@@ -88,6 +319,17 @@ public class CharsetUtil {
}
}
/**
 * Translates a MySQL charset name into the corresponding Java charset name.
 *
 * @param charset the MySQL charset name; may be {@code null} or empty
 * @return the mapped Java charset name, or the input unchanged when it is
 *         {@code null}, empty, or has no entry in the mapping table
 */
public static String getJavaCharset(String charset) {
    if (charset == null || charset.isEmpty()) {
        return charset;
    }
    String mapped = CHARSET_TO_JAVA.get(charset);
    return mapped == null ? charset : mapped;
}
/**
 * Translates a MySQL collation index into the corresponding Java charset name.
 * Resolves the index to a MySQL charset name first, then delegates to
 * {@link #getJavaCharset(String)}.
 *
 * @param index the MySQL collation/charset index
 * @return the Java charset name, or the raw MySQL name when unmapped
 */
public static final String getJavaCharset(int index) {
    return getJavaCharset(getCharset(index));
}
}

View File

@@ -65,6 +65,12 @@ public class MySQLConnection extends BackendAIOConnection {
private volatile boolean borrowed = false;
private volatile boolean modifiedSQLExecuted = false;
private volatile boolean isDDL = false;
private volatile boolean isRunning;
private volatile StatusSync statusSync;
private volatile boolean metaDataSyned = true;
private volatile TxState xaStatus = TxState.TX_INITIALIZE_STATE;
private volatile int txIsolation;
private volatile boolean autocommit;
private static long initClientFlags() {
int flag = 0;
@@ -137,8 +143,6 @@ public class MySQLConnection extends BackendAIOConnection {
private boolean fromSlaveDB;
private long threadId;
private HandshakePacket handshake;
private volatile int txIsolation;
private volatile boolean autocommit;
private long clientFlags;
private boolean isAuthenticated;
private String user;
@@ -147,9 +151,6 @@ public class MySQLConnection extends BackendAIOConnection {
private ResponseHandler respHandler;
private final AtomicBoolean isQuit;
private volatile StatusSync statusSync;
private volatile boolean metaDataSyned = true;
private volatile TxState xaStatus = TxState.TX_INITIALIZE_STATE;
public MySQLConnection(NetworkChannel channel, boolean fromSlaveDB) {
super(channel);
@@ -161,7 +162,13 @@ public class MySQLConnection extends BackendAIOConnection {
// 设为默认值,免得每个初始化好的连接都要去同步一下
this.txIsolation = MycatServer.getInstance().getConfig().getSystem().getTxIsolation();
}
/** Sets the volatile running flag marking this connection as busy/idle. */
public void setRunning(boolean running) {
isRunning = running;
}
/** @return whether this connection is currently flagged as running */
public boolean isRunning() {
return isRunning;
}
/** @return the current XA transaction state ({@code TxState}) of this connection */
public TxState getXaStatus() {
return xaStatus;
}
@@ -391,7 +398,7 @@ public class MySQLConnection extends BackendAIOConnection {
}
public void execute(RouteResultsetNode rrn, ServerConnection sc,
boolean autocommit) throws UnsupportedEncodingException {
boolean autocommit){
if (!modifiedSQLExecuted && rrn.isModifySQL()) {
modifiedSQLExecuted = true;
}

View File

@@ -190,7 +190,7 @@ public class MySQLConnectionHandler extends BackendAsyncHandler {
private void handleFieldEofPacket(byte[] data) {
ResponseHandler respHand = responseHandler;
if (respHand != null) {
respHand.fieldEofResponse(header, fields, data, source);
respHand.fieldEofResponse(header, fields, null, data, false, source);
} else {
closeNoHandler();
}
@@ -202,7 +202,7 @@ public class MySQLConnectionHandler extends BackendAsyncHandler {
private void handleRowPacket(byte[] data) {
ResponseHandler respHand = responseHandler;
if (respHand != null) {
respHand.rowResponse(data, source);
respHand.rowResponse(data, null, false, source);
} else {
closeNoHandler();
@@ -222,7 +222,7 @@ public class MySQLConnectionHandler extends BackendAsyncHandler {
*/
private void handleRowEofPacket(byte[] data) {
if (responseHandler != null) {
responseHandler.rowEofResponse(data, source);
responseHandler.rowEofResponse(data, false, source);
} else {
closeNoHandler();
}

View File

@@ -31,10 +31,13 @@ import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;
import org.slf4j.Logger; import org.slf4j.LoggerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mycat.backend.BackendConnection;
import io.mycat.net.mysql.ErrorPacket;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
/**
* heartbeat check for mysql connections
@@ -132,11 +135,12 @@ public class ConnectionHeartBeatHandler implements ResponseHandler {
}
@Override
public void rowResponse(byte[] row, BackendConnection conn) {
public boolean rowResponse(byte[] rownull, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
return false;
}
@Override
public void rowEofResponse(byte[] eof, BackendConnection conn) {
public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
removeFinished(conn);
conn.release();
}
@@ -164,14 +168,28 @@ public class ConnectionHeartBeatHandler implements ResponseHandler {
LOGGER.warn("connection closed " + conn + " reason:" + reason);
}
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields,
byte[] eof, BackendConnection conn) {
public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
boolean isLeft, BackendConnection conn) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("received field eof from " + conn);
}
}
@Override
public void relayPacketResponse(byte[] relayPacket, BackendConnection conn) {
}
@Override
public void endPacketResponse(byte[] endPacket, BackendConnection conn) {
}
}
class HeartBeatCon {

View File

@@ -26,6 +26,8 @@ package io.mycat.backend.mysql.nio.handler;
import java.util.List;
import io.mycat.backend.BackendConnection;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
/**
* @author mycat
@@ -61,18 +63,19 @@ public class DelegateResponseHandler implements ResponseHandler {
}
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields, byte[] eof, BackendConnection conn) {
target.fieldEofResponse(header, fields, eof, conn);
public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
boolean isLeft, BackendConnection conn) {
target.fieldEofResponse(header, fields,fieldPackets, eof, isLeft, conn);
}
@Override
public void rowResponse(byte[] row, BackendConnection conn) {
target.rowResponse(row, conn);
public boolean rowResponse(byte[] row, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
return target.rowResponse(row, rowPacket, isLeft, conn);
}
@Override
public void rowEofResponse(byte[] eof, BackendConnection conn) {
target.rowEofResponse(eof, conn);
public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
target.rowEofResponse(eof, isLeft, conn);
}
@Override
@@ -86,5 +89,11 @@ public class DelegateResponseHandler implements ResponseHandler {
target.connectionClose(conn, reason);
}
@Override
public void relayPacketResponse(byte[] relayPacket, BackendConnection conn) {
}
@Override
public void endPacketResponse(byte[] endPacket, BackendConnection conn) {
}
}

View File

@@ -23,7 +23,6 @@
*/
package io.mycat.backend.mysql.nio.handler;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
@@ -41,6 +40,8 @@ import io.mycat.backend.datasource.PhysicalDBNode;
import io.mycat.cache.CachePool;
import io.mycat.config.MycatConfig;
import io.mycat.net.mysql.ErrorPacket;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.route.RouteResultsetNode;
import io.mycat.server.NonBlockingSession;
import io.mycat.server.parser.ServerParse;
@@ -99,11 +100,8 @@ public class FetchStoreNodeOfChildTableHandler implements ResponseHandler {
return null;
}
conn.setResponseHandler(this);
try {
conn.execute(node, session.getSource(), isAutoCommit());
} catch (IOException e) {
connectionError(e, conn);
}
conn.execute(node, session.getSource(), isAutoCommit());
} else {
mysqlDN.getConnection(mysqlDN.getDatabase(), true, node, this, node);
}
@@ -188,7 +186,7 @@ public class FetchStoreNodeOfChildTableHandler implements ResponseHandler {
}
@Override
public void rowResponse(byte[] row, BackendConnection conn) {
public boolean rowResponse(byte[] row, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("received rowResponse response from " + conn);
}
@@ -203,11 +201,12 @@ public class FetchStoreNodeOfChildTableHandler implements ResponseHandler {
} else {
LOGGER.warn("find multi data nodes for child table store, sql is: " + sql);
}
return false;
}
@Override
public void rowEofResponse(byte[] eof, BackendConnection conn) {
public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("rowEofResponse" + conn);
}
@@ -236,8 +235,14 @@ public class FetchStoreNodeOfChildTableHandler implements ResponseHandler {
}
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields,
byte[] eof, BackendConnection conn) {
public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
boolean isLeft, BackendConnection conn) {
}
@Override
public void relayPacketResponse(byte[] relayPacket, BackendConnection conn) {
}
@Override
public void endPacketResponse(byte[] endPacket, BackendConnection conn) {
}
}

View File

@@ -27,9 +27,12 @@ import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicInteger;
import org.slf4j.Logger; import org.slf4j.LoggerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mycat.backend.BackendConnection;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
/**
* wuzh
@@ -88,18 +91,18 @@ public class GetConnectionHandler implements ResponseHandler {
}
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields,
byte[] eof, BackendConnection conn) {
public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
boolean isLeft, BackendConnection conn) {
}
@Override
public void rowResponse(byte[] row, BackendConnection conn) {
public boolean rowResponse(byte[] row, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
return false;
}
@Override
public void rowEofResponse(byte[] eof, BackendConnection conn) {
public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
}
@@ -111,6 +114,12 @@ public class GetConnectionHandler implements ResponseHandler {
@Override
public void connectionClose(BackendConnection conn, String reason) {
}
@Override
public void relayPacketResponse(byte[] relayPacket, BackendConnection conn) {
}
@Override
public void endPacketResponse(byte[] endPacket, BackendConnection conn) {
}
}

View File

@@ -26,13 +26,16 @@ package io.mycat.backend.mysql.nio.handler;
import java.io.UnsupportedEncodingException;
import java.util.List;
import org.slf4j.Logger; import org.slf4j.LoggerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.MySQLConnection;
import io.mycat.net.mysql.CommandPacket;
import io.mycat.net.mysql.ErrorPacket;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.MySQLPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.server.NonBlockingSession;
/**
@@ -80,7 +83,7 @@ public class KillConnectionHandler implements ResponseHandler {
}
@Override
public void rowEofResponse(byte[] eof, BackendConnection conn) {
public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
LOGGER.warn(new StringBuilder().append("unexpected packet for ")
.append(conn).append(" bound by ").append(session.getSource())
.append(": field's eof").toString());
@@ -105,12 +108,13 @@ public class KillConnectionHandler implements ResponseHandler {
}
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields,
byte[] eof, BackendConnection conn) {
public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
boolean isLeft, BackendConnection conn) {
}
@Override
public void rowResponse(byte[] row, BackendConnection conn) {
public boolean rowResponse(byte[] row, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
return false;
}
@Override
@@ -121,5 +125,11 @@ public class KillConnectionHandler implements ResponseHandler {
@Override
public void connectionClose(BackendConnection conn, String reason) {
}
@Override
public void relayPacketResponse(byte[] relayPacket, BackendConnection conn) {
}
@Override
public void endPacketResponse(byte[] endPacket, BackendConnection conn) {
}
}

View File

@@ -1,6 +1,5 @@
package io.mycat.backend.mysql.nio.handler;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;
@@ -11,7 +10,9 @@ import io.mycat.MycatServer;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.datasource.PhysicalDBNode;
import io.mycat.config.MycatConfig;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.OkPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.route.RouteResultset;
import io.mycat.route.RouteResultsetNode;
import io.mycat.server.NonBlockingSession;
@@ -56,11 +57,7 @@ public class LockTablesHandler extends MultiNodeHandler {
return;
}
conn.setResponseHandler(this);
try {
conn.execute(node, session.getSource(), autocommit);
} catch (IOException e) {
connectionError(e, conn);
}
conn.execute(node, session.getSource(), autocommit);
}
@Override
@@ -106,21 +103,23 @@ public class LockTablesHandler extends MultiNodeHandler {
}
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields, byte[] eof, BackendConnection conn) {
public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
boolean isLeft, BackendConnection conn) {
LOGGER.error(new StringBuilder().append("unexpected packet for ")
.append(conn).append(" bound by ").append(session.getSource())
.append(": field's eof").toString());
}
@Override
public void rowResponse(byte[] row, BackendConnection conn) {
public boolean rowResponse(byte[] row, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
LOGGER.warn(new StringBuilder().append("unexpected packet for ")
.append(conn).append(" bound by ").append(session.getSource())
.append(": row data packet").toString());
return false;
}
@Override
public void rowEofResponse(byte[] eof, BackendConnection conn) {
public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
LOGGER.error(new StringBuilder().append("unexpected packet for ")
.append(conn).append(" bound by ").append(session.getSource())
.append(": row's eof").toString());

View File

@@ -193,4 +193,11 @@ public abstract class MultiNodeHandler implements ResponseHandler {
public void clearResources() {
}
@Override
public void relayPacketResponse(byte[] relayPacket, BackendConnection conn) {
}
@Override
public void endPacketResponse(byte[] endPacket, BackendConnection conn) {
}
}

View File

@@ -23,7 +23,6 @@
*/
package io.mycat.backend.mysql.nio.handler;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
@@ -208,11 +207,7 @@ public class MultiNodeQueryHandler extends MultiNodeHandler implements LoadDataR
return;
}
conn.setResponseHandler(this);
try {
conn.execute(node, session.getSource(), sessionAutocommit&&!session.getSource().isTxstart()&&!node.isModifySQL());
} catch (IOException e) {
connectionError(e, conn);
}
conn.execute(node, session.getSource(), sessionAutocommit&&!session.getSource().isTxstart()&&!node.isModifySQL());
}
@Override
public void connectionClose(BackendConnection conn, String reason) {
@@ -350,7 +345,7 @@ public class MultiNodeQueryHandler extends MultiNodeHandler implements LoadDataR
}
@Override
public void rowEofResponse(final byte[] eof, BackendConnection conn) {
public void rowEofResponse(final byte[] eof, boolean isLeft, BackendConnection conn) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("on row end reseponse " + conn);
}
@@ -565,8 +560,8 @@ public class MultiNodeQueryHandler extends MultiNodeHandler implements LoadDataR
}
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields,
byte[] eof, BackendConnection conn) {
public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPacketsnull, byte[] eof,
boolean isLeft, BackendConnection conn) {
this.netOutBytes += header.length;
@@ -711,7 +706,7 @@ public class MultiNodeQueryHandler extends MultiNodeHandler implements LoadDataR
}
@Override
public void rowResponse(final byte[] row, final BackendConnection conn) {
public boolean rowResponse(final byte[] row, RowDataPacket rowPacketnull, boolean isLeft, BackendConnection conn) {
if (errorRepsponsed.get()) {
// the connection has been closed or set to "txInterrupt" properly
@@ -720,7 +715,7 @@ public class MultiNodeQueryHandler extends MultiNodeHandler implements LoadDataR
// @author Uncle-pan
// @since 2016-03-25
//conn.close(error);
return;
return true;
}
@@ -767,6 +762,7 @@ public class MultiNodeQueryHandler extends MultiNodeHandler implements LoadDataR
} finally {
lock.unlock();
}
return false;
}
@Override

View File

@@ -25,9 +25,12 @@ package io.mycat.backend.mysql.nio.handler;
import java.util.List;
import org.slf4j.Logger; import org.slf4j.LoggerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mycat.backend.BackendConnection;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
public class NewConnectionRespHandler implements ResponseHandler{
private static final Logger LOGGER = LoggerFactory
@@ -59,20 +62,20 @@ public class NewConnectionRespHandler implements ResponseHandler{
}
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields,
byte[] eof, BackendConnection conn) {
public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
boolean isLeft, BackendConnection conn) {
LOGGER.info("fieldEofResponse: " + conn );
}
@Override
public void rowResponse(byte[] row, BackendConnection conn) {
public boolean rowResponse(byte[] row, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
LOGGER.info("rowResponse: " + conn );
return false;
}
@Override
public void rowEofResponse(byte[] eof, BackendConnection conn) {
public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
LOGGER.info("rowEofResponse: " + conn );
}
@@ -89,4 +92,12 @@ public class NewConnectionRespHandler implements ResponseHandler{
}
@Override
public void relayPacketResponse(byte[] relayPacket, BackendConnection conn) {
}
@Override
public void endPacketResponse(byte[] endPacket, BackendConnection conn) {
}
}

View File

@@ -26,6 +26,8 @@ package io.mycat.backend.mysql.nio.handler;
import java.util.List;
import io.mycat.backend.BackendConnection;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
/**
* @author mycat
@@ -59,19 +61,28 @@ public interface ResponseHandler {
/**
* 收到字段数据包结束的响应处理
*/
void fieldEofResponse(byte[] header, List<byte[]> fields, byte[] eof,
BackendConnection conn);
void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
boolean isLeft, BackendConnection conn);
/**
* 收到行数据包的响应处理
*/
void rowResponse(byte[] row, BackendConnection conn);
boolean rowResponse(byte[] rownull, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn);
/**
* 收到行数据包结束的响应处理
*/
void rowEofResponse(byte[] eof, BackendConnection conn);
void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn);
/**
* 收到中继数据包的响应处理
*/
void relayPacketResponse(byte[] relayPacket, BackendConnection conn);
/**
* 收到结束数据包的响应处理
*/
void endPacketResponse(byte[] endPacket, BackendConnection conn);
/**
* 写队列为空,可以写数据了
*

View File

@@ -1,89 +0,0 @@
/*
* Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software;Designed and Developed mainly by many Chinese
* opensource volunteers. you can redistribute it and/or modify it under the
* terms of the GNU General Public License version 2 only, as published by the
* Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Any questions about this component can be directed to it's project Web address
* https://code.google.com/p/opencloudb/.
*
*/
package io.mycat.backend.mysql.nio.handler;
import java.util.List;
import org.slf4j.Logger; import org.slf4j.LoggerFactory;
import io.mycat.backend.BackendConnection;
/**
 * A {@code ResponseHandler} that performs no real work: every callback
 * merely logs the event and the backend connection it arrived on.
 * Useful as a debugging/tracing handler; errors are logged at WARN,
 * normal protocol events at INFO.
 */
public class SimpleLogHandler implements ResponseHandler{
private static final Logger LOGGER = LoggerFactory
.getLogger(SimpleLogHandler.class);
// Connection-level failure: log the throwable alongside the connection.
@Override
public void connectionError(Throwable e, BackendConnection conn) {
LOGGER.warn(conn+" connectionError "+e);
}
// A backend connection was obtained from the pool.
@Override
public void connectionAcquired(BackendConnection conn) {
LOGGER.info("connectionAcquired "+conn);
}
// MySQL error packet received; the raw packet bytes are rendered as a String.
// NOTE(review): new String(err) uses the platform default charset — the error
// message may be mis-decoded for non-ASCII content; verify intended behavior.
@Override
public void errorResponse(byte[] err, BackendConnection conn) {
LOGGER.warn("caught error resp: " + conn + " " + new String(err));
}
// MySQL OK packet received.
@Override
public void okResponse(byte[] ok, BackendConnection conn) {
LOGGER.info("okResponse: " + conn );
}
// End of the field-definition phase of a result set.
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields,
byte[] eof, BackendConnection conn) {
LOGGER.info("fieldEofResponse: " + conn );
}
// A single result-set row arrived.
@Override
public void rowResponse(byte[] row, BackendConnection conn) {
LOGGER.info("rowResponse: " + conn );
}
// End of the result-set rows (EOF packet).
@Override
public void rowEofResponse(byte[] eof, BackendConnection conn) {
LOGGER.info("rowEofResponse: " + conn );
}
// Write queue drained; nothing to do for a log-only handler.
@Override
public void writeQueueAvailable() {
}
// Connection closed; intentionally not logged here.
@Override
public void connectionClose(BackendConnection conn, String reason) {
}
}

View File

@@ -153,27 +153,12 @@ public class SingleNodeHandler implements ResponseHandler, LoadDataResponseHandl
return;
}
conn.setResponseHandler(this);
try {
boolean isAutocommit = session.getSource().isAutocommit()&&!session.getSource().isTxstart();
if(!isAutocommit&& node.isModifySQL()){
TxnLogHelper.putTxnLog(session.getSource(), node.getStatement());
}
conn.execute(node, session.getSource(), isAutocommit);
} catch (Exception e1) {
executeException(conn, e1);
return;
boolean isAutocommit = session.getSource().isAutocommit()&&!session.getSource().isTxstart();
if(!isAutocommit&& node.isModifySQL()){
TxnLogHelper.putTxnLog(session.getSource(), node.getStatement());
}
conn.execute(node, session.getSource(), isAutocommit);
}
private void executeException(BackendConnection c, Exception e) {
ErrorPacket err = new ErrorPacket();
err.packetId = ++packetId;
err.errno = ErrorCode.ERR_FOUND_EXCEPION;
err.message = StringUtil.encode(e.toString(), session.getSource().getCharset());
this.backConnectionErr(err, c);
}
@Override
public void connectionError(Throwable e, BackendConnection conn) {
ErrorPacket err = new ErrorPacket();
@@ -270,7 +255,7 @@ public class SingleNodeHandler implements ResponseHandler, LoadDataResponseHandl
* 行结束标志返回时触发将EOF标志写入缓冲区最后调用source.write(buffer)将缓冲区放入前端连接的写缓冲队列中等待NIOSocketWR将其发送给应用
*/
@Override
public void rowEofResponse(byte[] eof, BackendConnection conn) {
public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
this.netOutBytes += eof.length;
@@ -322,8 +307,8 @@ public class SingleNodeHandler implements ResponseHandler, LoadDataResponseHandl
* 元数据返回时触发将header和元数据内容依次写入缓冲区中
*/
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields,
byte[] eof, BackendConnection conn) {
public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPacketsnull, byte[] eof,
boolean isLeft, BackendConnection conn) {
this.netOutBytes += header.length;
for (int i = 0, len = fields.size(); i < len; ++i) {
@@ -378,7 +363,7 @@ public class SingleNodeHandler implements ResponseHandler, LoadDataResponseHandl
* 行数据返回时触发,将行数据写入缓冲区中
*/
@Override
public void rowResponse(byte[] row, BackendConnection conn) {
public boolean rowResponse(byte[] row, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
this.netOutBytes += row.length;
this.selectRows++;
@@ -388,14 +373,14 @@ public class SingleNodeHandler implements ResponseHandler, LoadDataResponseHandl
rowDataPacket.read(row);
String table = StringUtil.decode(rowDataPacket.fieldValues.get(0), conn.getCharset());
if (shardingTablesSet.contains(table.toUpperCase())) {
return;
return false;
}
}
row[3] = ++packetId;
if ( prepared ) {
RowDataPacket rowDataPk = new RowDataPacket(fieldCount);
rowDataPk.read(row);
rowDataPk.read(row);
BinaryRowDataPacket binRowDataPk = new BinaryRowDataPacket();
binRowDataPk.read(fieldPackets, rowDataPk);
binRowDataPk.packetId = rowDataPk.packetId;
@@ -410,7 +395,7 @@ public class SingleNodeHandler implements ResponseHandler, LoadDataResponseHandl
buffer = session.getSource().writeToBuffer(row, allocBuffer());
//session.getSource().write(row);
}
return false;
}
@Override
@@ -450,4 +435,16 @@ public class SingleNodeHandler implements ResponseHandler, LoadDataResponseHandl
return "SingleNodeHandler [node=" + node + ", packetId=" + packetId + "]";
}
@Override
public void relayPacketResponse(byte[] relayPacket, BackendConnection conn) {
}
@Override
public void endPacketResponse(byte[] endPacket, BackendConnection conn) {
}
}

View File

@@ -8,7 +8,9 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mycat.backend.BackendConnection;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.OkPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.route.RouteResultsetNode;
import io.mycat.server.NonBlockingSession;
import io.mycat.server.parser.ServerParse;
@@ -103,21 +105,23 @@ public class UnLockTablesHandler extends MultiNodeHandler implements ResponseHan
}
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields, byte[] eof, BackendConnection conn) {
public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
boolean isLeft, BackendConnection conn) {
LOGGER.error(new StringBuilder().append("unexpected packet for ")
.append(conn).append(" bound by ").append(session.getSource())
.append(": field's eof").toString());
}
@Override
public void rowResponse(byte[] row, BackendConnection conn) {
public boolean rowResponse(byte[] rownull, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
LOGGER.warn(new StringBuilder().append("unexpected packet for ")
.append(conn).append(" bound by ").append(session.getSource())
.append(": row data packet").toString());
return false;
}
@Override
public void rowEofResponse(byte[] eof, BackendConnection conn) {
public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
LOGGER.error(new StringBuilder().append("unexpected packet for ")
.append(conn).append(" bound by ").append(session.getSource())
.append(": row's eof").toString());
@@ -135,4 +139,15 @@ public class UnLockTablesHandler extends MultiNodeHandler implements ResponseHan
}
@Override
public void relayPacketResponse(byte[] relayPacket, BackendConnection conn) {
}
@Override
public void endPacketResponse(byte[] endPacket, BackendConnection conn) {
}
}

View File

@@ -0,0 +1,410 @@
package io.mycat.backend.mysql.nio.handler.builder;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import com.alibaba.druid.sql.ast.SQLOrderingSpecification;
import io.mycat.MycatServer;
import io.mycat.backend.mysql.nio.handler.builder.sqlvisitor.GlobalVisitor;
import io.mycat.backend.mysql.nio.handler.builder.sqlvisitor.PushDownVisitor;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.DistinctHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.HavingHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.LimitHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.MultiNodeMergeHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.OrderByHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.SendMakeHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.WhereHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.groupby.DirectGroupByHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.groupby.OrderedGroupByHandler;
import io.mycat.config.ErrorCode;
import io.mycat.config.MycatConfig;
import io.mycat.config.model.SchemaConfig;
import io.mycat.config.model.TableConfig;
import io.mycat.plan.Order;
import io.mycat.plan.PlanNode;
import io.mycat.plan.PlanNode.PlanNodeType;
import io.mycat.plan.common.exception.MySQLOutPutException;
import io.mycat.plan.common.item.Item;
import io.mycat.plan.common.item.function.sumfunc.ItemSum;
import io.mycat.plan.common.item.function.sumfunc.ItemSum.Sumfunctype;
import io.mycat.plan.node.JoinNode;
import io.mycat.plan.node.QueryNode;
import io.mycat.plan.node.TableNode;
import io.mycat.plan.util.PlanUtil;
import io.mycat.route.RouteResultsetNode;
import io.mycat.server.NonBlockingSession;
import io.mycat.server.parser.ServerParse;
/**
 * Base class for all plan-node handler builders. A builder turns one
 * {@link PlanNode} of the query plan into a chain of
 * {@link DMLResponseHandler}s ({@link #start} .. {@link #currentLast}),
 * appending the common post-processing handlers (where / group by / having /
 * distinct / order by / limit / send-maker) as required.
 */
abstract class BaseHandlerBuilder {

    /** Which kind of backend MySQL node the query must run on. */
    public enum MySQLNodeType {
        MASTER, SLAVE
    }

    /** Process-wide id generator shared by all builders; see {@link #getSequenceId()}. */
    private static AtomicLong sequenceId = new AtomicLong(0);

    protected NonBlockingSession session;
    protected HandlerBuilder hBuilder;
    protected MySQLNodeType nodeType;
    /** First handler of the chain built so far. */
    protected DMLResponseHandler start;
    /** Current last handler of the chain built so far. */
    protected DMLResponseHandler currentLast;
    private PlanNode node;
    protected MycatConfig mycatConfig;
    /** Whether the whole query can be pushed down to the backend as-is. */
    protected boolean canPushDown = false;
    /** Whether the common handlers (group by, order by, limit, ...) are needed. */
    protected boolean needCommon = true;
    /** Whether a WhereHandler must be appended to filter rows. */
    protected boolean needWhereHandler = true;
    /** SQL pushed down verbatim from the user does not need a SendMakeHandler. */
    protected boolean needSendMaker = true;

    protected BaseHandlerBuilder(NonBlockingSession session, PlanNode node, HandlerBuilder hBuilder) {
        this.session = session;
        // inside an explicit transaction, or with autocommit off, reads must go to the master
        this.nodeType = session.getSource().isTxstart() || !session.getSource().isAutocommit() ? MySQLNodeType.MASTER
                : MySQLNodeType.SLAVE;
        this.node = node;
        this.hBuilder = hBuilder;
        this.mycatConfig = MycatServer.getInstance().getConfig();
        if (mycatConfig.getSchemas().isEmpty())
            throw new MySQLOutPutException(ErrorCode.ER_QUERYHANDLER, "", "current router config is empty!");
    }

    /** @return the last handler of the built chain (callers attach their next handler to it). */
    public DMLResponseHandler getEndHandler() {
        return currentLast;
    }

    /**
     * Builds the correct handler chain for this node: either a nest-loop build,
     * a merge build (fully push-down-able node), or the generic pre + own build,
     * followed by the common handlers and the final SendMakeHandler.
     */
    public final void build() {
        List<DMLResponseHandler> preHandlers = null;
        // whether the join strategy was switched to nest-loop
        boolean joinStrategyed = isNestLoopStrategy(node);
        if (joinStrategyed) {
            nestLoopBuild();
        // } else if (!node.isExsitView() && (node.getUnGlobalTableCount() == 0 || !PlanUtil.existShardTable(node))) {// node already optimized as global
        // noShardBuild();
        } else if (canDoAsMerge()) {
            mergeBuild();
        } else {
            preHandlers = buildPre();
            buildOwn();
        }
        if (needCommon)
            buildCommon();
        if (needSendMaker) {
            // a view/subquery alias on the parent overrides the node's own alias
            String tbAlias = node.getAlias();
            if (node.getParent() != null && node.getParent().getSubAlias() != null)
                tbAlias = node.getParent().getSubAlias();
            SendMakeHandler sh = new SendMakeHandler(getSequenceId(), session, node.getColumnsSelected(), tbAlias);
            addHandler(sh);
        }
        // the pre-handler chains feed into the start of this node's own chain
        if (preHandlers != null) {
            for (DMLResponseHandler preHandler : preHandlers) {
                preHandler.setNextHandler(start);
            }
        }
    }

    /**
     * Builds the chain for the nest-loop join strategy: even when the where /
     * other-join-on filters are empty, the strategy filters act as the filter
     * condition. Subclasses that support nest-loop must override.
     */
    protected void nestLoopBuild() {
        throw new MySQLOutPutException(ErrorCode.ER_QUERYHANDLER, "", "not implement yet, node type["+node.type()+"]" );
    }

    /**
     * Whether this node can be handled as a plain merge: an ER-relation join,
     * a global-table optimized node, or a simple table node.
     */
    protected boolean canDoAsMerge() {
        return false;
    }

    /** Merge-build hook; only meaningful when {@link #canDoAsMerge()} returns true. */
    protected void mergeBuild() {
        //
    }

    /** Builds the handler chains of this node's children; returned handlers are linked to {@link #start}. */
    protected abstract List<DMLResponseHandler> buildPre();

    /**
     * Builds this node's own handlers.
     */
    protected abstract void buildOwn();

    /**
     * No sharding table involved: the whole statement can be pushed down to a
     * single data node (the node's un-global table count is 0, so the SQL only
     * needs to be reassembled and sent to one node).
     */
    protected final void noShardBuild() {
        this.needCommon = false;
        String sql = null;
        if (node.getParent() == null) {// root node: the original SQL can be reused verbatim
            sql = node.getSql();
        }
        // the node may come from a view, in which case the SQL must be regenerated
        if (sql == null) {
            GlobalVisitor visitor = new GlobalVisitor(node, true);
            visitor.visit();
            sql = visitor.getSql().toString();
        } else {
            needSendMaker = false;
        }
        RouteResultsetNode[] rrss = getTableSources(sql);
        hBuilder.checkRRSS(rrss);
        MultiNodeMergeHandler mh = new MultiNodeMergeHandler(getSequenceId(), rrss, session.getSource().isAutocommit(),
                session, null);
        addHandler(mh);
    }

    /**
     * Appends the handlers common to every node: where, group by, having,
     * distinct, order by and limit (the SendMakeHandler is appended later by
     * {@link #build()}).
     */
    protected void buildCommon() {
        if (node.getWhereFilter() != null && needWhereHandler) {
            WhereHandler wh = new WhereHandler(getSequenceId(), session, node.getWhereFilter());
            addHandler(wh);
        }
        /* need groupby handler */
        if (nodeHasGroupBy(node)) {
            boolean needOrderBy = (node.getGroupBys().size() > 0) ? isOrderNeeded(node, node.getGroupBys()) : false;
            // DISTINCT aggregates and GROUP_CONCAT cannot use the direct group-by path
            boolean canDirectGroupBy = true;
            List<ItemSum> sumRefs = new ArrayList<ItemSum>();
            for (ItemSum funRef : node.sumFuncs) {
                if (funRef.has_with_distinct() || funRef.sumType().equals(Sumfunctype.GROUP_CONCAT_FUNC))
                    canDirectGroupBy = false;
                sumRefs.add(funRef);
            }
            if (needOrderBy) {
                if (canDirectGroupBy) {
                    // we go direct groupby
                    DirectGroupByHandler gh = new DirectGroupByHandler(getSequenceId(), session, node.getGroupBys(),
                            sumRefs);
                    addHandler(gh);
                } else {
                    // sort first, then group the already-ordered stream
                    OrderByHandler oh = new OrderByHandler(getSequenceId(), session, node.getGroupBys());
                    addHandler(oh);
                    OrderedGroupByHandler gh = new OrderedGroupByHandler(getSequenceId(), session, node.getGroupBys(),
                            sumRefs);
                    addHandler(gh);
                }
            } else {// @bug 1052 canDirectGroupby condition we use
                // directgroupby already
                OrderedGroupByHandler gh = new OrderedGroupByHandler(getSequenceId(), session, node.getGroupBys(),
                        sumRefs);
                addHandler(gh);
            }
        }
        // having
        if (node.getHavingFilter() != null) {
            HavingHandler hh = new HavingHandler(getSequenceId(), session, node.getHavingFilter());
            addHandler(hh);
        }
        if (node.isDistinct() && node.getOrderBys().size() > 0) {
            // distinct and order by both exists
            List<Order> mergedOrders = mergeOrderBy(node.getColumnsSelected(), node.getOrderBys());
            if (mergedOrders == null) {
                // can not merge,need distinct then order by
                DistinctHandler dh = new DistinctHandler(getSequenceId(), session, node.getColumnsSelected());
                addHandler(dh);
                OrderByHandler oh = new OrderByHandler(getSequenceId(), session, node.getOrderBys());
                addHandler(oh);
            } else {
                DistinctHandler dh = new DistinctHandler(getSequenceId(), session, node.getColumnsSelected(),
                        mergedOrders);
                addHandler(dh);
            }
        } else {
            if (node.isDistinct()) {
                DistinctHandler dh = new DistinctHandler(getSequenceId(), session, node.getColumnsSelected());
                addHandler(dh);
            }
            // order by
            if (node.getOrderBys().size() > 0) {
                if (node.getGroupBys().size() > 0) {
                    // group-by output may already satisfy the order-by
                    if (!PlanUtil.orderContains(node.getGroupBys(), node.getOrderBys())) {
                        OrderByHandler oh = new OrderByHandler(getSequenceId(), session, node.getOrderBys());
                        addHandler(oh);
                    }
                } else if (isOrderNeeded(node, node.getOrderBys())) {
                    OrderByHandler oh = new OrderByHandler(getSequenceId(), session, node.getOrderBys());
                    addHandler(oh);
                }
            }
        }
        if (node.getLimitTo() > 0) {
            LimitHandler lh = new LimitHandler(getSequenceId(), session, node.getLimitFrom(), node.getLimitTo());
            addHandler(lh);
        }
    }

    /**
     * Appends a handler to the end of the chain and propagates the
     * push-down flag to it.
     */
    protected void addHandler(DMLResponseHandler bh) {
        if (currentLast == null) {
            start = bh;
            currentLast = bh;
        } else {
            currentLast.setNextHandler(bh);
            currentLast = bh;
        }
        bh.setAllPushDown(canPushDown);
    }

    /*----------------------------- helper method -------------------*/

    /** A table node carrying nest-loop filters means the join strategy was switched to nest-loop. */
    private boolean isNestLoopStrategy(PlanNode node) {
        if (node.type() == PlanNodeType.TABLE && node.getNestLoopFilters() != null)
            return true;
        return false;
    }

    /**
     * Whether an OrderByHandler is needed for this node: if the handlers below
     * this node already emit rows sorted by {@code orderBys}, no extra sort is
     * required.
     *
     * @param node
     * @param orderBys
     * @return
     */
    private boolean isOrderNeeded(PlanNode node, List<Order> orderBys) {
        if (node.isGlobaled() || node instanceof TableNode || PlanUtil.isERNode(node))
            return false;
        else if (node instanceof JoinNode) {
            return !isJoinNodeOrderMatch((JoinNode) node, orderBys);
        } else if (node instanceof QueryNode) {
            return !isQueryNodeOrderMatch((QueryNode) node, orderBys);
        }
        return true;
    }

    /**
     * A join node's natural ordering is recorded in its left/right
     * join-on-orders; checks whether that ordering already satisfies the
     * requested one.
     *
     * @param jn
     * @param orderBys
     *            the target ordering
     * @return
     */
    private boolean isJoinNodeOrderMatch(JoinNode jn, List<Order> orderBys) {
        // collect the leading prefix of orderBys that consists of on-condition
        // columns, e.g. jn.onCond = (t1.id=t2.id) and
        // orderBys = t1.id,t2.id,t1.name gives onOrders = {t1.id,t2.id}
        List<Order> onOrders = new ArrayList<Order>();
        List<Order> leftOnOrders = jn.getLeftJoinOnOrders();
        List<Order> rightOnOrders = jn.getRightJoinOnOrders();
        for (Order orderBy : orderBys) {
            if (leftOnOrders.contains(orderBy) || rightOnOrders.contains(orderBy)) {
                onOrders.add(orderBy);
            } else {
                break;
            }
        }
        if (onOrders.isEmpty()) {
            // a join node's output is always ordered by its join-on condition,
            // so an ordering that does not start with it cannot match
            return false;
        } else {
            List<Order> remainOrders = orderBys.subList(onOrders.size(), orderBys.size());
            if (remainOrders.isEmpty()) {
                return true;
            } else {
                // the remaining orders must be satisfied by the matching child's own ordering
                List<Order> pushedOrders = PlanUtil.getPushDownOrders(jn, remainOrders);
                if (jn.isLeftOrderMatch()) {
                    List<Order> leftChildOrders = jn.getLeftNode().getOrderBys();
                    List<Order> leftRemainOrders = leftChildOrders.subList(leftOnOrders.size(), leftChildOrders.size());
                    if (PlanUtil.orderContains(leftRemainOrders, pushedOrders))
                        return true;
                } else if (jn.isRightOrderMatch()) {
                    List<Order> rightChildOrders = jn.getRightNode().getOrderBys();
                    List<Order> rightRemainOrders = rightChildOrders.subList(rightOnOrders.size(),
                            rightChildOrders.size());
                    if (PlanUtil.orderContains(rightRemainOrders, pushedOrders))
                        return true;
                }
                return false;
            }
        }
    }

    /**
     * Whether the child of a query node already emits rows in the requested order.
     *
     * @param qn
     * @param orderBys
     *            the target ordering
     * @return
     */
    private boolean isQueryNodeOrderMatch(QueryNode qn, List<Order> orderBys) {
        List<Order> childOrders = qn.getChild().getOrderBys();
        List<Order> pushedOrders = PlanUtil.getPushDownOrders(qn, orderBys);
        return PlanUtil.orderContains(childOrders, pushedOrders);
    }

    /**
     * Tries to merge the order-by columns into the selected-columns order so a
     * single DistinctHandler can both deduplicate and sort; returns null when
     * an order-by item is not among the selected columns (merge impossible).
     *
     * @param columnsSelected
     * @param orderBys
     * @return
     */
    private List<Order> mergeOrderBy(List<Item> columnsSelected, List<Order> orderBys) {
        List<Integer> orderIndexes = new ArrayList<Integer>();
        List<Order> newOrderByList = new ArrayList<Order>();
        for (Order orderBy : orderBys) {
            Item column = orderBy.getItem();
            int index = columnsSelected.indexOf(column);
            if (index < 0)
                return null;
            else
                orderIndexes.add(index);
            Order newOrderBy = new Order(columnsSelected.get(index), orderBy.getSortOrder());
            newOrderByList.add(newOrderBy);
        }
        // remaining selected columns are appended as ASC tie-breakers
        for (int index = 0; index < columnsSelected.size(); index++) {
            if (!orderIndexes.contains(index)) {
                Order newOrderBy = new Order(columnsSelected.get(index), SQLOrderingSpecification.ASC);
                newOrderByList.add(newOrderBy);
            }
        }
        return newOrderByList;
    }

    /** A node "has group by" when it aggregates or groups. */
    protected static boolean nodeHasGroupBy(PlanNode arg) {
        return (arg.sumFuncs.size() > 0 || arg.getGroupBys().size() > 0);
    }

    /** @return the next unique handler id (process-wide). */
    protected static long getSequenceId() {
        return sequenceId.incrementAndGet();
    }

    /*----------------- data-source computation: start ------------------*/

    // NOTE(review): sqlVisitor and simpleVisited are currently unused here —
    // presumably kept for subclass overrides; confirm before removing.
    protected void buildMergeHandler(PlanNode node, RouteResultsetNode[] rrssArray, PushDownVisitor sqlVisitor,
            boolean simpleVisited) {
        hBuilder.checkRRSS(rrssArray);
        MultiNodeMergeHandler mh = null;
        // group-by ordering takes precedence when present
        List<Order> orderBys = node.getGroupBys().size() > 0 ? node.getGroupBys() : node.getOrderBys();
        mh = new MultiNodeMergeHandler(getSequenceId(), rrssArray, session.getSource().isAutocommit(), session,
                orderBys);
        addHandler(mh);
    }

    /** Routes {@code sql} to the session schema's single data node. */
    protected RouteResultsetNode[] getTableSources(String sql) {
        String schema = session.getSource().getSchema();
        SchemaConfig schemacfg = mycatConfig.getSchemas().get(schema);
        RouteResultsetNode rrss = new RouteResultsetNode(schemacfg.getDataNode(), ServerParse.SELECT, sql);
        return new RouteResultsetNode[]{rrss};
    }

    /** @return the table config for {@code schema}.{@code table}, or null when the schema is unknown. */
    protected TableConfig getTableConfig(String schema, String table) {
        SchemaConfig schemaConfig = this.mycatConfig.getSchemas().get(schema);
        if (schemaConfig == null)
            return null;
        return schemaConfig.getTables().get(table);
    }

    /*----------------- data-source computation: end ------------------*/
}

View File

@@ -0,0 +1,111 @@
package io.mycat.backend.mysql.nio.handler.builder;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.log4j.Logger;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.MultiNodeMergeHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.OutputHandler;
import io.mycat.plan.PlanNode;
import io.mycat.plan.node.JoinNode;
import io.mycat.plan.node.MergeNode;
import io.mycat.plan.node.NoNameNode;
import io.mycat.plan.node.QueryNode;
import io.mycat.plan.node.TableNode;
import io.mycat.route.RouteResultsetNode;
import io.mycat.server.NonBlockingSession;
/**
 * Entry point for turning a complete query plan ({@link PlanNode} tree) into a
 * runnable chain of DML response handlers terminated by an
 * {@link OutputHandler} that writes results back to the client.
 */
public class HandlerBuilder {
    // NOTE(review): other classes in this change use SLF4J; this one uses
    // log4j directly — consider unifying.
    private static Logger logger = Logger.getLogger(HandlerBuilder.class);

    private PlanNode node;
    private NonBlockingSession session;
    /** Final handler of the chain: streams the merged result to the client. */
    private OutputHandler fh;
    /** Route nodes already claimed by some merge handler of this query. */
    private Set<RouteResultsetNode> rrsNodes = new HashSet<RouteResultsetNode>();

    public HandlerBuilder(PlanNode node, NonBlockingSession session) {
        this.node = node;
        this.session = session;
    }

    /**
     * Builds the full handler chain (without starting it) and returns every
     * route node the chain will touch, collected from all merge handlers.
     */
    public List<RouteResultsetNode> buildRouteSources() {
        List<RouteResultsetNode> list = new ArrayList<RouteResultsetNode>();
        BaseHandlerBuilder builder = createBuilder(session, node, this);
        builder.build();
        fh = new OutputHandler(BaseHandlerBuilder.getSequenceId(), session, false);
        DMLResponseHandler endHandler = builder.getEndHandler();
        endHandler.setNextHandler(fh);
        for (DMLResponseHandler handler : fh.getMerges()) {
            MultiNodeMergeHandler mergeHandler = (MultiNodeMergeHandler) handler;
            for (int i = 0; i < mergeHandler.getRouteSources().length; i++) {
                list.add(mergeHandler.getRouteSources()[i]);
            }
        }
        return list;
    }

    /**
     * Registers route nodes, bumping a node's multiplex number until it no
     * longer collides with an already-registered one.
     * NOTE(review): termination assumes multiplexNum participates in
     * RouteResultsetNode.equals/hashCode — otherwise this loops forever; confirm.
     */
    public void checkRRSS(RouteResultsetNode[] rrssArray) {
        for (RouteResultsetNode rrss : rrssArray) {
            while (rrsNodes.contains(rrss)) {
                rrss.getMultiplexNum().incrementAndGet();
            }
            rrsNodes.add(rrss);
        }
    }

    /**
     * Starts every merge (leaf) handler reachable below the given handler.
     */
    public static void startHandler(DMLResponseHandler handler) throws Exception {
        for (DMLResponseHandler startHandler : handler.getMerges()) {
            MultiNodeMergeHandler mergeHandler = (MultiNodeMergeHandler) startHandler;
            mergeHandler.execute();
        }
    }

    /**
     * Builds the handler chain for one plan node and returns its end handler.
     *
     * @param node
     * @return
     */
    public DMLResponseHandler buildNode(NonBlockingSession session, PlanNode node) {
        BaseHandlerBuilder builder = createBuilder(session, node, this);
        builder.build();
        return builder.getEndHandler();
    }

    /**
     * Builds the whole chain, attaches the client-facing OutputHandler and
     * starts execution; {@code hasNext} signals a multi-statement result.
     */
    public void build(boolean hasNext) throws Exception {
        long startTime = System.nanoTime();
        DMLResponseHandler endHandler = buildNode(session, node);
        fh = new OutputHandler(BaseHandlerBuilder.getSequenceId(), session, hasNext);
        endHandler.setNextHandler(fh);
        HandlerBuilder.startHandler(fh);
        long endTime = System.nanoTime();
        logger.info("HandlerBuilder.build cost:" + (endTime - startTime));
    }

    /** Dispatches on the plan-node type to the matching concrete builder. */
    private BaseHandlerBuilder createBuilder(final NonBlockingSession session, PlanNode node, HandlerBuilder context) {
        switch (node.type()) {
        case TABLE: {
            return new TableNodeHandlerBuilder(session, (TableNode) node, this);
        }
        case JOIN: {
            return new JoinNodeHandlerBuilder(session, (JoinNode) node, this);
        }
        case MERGE: {
            return new MergeNodeHandlerBuilder(session, (MergeNode) node, this);
        }
        case QUERY:
            return new QueryNodeHandlerBuilder(session, (QueryNode) node, this);
        case NONAME:
            return new NoNameNodeHandlerBuilder(session, (NoNameNode) node, this);
        default:
        }
        throw new RuntimeException("not supported tree node type:" + node.type());
    }
}

View File

@@ -0,0 +1,191 @@
package io.mycat.backend.mysql.nio.handler.builder;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import io.mycat.backend.mysql.nio.handler.builder.sqlvisitor.PushDownVisitor;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.OrderByHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.TempTableHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.join.JoinHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.join.NotInHandler;
import io.mycat.backend.mysql.nio.handler.util.CallBackHandler;
import io.mycat.config.ErrorCode;
import io.mycat.plan.PlanNode;
import io.mycat.plan.common.exception.MySQLOutPutException;
import io.mycat.plan.common.item.Item;
import io.mycat.plan.common.item.Item.ItemType;
import io.mycat.plan.common.item.ItemInt;
import io.mycat.plan.common.item.ItemString;
import io.mycat.plan.common.item.function.operator.cmpfunc.ItemFuncIn;
import io.mycat.plan.node.JoinNode;
import io.mycat.plan.util.PlanUtil;
import io.mycat.route.RouteResultsetNode;
import io.mycat.server.NonBlockingSession;
/**
 * Handler builder for a {@link JoinNode}: builds the chains for both join
 * children (nest-loop or sort-merge strategy) and appends the join handler
 * itself (JoinHandler or NotInHandler).
 */
class JoinNodeHandlerBuilder extends BaseHandlerBuilder {
    private JoinNode node;

    protected JoinNodeHandlerBuilder(NonBlockingSession session, JoinNode node, HandlerBuilder hBuilder) {
        super(session, node, hBuilder);
        this.node = node;
    }

    /** A global-table or ER-relation join can be executed as a plain merge. */
    @Override
    public boolean canDoAsMerge() {
        return PlanUtil.isGlobalOrER(node);
    }

    /**
     * Pushes the whole join down: generates the push-down SQL, routes it and
     * builds a single MultiNodeMergeHandler over the resulting route nodes.
     */
    @Override
    public void mergeBuild() {
        try {
            this.needWhereHandler = false;
            this.canPushDown = !node.existUnPushDownGroup();
            PushDownVisitor pdVisitor = new PushDownVisitor(node, true);
            MergeBuilder mergeBuilder = new MergeBuilder(session, node, needCommon, needSendMaker, pdVisitor);
            RouteResultsetNode[] rrssArray = mergeBuilder.construct();
            boolean simpleVisited = mergeBuilder.isSimpleVisited();
            this.needCommon = mergeBuilder.getNeedCommonFlag();
            this.needSendMaker = mergeBuilder.getNeedSendMakerFlag();
            buildMergeHandler(node, rrssArray, pdVisitor, simpleVisited);
        } catch (Exception e) {
            // preserve the cause so routing/parsing failures stay diagnosable
            throw new MySQLOutPutException(ErrorCode.ER_QUERYHANDLER, "", "join node mergebuild exception!", e);
        }
    }

    /**
     * Builds the child chains according to the join strategy.
     *
     * NESTLOOP: build the small side first into a TempTableHandler; once the
     * temp table is filled, a callback derives IN-filters from its values,
     * builds the big side with those filters and starts it.
     * SORTMERGE: build both sides, each sorted on its join-on columns.
     *
     * @return the handlers that {@link #build()} must link to this node's chain
     */
    @Override
    public List<DMLResponseHandler> buildPre() {
        List<DMLResponseHandler> pres = new ArrayList<DMLResponseHandler>();
        PlanNode left = node.getLeftNode();
        PlanNode right = node.getRightNode();
        switch (node.getStrategy()) {
        case NESTLOOP:
            // the side without nest-loop filters is the "small" (driving) side
            final boolean isLeftSmall = left.getNestLoopFilters() == null;
            final PlanNode tnSmall = isLeftSmall ? left : right;
            final PlanNode tnBig = isLeftSmall ? right : left;
            // decide which key column to pass through: only one key filter is
            // forwarded to the big side, preferring a plain column item
            List<Item> keySources = isLeftSmall ? node.getLeftKeys() : node.getRightKeys();
            List<Item> keyToPasses = isLeftSmall ? node.getRightKeys() : node.getLeftKeys();
            int columnIndex = 0;
            for (int index = 0; index < keyToPasses.size(); index++) {
                Item keyToPass = keyToPasses.get(index);
                if (keyToPass.type().equals(ItemType.FIELD_ITEM)) {
                    columnIndex = index;
                    break;
                }
            }
            final Item keySource = keySources.get(columnIndex);
            final Item keyToPass = keyToPasses.get(columnIndex);

            DMLResponseHandler endHandler = buildJoinChild(tnSmall, isLeftSmall);
            final TempTableHandler tempHandler = new TempTableHandler(getSequenceId(), session, keySource);
            endHandler.setNextHandler(tempHandler);
            tempHandler.setLeft(isLeftSmall);
            pres.add(tempHandler);
            CallBackHandler tempDone = new CallBackHandler() {
                // fires after the small side finished filling the temp table
                @Override
                public void call() throws Exception {
                    Set<String> valueSet = tempHandler.getValueSet();
                    buildNestFilters(tnBig, keyToPass, valueSet, tempHandler.getMaxPartSize());
                    DMLResponseHandler bigLh = buildJoinChild(tnBig, !isLeftSmall);
                    bigLh.setNextHandler(tempHandler.getNextHandler());
                    tempHandler.setCreatedHandler(bigLh);
                    // start the freshly built big-side chain
                    HandlerBuilder.startHandler(bigLh);
                }
            };
            tempHandler.setTempDoneCallBack(tempDone);
            break;
        case SORTMERGE:
            DMLResponseHandler lh = buildJoinChild(left, true);
            pres.add(lh);
            DMLResponseHandler rh = buildJoinChild(right, false);
            pres.add(rh);
            break;
        default:
            throw new MySQLOutPutException(ErrorCode.ER_QUERYHANDLER, "","strategy ["+node.getStrategy()+"] not implement yet!" );
        }
        return pres;
    }

    /**
     * Builds one join child and, when its output is not already ordered on the
     * join-on columns, appends an OrderByHandler for that side.
     */
    private DMLResponseHandler buildJoinChild(PlanNode child, boolean isLeft) {
        DMLResponseHandler endHandler = hBuilder.buildNode(session, child);
        if (isLeft) {
            if (!node.isLeftOrderMatch()) {
                OrderByHandler oh = new OrderByHandler(getSequenceId(), session, node.getLeftJoinOnOrders());
                endHandler.setNextHandler(oh);
                endHandler = oh;
            }
            endHandler.setLeft(true);
        } else {
            if (!node.isRightOrderMatch()) {
                OrderByHandler oh = new OrderByHandler(getSequenceId(), session, node.getRightJoinOnOrders());
                endHandler.setNextHandler(oh);
                endHandler = oh;
            }
        }
        return endHandler;
    }

    /** Appends the join handler itself: NotInHandler for NOT IN joins, JoinHandler otherwise. */
    @Override
    public void buildOwn() {
        if (node.isNotIn()) {
            NotInHandler nh = new NotInHandler(getSequenceId(), session, node.getLeftJoinOnOrders(),
                    node.getRightJoinOnOrders());
            addHandler(nh);
        } else {
            JoinHandler jh = new JoinHandler(getSequenceId(), session, node.isLeftOuterJoin(),
                    node.getLeftJoinOnOrders(), node.getRightJoinOnOrders(), node.getOtherJoinOnFilter());
            addHandler(jh);
        }
    }

    /**
     * Derives the big side's nest-loop filters from the temp-table values:
     * the values are chunked into IN-lists of at most {@code maxPartSize}
     * entries each; null values are skipped (null never matches a join key).
     * When no value survives, an always-false filter (0) is installed so the
     * big side returns no rows.
     *
     * @param tnBig      the big (probed) side of the join
     * @param keyToPass  the join key as seen by this node; pushed down to tnBig
     * @param valueSet   distinct key values collected from the small side
     * @param maxPartSize maximum number of values per IN-list
     */
    protected void buildNestFilters(PlanNode tnBig, Item keyToPass, Set<String> valueSet, int maxPartSize) {
        List<Item> strategyFilters = tnBig.getNestLoopFilters();
        Item keyInBig = PlanUtil.pushDownItem(node, keyToPass);
        List<Item> partList = new ArrayList<Item>();
        for (String value : valueSet) {
            if (value == null) { // is null will never join
                continue;
            }
            partList.add(new ItemString(value));
            if (partList.size() >= maxPartSize) {
                strategyFilters.add(makeInFilter(keyInBig, partList));
                partList = new ArrayList<Item>();
            }
        }
        // flush the trailing partial chunk
        if (!partList.isEmpty()) {
            strategyFilters.add(makeInFilter(keyInBig, partList));
        }
        // no usable values: force an empty result on the big side
        if (strategyFilters.isEmpty()) {
            strategyFilters.add(new ItemInt(0));
        }
    }

    /** Builds `key IN (values...)` as an ItemFuncIn. */
    private static ItemFuncIn makeInFilter(Item key, List<Item> values) {
        List<Item> argList = new ArrayList<Item>(values.size() + 1);
        argList.add(key);
        argList.addAll(values);
        return new ItemFuncIn(argList, false);
    }
}

View File

@@ -0,0 +1,95 @@
package io.mycat.backend.mysql.nio.handler.builder;
import java.sql.SQLNonTransientException;
import java.sql.SQLSyntaxErrorException;
import com.alibaba.druid.sql.ast.statement.SQLSelectStatement;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser;
import com.alibaba.druid.sql.parser.SQLStatementParser;
import io.mycat.MycatServer;
import io.mycat.backend.mysql.nio.handler.builder.BaseHandlerBuilder.MySQLNodeType;
import io.mycat.backend.mysql.nio.handler.builder.sqlvisitor.PushDownVisitor;
import io.mycat.cache.LayerCachePool;
import io.mycat.config.MycatConfig;
import io.mycat.config.model.SchemaConfig;
import io.mycat.plan.PlanNode;
import io.mycat.route.RouteResultset;
import io.mycat.route.RouteResultsetNode;
import io.mycat.route.parser.druid.DruidParser;
import io.mycat.route.parser.druid.MycatSchemaStatVisitor;
import io.mycat.route.parser.druid.impl.DruidBaseSelectParser;
import io.mycat.route.util.RouterUtil;
import io.mycat.server.NonBlockingSession;
import io.mycat.server.parser.ServerParse;
/**
 * Computes the set of route nodes a fully push-down-able plan node must run
 * on: regenerates the push-down SQL, re-parses it with Druid and routes it
 * through the standard router.
 */
public class MergeBuilder {
    // NOTE(review): simpleVisited is never set to true and nodeType/session are
    // only written here — presumably placeholders for later work; confirm.
    private boolean simpleVisited;
    private boolean needCommonFlag;
    private boolean needSendMakerFlag;
    private PlanNode node;
    private NonBlockingSession session;
    private MySQLNodeType nodeType;
    private String schema;
    private MycatConfig mycatConfig;
    private PushDownVisitor pdVisitor;

    public MergeBuilder(NonBlockingSession session, PlanNode node, boolean needCommon, boolean needSendMaker,
            PushDownVisitor pdVisitor) {
        this.node = node;
        this.simpleVisited = false;
        this.needCommonFlag = needCommon;
        this.needSendMakerFlag = needSendMaker;
        this.session = session;
        this.schema = session.getSource().getSchema();
        // inside a transaction (or autocommit off) the query must hit the master
        this.nodeType = session.getSource().isTxstart() || !session.getSource().isAutocommit() ? MySQLNodeType.MASTER
                : MySQLNodeType.SLAVE;
        this.mycatConfig = MycatServer.getInstance().getConfig();
        this.pdVisitor = pdVisitor;
    }

    /**
     * Merges the node's conditions into one SQL statement and computes the
     * route nodes it must be sent to.
     *
     * @return the routed data nodes for the push-down SQL
     * @throws SQLNonTransientException when routing fails
     */
    public RouteResultsetNode[] construct() throws SQLNonTransientException {
        pdVisitor.visit();
        String sql = pdVisitor.getSql().toString();
        // re-parse the generated SQL so the ordinary Druid route path can be reused
        SQLStatementParser parser = new MySqlStatementParser(sql);
        SQLSelectStatement select = (SQLSelectStatement) parser.parseStatement();
        MycatSchemaStatVisitor visitor = new MycatSchemaStatVisitor();
        DruidParser druidParser = new DruidBaseSelectParser();
        RouteResultset rrs = new RouteResultset(sql, ServerParse.SELECT, null);
        LayerCachePool pool = MycatServer.getInstance().getRouterservice().getTableId2DataNodeCache();
        SchemaConfig schemaConfig = mycatConfig.getSchemas().get(schema);
        rrs = RouterUtil.routeFromParser(druidParser, schemaConfig, rrs, select, sql, pool, visitor);
        return rrs.getNodes();
    }

    /* -------------------- getter/setter -------------------- */
    public boolean getNeedCommonFlag() {
        return needCommonFlag;
    }

    public boolean isSimpleVisited() {
        return simpleVisited;
    }

    public boolean getNeedSendMakerFlag() {
        return needSendMakerFlag;
    }

    public void setNodeType(MySQLNodeType nodeType) {
        this.nodeType = nodeType;
    }

    public void setSchema(String schema) {
        this.schema = schema;
    }
}

View File

@@ -0,0 +1,41 @@
package io.mycat.backend.mysql.nio.handler.builder;
import java.util.ArrayList;
import java.util.List;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.DistinctHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.UnionHandler;
import io.mycat.plan.PlanNode;
import io.mycat.plan.node.MergeNode;
import io.mycat.server.NonBlockingSession;
/**
 * Builds the handler chain for a merge (UNION / UNION ALL) plan node: one
 * upstream chain per child, joined by a UnionHandler, plus a DistinctHandler
 * when UNION (deduplicating) semantics are required.
 */
class MergeNodeHandlerBuilder extends BaseHandlerBuilder {
    private MergeNode node;

    protected MergeNodeHandlerBuilder(NonBlockingSession session, MergeNode node, HandlerBuilder hBuilder) {
        super(session, node, hBuilder);
        this.node = node;
    }

    /** Builds one upstream handler chain for each child of the merge node. */
    @Override
    protected List<DMLResponseHandler> buildPre() {
        List<DMLResponseHandler> childHandlers = new ArrayList<DMLResponseHandler>();
        for (PlanNode childNode : node.getChildren()) {
            childHandlers.add(hBuilder.buildNode(session, childNode));
        }
        return childHandlers;
    }

    /** Appends the union handler, and a distinct handler for UNION (not UNION ALL). */
    @Override
    public void buildOwn() {
        int childCount = node.getChildren().size();
        UnionHandler unionHandler = new UnionHandler(getSequenceId(), session, node.getComeInFields(), childCount);
        addHandler(unionHandler);
        if (node.isUnion()) {
            addHandler(new DistinctHandler(getSequenceId(), session, node.getColumnsSelected()));
        }
    }
}

View File

@@ -0,0 +1,47 @@
package io.mycat.backend.mysql.nio.handler.builder;
import java.util.ArrayList;
import java.util.List;
import io.mycat.backend.mysql.nio.handler.builder.sqlvisitor.PushDownVisitor;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.MultiNodeMergeHandler;
import io.mycat.plan.node.NoNameNode;
import io.mycat.route.RouteResultsetNode;
import io.mycat.server.NonBlockingSession;
/**
 * Handler builder for table-less queries such as "select 1 as name".
 *
 * @author chenzifei
 * @CreateTime 2015-03-23
 */
class NoNameNodeHandlerBuilder extends BaseHandlerBuilder {
    private NoNameNode node;

    protected NoNameNodeHandlerBuilder(NonBlockingSession session, NoNameNode node, HandlerBuilder hBuilder) {
        super(session, node, hBuilder);
        this.node = node;
        // A table-less query has no WHERE to evaluate locally and needs no
        // common merge work.
        this.needWhereHandler = false;
        this.needCommon = false;
    }

    /** No child plan nodes: nothing to build upstream. */
    @Override
    public List<DMLResponseHandler> buildPre() {
        return new ArrayList<DMLResponseHandler>();
    }

    /**
     * The whole statement is pushed down as-is; a single merge handler
     * collects the backend result(s).
     */
    @Override
    public void buildOwn() {
        PushDownVisitor visitor = new PushDownVisitor(node, true);
        visitor.visit();
        this.canPushDown = true;
        String sql = visitor.getSql().toString();
        // was: getTableSources(sql.toString()) — sql is already a String,
        // the extra toString() was redundant.
        RouteResultsetNode[] rrss = getTableSources(sql);
        hBuilder.checkRRSS(rrss);
        MultiNodeMergeHandler mh = new MultiNodeMergeHandler(getSequenceId(), rrss, session.getSource().isAutocommit(),
                session, null);
        addHandler(mh);
    }
}

View File

@@ -0,0 +1,33 @@
package io.mycat.backend.mysql.nio.handler.builder;
import java.util.ArrayList;
import java.util.List;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler;
import io.mycat.plan.PlanNode;
import io.mycat.plan.node.QueryNode;
import io.mycat.server.NonBlockingSession;
/**
 * Builder for a query (sub-select wrapper) plan node: it simply delegates
 * to its single child and contributes no handler of its own.
 */
class QueryNodeHandlerBuilder extends BaseHandlerBuilder {

    private QueryNode node;

    protected QueryNodeHandlerBuilder(NonBlockingSession session,
            QueryNode node, HandlerBuilder hBuilder) {
        super(session, node, hBuilder);
        this.node = node;
    }

    /** Builds the chain for the wrapped child and returns it as the only upstream. */
    @Override
    public List<DMLResponseHandler> buildPre() {
        List<DMLResponseHandler> upstream = new ArrayList<DMLResponseHandler>();
        upstream.add(hBuilder.buildNode(session, node.getChild()));
        return upstream;
    }

    /** Intentionally empty: the wrapped child chain already does all the work. */
    @Override
    public void buildOwn() {
    }
}

View File

@@ -0,0 +1,99 @@
package io.mycat.backend.mysql.nio.handler.builder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import io.mycat.backend.mysql.nio.handler.builder.sqlvisitor.PushDownVisitor;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler;
import io.mycat.config.ErrorCode;
import io.mycat.config.model.TableConfig;
import io.mycat.config.model.TableConfig.TableTypeEnum;
import io.mycat.plan.common.exception.MySQLOutPutException;
import io.mycat.plan.common.item.Item;
import io.mycat.plan.node.TableNode;
import io.mycat.route.RouteResultsetNode;
import io.mycat.server.NonBlockingSession;
/**
 * Handler builder for a single-table plan node: routes the pushed-down SQL
 * via MergeBuilder and appends the merge handler for the resulting nodes.
 */
class TableNodeHandlerBuilder extends BaseHandlerBuilder {
    private TableNode node;
    // Sharding configuration of the table; null when the table is not configured.
    private TableConfig tableConfig = null;

    protected TableNodeHandlerBuilder(NonBlockingSession session, TableNode node, HandlerBuilder hBuilder) {
        super(session, node, hBuilder);
        this.node = node;
        // Whole-node push-down is only possible when no non-pushable
        // aggregate/group function exists.
        this.canPushDown = !node.existUnPushDownGroup();
        this.needWhereHandler = false;
        this.tableConfig = getTableConfig(node.getSchema(), node.getTableName());
    }

    /** A table is a leaf node: nothing to build upstream. */
    @Override
    public List<DMLResponseHandler> buildPre() {
        return new ArrayList<DMLResponseHandler>();
    }

    /**
     * Routes the pushed-down SQL once and builds the merge handler over the
     * resulting route nodes.
     */
    @Override
    public void buildOwn() {
        try {
            PushDownVisitor pdVisitor = new PushDownVisitor(node, true);
            MergeBuilder mergeBuilder = new MergeBuilder(session, node, needCommon, needSendMaker, pdVisitor);
            RouteResultsetNode[] rrssArray = mergeBuilder.construct();
            boolean simpleVisited = mergeBuilder.isSimpleVisited();
            // The merge builder may downgrade the need for common/send-maker steps.
            this.needCommon = mergeBuilder.getNeedCommonFlag();
            this.needSendMaker = mergeBuilder.getNeedSendMakerFlag();
            buildMergeHandler(node, rrssArray, pdVisitor, simpleVisited);
        } catch (Exception e) {
            throw new MySQLOutPutException(ErrorCode.ER_QUERYHANDLER, "", "tablenode buildOwn exception!", e);
        }
    }

    /**
     * Nest-loop join support: routes the table once per generated IN/filter
     * item and unions all resulting route nodes.
     */
    @Override
    protected void nestLoopBuild() {
        try {
            List<Item> filters = node.getNestLoopFilters();
            PushDownVisitor pdVisitor = new PushDownVisitor(node, true);
            if (filters == null || filters.isEmpty())
                throw new MySQLOutPutException(ErrorCode.ER_QUERYHANDLER, "", "unexpected exception!");
            List<RouteResultsetNode> rrssList = new ArrayList<RouteResultsetNode>();
            MergeBuilder mergeBuilder = new MergeBuilder(session, node, needCommon, needSendMaker, pdVisitor);
            if (tableConfig == null || tableConfig.getTableType()==TableTypeEnum.TYPE_GLOBAL_TABLE) {
                // Unconfigured or global table: route each filter variant directly.
                for (Item filter : filters) {
                    node.setWhereFilter(filter);
                    RouteResultsetNode[] rrssArray = mergeBuilder.construct();
                    rrssList.addAll(Arrays.asList(rrssArray));
                }
                if (filters.size() == 1) {
                    // Single filter: no common merge step needed.
                    this.needCommon = false;
                    this.needSendMaker = mergeBuilder.getNeedSendMakerFlag();
                }
            // } else if (!node.isPartitioned()) {
            //     // Skip partition computation when the IN list has too many columns.
            //     for (Item filter : filters) {
            //         node.setWhereFilter(filter);
            //         pdVisitor.visit();
            //         String sql = pdVisitor.getSql().toString();
            //         RouteResultsetNode[] rrssArray = getTableSources(node.getSchema(), node.getTableName(), sql);
            //         rrssList.addAll(Arrays.asList(rrssArray));
            //     }
            } else {
                // Sharded table: refresh the push-down SQL for every filter
                // before routing it.
                boolean tryGlobal = filters.size() == 1;
                for (Item filter : filters) {
                    node.setWhereFilter(filter);
                    pdVisitor.visit();
                    RouteResultsetNode[] rrssArray = mergeBuilder.construct();
                    rrssList.addAll(Arrays.asList(rrssArray));
                }
                if (tryGlobal) {
                    this.needCommon = mergeBuilder.getNeedCommonFlag();
                    this.needSendMaker = mergeBuilder.getNeedSendMakerFlag();
                }
            }
            RouteResultsetNode[] rrssArray = new RouteResultsetNode[rrssList.size()];
            rrssArray = rrssList.toArray(rrssArray);
            buildMergeHandler(node, rrssArray, pdVisitor, mergeBuilder.isSimpleVisited());
        } catch (Exception e) {
            // NOTE(review): the error message here is empty — consider adding context.
            throw new MySQLOutPutException(ErrorCode.ER_QUERYHANDLER, "", "", e);
        }
    }
}

View File

@@ -0,0 +1,323 @@
package io.mycat.backend.mysql.nio.handler.builder.sqlvisitor;
import io.mycat.plan.Order;
import io.mycat.plan.PlanNode;
import io.mycat.plan.PlanNode.PlanNodeType;
import io.mycat.plan.common.item.Item;
import io.mycat.plan.common.item.Item.ItemType;
import io.mycat.plan.node.JoinNode;
import io.mycat.plan.node.MergeNode;
import io.mycat.plan.node.NoNameNode;
import io.mycat.plan.node.QueryNode;
import io.mycat.plan.node.TableNode;
/**
 * Standard SQL generator: emits SQL for exactly the node it is given, because
 * the node is fully global (pushable as-is). Every node must be visited by a
 * fresh GlobalVisitor instance.
 *
 * @author chenzifei
 * @CreateTime 2014-12-10
 */
public class GlobalVisitor extends MysqlVisitor {

    public GlobalVisitor(PlanNode globalQuery, boolean isTopQuery) {
        super(globalQuery, isTopQuery);
    }

    /**
     * Builds the SQL for the wrapped node. On a second call only the
     * replaceable WHERE fragment is regenerated.
     */
    public void visit() {
        if (!visited) {
            replaceableSqlBuilder.clear();
            sqlBuilder = replaceableSqlBuilder.getCurrentElement().getSb();
            switch (query.type()) {
            case TABLE:
                visit((TableNode) query);
                break;
            case JOIN:
                visit((JoinNode) query);
                break;
            case QUERY:
                visit((QueryNode) query);
                break;
            case MERGE:
                visit((MergeNode) query);
                break;
            case NONAME:
                visit((NoNameNode) query);
                break;
            default:
                throw new RuntimeException("not implement yet!");
            }
            visited = true;
        } else {
            // The replaceable WHERE applies to table nodes only; do not recurse.
            buildWhere(query);
        }
    }

    /** Emits a (possibly parenthesized, aliased) single-table SELECT. */
    protected void visit(TableNode query) {
        boolean parentIsQuery = query.getParent() != null && query.getParent().type() == PlanNodeType.QUERY;
        if (query.isSubQuery() && !parentIsQuery && !isTopQuery) {
            sqlBuilder.append(" ( ");
        }
        if (query.isSubQuery() || isTopQuery) {
            buildSelect(query);
            if (query.getTableName() == null)
                return;
            sqlBuilder.append(" from ");
        }
        // Emits the (possibly sharded) physical table name via the base class.
        buildTableName(query, sqlBuilder);
        if (query.isSubQuery() || isTopQuery) {
            buildWhere(query);
            buildGroupBy(query);
            buildHaving(query);
            buildOrderBy(query);
            buildLimit(query);
        }
        if (query.isSubQuery() && !parentIsQuery && !isTopQuery) {
            sqlBuilder.append(" ) ");
            if (query.getAlias() != null) {
                sqlBuilder.append(" ").append(query.getAlias()).append(" ");
            }
        }
        visited = true;
    }

    /** Emits the raw SQL of a table-less (NoName) node, parenthesized as a subquery when needed. */
    protected void visit(NoNameNode query) {
        // To fix: if the view optimizer ever rewrites the NoName node's WHERE
        // and SELECT parts, this must be changed to work like the table node.
        if (!isTopQuery) {
            sqlBuilder.append(" ( ");
        }
        sqlBuilder.append(query.getSql());
        if (!isTopQuery) {
            sqlBuilder.append(" ) ");
            if (query.getAlias() != null) {
                sqlBuilder.append(" ").append(query.getAlias()).append(" ");
            }
        }
    }

    /** Emits SELECT ... FROM (child SQL) alias for a wrapper query node. */
    protected void visit(QueryNode query) {
        if (query.isSubQuery() && !isTopQuery) {
            sqlBuilder.append(" ( ");
        }
        if (query.isSubQuery() || isTopQuery) {
            buildSelect(query);
            sqlBuilder.append(" from ");
        }
        sqlBuilder.append('(');
        PlanNode child = query.getChild();
        // Each node needs its own visitor instance (visitor state is per node).
        MysqlVisitor childVisitor = new GlobalVisitor(child, false);
        childVisitor.visit();
        sqlBuilder.append(childVisitor.getSql()).append(") ").append(child.getAlias());
        if (query.isSubQuery() || isTopQuery) {
            buildWhere(query);
            buildGroupBy(query);
            buildHaving(query);
            buildOrderBy(query);
            buildLimit(query);
        }
        if (query.isSubQuery() && !isTopQuery) {
            sqlBuilder.append(" ) ");
            if (query.getAlias() != null) {
                sqlBuilder.append(" ").append(query.getAlias()).append(" ");
            }
        }
    }

    /** Emits (child) UNION [ALL] (child) ... for a merge node. */
    protected void visit(MergeNode merge) {
        boolean isUnion = merge.isUnion();
        boolean isFirst = true;
        for (PlanNode child : merge.getChildren()) {
            if (isFirst)
                isFirst = false;
            else
                sqlBuilder.append(isUnion ? " UNION " : " UNION ALL ");
            MysqlVisitor childVisitor = new GlobalVisitor(child, true);
            childVisitor.visit();
            sqlBuilder.append("(").append(childVisitor.getSql()).append(")");
        }
    }

    /** Emits a two-sided [LEFT|RIGHT] JOIN with its ON conditions. */
    protected void visit(JoinNode join) {
        if (!isTopQuery) {
            sqlBuilder.append(" ( ");
        }
        if (join.isSubQuery() || isTopQuery) {
            buildSelect(join);
            sqlBuilder.append(" from ");
        }
        PlanNode left = join.getLeftNode();
        PlanNode right = join.getRightNode();
        MysqlVisitor leftVisitor = new GlobalVisitor(left, false);
        leftVisitor.visit();
        sqlBuilder.append(leftVisitor.getSql());
        if (join.getLeftOuter() && join.getRightOuter()) {
            throw new RuntimeException("full outter join 不支持");
        } else if (join.getLeftOuter() && !join.getRightOuter()) {
            sqlBuilder.append(" left");
        } else if (join.getRightOuter() && !join.getLeftOuter()) {
            sqlBuilder.append(" right");
        }
        sqlBuilder.append(" join ");
        MysqlVisitor rightVisitor = new GlobalVisitor(right, false);
        rightVisitor.visit();
        sqlBuilder.append(rightVisitor.getSql());
        // ON conditions: the first one triggers " on "; the rest are joined by " and ".
        StringBuilder joinOnFilterStr = new StringBuilder();
        boolean first = true;
        for (int i = 0; i < join.getJoinFilter().size(); i++) {
            Item filter = join.getJoinFilter().get(i);
            if (first) {
                sqlBuilder.append(" on ");
                first = false;
            } else
                joinOnFilterStr.append(" and ");
            joinOnFilterStr.append(filter);
        }
        if (join.getOtherJoinOnFilter() != null) {
            // NOTE(review): when there is no join filter, " on " is never
            // appended before the other-join-on filter — confirm callers
            // guarantee a non-empty join filter in that case.
            if (first) {
                first = false;
            } else {
                joinOnFilterStr.append(" and ");
            }
            joinOnFilterStr.append(join.getOtherJoinOnFilter());
        }
        sqlBuilder.append(joinOnFilterStr.toString());
        if (join.isSubQuery() || isTopQuery) {
            buildWhere(join);
            buildGroupBy(join);
            buildHaving(join);
            buildOrderBy(join);
            buildLimit(join);
        }
        if (!isTopQuery) {
            sqlBuilder.append(" ) ");
            if (join.getAlias() != null)
                sqlBuilder.append(" ").append(join.getAlias()).append(" ");
        }
    }

    /** Emits the SELECT list (optionally DISTINCT), registering push-down names. */
    protected void buildSelect(PlanNode query) {
        sqlBuilder.append("select ");
        boolean hasDistinct = query.isDistinct();
        boolean first = true;
        StringBuilder sb = new StringBuilder();
        for (Item selected : query.getColumnsSelected()) {
            if (first)
                first = false;
            else
                sb.append(",");
            String pdName = visitPushDownNameSel(selected);
            sb.append(pdName);
        }
        if (hasDistinct)
            sqlBuilder.append(" distinct ");
        sqlBuilder.append(sb);
    }

    /** Emits GROUP BY, quoting constant items and mapping the rest to push-down names. */
    protected void buildGroupBy(PlanNode query) {
        boolean first = true;
        if (query.getGroupBys() != null && query.getGroupBys().size() > 0) {
            sqlBuilder.append(" GROUP BY ");
            for (Order group : query.getGroupBys()) {
                if (first)
                    first = false;
                else
                    sqlBuilder.append(",");
                Item groupCol = group.getItem();
                String pdName = "";
                // Constant group-by items are emitted as quoted literals.
                if (groupCol.basicConstItem())
                    pdName = "'" + groupCol.toString() + "'";
                if (pdName.isEmpty())
                    pdName = visitUnselPushDownName(groupCol, true);
                sqlBuilder.append(pdName).append(" ").append(group.getSortOrder());
            }
        }
    }

    /** Emits HAVING when present. */
    protected void buildHaving(PlanNode query) {
        if (query.getHavingFilter() != null) {
            Item filter = query.getHavingFilter();
            String pdName = visitUnselPushDownName(filter, true);
            sqlBuilder.append(" having ").append(pdName);
        }
    }

    /** Emits ORDER BY, quoting constant items and mapping the rest to push-down names. */
    protected void buildOrderBy(PlanNode query) {
        boolean first = true;
        if (query.getOrderBys() != null && !query.getOrderBys().isEmpty()) {
            sqlBuilder.append(" order by ");
            for (Order order : query.getOrderBys()) {
                if (first) {
                    first = false;
                } else {
                    sqlBuilder.append(",");
                }
                Item orderByCol = order.getItem();
                String pdName = "";
                if (orderByCol.basicConstItem())
                    pdName = "'" + orderByCol.toString() + "'";
                if (pdName.isEmpty())
                    pdName = visitUnselPushDownName(orderByCol, true);
                sqlBuilder.append(pdName).append(" ").append(order.getSortOrder());
            }
        }
    }

    /** Emits LIMIT [from][,to] when either bound is set (-1 means unset). */
    protected void buildLimit(PlanNode query) {
        long limitFrom = query.getLimitFrom();
        long limitTo = query.getLimitTo();
        if (limitFrom == -1 && limitTo == -1) {
            return;
        }
        sqlBuilder.append(" limit ");
        // NOTE(review): if only limitTo is set this emits "limit ,N", which is
        // not valid MySQL — confirm callers always set limitFrom with limitTo.
        if (limitFrom > -1)
            sqlBuilder.append(limitFrom);
        if (limitTo != -1) {
            sqlBuilder.append(",").append(limitTo);
        }
    }

    /* -------------------------- help method ------------------------ */
    /**
     * Computes the push-down expression (with optional alias) for a SELECT
     * item and records the selected-name -> pushed-name mapping.
     */
    @Override
    protected String visitPushDownNameSel(Item item) {
        String orgPushDownName = item.getItemName();
        if(item.type().equals(ItemType.FIELD_ITEM)){
            orgPushDownName = "`"+item.getTableName()+"`.`"+orgPushDownName + "`";
        }
        String pushAlias = null;
        if(item.getPushDownName() != null)
            //already set before
            pushAlias = item.getPushDownName();
        else if (item.getAlias() != null) {
            pushAlias = item.getAlias();
            if (pushAlias.startsWith(Item.FNAF))
                pushAlias = getRandomAliasName();
        } else if (orgPushDownName.length() > MAX_COL_LENGTH) {
            // Name exceeds the maximum column length: generate an alias.
            pushAlias = getRandomAliasName();
        }
        if (pushAlias == null) {
            if (item.type().equals(ItemType.FIELD_ITEM)) {
                pushNameMap.put(orgPushDownName, null);
            } else {
                item.setPushDownName(orgPushDownName);
                pushNameMap.put(orgPushDownName, orgPushDownName);
            }
            return orgPushDownName;
        } else {
            item.setPushDownName(pushAlias);
            pushNameMap.put(orgPushDownName, pushAlias);
            return orgPushDownName + " as `" + pushAlias + "`";
        }
    }
}

View File

@@ -0,0 +1,131 @@
package io.mycat.backend.mysql.nio.handler.builder.sqlvisitor;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.alibaba.druid.sql.ast.SQLHint;
import com.alibaba.druid.sql.dialect.mysql.visitor.MySqlOutputVisitor;
import io.mycat.plan.PlanNode;
import io.mycat.plan.PlanNode.PlanNodeType;
import io.mycat.plan.common.item.Item;
import io.mycat.plan.common.item.Item.ItemType;
import io.mycat.plan.common.ptr.StringPtr;
import io.mycat.plan.node.TableNode;
/**
 * Base class for SQL generators that handle push-down of query nodes — either
 * fully global plans or plans that mix global and non-global tables.
 *
 * @author chenzifei
 * @CreateTime 2014-12-10
 */
public abstract class MysqlVisitor {
    // Maximum column-name length supported by MySQL.
    protected static final int MAX_COL_LENGTH = 255;
    // Maps original selected names to their pushed-down names/aliases.
    protected Map<String, String> pushNameMap = new HashMap<String, String>();
    protected boolean isTopQuery = false;
    protected PlanNode query;
    // Counter used to generate unique random aliases.
    protected long randomIndex = 0L;
    /* True when a non-pushable aggregate exists; in that case no aggregate
     * function is pushed down and aggregation is computed locally. */
    protected boolean existUnPushDownGroup = false;
    protected boolean visited = false;
    // -- start replaceable stringbuilder
    protected ReplaceableStringBuilder replaceableSqlBuilder = new ReplaceableStringBuilder();
    // Scratch builder for the SQL text currently being generated.
    protected StringBuilder sqlBuilder;
    // Late-bound WHERE fragment (can be regenerated without rebuilding the SQL).
    protected StringPtr replaceableWhere = new StringPtr("");
    // -- end replaceable stringbuilder

    public MysqlVisitor(PlanNode query, boolean isTopQuery) {
        this.query = query;
        this.isTopQuery = isTopQuery;
    }

    /** @return the generated SQL as a segmented, replaceable builder */
    public ReplaceableStringBuilder getSql() {
        return replaceableSqlBuilder;
    }

    /** Generates (or refreshes) the SQL for the wrapped node. */
    public abstract void visit();

    /**
     * Appends the backtick-quoted table name, optional sub-alias, and any
     * SQL hints to {@code sb}.
     *
     * @param query the table node being emitted
     */
    protected void buildTableName(TableNode query, StringBuilder sb) {
        sb.append(" `").append(query.getPureName()).append("`");
        String subAlias = query.getSubAlias();
        if (subAlias != null)
            sb.append(" `").append(query.getSubAlias()).append("`");
        List<SQLHint> hintList = query.getHintList();
        if (hintList != null && !hintList.isEmpty()) {
            sb.append(' ');
            boolean isFirst = true;
            for (SQLHint hint : hintList) {
                if (isFirst)
                    isFirst = false;
                else
                    sb.append(" ");
                MySqlOutputVisitor ov = new MySqlOutputVisitor(sb);
                hint.accept(ov);
            }
        }
    }

    /* Emits WHERE into the replaceable fragment so it can be swapped later. */
    protected void buildWhere(PlanNode query) {
        if (!visited)
            replaceableSqlBuilder.getCurrentElement().setRepString(replaceableWhere);
        StringBuilder whereBuilder = new StringBuilder();
        Item filter = query.getWhereFilter();
        if (filter != null) {
            String pdName = visitUnselPushDownName(filter, false);
            whereBuilder.append(" where ").append(pdName);
        }
        replaceableWhere.set(whereBuilder.toString());
        // Refresh sqlBuilder: setting the replaceable fragment closed the
        // previous element, so subsequent appends go to a new one.
        sqlBuilder = replaceableSqlBuilder.getCurrentElement().getSb();
    }

    /** @return true when at least one random alias has been generated */
    public boolean isRandomAliasMade() {
        return randomIndex != 0;
    }

    /** Builds the synthetic alias prefix for a pushed-down aggregate function. */
    public static String getMadeAggAlias(String aggFuncName) {
        StringBuilder builder = new StringBuilder();
        builder.append("_$").append(aggFuncName).append("$_");
        return builder.toString();
    }

    /** Generates a unique random alias name. */
    protected String getRandomAliasName() {
        StringBuilder builder = new StringBuilder();
        builder.append("rpda_").append(randomIndex++);
        return builder.toString();
    }

    /**
     * Computes the push-down expression for a SELECT-list item.
     */
    protected abstract String visitPushDownNameSel(Item o);

    /**
     * Push-down name for items outside the SELECT list (WHERE/GROUP BY/ORDER
     * BY). When {@code canUseAlias} is set, the previously registered alias is
     * reused — except for field items under a JOIN, where the user's original
     * column reference is kept (e.g. "select t1.id,t2.id from t1,t2 order by t1.id").
     */
    public final String visitUnselPushDownName(Item item, boolean canUseAlias) {
        String selName = item.getItemName();
        if (item.type().equals(ItemType.FIELD_ITEM)) {
            selName = "`" + item.getTableName() + "`.`" + selName + "`";
        }
        String nameInMap = pushNameMap.get(selName);
        if (nameInMap != null) {
            item.setPushDownName(nameInMap);
            if (canUseAlias && !(query.type() == PlanNodeType.JOIN && item.type().equals(ItemType.FIELD_ITEM))) {
                selName = nameInMap;
            }
        }
        return selName;
    }
}

View File

@@ -0,0 +1,348 @@
package io.mycat.backend.mysql.nio.handler.builder.sqlvisitor;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import io.mycat.plan.Order;
import io.mycat.plan.PlanNode;
import io.mycat.plan.common.item.Item;
import io.mycat.plan.common.item.Item.ItemType;
import io.mycat.plan.common.item.function.sumfunc.ItemSum;
import io.mycat.plan.node.JoinNode;
import io.mycat.plan.node.TableNode;
/**
 * SQL generator for plans that can only be partially pushed down — unlike the
 * fully-global push-down: a single sharded table, ER-related tables, plans
 * that are not entirely global tables, and so on.
 *
 * @author chenzifei
 */
public class PushDownVisitor extends MysqlVisitor {

    /* Records the ORDER BY actually pushed down to the backend. */
    private List<Order> pushDownOrderBy;

    /* Source-table name per pushed-down SELECT column (null for synthetic columns). */
    private List<String> pushDownTableInfos;

    public PushDownVisitor(PlanNode pushDownQuery, boolean isTopQuery) {
        super(pushDownQuery, isTopQuery);
        this.existUnPushDownGroup = pushDownQuery.existUnPushDownGroup();
        pushDownOrderBy = new ArrayList<Order>();
        pushDownTableInfos = new ArrayList<String>();
    }

    /**
     * Builds the push-down SQL. After the first visit, only the replaceable
     * WHERE fragment is regenerated.
     */
    public void visit() {
        if (!visited) {
            replaceableSqlBuilder.clear();
            sqlBuilder = replaceableSqlBuilder.getCurrentElement().getSb();
            // Once visited, only the table name replacement remains to be done.
            switch (query.type()) {
            case TABLE:
                visit((TableNode) query);
                break;
            case JOIN:
                visit((JoinNode) query);
                break;
            default:
                throw new RuntimeException("not implement yet!");
            }
            visited = true;
        } else {
            // The replaceable WHERE applies to table nodes only; do not recurse.
            buildWhere(query);
        }
    }

    /** Emits a (possibly parenthesized, aliased) single-table SELECT. */
    protected void visit(TableNode query) {
        if (query.isSubQuery() && !isTopQuery) {
            sqlBuilder.append(" ( ");
        }
        if (query.isSubQuery() || isTopQuery) {
            buildSelect(query);
            if (query.getTableName() == null)
                return;
            sqlBuilder.append(" from ");
        }
        // Emits the table name as a replaceable fragment (sharded tables are
        // substituted per data node later).
        buildTableName(query, sqlBuilder);
        if (query.isSubQuery() || isTopQuery) {
            buildWhere(query);
            buildGroupBy(query);
            buildHaving(query);
            buildOrderBy(query);
            buildLimit(query, sqlBuilder);
        }
        if (query.isSubQuery() && !isTopQuery) {
            sqlBuilder.append(" ) ");
            if (query.getAlias() != null) {
                sqlBuilder.append(" ").append(query.getAlias()).append(" ");
            }
        }
    }

    /** Emits a two-sided [LEFT|RIGHT] JOIN with its ON conditions. */
    protected void visit(JoinNode join) {
        if (!isTopQuery) {
            sqlBuilder.append(" ( ");
        }
        if (join.isSubQuery() || isTopQuery) {
            buildSelect(join);
            sqlBuilder.append(" from ");
        }
        PlanNode left = join.getLeftNode();
        PlanNode right = join.getRightNode();
        // NOTE(review): children are rendered with GlobalVisitor — presumably
        // a pushable join implies its children are fully pushable; confirm.
        MysqlVisitor leftVisitor = new GlobalVisitor(left, false);
        leftVisitor.visit();
        replaceableSqlBuilder.append(leftVisitor.getSql());
        // Re-fetch the scratch builder: appending segments moved the current element.
        sqlBuilder = replaceableSqlBuilder.getCurrentElement().getSb();
        if (join.getLeftOuter() && join.getRightOuter()) {
            throw new RuntimeException("full outter join 不支持");
        } else if (join.getLeftOuter() && !join.getRightOuter()) {
            sqlBuilder.append(" left");
        } else if (join.getRightOuter() && !join.getLeftOuter()) {
            sqlBuilder.append(" right");
        }
        sqlBuilder.append(" join ");
        MysqlVisitor rightVisitor = new GlobalVisitor(right, false);
        rightVisitor.visit();
        replaceableSqlBuilder.append(rightVisitor.getSql());
        sqlBuilder = replaceableSqlBuilder.getCurrentElement().getSb();
        // ON conditions: the first one triggers " on "; the rest are joined by " and ".
        StringBuilder joinOnFilterStr = new StringBuilder();
        boolean first = true;
        for (int i = 0; i < join.getJoinFilter().size(); i++) {
            Item filter = join.getJoinFilter().get(i);
            if (first) {
                sqlBuilder.append(" on ");
                first = false;
            } else
                joinOnFilterStr.append(" and ");
            joinOnFilterStr.append(filter);
        }
        if (join.getOtherJoinOnFilter() != null) {
            if (first) {
                first = false;
            } else {
                joinOnFilterStr.append(" and ");
            }
            joinOnFilterStr.append(join.getOtherJoinOnFilter());
        }
        sqlBuilder.append(joinOnFilterStr.toString());
        if (join.isSubQuery() || isTopQuery) {
            buildWhere(join);
            buildGroupBy(join);
            buildHaving(join);
            buildOrderBy(join);
            buildLimit(join, sqlBuilder);
        }
        if (!isTopQuery) {
            sqlBuilder.append(" ) ");
            if (join.getAlias() != null)
                sqlBuilder.append(" ").append(join.getAlias()).append(" ");
        }
    }

    /**
     * Emits the SELECT list. Aggregates that cannot be pushed down are
     * skipped; AVG is decomposed into SUM+COUNT, STD/VARIANCE into
     * COUNT+SUM+VARIANCE (partial) so they can be merged later.
     */
    protected void buildSelect(PlanNode query) {
        boolean addPushDownTableInfo = pushDownTableInfos.isEmpty() && isTopQuery;
        sqlBuilder.append("select ");
        List<Item> columns = query.getColumnsRefered();
        if (query.isDistinct()) {
            sqlBuilder.append("DISTINCT ");
        }
        for (Item col : columns) {
            // Skip aggregates when some aggregate cannot be pushed down.
            if (existUnPushDownGroup && col.type().equals(ItemType.SUM_FUNC_ITEM))
                continue;
            // Skip functions/conditions that contain aggregates; they are
            // computed locally after the merge.
            if ((col.type().equals(ItemType.FUNC_ITEM) || col.type().equals(ItemType.COND_ITEM)) && col.withSumFunc)
                continue;
            String pdName = visitPushDownNameSel(col);
            if (StringUtils.isEmpty(pdName))// duplicate column: not pushed again
                continue;
            if (col.type().equals(ItemType.SUM_FUNC_ITEM)) {
                ItemSum funCol = (ItemSum) col;
                String funName = funCol.funcName().toUpperCase();
                String colName = pdName;
                switch (funCol.sumType()) {
                case AVG_FUNC: {
                    // AVG(x) is pushed down as SUM(x),COUNT(x).
                    String colNameSum = colName.replace(funName + "(", "SUM(");
                    colNameSum = colNameSum.replace(getMadeAggAlias(funName), getMadeAggAlias("SUM"));
                    String colNameCount = colName.replace(funName + "(", "COUNT(");
                    colNameCount = colNameCount.replace(getMadeAggAlias(funName), getMadeAggAlias("COUNT"));
                    sqlBuilder.append(colNameSum).append(",").append(colNameCount).append(",");
                    if (addPushDownTableInfo) {
                        pushDownTableInfos.add(null);
                        pushDownTableInfos.add(null);
                    }
                }
                    continue;
                case STD_FUNC:
                case VARIANCE_FUNC: {
                    // Variance push-down: v[0]=count, v[1]=sum, v[2]=partial variance.
                    String colNameCount = colName.replace(funName + "(", "COUNT(");
                    colNameCount = colNameCount.replace(getMadeAggAlias(funName), getMadeAggAlias("COUNT"));
                    String colNameSum = colName.replace(funName + "(", "SUM(");
                    colNameSum = colNameSum.replace(getMadeAggAlias(funName), getMadeAggAlias("SUM"));
                    String colNameVar = colName.replace(funName + "(", "VARIANCE(");
                    colNameVar = colNameVar.replace(getMadeAggAlias(funName), getMadeAggAlias("VARIANCE"));
                    // VARIANCE
                    sqlBuilder.append(colNameCount).append(",").append(colNameSum).append(",").append(colNameVar)
                            .append(",");
                    if (addPushDownTableInfo) {
                        pushDownTableInfos.add(null);
                        pushDownTableInfos.add(null);
                        pushDownTableInfos.add(null);
                    }
                }
                    continue;
                }
            }
            sqlBuilder.append(pdName);
            if (addPushDownTableInfo)
                pushDownTableInfos.add(col.getTableName());
            sqlBuilder.append(",");
        }
        // Drop the trailing comma.
        // NOTE(review): if every column is skipped this deletes the space
        // after "select "/"DISTINCT " instead — confirm callers guarantee at
        // least one pushed column.
        sqlBuilder.deleteCharAt(sqlBuilder.length() - 1);
    }

    /**
     * Emits GROUP BY when it is fully pushable; otherwise converts the
     * group-by columns to a pushed-down ORDER BY so the merge can group
     * sorted rows.
     */
    protected void buildGroupBy(PlanNode query) {
        if (nodeHasGroupBy(query)) {
            // Case 1: the whole GROUP BY can be pushed down.
            if (!existUnPushDownGroup) {
                if (!query.getGroupBys().isEmpty()) {
                    sqlBuilder.append(" GROUP BY ");
                    for (Order group : query.getGroupBys()) {
                        // Record the resulting ordering of the pushed-down rows.
                        pushDownOrderBy.add(group.copy());
                        Item groupCol = group.getItem();
                        String pdName = "";
                        if (groupCol.basicConstItem())
                            pdName = "'" + groupCol.toString() + "'";
                        if (pdName.isEmpty())
                            pdName = visitUnselPushDownName(groupCol, true);
                        sqlBuilder.append(pdName).append(" ").append(group.getSortOrder()).append(",");
                    }
                    sqlBuilder.deleteCharAt(sqlBuilder.length() - 1);
                }
            } else {
                // Case 2: GROUP BY cannot be pushed — push an ORDER BY instead.
                pushDownOrderBy.addAll(query.getGroupBys());
                if (pushDownOrderBy != null && pushDownOrderBy.size() > 0) {
                    sqlBuilder.append(" ORDER BY ");
                    for (Order order : pushDownOrderBy) {
                        Item orderSel = order.getItem();
                        String pdName = "";
                        if (orderSel.basicConstItem())
                            pdName = "'" + orderSel.toString() + "'";
                        if (pdName.isEmpty())
                            pdName = visitUnselPushDownName(orderSel, true);
                        sqlBuilder.append(pdName).append(" ").append(order.getSortOrder()).append(",");
                    }
                    sqlBuilder.deleteCharAt(sqlBuilder.length() - 1);
                }
            }
        }
    }

    protected void buildHaving(PlanNode query) {
        // HAVING may contain aggregates whose values only exist after the
        // merge, so it is always evaluated locally — never pushed down.
    }

    /**
     * Emits ORDER BY only when no GROUP BY exists (the merge must sort by the
     * group-by columns otherwise).
     */
    protected void buildOrderBy(PlanNode query) {
        /* With a GROUP BY present, the merge sorts by the group columns, so
         * the user's ORDER BY cannot be pushed down. */
        boolean realPush = query.getGroupBys().isEmpty();
        if (query.getOrderBys().size() > 0) {
            if (realPush)
                sqlBuilder.append(" ORDER BY ");
            for (Order order : query.getOrderBys()) {
                Item orderByCol = order.getItem();
                String pdName = "";
                if (orderByCol.basicConstItem())
                    pdName = "'" + orderByCol.toString() + "'";
                // Items are visited even when not pushed so their push-down
                // names get registered for later local evaluation.
                if (pdName.isEmpty())
                    pdName = visitUnselPushDownName(orderByCol, true);
                if (realPush) {
                    pushDownOrderBy.add(order.copy());
                    sqlBuilder.append(pdName).append(" ").append(order.getSortOrder()).append(",");
                }
            }
            if (realPush)
                sqlBuilder.deleteCharAt(sqlBuilder.length() - 1);
        }
    }

    /**
     * Emits LIMIT from+to (an enlarged count; the offset is applied locally
     * after the merge). Not emitted when a GROUP BY exists.
     */
    protected void buildLimit(PlanNode query, StringBuilder sb) {
        /* LIMIT cannot be pushed down together with GROUP BY. */
        if (query.getGroupBys().isEmpty() && !existUnPushDownGroup) {
            /* LIMIT is only pushable when ORDER BY is pushable. */
            if (query.getLimitFrom() != -1 && query.getLimitTo() != -1) {
                sb.append(" LIMIT ").append(query.getLimitFrom() + query.getLimitTo());
            }
        }
    }

    /**
     * @return the pushDownTableInfos
     */
    public List<String> getPushDownTableInfos() {
        return pushDownTableInfos;
    }

    /* -------------------------- help method ------------------------ */
    /* Whether the node requires grouping work (aggregates or GROUP BY). */
    public static boolean nodeHasGroupBy(PlanNode node) {
        return (node.sumFuncs.size() > 0 || node.getGroupBys().size() > 0);
    }

    /**
     * Computes the push-down expression (with optional alias) for a SELECT
     * item; returns null for a duplicate column that was already pushed.
     */
    @Override
    protected String visitPushDownNameSel(Item item) {
        String orgPushDownName = item.getItemName();
        if (item.type().equals(ItemType.FIELD_ITEM)) {
            orgPushDownName = "`" + item.getTableName() + "`.`" + orgPushDownName + "`";
        }
        String pushAlias = null;
        if (pushNameMap.containsKey(orgPushDownName)) {
            // Duplicate column: reuse the earlier mapping, do not push again.
            item.setPushDownName(pushNameMap.get(orgPushDownName));
            return null;
        }
        if (item.type().equals(ItemType.SUM_FUNC_ITEM)) {
            // Aggregates get a synthetic alias encoding which function they are.
            String aggName = ((ItemSum) item).funcName().toUpperCase();
            pushAlias = getMadeAggAlias(aggName) + getRandomAliasName();
        } else if (item.getAlias() != null) {
            pushAlias = item.getAlias();
            if (pushAlias.startsWith(Item.FNAF))
                pushAlias = getRandomAliasName();
        } else if (orgPushDownName.length() > MAX_COL_LENGTH) {
            // Name exceeds the maximum column length: generate an alias.
            pushAlias = getRandomAliasName();
        } else if (isTopQuery && !item.type().equals(ItemType.FIELD_ITEM)) {
            pushAlias = getRandomAliasName();
        }
        if (pushAlias == null) {
            if (item.type().equals(ItemType.FIELD_ITEM)) {
                pushNameMap.put(orgPushDownName, null);
            } else {
                item.setPushDownName(orgPushDownName);
                pushNameMap.put(orgPushDownName, orgPushDownName);
            }
        } else {
            item.setPushDownName(pushAlias);
            pushNameMap.put(orgPushDownName, pushAlias);
        }
        if (pushAlias == null)
            return orgPushDownName;
        else
            return orgPushDownName + " as `" + pushAlias + "`";
    }
}

View File

@@ -0,0 +1,93 @@
package io.mycat.backend.mysql.nio.handler.builder.sqlvisitor;
import java.util.ArrayList;
import java.util.List;
import io.mycat.plan.common.ptr.StringPtr;
/**
 * A string builder composed of segments, where each segment may end in a
 * late-bound, replaceable string pointer — used to swap e.g. a WHERE clause
 * without rebuilding the entire SQL text.
 *
 * @author chenzifei
 * @CreateTime 2015-12-15
 */
public class ReplaceableStringBuilder {
    private List<Element> elements;

    public ReplaceableStringBuilder() {
        elements = new ArrayList<Element>();
    }

    /**
     * Returns the element currently open for appending. A new element is
     * started when none exists yet or when the last one has already been
     * sealed with a replaceable string.
     */
    public Element getCurrentElement() {
        if (elements.isEmpty()) {
            Element fresh = new Element();
            elements.add(fresh);
            return fresh;
        }
        Element last = elements.get(elements.size() - 1);
        if (last.getRepString() == null) {
            return last;
        }
        Element fresh = new Element();
        elements.add(fresh);
        return fresh;
    }

    /** Appends all segments of another builder; a null argument is a no-op. */
    public ReplaceableStringBuilder append(ReplaceableStringBuilder other) {
        if (other != null) {
            elements.addAll(other.elements);
        }
        return this;
    }

    /** Renders all segments, resolving each replaceable part at call time. */
    @Override
    public String toString() {
        StringBuilder rendered = new StringBuilder();
        for (Element element : elements) {
            rendered.append(element.getSb());
            StringPtr replaceable = element.getRepString();
            if (replaceable != null) {
                rendered.append(replaceable.get());
            }
        }
        return rendered.toString();
    }

    /** One fixed text segment plus an optional trailing replaceable part. */
    public final static class Element {
        private final StringBuilder sb;
        private StringPtr repString;

        public Element() {
            sb = new StringBuilder();
        }

        /** @return the fixed text buffer of this segment */
        public StringBuilder getSb() {
            return sb;
        }

        /** @return the replaceable trailing part, or null if none was set */
        public StringPtr getRepString() {
            return repString;
        }

        /**
         * Seals this segment with its replaceable part; may only be set once.
         *
         * @param repString the replaceable part to attach
         */
        public void setRepString(StringPtr repString) {
            if (this.repString != null)
                throw new RuntimeException("error use");
            this.repString = repString;
        }
    }

    /**
     * like stringbuilder.setlength(0)
     */
    public void clear() {
        elements.clear();
    }
}

View File

@@ -0,0 +1,128 @@
package io.mycat.backend.mysql.nio.handler.query;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.log4j.Logger;
import io.mycat.backend.BackendConnection;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.server.NonBlockingSession;
import io.mycat.util.ConcurrentHashSet;
/**
 * Base class for all DML response handlers in the query-handler chain.
 * Maintains the link to the next handler, propagates the set of merge
 * handlers down the chain, and guards one-shot termination.
 */
public abstract class BaseDMLHandler implements DMLResponseHandler {
    // was non-final; a shared static logger should be immutable.
    private static final Logger logger = Logger.getLogger(BaseDMLHandler.class);

    protected final long id;

    /**
     * Whether everything — including functions — has been pushed down.
     */
    private boolean allPushDown = false;

    /**
     * Field packets received from the previous handler in the chain.
     */
    protected List<FieldPacket> fieldPackets = new ArrayList<FieldPacket>();

    protected BaseDMLHandler nextHandler = null;
    protected boolean isLeft = false;
    protected NonBlockingSession session;
    // Ensures onTerminate() runs at most once.
    protected AtomicBoolean terminate = new AtomicBoolean(false);
    protected Set<DMLResponseHandler> merges;

    public BaseDMLHandler(long id, NonBlockingSession session) {
        this.id = id;
        this.session = session;
        this.merges = new ConcurrentHashSet<DMLResponseHandler>();
    }

    @Override
    public final BaseDMLHandler getNextHandler() {
        return this.nextHandler;
    }

    /**
     * Links the next handler and propagates this handler's merge set to every
     * handler downstream of it.
     */
    @Override
    public final void setNextHandler(DMLResponseHandler next) {
        this.nextHandler = (BaseDMLHandler) next;
        DMLResponseHandler toAddMergesHandler = next;
        do {
            toAddMergesHandler.getMerges().addAll(this.getMerges());
            toAddMergesHandler = toAddMergesHandler.getNextHandler();
        } while (toAddMergesHandler != null);
    }

    @Override
    public void setLeft(boolean isLeft) {
        this.isLeft = isLeft;
    }

    @Override
    public final Set<DMLResponseHandler> getMerges() {
        return this.merges;
    }

    public boolean isAllPushDown() {
        return allPushDown;
    }

    public void setAllPushDown(boolean allPushDown) {
        this.allPushDown = allPushDown;
    }

    /** One-shot terminate: delegates to onTerminate() exactly once. */
    @Override
    public final void terminate() {
        if (terminate.compareAndSet(false, true)) {
            try {
                onTerminate();
            } catch (Exception e) {
                logger.warn("handler terminate exception:", e);
            }
        }
    }

    /** Subclass hook invoked exactly once when the handler is terminated. */
    protected abstract void onTerminate();

    @Override
    public void connectionError(Throwable e, BackendConnection conn) {
        // no-op by default; subclasses override as needed
    }

    @Override
    public void errorResponse(byte[] err, BackendConnection conn) {
        // Forward the error packet down the chain by default.
        nextHandler.errorResponse(err, conn);
    }

    @Override
    public void okResponse(byte[] ok, BackendConnection conn) {
        // no-op by default; subclasses override as needed
    }

    @Override
    public void relayPacketResponse(byte[] relayPacket, BackendConnection conn) {
        // no-op by default; subclasses override as needed
    }

    @Override
    public void endPacketResponse(byte[] endPacket, BackendConnection conn) {
        // no-op by default; subclasses override as needed
    }

    @Override
    public void writeQueueAvailable() {
        // no-op by default; subclasses override as needed
    }

    @Override
    public void connectionClose(BackendConnection conn, String reason) {
        // no-op by default; subclasses override as needed
    }

    @Override
    public void connectionAcquired(BackendConnection conn) {
        // no-op by default; subclasses override as needed
    }
}

View File

@@ -0,0 +1,28 @@
package io.mycat.backend.mysql.nio.handler.query;
import java.util.Set;
import io.mycat.backend.mysql.nio.handler.ResponseHandler;
/**
 * One handler in a DML result-processing chain. Handlers are linked with
 * {@link #setNextHandler(DMLResponseHandler)} and each forwards (possibly
 * transformed) field/row/eof packets to its successor.
 */
public interface DMLResponseHandler extends ResponseHandler {
	/** the role a handler plays within the processing chain */
	public enum HandlerType {
		DIRECT, TEMPTABLE, BASESEL, REFRESHFP, MERGE, JOIN, WHERE, GROUPBY, HAVING, ORDERBY, LIMIT, UNION, DISTINCT, SENDMAKER, FINAL
	}

	/** @return the role of this handler in the chain */
	HandlerType type();

	/** @return the next handler in the chain, or null at the end of the chain */
	DMLResponseHandler getNextHandler();

	/** Links {@code next} after this handler and propagates merge sources downstream. */
	void setNextHandler(DMLResponseHandler next);

	/** @return the merge handlers that feed this part of the chain */
	Set<DMLResponseHandler> getMerges();

	boolean isAllPushDown();

	void setAllPushDown(boolean allPushDown);

	/** marks whether this handler sits on the left side of a join */
	void setLeft(boolean isLeft);

	/** Terminates this handler, releasing its resources; must be idempotent. */
	void terminate();
}

View File

@@ -0,0 +1,71 @@
package io.mycat.backend.mysql.nio.handler.query;
import java.util.concurrent.atomic.AtomicBoolean;
import io.mycat.server.NonBlockingSession;
/**
 * A DML handler that runs part of its work on its own worker thread,
 * taken from the session processor's executor.
 */
public abstract class OwnThreadDMLHandler extends BaseDMLHandler {
	/* true once the own thread has started (or once terminate preempted the start) */
	private AtomicBoolean ownJobFlag;
	private Object ownThreadLock = new Object();
	/* guarded by ownThreadLock: set when the thread is about to release resources */
	private boolean preparedToRecycle;

	public OwnThreadDMLHandler(long id, NonBlockingSession session) {
		super(id, session);
		this.ownJobFlag = new AtomicBoolean(false);
		this.preparedToRecycle = false;
	}

	@Override
	public final void onTerminate() throws Exception {
		if (ownJobFlag.compareAndSet(false, true)) {
			// terminate() arrived before the own thread was ever started
			recycleResources();
		} else {// the own thread has already been started
			synchronized (ownThreadLock) {
				if (!preparedToRecycle) { // thread has not yet reached the resource-release stage
					terminateThread();
				}
			}
		}
	}

	/**
	 * Starts the own-thread job on the processor's executor.
	 *
	 * @param objects
	 *            arguments passed through to ownThreadJob()
	 */
	protected final void startOwnThread(final Object... objects) {
		session.getSource().getProcessor().getExecutor().execute(new Runnable() {

			@Override
			public void run() {
				if (terminate.get())
					return;
				// loses the race against onTerminate(): nothing to do
				if (ownJobFlag.compareAndSet(false, true)) {
					try {
						ownThreadJob(objects);
					} finally {
						synchronized (ownThreadLock) {
							preparedToRecycle = true;
						}
						recycleResources();
					}
				}
			}
		});
	}

	/** the actual work performed on the own thread */
	protected abstract void ownThreadJob(Object... objects);

	/* actions that make the running thread come to an end */
	protected abstract void terminateThread() throws Exception;

	/* actions to perform after the thread has finished */
	protected abstract void recycleResources();
}

View File

@@ -0,0 +1,166 @@
package io.mycat.backend.mysql.nio.handler.query.impl;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.List;
import org.apache.log4j.Logger;
import io.mycat.MycatServer;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.datasource.PhysicalDBNode;
import io.mycat.backend.mysql.nio.MySQLConnection;
import io.mycat.backend.mysql.nio.handler.query.BaseDMLHandler;
import io.mycat.config.MycatConfig;
import io.mycat.net.mysql.ErrorPacket;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.route.RouteResultsetNode;
import io.mycat.server.NonBlockingSession;
/**
 * Executes one SQL statement on one backend node and forwards every
 * received packet to the next handler in the chain.
 */
public class BaseSelectHandler extends BaseDMLHandler {
	private static final Logger logger = Logger.getLogger(BaseSelectHandler.class);

	private final boolean autocommit;
	/* field count of the result set; -1 until the field packets arrive */
	private volatile int fieldCounts = -1;
	private final RouteResultsetNode rrss;

	/**
	 * @param id
	 *            handler id
	 * @param rrss
	 *            route (target node + statement) to execute
	 * @param autocommit
	 *            whether the statement runs in autocommit mode
	 * @param session
	 *            owning session
	 */
	public BaseSelectHandler(long id, RouteResultsetNode rrss, boolean autocommit, NonBlockingSession session) {
		super(id, session);
		this.rrss = rrss;
		this.autocommit = autocommit;
	}

	/**
	 * Reuses the session's existing connection for this route, or binds a
	 * fresh one from the data node's pool.
	 *
	 * @return the bound connection, or null if the session is already closed
	 */
	public MySQLConnection initConnection() throws Exception {
		if (session.closed()) {
			return null;
		}
		MySQLConnection exeConn = (MySQLConnection) session.getTarget(rrss);
		if (session.tryExistsCon(exeConn, rrss)) {
			return exeConn;
		} else {
			MycatConfig conf = MycatServer.getInstance().getConfig();
			PhysicalDBNode dn = conf.getDataNodes().get(rrss.getName());
			final BackendConnection newConn = dn.getConnection(dn.getDatabase(), session.getSource().isAutocommit());
			session.bindConnection(rrss, newConn);
			return (MySQLConnection) newConn;
		}
	}

	/** Sends this handler's statement on the given connection. */
	public void execute(MySQLConnection conn) {
		if (session.closed()) {
			session.clearResources(true);
			return;
		}
		conn.setResponseHandler(this);
		if (logger.isInfoEnabled()) {
			logger.info(conn.toString() + " send sql:" + rrss.getStatement());
		}
		// re-check: the session may have been closed by another thread meanwhile
		if (session.closed()) {
			session.onQueryError("failed or cancelled by other thread".getBytes());
			return;
		}
		conn.execute(rrss, session.getSource(), autocommit);
	}

	@Override
	public void okResponse(byte[] ok, BackendConnection conn) {
		conn.syncAndExcute();
	}

	@Override
	public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPacketsNull, byte[] eof,
			boolean isLeft, BackendConnection conn) {
		if (logger.isDebugEnabled()) {
			logger.debug(conn.toString() + "'s field is reached.");
		}
		if (terminate.get()) {
			return;
		}
		if (fieldCounts == -1) {
			fieldCounts = fields.size();
		}
		// parse the raw field packets before handing them downstream
		List<FieldPacket> fieldPackets = new ArrayList<FieldPacket>();
		for (int i = 0; i < fields.size(); i++) {
			FieldPacket field = new FieldPacket();
			field.read(fields.get(i));
			fieldPackets.add(field);
		}
		nextHandler.fieldEofResponse(null, null, fieldPackets, null, this.isLeft, conn);
	}

	@Override
	public boolean rowResponse(byte[] row, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
		if (terminate.get())
			return true;
		RowDataPacket rp = new RowDataPacket(fieldCounts);
		rp.read(row);
		nextHandler.rowResponse(null, rp, this.isLeft, conn);
		return false;
	}

	@Override
	public void rowEofResponse(byte[] data, boolean isLeft, BackendConnection conn) {
		if (logger.isDebugEnabled()) {
			logger.debug(conn.toString() + " 's rowEof is reached.");
		}
		((MySQLConnection) conn).setRunning(false);
		if (this.terminate.get())
			return;
		nextHandler.rowEofResponse(data, this.isLeft, conn);
	}

	/**
	 * Called when the backend connection fails, whether the connection's
	 * thread was awaiting or running.
	 */
	@Override
	public void connectionError(Throwable e, BackendConnection conn) {
		if (terminate.get())
			return;
		// e.getMessage() may be null (e.g. for NPE); fall back to toString()
		String msg = e.getMessage() != null ? e.getMessage() : e.toString();
		logger.warn(conn.toString() + "|connectionError()|" + msg);
		session.onQueryError(msg.getBytes());
	}

	@Override
	public void errorResponse(byte[] err, BackendConnection conn) {
		((MySQLConnection) conn).setRunning(false);
		ErrorPacket errPacket = new ErrorPacket();
		errPacket.read(err);
		String errMsg;
		try {
			errMsg = new String(errPacket.message, conn.getCharset());
		} catch (UnsupportedEncodingException e) {
			errMsg = "UnsupportedEncodingException:" + conn.getCharset();
		}
		logger.warn(conn.toString() + errMsg);
		if (terminate.get())
			return;
		session.onQueryError(errMsg.getBytes());
	}

	@Override
	protected void onTerminate() {
		this.session.releaseConnections(false);
	}

	@Override
	public HandlerType type() {
		return HandlerType.BASESEL;
	}
}

View File

@@ -0,0 +1,110 @@
package io.mycat.backend.mysql.nio.handler.query.impl;
import java.util.ArrayList;
import java.util.List;
import org.apache.log4j.Logger;
import io.mycat.MycatServer;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.handler.query.BaseDMLHandler;
import io.mycat.backend.mysql.nio.handler.util.HandlerTool;
import io.mycat.backend.mysql.nio.handler.util.RowDataComparator;
import io.mycat.backend.mysql.store.DistinctLocalResult;
import io.mycat.backend.mysql.store.LocalResult;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.Order;
import io.mycat.plan.common.field.Field;
import io.mycat.plan.common.item.Item;
import io.mycat.server.NonBlockingSession;
public class DistinctHandler extends BaseDMLHandler {
private static final Logger logger = Logger.getLogger(DistinctHandler.class);
private List<Field> sourceFields;
private LocalResult localResult;
private RowDataComparator cmptor;
private List<Order> fixedOrders;
private BufferPool pool;
/* if distincts is null, distinct the total row */
private List<Item> distincts;
public DistinctHandler(long id, NonBlockingSession session, List<Item> columns) {
this(id, session, columns, null);
}
public DistinctHandler(long id, NonBlockingSession session, List<Item> columns, List<Order> fixedOrders) {
super(id, session);
this.distincts = columns;
this.fixedOrders = fixedOrders;
}
@Override
public HandlerType type() {
return HandlerType.DISTINCT;
}
/**
* 所有的上一级表传递过来的信息全部视作Field类型
*/
public void fieldEofResponse(byte[] headernull, List<byte[]> fieldsnull, final List<FieldPacket> fieldPackets,
byte[] eofnull, boolean isLeft, BackendConnection conn) {
if (terminate.get())
return;
if (this.pool == null)
this.pool = MycatServer.getInstance().getBufferPool();
this.fieldPackets = fieldPackets;
this.sourceFields = HandlerTool.createFields(this.fieldPackets);
if (this.distincts == null) {
// 比如show tables这种语句
this.distincts = new ArrayList<Item>();
for (FieldPacket fp : this.fieldPackets) {
Item sel = HandlerTool.createItemField(fp);
this.distincts.add(sel);
}
}
List<Order> orders = this.fixedOrders;
if (orders == null)
orders = HandlerTool.makeOrder(this.distincts);
cmptor = new RowDataComparator(this.fieldPackets, orders, this.isAllPushDown(), type(), conn.getCharset());
localResult = new DistinctLocalResult(pool, this.sourceFields.size(), cmptor, conn.getCharset())
.setMemSizeController(session.getOtherBufferMC());
nextHandler.fieldEofResponse(null, null, this.fieldPackets, null, this.isLeft, conn);
}
/**
* 收到行数据包的响应处理
*/
public boolean rowResponse(byte[] rownull, final RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
if (terminate.get())
return true;
localResult.add(rowPacket);
return false;
}
@Override
public void rowEofResponse(byte[] data, boolean isLeft, BackendConnection conn) {
logger.debug("roweof");
if (terminate.get())
return;
sendDistinctRowPacket(conn);
nextHandler.rowEofResponse(null, isLeft, conn);
}
private void sendDistinctRowPacket(BackendConnection conn) {
localResult.done();
RowDataPacket row = null;
while ((row = localResult.next()) != null) {
nextHandler.rowResponse(null, row, this.isLeft, conn);
}
}
@Override
public void onTerminate() {
if (this.localResult != null)
this.localResult.close();
}
}

View File

@@ -0,0 +1,91 @@
package io.mycat.backend.mysql.nio.handler.query.impl;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.log4j.Logger;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.handler.query.BaseDMLHandler;
import io.mycat.backend.mysql.nio.handler.util.HandlerTool;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.common.field.Field;
import io.mycat.plan.common.item.Item;
import io.mycat.server.NonBlockingSession;
/**
 * HAVING is currently evaluated the same way as WHERE: each row is run
 * through the having expression and dropped when it evaluates to false.
 */
public class HavingHandler extends BaseDMLHandler {
	private static final Logger logger = Logger.getLogger(HavingHandler.class);

	private Item having = null;
	/* the having expression re-bound against this handler's source fields */
	private Item havingItem = null;
	private List<Field> sourceFields;
	private ReentrantLock lock = new ReentrantLock();

	public HavingHandler(long id, NonBlockingSession session, Item having) {
		super(id, session);
		// explicit check instead of assert: asserts are disabled by default at runtime
		if (having == null) {
			throw new IllegalArgumentException("having item must not be null");
		}
		this.having = having;
	}

	@Override
	public HandlerType type() {
		return HandlerType.HAVING;
	}

	/**
	 * All information handed down by the upstream handler is treated as Fields.
	 */
	@Override
	public void fieldEofResponse(byte[] headernull, List<byte[]> fieldsnull, final List<FieldPacket> fieldPackets,
			byte[] eofnull, boolean isLeft, BackendConnection conn) {
		if (terminate.get())
			return;
		this.fieldPackets = fieldPackets;
		this.sourceFields = HandlerTool.createFields(this.fieldPackets);
		/**
		 * having functions are basically never pushed down, because the
		 * expression may reference group by results
		 */
		this.havingItem = HandlerTool.createItem(this.having, this.sourceFields, 0, false, this.type(),
				conn.getCharset());
		nextHandler.fieldEofResponse(null, null, this.fieldPackets, null, this.isLeft, conn);
	}

	/**
	 * Evaluates the having predicate against each row; only matching rows
	 * are forwarded downstream.
	 */
	@Override
	public boolean rowResponse(byte[] rownull, final RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
		if (terminate.get())
			return true;
		lock.lock();
		try {
			// sourceFields are shared mutable state: bind them to this row under the lock
			HandlerTool.initFields(this.sourceFields, rowPacket.fieldValues);
			if (havingItem.valBool()) {
				nextHandler.rowResponse(null, rowPacket, this.isLeft, conn);
			} else {
				// row filtered out
			}
			return false;
		} finally {
			lock.unlock();
		}
	}

	@Override
	public void rowEofResponse(byte[] data, boolean isLeft, BackendConnection conn) {
		logger.debug("roweof");
		if (terminate.get())
			return;
		nextHandler.rowEofResponse(data, isLeft, conn);
	}

	@Override
	public void onTerminate() {
		// no resources to release
	}
}

View File

@@ -0,0 +1,71 @@
package io.mycat.backend.mysql.nio.handler.query.impl;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.log4j.Logger;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.handler.query.BaseDMLHandler;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.server.NonBlockingSession;
/**
 * Applies LIMIT offset/count by simple row counting: rows before the
 * offset are swallowed, rows inside the window are forwarded, and once
 * the window is exhausted upstream is asked to stop.
 */
public class LimitHandler extends BaseDMLHandler {
	private static final Logger logger = Logger.getLogger(LimitHandler.class);

	private final long limitIndex;
	private final long limitCount;
	// index of the row currently being processed (0-based after increment)
	private AtomicLong curIndex = new AtomicLong(-1L);

	public LimitHandler(long id, NonBlockingSession session, long limitIndex, long limitCount) {
		super(id, session);
		this.limitIndex = limitIndex;
		this.limitCount = limitCount;
	}

	@Override
	public void rowEofResponse(byte[] data, boolean isLeft, BackendConnection conn) {
		logger.debug("row eof");
		if (!terminate.get()) {
			nextHandler.rowEofResponse(data, this.isLeft, conn);
		}
	}

	@Override
	public HandlerType type() {
		return HandlerType.LIMIT;
	}

	@Override
	public void fieldEofResponse(byte[] headernull, List<byte[]> fieldsnull, List<FieldPacket> fieldPackets,
			byte[] eofnull, boolean isLeft, BackendConnection conn) {
		nextHandler.fieldEofResponse(null, null, fieldPackets, null, this.isLeft, conn);
	}

	@Override
	public boolean rowResponse(byte[] rownull, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
		if (terminate.get()) {
			return true;
		}
		long curIndexTmp = curIndex.incrementAndGet();
		if (curIndexTmp < limitIndex) {
			// still before the offset: swallow the row
			return false;
		} else if (curIndexTmp < limitIndex + limitCount) {
			// inside the limit window: forward the row
			nextHandler.rowResponse(null, rowPacket, this.isLeft, conn);
			return false;
		} else {
			// past the window: signal upstream to stop sending rows
			return true;
		}
	}

	@Override
	protected void onTerminate() {
		// no resources to release
	}
}

View File

@@ -0,0 +1,329 @@
package io.mycat.backend.mysql.nio.handler.query.impl;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.log4j.Logger;
import io.mycat.MycatServer;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.MySQLConnection;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler;
import io.mycat.backend.mysql.nio.handler.query.OwnThreadDMLHandler;
import io.mycat.backend.mysql.nio.handler.util.ArrayMinHeap;
import io.mycat.backend.mysql.nio.handler.util.HeapItem;
import io.mycat.backend.mysql.nio.handler.util.RowDataComparator;
import io.mycat.config.ErrorCode;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.Order;
import io.mycat.plan.common.exception.MySQLOutPutException;
import io.mycat.route.RouteResultsetNode;
import io.mycat.server.NonBlockingSession;
/**
 * Merges the result streams coming back from multiple backend nodes. When
 * an ORDER BY is present the per-connection streams are k-way merged with a
 * min-heap on the handler's own thread; otherwise ("easy merge") rows are
 * forwarded as they arrive. Aggregate functions are handled elsewhere
 * (group-by handler), not here.
 */
public class MultiNodeMergeHandler extends OwnThreadDMLHandler {
	private static final Logger logger = Logger.getLogger(MultiNodeMergeHandler.class);

	private final int queueSize;
	private final ReentrantLock lock;
	private final List<BaseSelectHandler> exeHandlers;
	// one row queue per backend connection; a null-item signals end of that stream
	private Map<MySQLConnection, BlockingQueue<HeapItem>> queues;
	private List<Order> orderBys;
	private RowDataComparator rowComparator;
	private RouteResultsetNode[] route;
	/* number of connections whose packets reached the merge point; guarded by lock */
	private int reachedConCount;
	/* true when no heap merge is needed: single node, or no order-by */
	private boolean isEasyMerge;

	public MultiNodeMergeHandler(long id, RouteResultsetNode[] route, boolean autocommit, NonBlockingSession session,
			List<Order> orderBys) {
		super(id, session);
		this.exeHandlers = new ArrayList<BaseSelectHandler>();
		this.lock = new ReentrantLock();
		if (route.length == 0)
			throw new MySQLOutPutException(ErrorCode.ER_QUERYHANDLER, "", "can not execute empty rrss!");
		for (RouteResultsetNode rrss : route) {
			BaseSelectHandler exeHandler = new BaseSelectHandler(id, rrss, autocommit, session);
			exeHandler.setNextHandler(this);
			this.exeHandlers.add(exeHandler);
		}
		this.route = route;
		this.orderBys = orderBys;
		this.queueSize = MycatServer.getInstance().getConfig().getSystem().getMergeQueueSize();
		this.isEasyMerge = route.length == 1 || orderBys == null || orderBys.size() == 0;
		this.queues = new ConcurrentHashMap<MySQLConnection, BlockingQueue<HeapItem>>();
		this.merges.add(this);
	}

	public RouteResultsetNode[] getRouteSources() {
		return this.route;
	}

	/** Binds a connection per route and fires every backend statement. */
	public void execute() throws Exception {
		synchronized (exeHandlers) {
			if (terminate.get())
				return;
			for (BaseSelectHandler exeHandler : exeHandlers) {
				MySQLConnection exeConn = exeHandler.initConnection();
				if (exeConn != null) {
					BlockingQueue<HeapItem> queue = new LinkedBlockingQueue<HeapItem>(queueSize);
					queues.put(exeConn, queue);
					exeHandler.execute(exeConn);
				}
			}
		}
	}

	@Override
	public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
			boolean isLeft, BackendConnection conn) {
		if (logger.isInfoEnabled()) {
			logger.info(conn.toString() + "'s field is reached.");
		}
		// bail out early so connections are released promptly on terminate
		if (terminate.get()) {
			return;
		}
		lock.lock(); // combine the field packets of all connections
		try {
			if (this.fieldPackets.isEmpty()) {
				// first connection to answer defines the field layout
				this.fieldPackets = fieldPackets;
				rowComparator = makeRowDataSorter((MySQLConnection) conn);
				nextHandler.fieldEofResponse(null, null, fieldPackets, null, this.isLeft, conn);
			}
			if (!isEasyMerge) {
				// start the merge thread only once every backend has replied
				if (++reachedConCount == route.length) {
					startOwnThread();
				}
			}
		} finally {
			lock.unlock();
		}
	}

	@Override
	public boolean rowResponse(byte[] row, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
		if (terminate.get())
			return true;
		if (isEasyMerge) {
			nextHandler.rowResponse(null, rowPacket, this.isLeft, conn);
		} else {
			BlockingQueue<HeapItem> queue = queues.get(conn);
			if (queue == null)
				return true;
			HeapItem item = new HeapItem(row, rowPacket, (MySQLConnection) conn);
			try {
				queue.put(item);
			} catch (InterruptedException e) {
				// restore the interrupt status instead of silently swallowing it
				Thread.currentThread().interrupt();
			}
		}
		return false;
	}

	@Override
	public void rowEofResponse(byte[] data, boolean isLeft, BackendConnection conn) {
		if (logger.isInfoEnabled()) {
			logger.info(conn.toString() + " 's rowEof is reached.");
		}
		((MySQLConnection) conn).setRunning(false);
		if (this.terminate.get())
			return;
		if (isEasyMerge) {
			lock.lock();
			try {
				// forward a single eof once every backend stream has ended
				if (++reachedConCount == route.length)
					nextHandler.rowEofResponse(null, this.isLeft, conn);
			} finally {
				lock.unlock();
			}
		} else {
			BlockingQueue<HeapItem> queue = queues.get(conn);
			if (queue == null)
				return;
			try {
				// null-item marks end of this connection's stream for the merge thread
				queue.put(HeapItem.NULLITEM());
			} catch (InterruptedException e) {
				Thread.currentThread().interrupt();
			}
		}
	}

	@Override
	protected void ownThreadJob(Object... objects) {
		try {
			// min-heap over the head row of every backend stream; null-items sort first
			ArrayMinHeap<HeapItem> heap = new ArrayMinHeap<HeapItem>(new Comparator<HeapItem>() {

				@Override
				public int compare(HeapItem o1, HeapItem o2) {
					RowDataPacket row1 = o1.getRowPacket();
					RowDataPacket row2 = o2.getRowPacket();
					if (row1 == null || row2 == null) {
						if (row1 == row2)
							return 0;
						if (row1 == null)
							return -1;
						return 1;
					}
					return rowComparator.compare(row1, row2);
				}
			});
			// seed the heap with the first row of each stream
			for (MySQLConnection conn : queues.keySet()) {
				HeapItem firstItem = queues.get(conn).take();
				heap.add(firstItem);
			}
			while (!heap.isEmpty()) {
				if (terminate.get())
					return;
				HeapItem top = heap.peak();
				if (top.IsNullItem()) {
					// this stream is exhausted; drop it from the merge
					heap.poll();
				} else {
					BlockingQueue<HeapItem> topitemQueue = queues.get(top.getIndex());
					HeapItem item = topitemQueue.take();
					heap.replaceTop(item);
					if (nextHandler.rowResponse(top.getRowData(), top.getRowPacket(), this.isLeft, top.getIndex())) {
						// downstream asked to stop; still fall through to send eof
						break;
					}
				}
			}
			if (logger.isInfoEnabled()) {
				String executeSqls = getRoutesSql(route);
				logger.info(executeSqls + " heap send eof: ");
			}
			nextHandler.rowEofResponse(null, this.isLeft, queues.keySet().iterator().next());
		} catch (Exception e) {
			String msg = "Merge thread error, " + e.getLocalizedMessage();
			logger.warn(msg, e);
			session.onQueryError(msg.getBytes());
		}
	}

	@Override
	protected void terminateThread() throws Exception {
		Iterator<Entry<MySQLConnection, BlockingQueue<HeapItem>>> iter = this.queues.entrySet().iterator();
		while (iter.hasNext()) {
			Entry<MySQLConnection, BlockingQueue<HeapItem>> entry = iter.next();
			// clear the backlog and push an end marker so the merge thread wakes up
			entry.getValue().clear();
			entry.getValue().put(new HeapItem(null, null, entry.getKey()));
		}
	}

	@Override
	protected void recycleResources() {
		synchronized (exeHandlers) {
			for (BaseSelectHandler exeHandler : exeHandlers) {
				terminatePreHandler(exeHandler);
			}
		}
		Iterator<Entry<MySQLConnection, BlockingQueue<HeapItem>>> iter = this.queues.entrySet().iterator();
		while (iter.hasNext()) {
			Entry<MySQLConnection, BlockingQueue<HeapItem>> entry = iter.next();
			// drain with poll to avoid blocking; producers may still hold the lock
			while (entry.getValue().poll() != null)
				;
			iter.remove();
		}
	}

	/**
	 * Terminates every handler that precedes this one in the given chain.
	 *
	 * @param handler
	 *            head of the chain to terminate
	 */
	private void terminatePreHandler(DMLResponseHandler handler) {
		DMLResponseHandler current = handler;
		while (current != null) {
			if (current == this)
				break;
			current.terminate();
			current = current.getNextHandler();
		}
	}

	private RowDataComparator makeRowDataSorter(MySQLConnection conn) {
		if (!isEasyMerge)
			return new RowDataComparator(this.fieldPackets, orderBys, this.isAllPushDown(), this.type(),
					conn.getCharset());
		return null;
	}

	@Override
	public HandlerType type() {
		return HandlerType.MERGE;
	}

	/** Builds a loggable "{sql[nodes];...}" summary of the executed routes. */
	private String getRoutesSql(RouteResultsetNode[] route) {
		StringBuilder sb = new StringBuilder();
		sb.append('{');
		Map<String, List<RouteResultsetNode>> sqlMap = new HashMap<String, List<RouteResultsetNode>>();
		for (RouteResultsetNode rrss : route) {
			String sql = rrss.getStatement();
			if (!sqlMap.containsKey(sql)) {
				List<RouteResultsetNode> rrssList = new ArrayList<RouteResultsetNode>();
				rrssList.add(rrss);
				sqlMap.put(sql, rrssList);
			} else {
				List<RouteResultsetNode> rrssList = sqlMap.get(sql);
				rrssList.add(rrss);
			}
		}
		for (String sql : sqlMap.keySet()) {
			sb.append(sql).append(sqlMap.get(sql)).append(';');
		}
		sb.append('}');
		return sb.toString();
	}
}

View File

@@ -0,0 +1,146 @@
package io.mycat.backend.mysql.nio.handler.query.impl;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;
import org.apache.log4j.Logger;
import io.mycat.MycatServer;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.MySQLConnection;
import io.mycat.backend.mysql.nio.handler.query.OwnThreadDMLHandler;
import io.mycat.backend.mysql.nio.handler.util.RowDataComparator;
import io.mycat.backend.mysql.store.LocalResult;
import io.mycat.backend.mysql.store.SortedLocalResult;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.Order;
import io.mycat.server.NonBlockingSession;
import io.mycat.util.TimeUtil;
/**
 * Sorts the incoming row stream. Rows are handed to the own thread through a
 * bounded queue, accumulated into a sorted local result (memory/file backed),
 * and replayed downstream in order once the upstream eof arrives. An empty
 * RowDataPacket (fieldCount == 0) is used as the end-of-stream sentinel.
 */
public class OrderByHandler extends OwnThreadDMLHandler {
	private static final Logger logger = Logger.getLogger(OrderByHandler.class);

	private List<Order> orders;
	private RowDataComparator cmp = null;
	private BlockingQueue<RowDataPacket> queue;
	/* sorting store; may spill to the file system */
	private LocalResult localResult;
	private BufferPool pool;
	private int queueSize;

	public OrderByHandler(long id, NonBlockingSession session, List<Order> orders) {
		super(id, session);
		this.orders = orders;
		this.queueSize = MycatServer.getInstance().getConfig().getSystem().getOrderByQueueSize();
		this.queue = new LinkedBlockingDeque<RowDataPacket>(queueSize);
	}

	@Override
	public HandlerType type() {
		return HandlerType.ORDERBY;
	}

	@Override
	public void fieldEofResponse(byte[] headernull, List<byte[]> fieldsnull, final List<FieldPacket> fieldPackets,
			byte[] eofnull, boolean isLeft, final BackendConnection conn) {
		if (terminate.get())
			return;
		if (this.pool == null)
			this.pool = MycatServer.getInstance().getBufferPool();
		this.fieldPackets = fieldPackets;
		cmp = new RowDataComparator(this.fieldPackets, orders, isAllPushDown(), type(), conn.getCharset());
		localResult = new SortedLocalResult(pool, fieldPackets.size(), cmp, conn.getCharset())
				.setMemSizeController(session.getOrderBufferMC());
		nextHandler.fieldEofResponse(null, null, fieldPackets, null, this.isLeft, conn);
		startOwnThread(conn);
	}

	@Override
	public boolean rowResponse(byte[] rownull, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
		if (terminate.get())
			return true;
		try {
			queue.put(rowPacket);
		} catch (InterruptedException e) {
			// restore the interrupt status and stop accepting rows
			Thread.currentThread().interrupt();
			return true;
		}
		return false;
	}

	@Override
	public void rowEofResponse(byte[] data, boolean isLeft, BackendConnection conn) {
		logger.debug("roweof");
		if (terminate.get())
			return;
		try {
			// empty packet == end-of-stream sentinel for the sort thread
			queue.put(new RowDataPacket(0));
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt();
		}
	}

	@Override
	protected void ownThreadJob(Object... objects) {
		MySQLConnection conn = (MySQLConnection) objects[0];
		recordElapsedTime("order write start :");
		try {
			while (true) {
				if (terminate.get()) {
					return;
				}
				RowDataPacket row;
				try {
					row = queue.take();
				} catch (InterruptedException e) {
					// previously row stayed null here and NPE'd below; treat as shutdown
					Thread.currentThread().interrupt();
					return;
				}
				if (row.fieldCount == 0) {
					break;
				}
				localResult.add(row);
			}
			recordElapsedTime("order write end :");
			localResult.done();
			recordElapsedTime("order read start :");
			while (true) {
				if (terminate.get()) {
					return;
				}
				RowDataPacket row = localResult.next();
				if (row == null) {
					break;
				}
				if (nextHandler.rowResponse(null, row, this.isLeft, conn))
					break;
			}
			recordElapsedTime("order read end:");
			nextHandler.rowEofResponse(null, this.isLeft, conn);
		} catch (Exception e) {
			String msg = "OrderBy thread error, " + e.getLocalizedMessage();
			logger.warn(msg, e);
			session.onQueryError(msg.getBytes());
		}
	}

	private void recordElapsedTime(String prefix) {
		if (logger.isInfoEnabled()) {
			logger.info(prefix + TimeUtil.currentTimeMillis());
		}
	}

	@Override
	protected void terminateThread() throws Exception {
		// drop pending rows and push the sentinel so the sort thread wakes up
		this.queue.clear();
		this.queue.add(new RowDataPacket(0));
	}

	@Override
	protected void recycleResources() {
		this.queue.clear();
		if (this.localResult != null)
			this.localResult.close();
	}
}

View File

@@ -0,0 +1,257 @@
package io.mycat.backend.mysql.nio.handler.query.impl;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.log4j.Logger;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.handler.query.BaseDMLHandler;
import io.mycat.backend.mysql.nio.handler.util.HandlerTool;
import io.mycat.config.ErrorCode;
import io.mycat.net.mysql.BinaryRowDataPacket;
import io.mycat.net.mysql.EOFPacket;
import io.mycat.net.mysql.ErrorPacket;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.OkPacket;
import io.mycat.net.mysql.ResultSetHeaderPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.net.mysql.StatusFlags;
import io.mycat.server.NonBlockingSession;
import io.mycat.server.ServerConnection;
/*
* 最终将数据返回给用户的hander处理
*/
public class OutputHandler extends BaseDMLHandler {
private static Logger logger = Logger.getLogger(OutputHandler.class);
/**
* 回收资源和其他的response方法有可能同步
*/
protected final ReentrantLock lock;
private byte packetId;
private NonBlockingSession session;
private ByteBuffer buffer;
private boolean isBinary;
private boolean hasNext;
/**
 * @param id
 *            handler id
 * @param session
 *            owning session; this handler registers itself as the session's output handler
 * @param hasNext
 *            true when more result sets follow (multi-statement execution)
 */
public OutputHandler(long id, NonBlockingSession session, boolean hasNext) {
	super(id, session);
	session.setOutputHandler(this);
	this.lock = new ReentrantLock();
	this.packetId = 0;
	// NOTE(review): this private field shadows BaseDMLHandler.session with the same reference — consider removing it
	this.session = session;
	this.hasNext = hasNext;
	// binary protocol is used for prepared statements
	this.isBinary = session.isPrepared();
	this.buffer = session.getSource().allocate();
}
@Override
public HandlerType type() {
	// this handler terminates the chain: it writes results to the client
	return HandlerType.FINAL;
}
/**
 * Writes an OK packet back to the client. When the server flags more
 * results, the packet is only buffered; otherwise the handler tree is
 * terminated and the buffer is flushed to the client.
 */
@Override
public void okResponse(byte[] ok, BackendConnection conn) {
	OkPacket okPacket = new OkPacket();
	okPacket.read(ok);
	ServerConnection source = session.getSource();
	lock.lock();
	try {
		// renumber the raw packet into the client-side sequence
		ok[3] = ++packetId;
		if ((okPacket.serverStatus & StatusFlags.SERVER_MORE_RESULTS_EXISTS) > 0) {
			buffer = source.writeToBuffer(ok, buffer);
		} else {
			HandlerTool.terminateHandlerTree(this);
			if (hasNext) {
				// NOTE(review): flag is set on the parsed packet, but the raw `ok` bytes are what gets written — confirm intended
				okPacket.serverStatus |= StatusFlags.SERVER_MORE_RESULTS_EXISTS;
			}
			buffer = source.writeToBuffer(ok, buffer);
			if (hasNext) {
				source.write(buffer);
				// source.excuteNext(packetId, false);
			} else {
				// source.excuteNext(packetId, false);
				source.write(buffer);
			}
		}
	} finally {
		lock.unlock();
	}
}
/**
 * Forwards a backend error packet to the client and flushes the buffer.
 */
@Override
public void errorResponse(byte[] err, BackendConnection conn) {
	ErrorPacket errPacket = new ErrorPacket();
	errPacket.read(err);
	logger.warn(new StringBuilder().append(conn.toString()).append("|errorResponse()|").append(errPacket.message)
			.toString());
	lock.lock();
	try {
		// NOTE(review): err is written with its original packet id, not ++packetId — confirm intended
		buffer = session.getSource().writeToBuffer(err, buffer);
		// session.getSource().excuteNext(packetId, true);
		session.getSource().write(buffer);
	} finally {
		lock.unlock();
	}
}
/**
 * Writes the result-set header, the field packets and the field-eof packet
 * to the client buffer (not flushed yet; rows follow).
 */
@Override
public void fieldEofResponse(byte[] headernull, List<byte[]> fieldsnull, List<FieldPacket> fieldPackets,
		byte[] eofnull, boolean isLeft, BackendConnection conn) {
	if (terminate.get()) {
		return;
	}
	lock.lock();
	try {
		if (this.isBinary)
			// keep the field metadata: binary rows need it for encoding
			this.fieldPackets = fieldPackets;
		ResultSetHeaderPacket hp = new ResultSetHeaderPacket();
		hp.fieldCount = fieldPackets.size();
		hp.packetId = ++packetId;
		ServerConnection source = session.getSource();
		buffer = hp.write(buffer, source, true);
		for (FieldPacket fp : fieldPackets) {
			fp.packetId = ++packetId;
			buffer = fp.write(buffer, source, true);
		}
		EOFPacket ep = new EOFPacket();
		ep.packetId = ++packetId;
		buffer = ep.write(buffer, source, true);
	} finally {
		lock.unlock();
	}
}
@Override
public boolean rowResponse(byte[] rownull, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
    // Forward one result row to the client, renumbering its sequence id.
    // Returns true to ask upstream to stop sending rows (after terminate).
    if (terminate.get()) {
        return true;
    }
    lock.lock();
    try {
        if (this.isBinary) {
            // Prepared-statement mode: re-encode the row in the binary protocol
            // using the field metadata cached in fieldEofResponse.
            BinaryRowDataPacket binRowPacket = new BinaryRowDataPacket();
            binRowPacket.read(this.fieldPackets, rowPacket);
            binRowPacket.packetId = ++packetId;
            buffer = binRowPacket.write(buffer, session.getSource(), true);
        } else if (rowPacket != null) {
            rowPacket.packetId = ++packetId;
            buffer = rowPacket.write(buffer, session.getSource(), true);
        } else {
            // Raw row bytes: patch the sequence id in place (byte 3) and forward.
            // (Removed the over-scoped `row` alias local from the original.)
            rownull[3] = ++packetId;
            buffer = session.getSource().writeToBuffer(rownull, buffer);
        }
    } finally {
        lock.unlock();
    }
    return false;
}
@Override
public void rowEofResponse(byte[] data, boolean isLeft, BackendConnection conn) {
// End of the result set: send the final EOF to the client, flagged with
// SERVER_MORE_RESULTS_EXISTS when a multi-statement has further results.
if (terminate.get()) {
return;
}
logger.info("--------sql execute end!");
ServerConnection source = session.getSource();
lock.lock();
try {
EOFPacket eofPacket = new EOFPacket();
if (data != null) {
eofPacket.read(data);
}
eofPacket.packetId = ++packetId;
if (hasNext) {
eofPacket.status |= StatusFlags.SERVER_MORE_RESULTS_EXISTS;
}
HandlerTool.terminateHandlerTree(this);
byte[] eof = eofPacket.toBytes();
buffer = source.writeToBuffer(eof, buffer);
// NOTE(review): both branches below perform the same write; they exist
// only for the commented-out excuteNext calls.
if (hasNext) {
source.write(buffer);
// source.excuteNext(packetId, false);
} else {
// source.excuteNext(packetId, false);
source.write(buffer);
}
} finally {
lock.unlock();
}
}
@Override
public void relayPacketResponse(byte[] relayPacket, BackendConnection conn) {
    // Append an intermediate relay packet to the pending client output buffer
    // without flushing it yet.
    lock.lock();
    try {
        ServerConnection client = session.getSource();
        buffer = client.writeToBuffer(relayPacket, buffer);
    } finally {
        lock.unlock();
    }
}
@Override
public void endPacketResponse(byte[] endPacket, BackendConnection conn) {
    // Append the terminating packet to the output buffer and flush everything
    // to the client.
    lock.lock();
    try {
        ServerConnection client = session.getSource();
        buffer = client.writeToBuffer(endPacket, buffer);
        client.write(buffer);
    } finally {
        lock.unlock();
    }
}
public void backendConnError(byte[] errMsg) {
    // Only the first backend error wins; later errors arriving on other
    // connections are ignored once terminate has been set.
    if (!terminate.compareAndSet(false, true)) {
        return;
    }
    ErrorPacket packet = new ErrorPacket();
    packet.errno = ErrorCode.ER_YES;
    packet.message = errMsg;
    HandlerTool.terminateHandlerTree(this);
    backendConnError(packet);
}
protected void backendConnError(ErrorPacket error) {
// Flush or release any pending output, then send an error packet to the
// client. A null argument is replaced by a generic "unknown error".
lock.lock();
try {
recycleResources();
if (error == null) {
error = new ErrorPacket();
error.errno = ErrorCode.ER_YES;
error.message = "unknown error".getBytes();
}
error.packetId = ++packetId;
// session.getSource().excuteNext(packetId, true);
session.getSource().write(error.toBytes());
} finally {
lock.unlock();
}
}
private void recycleResources() {
    // Flush or release the pending output buffer before an error reply is sent.
    if (buffer != null) {
        if (buffer.position() > 0) {
            // Buffer holds data: hand it to the client connection for writing.
            session.getSource().write(buffer);
        } else {
            // Empty buffer: return it straight to the pool.
            session.getSource().recycle(buffer);
        }
        // Drop our reference in BOTH cases: after write() the connection owns
        // the buffer, and keeping a stale reference risks a later double
        // write/recycle. (The original cleared it only on the recycle path.)
        buffer = null;
    }
}
@Override
protected void onTerminate() {
    // In binary (prepared-statement) mode drop the cached field metadata.
    if (isBinary && fieldPackets != null) {
        fieldPackets.clear();
    }
}
}

View File

@@ -0,0 +1,126 @@
package io.mycat.backend.mysql.nio.handler.query.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.lang.StringUtils;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.handler.query.BaseDMLHandler;
import io.mycat.backend.mysql.nio.handler.util.HandlerTool;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.common.field.Field;
import io.mycat.plan.common.item.FieldTypes;
import io.mycat.plan.common.item.Item;
import io.mycat.server.NonBlockingSession;
/**
 * Builds the final SELECT column list sent to the client. Items that are
 * Item_sum have already been produced by the GROUP BY stage; any other item
 * may still have to be evaluated here against the incoming row.
 */
public class SendMakeHandler extends BaseDMLHandler {
	private final ReentrantLock lock = new ReentrantLock();
	/** the select items the user finally asked for */
	private final List<Item> sels;
	/** fields built from the upstream field packets; row values are bound into them */
	private List<Field> sourceFields;
	/** items re-created against sourceFields, one per requested select item */
	private final List<Item> selItems = new ArrayList<Item>();
	/** table alias used when re-labelling output columns */
	private final String tbAlias;

	/**
	 * @param session current session
	 * @param sels the select items the user finally needs
	 */
	public SendMakeHandler(long id, NonBlockingSession session, List<Item> sels, String tableAlias) {
		super(id, session);
		this.sels = sels;
		this.tbAlias = tableAlias;
	}

	@Override
	public HandlerType type() {
		return HandlerType.SENDMAKER;
	}

	@Override
	public void fieldEofResponse(byte[] headernull, List<byte[]> fieldsnull, List<FieldPacket> fieldPackets,
			byte[] eofnull, boolean isLeft, BackendConnection conn) {
		lock.lock();
		try {
			if (terminate.get())
				return;
			this.fieldPackets = fieldPackets;
			this.sourceFields = HandlerTool.createFields(this.fieldPackets);
			// re-create every requested item against the incoming fields
			for (Item sel : sels) {
				Item item = HandlerTool.createItem(sel, this.sourceFields, 0, isAllPushDown(), type(),
						conn.getCharset());
				item.setItemName(sel.getItemName());
				if (sel.getAlias() != null || tbAlias != null) {
					String alias = sel.getAlias();
					// strip the internal FNAF prefix that was added earlier
					if (StringUtils.indexOf(alias, Item.FNAF) == 0)
						alias = StringUtils.substring(alias, Item.FNAF.length());
					item = HandlerTool.createRefItem(item, tbAlias, alias);
				}
				selItems.add(item);
			}
			// build the outgoing field metadata from the re-created items
			List<FieldPacket> outFields = new ArrayList<FieldPacket>(selItems.size());
			for (Item item : selItems) {
				FieldPacket fp = new FieldPacket();
				item.makeField(fp);
				/* keep things compatible for old clients */
				if (fp.type == FieldTypes.MYSQL_TYPE_VARCHAR.numberValue())
					fp.type = FieldTypes.MYSQL_TYPE_VAR_STRING.numberValue();
				outFields.add(fp);
			}
			nextHandler.fieldEofResponse(null, null, outFields, null, this.isLeft, conn);
		} finally {
			lock.unlock();
		}
	}

	@Override
	public boolean rowResponse(byte[] rownull, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
		lock.lock();
		try {
			if (terminate.get())
				return true;
			// bind the row values to the source fields, then evaluate every
			// output item against them
			HandlerTool.initFields(sourceFields, rowPacket.fieldValues);
			RowDataPacket out = new RowDataPacket(selItems.size());
			for (Item item : selItems) {
				out.add(item.getRowPacketByte());
			}
			nextHandler.rowResponse(null, out, this.isLeft, conn);
			return false;
		} finally {
			lock.unlock();
		}
	}

	@Override
	public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
		lock.lock();
		try {
			if (!terminate.get()) {
				nextHandler.rowEofResponse(eof, this.isLeft, conn);
			}
		} finally {
			lock.unlock();
		}
	}

	@Override
	public void onTerminate() {
	}
}

View File

@@ -0,0 +1,176 @@
package io.mycat.backend.mysql.nio.handler.query.impl;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.log4j.Logger;
import io.mycat.MycatServer;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.handler.query.BaseDMLHandler;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler;
import io.mycat.backend.mysql.nio.handler.util.CallBackHandler;
import io.mycat.backend.mysql.nio.handler.util.HandlerTool;
import io.mycat.backend.mysql.store.UnSortedLocalResult;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.common.exception.TempTableException;
import io.mycat.plan.common.field.Field;
import io.mycat.plan.common.item.Item;
import io.mycat.plan.common.meta.TempTable;
import io.mycat.server.NonBlockingSession;
/**
 * Materializes the upstream result set into a temporary table (nest-loop
 * support): rows are buffered here, the distinct values of one source column
 * are collected, and once the set is complete the rows are replayed downstream.
 */
public class TempTableHandler extends BaseDMLHandler {
	private static final Logger logger = Logger.getLogger(TempTableHandler.class);
	private final ReentrantLock lock;
	private final TempTable tempTable;
	// row-count guard: at most maxPartSize * maxConnSize rows are accepted
	private int maxPartSize = 2000;
	private int maxConnSize = 4;
	private int rowCount = 0;
	// invoked once all rows have been stored, before replaying them downstream
	private CallBackHandler tempDoneCallBack;
	// a handler created by this temp handler must also be released by it
	private DMLResponseHandler createdHandler;
	// index of sourceSel inside the incoming field list (-1 until resolved)
	private int sourceSelIndex = -1;
	// the column whose distinct string values are collected into valueSet
	private final Item sourceSel;
	private Field sourceField;
	private Set<String> valueSet;

	public TempTableHandler(long id, NonBlockingSession session, Item sourceSel) {
		super(id, session);
		this.lock = new ReentrantLock();
		this.tempTable = new TempTable();
		this.maxPartSize = MycatServer.getInstance().getConfig().getSystem().getNestLoopRowsSize();
		this.maxConnSize = MycatServer.getInstance().getConfig().getSystem().getNestLoopConnSize();
		this.sourceSel = sourceSel;
		this.valueSet = new HashSet<String>();
	}

	@Override
	public void fieldEofResponse(byte[] headernull, List<byte[]> fieldsnull, List<FieldPacket> fieldPackets,
			byte[] eofnull, boolean isLeft, BackendConnection conn) {
		if (terminate.get()) {
			return;
		}
		lock.lock();
		try {
			// only the first field metadata wins; later arrivals are ignored
			if (this.fieldPackets.isEmpty()) {
				this.fieldPackets = fieldPackets;
				tempTable.setFieldPackets(this.fieldPackets);
				tempTable.setCharset(conn.getCharset());
				tempTable.setRowsStore(new UnSortedLocalResult(fieldPackets.size(), MycatServer.getInstance().getBufferPool(),
						conn.getCharset()).setMemSizeController(session.getOtherBufferMC()));
				List<Field> fields = HandlerTool.createFields(this.fieldPackets);
				sourceSelIndex = HandlerTool.findField(sourceSel, fields, 0);
				if (sourceSelIndex < 0)
					throw new TempTableException( "sourcesel ["+sourceSel.toString()+"] not found in fields" );
				sourceField = fields.get(sourceSelIndex);
				if (nextHandler != null) {
					nextHandler.fieldEofResponse(headernull, fieldsnull, fieldPackets, eofnull, this.isLeft, conn);
				} else {
					throw new TempTableException("unexpected nextHandler is null");
				}
			}
		} finally {
			lock.unlock();
		}
	}

	@Override
	public boolean rowResponse(byte[] rownull, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
		lock.lock();
		try {
			if (terminate.get()) {
				return true;
			}
			// refuse to buffer unbounded result sets
			if (++rowCount > maxPartSize * maxConnSize) {
				String errMessage = "temptable of ["+conn.toString()+"] too much rows,[rows="+rowCount+"]!";
				logger.warn(errMessage);
				throw new TempTableException(errMessage);
			}
			RowDataPacket row = rowPacket;
			if (row == null) {
				// raw bytes only: parse them into a RowDataPacket first
				row = new RowDataPacket(this.fieldPackets.size());
				row.read(rownull);
			}
			tempTable.addRow(row);
			// collect the distinct values of the source column
			sourceField.setPtr(row.getValue(sourceSelIndex));
			valueSet.add(sourceField.valStr());
		} finally {
			lock.unlock();
		}
		return false;
	}

	@Override
	public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
		lock.lock();
		try {
			// original note: the callback operation is still carried out after
			// terminate — though the guard below returns early; verify intent
			if (terminate.get()) {
				return;
			}
			tempTable.dataEof();
			// onTerminate takes the same lock, so the replay below cannot start
			// concurrently with termination
			tempDoneCallBack.call();
			RowDataPacket rp = null;
			while ((rp = tempTable.nextRow()) != null) {
				nextHandler.rowResponse(null, rp, this.isLeft, conn);
			}
			nextHandler.rowEofResponse(eof, this.isLeft, conn);
		} catch (Exception e) {
			logger.warn("rowEof exception!", e);
			throw new TempTableException("rowEof exception!", e);
		} finally {
			lock.unlock();
		}
	}

	@Override
	protected void onTerminate() {
		lock.lock();
		try {
			this.tempTable.close();
			this.valueSet.clear();
			// release any handler this temp handler created itself
			if (createdHandler != null) {
				HandlerTool.terminateHandlerTree(createdHandler);
			}
		} finally {
			lock.unlock();
		}
	}

	public TempTable getTempTable() {
		return tempTable;
	}

	public void setTempDoneCallBack(CallBackHandler tempDoneCallBack) {
		this.tempDoneCallBack = tempDoneCallBack;
	}

	public void setCreatedHandler(DMLResponseHandler createdHandler) {
		this.createdHandler = createdHandler;
	}

	public Set<String> getValueSet() {
		return valueSet;
	}

	public int getMaxPartSize() {
		return maxPartSize;
	}

	@Override
	public HandlerType type() {
		return HandlerType.TEMPTABLE;
	}
}

View File

@@ -0,0 +1,159 @@
package io.mycat.backend.mysql.nio.handler.query.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.log4j.Logger;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.handler.query.BaseDMLHandler;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.common.field.FieldUtil;
import io.mycat.plan.common.item.FieldTypes;
import io.mycat.plan.common.item.Item;
import io.mycat.server.NonBlockingSession;
/**
 * Handler for UNION ALL. For a plain UNION the handler-builder appends a
 * DistinctHandler after this one, so this class only merges the field
 * metadata of every branch and passes rows straight through.
 */
public class UnionHandler extends BaseDMLHandler {
	private static final Logger logger = Logger.getLogger(UnionHandler.class);

	public UnionHandler(long id, NonBlockingSession session, List<Item> sels, int nodecount) {
		super(id, session);
		this.sels = sels;
		this.nodeCount = new AtomicInteger(nodecount);
		this.nodeCountField = new AtomicInteger(nodecount);
	}

	/**
	 * The union may combine several tables; the column names that leave this
	 * handler are taken from the first branch's select list only.
	 */
	private List<Item> sels;
	/** branches that still have to deliver their row EOF */
	private AtomicInteger nodeCount;
	/** branches that still have to deliver their field metadata */
	private AtomicInteger nodeCountField;
	private ReentrantLock lock = new ReentrantLock();
	private Condition conFieldSend = lock.newCondition();
	// guarded by lock; set once field merging has finished (or failed) so that
	// waiting branches can leave the condition wait reliably
	private boolean fieldSendFinished = false;

	@Override
	public HandlerType type() {
		return HandlerType.UNION;
	}

	/**
	 * Field metadata from every upstream branch is treated as plain Fields and
	 * merged column by column; the last branch to arrive sends the merged
	 * metadata downstream and wakes the branches that are waiting.
	 */
	public void fieldEofResponse(byte[] headernull, List<byte[]> fieldsnull, final List<FieldPacket> fieldPackets,
			byte[] eofnull, boolean isLeft, BackendConnection conn) {
		if (terminate.get())
			return;
		lock.lock();
		try {
			if (this.fieldPackets == null || this.fieldPackets.size() == 0) {
				this.fieldPackets = fieldPackets;
			} else {
				this.fieldPackets = unionFieldPackets(this.fieldPackets, fieldPackets);
			}
			if (nodeCountField.decrementAndGet() == 0) {
				// rename the merged packets to the final column names
				checkFieldPackets();
				nextHandler.fieldEofResponse(null, null, this.fieldPackets, null, this.isLeft, conn);
				fieldSendFinished = true;
				conFieldSend.signalAll();
			} else {
				// FIX: wait in a predicate loop — a bare await() can return on a
				// spurious wakeup before the merge is complete, letting a branch
				// send rows ahead of the field packets.
				while (!fieldSendFinished && !terminate.get()) {
					conFieldSend.await();
				}
			}
		} catch (Exception e) {
			String msg = "Union field merge error, " + e.getLocalizedMessage();
			logger.warn(msg, e);
			// release waiters: nothing more will be merged after an error
			fieldSendFinished = true;
			conFieldSend.signalAll();
			session.onQueryError(msg.getBytes());
		} finally {
			lock.unlock();
		}
	}

	private void checkFieldPackets() {
		for (int i = 0; i < sels.size(); i++) {
			FieldPacket fp = this.fieldPackets.get(i);
			Item sel = sels.get(i);
			fp.name = sel.getItemName().getBytes();
			// @fix: a union has no table name; only column names must match
			fp.table = null;
		}
	}

	/**
	 * Merges two field-packet lists column by column: e.g. an int column
	 * unioned with a double column yields a double column.
	 *
	 * @param fieldPackets metadata of the branches merged so far
	 * @param fieldPackets2 metadata of the newly arrived branch
	 */
	private List<FieldPacket> unionFieldPackets(List<FieldPacket> fieldPackets, List<FieldPacket> fieldPackets2) {
		List<FieldPacket> newFps = new ArrayList<FieldPacket>();
		for (int i = 0; i < fieldPackets.size(); i++) {
			FieldPacket fp1 = fieldPackets.get(i);
			FieldPacket fp2 = fieldPackets2.get(i);
			FieldPacket newFp = unionFieldPacket(fp1, fp2);
			newFps.add(newFp);
		}
		return newFps;
	}

	/** Merges a single pair of field packets into their common supertype. */
	private FieldPacket unionFieldPacket(FieldPacket fp1, FieldPacket fp2) {
		FieldPacket union = new FieldPacket();
		union.catalog = fp1.catalog;
		union.charsetIndex = fp1.charsetIndex;
		union.db = fp1.db;
		union.decimals = (byte) Math.max(fp1.decimals, fp2.decimals);
		union.definition = fp1.definition;
		union.flags = fp1.flags | fp2.flags;
		union.length = Math.max(fp1.length, fp2.length);
		FieldTypes field_type1 = FieldTypes.valueOf(fp1.type);
		FieldTypes field_type2 = FieldTypes.valueOf(fp2.type);
		FieldTypes merge_field_type = FieldUtil.field_type_merge(field_type1, field_type2);
		union.type = merge_field_type.numberValue();
		return union;
	}

	/**
	 * Row packets pass straight through; by the time a branch delivers rows its
	 * own fieldEofResponse has already completed, so the merged metadata has
	 * been sent downstream.
	 */
	public boolean rowResponse(byte[] rownull, final RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
		if (terminate.get())
			return true;
		nextHandler.rowResponse(null, rowPacket, this.isLeft, conn);
		return false;
	}

	/** The last branch to reach EOF forwards it downstream. */
	public void rowEofResponse(byte[] data, boolean isLeft, BackendConnection conn) {
		if (terminate.get())
			return;
		if (nodeCount.decrementAndGet() == 0) {
			nextHandler.rowEofResponse(data, this.isLeft, conn);
		}
	}

	@Override
	public void onTerminate() {
		lock.lock();
		try {
			// release any branch still waiting for the field merge; their wait
			// loop re-checks terminate and exits
			this.conFieldSend.signalAll();
		} finally {
			lock.unlock();
		}
	}
}

View File

@@ -0,0 +1,83 @@
package io.mycat.backend.mysql.nio.handler.query.impl;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.handler.query.BaseDMLHandler;
import io.mycat.backend.mysql.nio.handler.util.HandlerTool;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.common.field.Field;
import io.mycat.plan.common.item.Item;
import io.mycat.server.NonBlockingSession;
/**
 * Filters rows against a WHERE condition: the condition is re-bound to the
 * incoming fields once the metadata arrives, then evaluated per row.
 */
public class WhereHandler extends BaseDMLHandler {
	/** the WHERE condition from the query plan (never null) */
	private final Item where;
	/** the condition re-created against the incoming fields; evaluated per row */
	private Item whereItem = null;
	private List<Field> sourceFields;
	// a merge without ORDER BY may deliver rows from several threads concurrently
	private final ReentrantLock lock = new ReentrantLock();

	public WhereHandler(long id, NonBlockingSession session, Item where) {
		super(id, session);
		assert (where != null);
		this.where = where;
	}

	@Override
	public HandlerType type() {
		return HandlerType.WHERE;
	}

	/**
	 * Everything handed down by the upstream handler is treated as plain
	 * Fields; the WHERE item is rebuilt against them here.
	 */
	public void fieldEofResponse(byte[] headernull, List<byte[]> fieldsnull, final List<FieldPacket> fieldPackets,
			byte[] eofnull, boolean isLeft, BackendConnection conn) {
		if (terminate.get())
			return;
		this.fieldPackets = fieldPackets;
		this.sourceFields = HandlerTool.createFields(this.fieldPackets);
		whereItem = HandlerTool.createItem(this.where, this.sourceFields, 0, this.isAllPushDown(), this.type(),
				conn.getCharset());
		nextHandler.fieldEofResponse(null, null, this.fieldPackets, null, this.isLeft, conn);
	}

	/** Forwards only the rows for which the WHERE condition evaluates to true. */
	public boolean rowResponse(byte[] rownull, final RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
		if (terminate.get())
			return true;
		lock.lock();
		try {
			HandlerTool.initFields(this.sourceFields, rowPacket.fieldValues);
			if (whereItem.valBool()) {
				nextHandler.rowResponse(null, rowPacket, this.isLeft, conn);
			}
			return false;
		} finally {
			lock.unlock();
		}
	}

	/** End of rows: simply pass the EOF downstream. */
	public void rowEofResponse(byte[] data, boolean isLeft, BackendConnection conn) {
		if (terminate.get())
			return;
		nextHandler.rowEofResponse(data, isLeft, conn);
	}

	@Override
	public void onTerminate() {
	}
}

View File

@@ -0,0 +1,338 @@
package io.mycat.backend.mysql.nio.handler.query.impl.groupby;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.log4j.Logger;
import io.mycat.MycatServer;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.MySQLConnection;
import io.mycat.backend.mysql.nio.handler.query.OwnThreadDMLHandler;
import io.mycat.backend.mysql.nio.handler.query.impl.groupby.directgroupby.DGRowPacket;
import io.mycat.backend.mysql.nio.handler.query.impl.groupby.directgroupby.GroupByBucket;
import io.mycat.backend.mysql.nio.handler.util.HandlerTool;
import io.mycat.backend.mysql.nio.handler.util.RowDataComparator;
import io.mycat.backend.mysql.store.GroupByLocalResult;
import io.mycat.backend.mysql.store.LocalResult;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.Order;
import io.mycat.plan.common.field.Field;
import io.mycat.plan.common.item.Item;
import io.mycat.plan.common.item.function.sumfunc.Aggregator;
import io.mycat.plan.common.item.function.sumfunc.ItemSum;
import io.mycat.server.NonBlockingSession;
import io.mycat.util.TimeUtil;
/**
 * GROUP BY computed directly through GroupByLocalResult buckets: rows are
 * first grouped in memory inside parallel buckets and the partial results are
 * merged again afterwards. Not supported: 1. sum functions with a DISTINCT
 * constraint, 2. group_concat-like sum functions.
 */
public class DirectGroupByHandler extends OwnThreadDMLHandler {
	private static final Logger logger = Logger.getLogger(DirectGroupByHandler.class);

	/** rows handed from the network threads to the grouping buckets */
	private BlockingQueue<RowDataPacket> queue;
	/* parameters received from the query plan */
	private List<Order> groupBys;
	private List<ItemSum> referedSumFunctions;
	private RowDataComparator cmptor;
	private BufferPool pool;
	private LocalResult groupLocalResult;
	private AtomicBoolean groupStart = new AtomicBoolean(false);
	/* all sum functions, re-created against the source fields */
	private List<Field> sourceFields = new ArrayList<Field>();
	private List<ItemSum> sums = new ArrayList<ItemSum>();
	private AtomicBoolean hasFirstRow = new AtomicBoolean(false);
	/* the ISelectable items pushed down into the local result */
	private List<ItemSum> localResultReferedSums;
	/* the fieldPackets pushed down into the local result */
	private List<FieldPacket> localResultFps;
	private int queueSize;
	/** grouped rows coming back from the buckets, consumed by the own thread */
	private BlockingQueue<RowDataPacket> outQueue;
	int bucketSize = 10;
	private List<GroupByBucket> buckets;

	/**
	 * @param groupBys grouping columns
	 * @param referedSumFunctions every sum function referenced by the query
	 */
	public DirectGroupByHandler(long id, NonBlockingSession session, List<Order> groupBys,
			List<ItemSum> referedSumFunctions) {
		super(id, session);
		this.groupBys = groupBys;
		this.referedSumFunctions = referedSumFunctions;
		this.queueSize = MycatServer.getInstance().getConfig().getSystem().getMergeQueueSize();
		this.queue = new LinkedBlockingQueue<RowDataPacket>(queueSize);
		this.outQueue = new LinkedBlockingQueue<RowDataPacket>(queueSize);
		this.buckets = new ArrayList<GroupByBucket>();
	}

	@Override
	public HandlerType type() {
		return HandlerType.GROUPBY;
	}

	@Override
	public void fieldEofResponse(byte[] headernull, List<byte[]> fieldsnull, final List<FieldPacket> fieldPackets,
			byte[] eofnull, boolean isLeft, BackendConnection conn) {
		if (terminate.get())
			return;
		if (this.pool == null)
			this.pool = MycatServer.getInstance().getBufferPool();
		this.fieldPackets = fieldPackets;
		this.sourceFields = HandlerTool.createFields(this.fieldPackets);
		// rebuild every referenced sum function against the incoming fields
		for (int index = 0; index < referedSumFunctions.size(); index++) {
			ItemSum sumFunc = referedSumFunctions.get(index);
			ItemSum sum = (ItemSum) (HandlerTool.createItem(sumFunc, this.sourceFields, 0, this.isAllPushDown(),
					this.type(), conn.getCharset()));
			sums.add(sum);
		}
		prepare_sum_aggregators(sums, true);
		setup_sum_funcs(sums);
		/* group fieldpackets are front of the origin */
		sendGroupFieldPackets((MySQLConnection) conn);
		// rows in the local result are DGRowPackets: a RowDataPacket extended
		// with the partial aggregate objects
		localResultFps = this.fieldPackets;
		localResultReferedSums = referedSumFunctions;
		cmptor = new RowDataComparator(this.localResultFps, this.groupBys, this.isAllPushDown(), this.type(),
				conn.getCharset());
		groupLocalResult = new GroupByLocalResult(pool, localResultFps.size(), cmptor, localResultFps,
				localResultReferedSums, this.isAllPushDown(), conn.getCharset())
				.setMemSizeController(session.getOtherBufferMC());
		// start the parallel grouping buckets that drain `queue` into `outQueue`
		for (int i = 0; i < bucketSize; i++) {
			RowDataComparator tmpcmptor = new RowDataComparator(this.localResultFps, this.groupBys,
					this.isAllPushDown(), this.type(), conn.getCharset());
			GroupByBucket bucket = new GroupByBucket(queue, outQueue, pool, localResultFps.size(), tmpcmptor,
					localResultFps, localResultReferedSums, this.isAllPushDown(), conn.getCharset());
			bucket.setMemSizeController(session.getOtherBufferMC());
			buckets.add(bucket);
			bucket.start();
		}
		if (this.groupStart.compareAndSet(false, true)) {
			startOwnThread(conn);
		}
	}

	/**
	 * Builds and sends the outgoing field packets: the generated aggregate
	 * columns first, followed by the original field packets.
	 */
	private List<FieldPacket> sendGroupFieldPackets(MySQLConnection conn) {
		List<FieldPacket> newFps = new ArrayList<FieldPacket>();
		for (int i = 0; i < sums.size(); i++) {
			Item sum = sums.get(i);
			FieldPacket tmpfp = new FieldPacket();
			sum.makeField(tmpfp);
			newFps.add(tmpfp);
		}
		newFps.addAll(this.fieldPackets);
		nextHandler.fieldEofResponse(null, null, newFps, null, this.isLeft, conn);
		return newFps;
	}

	@Override
	protected void ownThreadJob(Object... objects) {
		// merge thread: drains the bucket output queue into the final local
		// result, then emits the grouped rows downstream
		MySQLConnection conn = (MySQLConnection) objects[0];
		recordElapsedTime("local group by thread is start:");
		try {
			int eofCount = 0;
			for (;;) {
				RowDataPacket row = outQueue.take();
				if (row.fieldCount == 0) {
					// fieldCount == 0 is the per-bucket EOF marker
					eofCount++;
					if (eofCount == bucketSize)
						break;
					else
						continue;
				}
				groupLocalResult.add(row);
			}
			recordElapsedTime("local group by thread is end:");
			groupLocalResult.done();
			recordElapsedTime("local group by thread is done for read:");
			if (!hasFirstRow.get()) {
				// no input rows at all may still require a result row,
				// e.g. select count(*) on an empty table
				if (HandlerTool.needSendNoRow(this.groupBys))
					sendNoRowGroupRowPacket(conn);
			} else {
				sendGroupRowPacket(conn);
			}
			nextHandler.rowEofResponse(null, this.isLeft, conn);
		} catch (Exception e) {
			String msg = "group by thread is error," + e.getLocalizedMessage();
			logger.warn(msg, e);
			session.onQueryError(msg.getBytes());
		}
	}

	private void recordElapsedTime(String prefix) {
		if (logger.isInfoEnabled()) {
			logger.info(prefix + TimeUtil.currentTimeMillis());
		}
	}

	@Override
	public boolean rowResponse(byte[] rownull, final RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
		logger.debug("rowResponse");
		if (terminate.get())
			return true;
		hasFirstRow.compareAndSet(false, true);
		try {
			DGRowPacket row = new DGRowPacket(rowPacket, this.referedSumFunctions.size());
			queue.put(row);
		} catch (InterruptedException e) {
			// FIX: restore the interrupt status instead of swallowing it
			Thread.currentThread().interrupt();
		}
		return false;
	}

	@Override
	public void rowEofResponse(byte[] data, boolean isLeft, BackendConnection conn) {
		logger.debug("roweof");
		if (terminate.get())
			return;
		try {
			// @bug1042 — one zero-field EOF marker per bucket
			for (int i = 0; i < bucketSize; i++)
				queue.put(new RowDataPacket(0));
		} catch (InterruptedException e) {
			// FIX: restore the interrupt status instead of swallowing it
			Thread.currentThread().interrupt();
		}
	}

	/**
	 * Emits all grouped rows. The aggregate values were already computed per
	 * row inside the local result.
	 */
	private void sendGroupRowPacket(MySQLConnection conn) {
		groupLocalResult.done();
		RowDataPacket row = null;
		List<Field> localFields = HandlerTool.createFields(localResultFps);
		List<ItemSum> sendSums = new ArrayList<ItemSum>();
		for (int i = 0; i < referedSumFunctions.size(); i++) {
			ItemSum selSum = referedSumFunctions.get(i);
			ItemSum sum = (ItemSum) HandlerTool.createItem(selSum, localFields, 0, false, HandlerType.GROUPBY,
					conn.getCharset());
			sendSums.add(sum);
		}
		prepare_sum_aggregators(sendSums, true);
		while ((row = groupLocalResult.next()) != null) {
			if (sendGroupRowPacket(conn, row, sendSums))
				break;
		}
	}

	/**
	 * Emits one grouped row downstream.
	 *
	 * The self-computed aggregate values are placed first: for a fully
	 * pushed-down statement such as {@code select count(*) from t} the packet
	 * order becomes count(*){computed by this handler}, count(*){the
	 * per-node value, not the real total}.
	 *
	 * @return true if downstream asked to stop sending rows
	 */
	private boolean sendGroupRowPacket(MySQLConnection conn, RowDataPacket row, List<ItemSum> sendSums) {
		init_sum_functions(sendSums, row);
		RowDataPacket newRp = new RowDataPacket(this.fieldPackets.size() + sendSums.size());
		for (int i = 0; i < sendSums.size(); i++) {
			byte[] tmpb = sendSums.get(i).getRowPacketByte();
			newRp.add(tmpb);
		}
		for (int i = 0; i < row.fieldCount; i++) {
			newRp.add(row.getValue(i));
		}
		return nextHandler.rowResponse(null, newRp, this.isLeft, conn);
	}

	/**
	 * A result must be sent even with no input rows: e.g.
	 * {@code select count(*) from t2} on an empty table must show 0.
	 */
	private void sendNoRowGroupRowPacket(MySQLConnection conn) {
		RowDataPacket newRp = new RowDataPacket(this.fieldPackets.size() + this.sums.size());
		for (int i = 0; i < this.sums.size(); i++) {
			ItemSum sum = this.sums.get(i);
			sum.noRowsInResult();
			byte[] tmpb = sum.getRowPacketByte();
			newRp.add(tmpb);
		}
		for (int i = 0; i < this.fieldPackets.size(); i++) {
			newRp.add(null);
		}
		nextHandler.rowResponse(null, newRp, this.isLeft, conn);
	}

	/**
	 * see Sql_executor.cc — attaches an aggregator to each sum function.
	 */
	protected void prepare_sum_aggregators(List<ItemSum> funcs, boolean need_distinct) {
		logger.info("prepare_sum_aggregators");
		for (ItemSum func : funcs) {
			func.setAggregator(need_distinct && func.has_with_distinct()
					? Aggregator.AggregatorType.DISTINCT_AGGREGATOR : Aggregator.AggregatorType.SIMPLE_AGGREGATOR,
					null);
		}
	}

	/**
	 * Call ::setup for all sum functions.
	 *
	 * @param funcs sum function list
	 * @return false on success, true on error
	 */
	protected boolean setup_sum_funcs(List<ItemSum> funcs) {
		logger.info("setup_sum_funcs");
		for (ItemSum func : funcs) {
			if (func.aggregatorSetup())
				return true;
		}
		return false;
	}

	/** Resets every sum function from the partial aggregates stored in the row. */
	protected void init_sum_functions(List<ItemSum> funcs, RowDataPacket row) {
		for (int index = 0; index < funcs.size(); index++) {
			ItemSum sum = funcs.get(index);
			Object transObj = ((DGRowPacket) row).getSumTran(index);
			sum.resetAndAdd(row, transObj);
		}
	}

	@Override
	protected void terminateThread() throws Exception {
		// unblock the buckets by feeding them their EOF markers
		this.queue.clear();
		for (int i = 0; i < bucketSize; i++)
			queue.put(new RowDataPacket(0));
	}

	@Override
	protected void recycleResources() {
		this.queue.clear();
		if (this.groupLocalResult != null)
			this.groupLocalResult.close();
		for (LocalResult bucket : buckets) {
			bucket.close();
		}
	}
}

View File

@@ -0,0 +1,261 @@
package io.mycat.backend.mysql.nio.handler.query.impl.groupby;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.log4j.Logger;
import io.mycat.MycatServer;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.MySQLConnection;
import io.mycat.backend.mysql.nio.handler.query.BaseDMLHandler;
import io.mycat.backend.mysql.nio.handler.util.HandlerTool;
import io.mycat.backend.mysql.nio.handler.util.RowDataComparator;
import io.mycat.backend.mysql.store.DistinctLocalResult;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.Order;
import io.mycat.plan.common.external.ResultStore;
import io.mycat.plan.common.field.Field;
import io.mycat.plan.common.item.Item;
import io.mycat.plan.common.item.function.sumfunc.Aggregator.AggregatorType;
import io.mycat.plan.common.item.function.sumfunc.ItemSum;
import io.mycat.server.NonBlockingSession;
/**
 * GROUP BY over input that is already sorted on the group-by columns.
 * Handles: 1. group-by whose rows arrive ordered by the grouping columns;
 * 2. group-by that needs Aggregator_distinct (e.g. count(distinct id)).
 */
public class OrderedGroupByHandler extends BaseDMLHandler {
private static final Logger logger = Logger.getLogger(OrderedGroupByHandler.class);
/* parameters received from the query plan */
private List<Order> groupBys;
private List<ItemSum> referedSumFunctions;
private RowDataComparator cmptor;
/* all aggregate (sum) functions */
private List<Field> sourceFields = new ArrayList<Field>();
private List<ItemSum> sums = new ArrayList<ItemSum>();
/* original row packet of the current group; holds the first row's values */
private RowDataPacket originRp = null;
private boolean hasFirstRow = false;
private BufferPool pool;
private String charset = "UTF-8";
/** merge and sendmaker are both multi-threaded now **/
private ReentrantLock lock = new ReentrantLock();
/* distinct stores, e.g. the one used by count(distinct id) */
private List<ResultStore> distinctStores;
/**
 * @param groupBys grouping columns (input already arrives ordered by them)
 * @param referedSumFunctions every sum function referenced by the query
 */
public OrderedGroupByHandler(long id, NonBlockingSession session, List<Order> groupBys, List<ItemSum> referedSumFunctions) {
super(id, session);
this.groupBys = groupBys;
this.referedSumFunctions = referedSumFunctions;
this.distinctStores = new ArrayList<ResultStore>();
}
@Override
public HandlerType type() {
// Both group-by implementations report the same handler type.
return HandlerType.GROUPBY;
}
@Override
public void fieldEofResponse(byte[] headernull, List<byte[]> fieldsnull, final List<FieldPacket> fieldPackets,
byte[] eofnull, boolean isLeft, BackendConnection conn) {
// Rebuilds every referenced sum function and the group comparator against
// the incoming field metadata, then sends the merged field packets on.
this.charset = conn.getCharset();
if (terminate.get())
return;
if (this.pool == null)
this.pool = MycatServer.getInstance().getBufferPool();
this.fieldPackets = fieldPackets;
this.sourceFields = HandlerTool.createFields(this.fieldPackets);
for (int index = 0; index < referedSumFunctions.size(); index++) {
ItemSum sumFunc = referedSumFunctions.get(index);
ItemSum sum = (ItemSum) (HandlerTool.createItem(sumFunc, this.sourceFields, 0, this.isAllPushDown(),
this.type(), conn.getCharset()));
sums.add(sum);
}
// compares consecutive rows on the group-by columns (input is pre-sorted)
cmptor = new RowDataComparator(this.fieldPackets, this.groupBys, this.isAllPushDown(), this.type(),
conn.getCharset());
prepare_sum_aggregators(sums, this.referedSumFunctions, this.fieldPackets, this.isAllPushDown(), true, (MySQLConnection)conn);
setup_sum_funcs(sums);
sendGroupFieldPackets(conn);
}
/**
 * Builds and sends the outgoing field packets: the generated aggregate
 * columns first, followed by the original field packets.
 */
private void sendGroupFieldPackets(BackendConnection conn) {
List<FieldPacket> newFps = new ArrayList<FieldPacket>();
for (int i = 0; i < sums.size(); i++) {
Item sum = sums.get(i);
FieldPacket tmpfp = new FieldPacket();
sum.makeField(tmpfp);
newFps.add(tmpfp);
}
newFps.addAll(this.fieldPackets);
nextHandler.fieldEofResponse(null, null, newFps, null, this.isLeft, conn);
}
@Override
public boolean rowResponse(byte[] rownull, final RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
// Input is pre-sorted on the group-by columns, so a group ends exactly when
// the comparator sees a row that differs from the group's first row.
logger.debug("rowresponse");
if (terminate.get())
return true;
lock.lock();
try {
if (!hasFirstRow) {
hasFirstRow = true;
originRp = rowPacket;
init_sum_functions(sums, rowPacket);
} else {
// with no group-by columns every row belongs to the single group
boolean sameGroupRow = this.groupBys.size() == 0 ? true : (cmptor.compare(originRp, rowPacket) == 0);
if (!sameGroupRow) {
// group boundary crossed: emit the finished group downstream
sendGroupRowPacket((MySQLConnection)conn);
originRp = rowPacket;
init_sum_functions(sums, rowPacket);
} else {
update_sum_func(sums, rowPacket);
}
}
return false;
} finally {
lock.unlock();
}
}
/**
 * Emits one finished group downstream.
 *
 * The self-computed aggregate values are placed first: for a fully
 * pushed-down statement such as {@code select count(*) from t} the packet
 * order becomes count(*){computed by this handler}, count(*){the per-node
 * value, not the real total}.
 */
private void sendGroupRowPacket(MySQLConnection conn) {
RowDataPacket newRp = new RowDataPacket(this.fieldPackets.size() + this.sums.size());
for (int i = 0; i < this.sums.size(); i++) {
byte[] tmpb = this.sums.get(i).getRowPacketByte();
newRp.add(tmpb);
}
for (int i = 0; i < originRp.fieldCount; i++) {
newRp.add(originRp.getValue(i));
}
nextHandler.rowResponse(null, newRp, this.isLeft, conn);
}
/**
 * End of the pushed-down rows: flush the last open group — or, for an
 * empty input, a synthetic row when the query needs one — then propagate
 * the EOF downstream.
 */
@Override
public void rowEofResponse(byte[] data, boolean isLeft, BackendConnection conn) {
    logger.debug("row eof for orderby.");
    if (terminate.get())
        return;
    if (hasFirstRow) {
        sendGroupRowPacket((MySQLConnection) conn);
    } else if (HandlerTool.needSendNoRow(this.groupBys)) {
        sendNoRowGroupRowPacket((MySQLConnection) conn);
    }
    nextHandler.rowEofResponse(data, this.isLeft, conn);
}
/**
 * Sends the result row for an empty input: e.g. "select count(*) from t2"
 * on an empty table must still produce one row (count shown as 0). The
 * aggregates are told there were no rows; original columns are NULL.
 */
private void sendNoRowGroupRowPacket(MySQLConnection conn) {
    RowDataPacket emptyGroupRow = new RowDataPacket(this.fieldPackets.size() + this.sums.size());
    // @bug 1050
    // sumfuncs are front
    for (ItemSum aggregate : this.sums) {
        aggregate.noRowsInResult();
        emptyGroupRow.add(aggregate.getRowPacketByte());
    }
    // All original columns are NULL for the empty group.
    for (int i = 0; i < this.fieldPackets.size(); i++) {
        emptyGroupRow.add(null);
    }
    originRp = null;
    nextHandler.rowResponse(null, emptyGroupRow, this.isLeft, conn);
}
/**
 * Chooses an aggregator implementation for every aggregate function
 * (modelled after Sql_executor.cc in the MySQL server). Functions
 * declared with DISTINCT get a DistinctLocalResult backing store, sorted
 * on the function's arguments so equal argument tuples can be filtered
 * before aggregation; all others use the simple aggregator.
 */
protected void prepare_sum_aggregators(List<ItemSum> funcs, List<ItemSum> sumfuncs, List<FieldPacket> packets,
        boolean isAllPushDown, boolean need_distinct, MySQLConnection conn) {
    logger.info("prepare_sum_aggregators");
    for (int i = 0; i < funcs.size(); i++) {
        ItemSum func = funcs.get(i);
        ResultStore store = null;
        if (func.has_with_distinct()) {
            // Order by the distinct arguments so equal tuples become
            // adjacent in the local result store.
            ItemSum selFunc = sumfuncs.get(i);
            List<Order> orders = HandlerTool.makeOrder(selFunc.arguments());
            RowDataComparator distinctCmp = new RowDataComparator(packets, orders, isAllPushDown, this.type(),
                    conn.getCharset());
            store = new DistinctLocalResult(pool, packets.size(), distinctCmp, this.charset)
                    .setMemSizeController(session.getOtherBufferMC());
            // Remembered so onTerminate() can release them.
            distinctStores.add(store);
        }
        func.setAggregator(need_distinct && func.has_with_distinct()
                ? AggregatorType.DISTINCT_AGGREGATOR : AggregatorType.SIMPLE_AGGREGATOR,
                store);
    }
}
/**
 * Runs aggregatorSetup on every aggregate function.
 *
 * @param funcs aggregate functions to initialise
 * @return false when every setup succeeded, true on the first failure
 */
protected boolean setup_sum_funcs(List<ItemSum> funcs) {
    logger.info("setup_sum_funcs");
    for (int i = 0; i < funcs.size(); i++) {
        if (funcs.get(i).aggregatorSetup())
            return true;
    }
    return false;
}
/** Resets every aggregate and seeds it with the first row of a new group. */
protected void init_sum_functions(List<ItemSum> funcs, RowDataPacket row) {
    for (int i = 0; i < funcs.size(); i++) {
        funcs.get(i).resetAndAdd(row, null);
    }
}
/** Folds one more row of the current group into every aggregate. */
protected void update_sum_func(List<ItemSum> funcs, RowDataPacket row) {
    for (int i = 0; i < funcs.size(); i++) {
        funcs.get(i).aggregatorAdd(row, null);
    }
}
/** Releases the backing stores created for DISTINCT aggregates. */
@Override
public void onTerminate() {
    for (ResultStore store : distinctStores) {
        store.close();
    }
}
}

View File

@@ -0,0 +1,154 @@
package io.mycat.backend.mysql.nio.handler.query.impl.groupby.directgroupby;
import java.io.Serializable;
import java.nio.ByteBuffer;
import org.apache.commons.lang.SerializationUtils;
import io.mycat.MycatServer;
import io.mycat.backend.mysql.BufferUtil;
import io.mycat.backend.mysql.ByteUtil;
import io.mycat.net.mysql.RowDataPacket;
/**
 * Row packet used while the proxy itself performs GROUP BY: besides the
 * ordinary column values it carries the intermediate aggregate objects,
 * which are serialised in front of the regular fields in the wire format.
 */
public class DGRowPacket extends RowDataPacket {

    /** Number of aggregate slots carried by this row. */
    private int sumSize;

    // Intermediate aggregate objects, one per sum function.
    private Object[] sumTranObjs;

    // Caller-supplied estimate of each aggregate's serialised size.
    private int[] sumByteSizes;

    /** Wraps an existing row, prepending {@code sumSize} aggregate slots. */
    public DGRowPacket(RowDataPacket innerRow, int sumSize) {
        this(innerRow.fieldCount, sumSize);
        this.addAll(innerRow.fieldValues);
    }

    /**
     * @param fieldCount number of original fields
     * @param sumSize    number of aggregate functions to carry
     */
    public DGRowPacket(int fieldCount, int sumSize) {
        super(fieldCount);
        this.sumSize = sumSize;
        sumTranObjs = new Object[sumSize];
        sumByteSizes = new int[sumSize];
    }

    /**
     * Stores an intermediate aggregate object.
     *
     * @param index     aggregate slot, 0-based
     * @param trans     intermediate object; must be Serializable for toBytes()
     * @param transSize estimated serialised size used by calcPacketSize()
     * @throws RuntimeException when index is out of the aggregate range
     */
    public void setSumTran(int index, Object trans, int transSize) {
        if (index >= sumSize)
            throw new RuntimeException("Set sumTran out of sumSize index!");
        else {
            sumTranObjs[index] = trans;
            sumByteSizes[index] = transSize;
        }
    }

    /**
     * Returns the intermediate aggregate object of the given slot.
     *
     * @throws RuntimeException when index is out of the aggregate range
     */
    public Object getSumTran(int index) {
        if (index >= sumSize)
            // fixed copy-pasted message: this is the getter, not the setter
            throw new RuntimeException("Get sumTran out of sumSize index!");
        else {
            return sumTranObjs[index];
        }
    }

    @Override
    /**
     * Returns an estimate (not exact) of the packet size, based on the
     * sizes supplied to setSumTran instead of real serialised lengths.
     */
    public int calcPacketSize() {
        int size = super.calcPacketSize();
        for (int i = 0; i < sumSize; i++) {
            int byteSize = sumByteSizes[i];
            size += ByteUtil.decodeLength(byteSize) + byteSize;
        }
        return size;
    }

    // Exact size: serialises every aggregate object to measure it.
    private int getRealSize() {
        int size = super.calcPacketSize();
        for (int i = 0; i < sumSize; i++) {
            byte[] v = null;
            Object obj = sumTranObjs[i];
            if (obj != null)
                v = SerializationUtils.serialize((Serializable) obj);
            // null and empty are written as a single marker byte
            size += (v == null || v.length == 0) ? 1 : ByteUtil.decodeLength(v);
        }
        return size;
    }

    @Override
    public byte[] toBytes() {
        int size = getRealSize();
        ByteBuffer buffer = MycatServer.getInstance().getBufferPool().allocate(size + packetHeaderSize);
        BufferUtil.writeUB3(buffer, size);
        buffer.put(packetId);
        // Serialised aggregate objects first, length-encoded like columns.
        for (int i = 0; i < this.sumSize; i++) {
            Object obj = sumTranObjs[i];
            byte[] ov = null;
            if (obj != null)
                ov = SerializationUtils.serialize((Serializable) obj);
            if (ov == null) {
                buffer.put(NULL_MARK);
            } else if (ov.length == 0) {
                buffer.put(EMPTY_MARK);
            } else {
                BufferUtil.writeWithLength(buffer, ov);
            }
        }
        // Then the ordinary column values.
        for (int i = 0; i < this.fieldCount; i++) {
            byte[] fv = fieldValues.get(i);
            if (fv == null) {
                buffer.put(NULL_MARK);
            } else if (fv.length == 0) {
                buffer.put(EMPTY_MARK);
            } else {
                BufferUtil.writeWithLength(buffer, fv);
            }
        }
        buffer.flip();
        byte[] data = new byte[buffer.limit()];
        buffer.get(data);
        MycatServer.getInstance().getBufferPool().recycle(buffer);
        return data;
    }

    @Override
    public String getPacketInfo() {
        return "Direct Groupby RowData Packet";
    }

    // Manual smoke test: round-trips two aggregate slots and two columns.
    public static void main(String[] args) {
        DGRowPacket row = new DGRowPacket(2, 2);
        row.add(new byte[1]);
        row.add(new byte[1]);
        row.setSumTran(0, 1, 4);
        row.setSumTran(1, 2.2, 8);
        byte[] bb = row.toBytes();
        RowDataPacket rp = new RowDataPacket(4);
        rp.read(bb);
        DGRowPacket dgRow = new DGRowPacket(2, 2);
        for (int i = 0; i < 2; i++) {
            byte[] b = rp.getValue(i);
            if (b != null) {
                Object obj = SerializationUtils.deserialize(b);
                dgRow.setSumTran(i, obj, 4);
            }
        }
        for (int i = 2; i < 4; i++) {
            dgRow.add(rp.getValue(i));
        }
    }
}

View File

@@ -0,0 +1,60 @@
package io.mycat.backend.mysql.nio.handler.query.impl.groupby.directgroupby;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import io.mycat.backend.mysql.nio.handler.util.RowDataComparator;
import io.mycat.backend.mysql.store.GroupByLocalResult;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.common.item.function.sumfunc.ItemSum;
/**
 * One bucket of a multi-threaded GROUP BY: several buckets aggregate
 * partial results concurrently, and a final pass groups the buckets'
 * outputs into the overall GROUP BY result.
 */
public class GroupByBucket extends GroupByLocalResult {
    // Source queue feeding rows into this bucket.
    private BlockingQueue<RowDataPacket> inData;
    // Queue that receives this bucket's grouped rows.
    private BlockingQueue<RowDataPacket> outData;
    private Thread thread;

    public GroupByBucket(BlockingQueue<RowDataPacket> sourceData, BlockingQueue<RowDataPacket> outData,
            BufferPool pool, int fieldsCount, RowDataComparator groupCmp,
            List<FieldPacket> fieldPackets, List<ItemSum> sumFunctions,
            boolean isAllPushDown, String charset) {
        super(pool, fieldsCount, groupCmp, fieldPackets, sumFunctions,
                isAllPushDown, charset);
        this.inData = sourceData;
        this.outData = outData;
    }

    /**
     * Starts a dedicated worker thread that drains the input queue (a
     * zero-field row marks end-of-input), groups the rows, then pushes the
     * grouped rows — terminated by a zero-field row — onto the output queue.
     */
    public void start() {
        thread = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    for (;;) {
                        RowDataPacket inputRow = inData.take();
                        if (inputRow.fieldCount == 0)
                            break; // end-of-input marker
                        add(inputRow);
                    }
                    done();
                    RowDataPacket grouped;
                    while ((grouped = next()) != null) {
                        outData.put(grouped);
                    }
                    // Signal end-of-output to the consumer.
                    outData.put(new RowDataPacket(0));
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        });
        thread.start();
    }
}

View File

@@ -0,0 +1,390 @@
package io.mycat.backend.mysql.nio.handler.query.impl.join;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.log4j.Logger;
import io.mycat.MycatServer;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.MySQLConnection;
import io.mycat.backend.mysql.nio.handler.query.OwnThreadDMLHandler;
import io.mycat.backend.mysql.nio.handler.util.HandlerTool;
import io.mycat.backend.mysql.nio.handler.util.RowDataComparator;
import io.mycat.backend.mysql.nio.handler.util.TwoTableComparator;
import io.mycat.backend.mysql.store.LocalResult;
import io.mycat.backend.mysql.store.UnSortedLocalResult;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.Order;
import io.mycat.plan.common.field.Field;
import io.mycat.plan.common.item.Item;
import io.mycat.server.NonBlockingSession;
import io.mycat.util.FairLinkedBlockingDeque;
/**
 * Sort-merge join handler. Both inputs must already arrive sorted on the
 * join (merge) columns; each side is packed into per-key row groups
 * (LocalResult) and the two group queues are merged by a dedicated thread.
 *
 * @author chenzifei
 */
public class JoinHandler extends OwnThreadDMLHandler {
    protected Logger logger = Logger.getLogger(JoinHandler.class);

    protected boolean isLeftJoin = false;
    // Per-join-key row groups from each input, in join-column order.
    protected FairLinkedBlockingDeque<LocalResult> leftQueue;
    protected FairLinkedBlockingDeque<LocalResult> rightQueue;
    protected List<Order> leftOrders;
    protected List<Order> rightOrders;
    protected List<FieldPacket> leftFieldPackets;
    protected List<FieldPacket> rightFieldPackets;
    // Set by the first side's field EOF; the second arrival (CAS fails)
    // knows both sides are ready and starts the merge thread.
    private AtomicBoolean fieldSent = new AtomicBoolean(false);
    private BufferPool pool;
    private RowDataComparator leftCmptor;
    private RowDataComparator rightCmptor;
    // @bug 1097
    // only join columns same is not enough
    private List<Field> joinRowFields;
    private Item otherJoinOn;
    private Item otherJoinOnItem;
    private int queueSize;
    // @bug 1208
    private String charset = "UTF-8";
    // prevent multi thread rowresponse
    protected ReentrantLock leftLock = new ReentrantLock();
    protected ReentrantLock rightLock = new ReentrantLock();

    public JoinHandler(long id, NonBlockingSession session, boolean isLeftJoin, List<Order> leftOrder,
            List<Order> rightOrder, Item otherJoinOn) {
        super(id, session);
        this.isLeftJoin = isLeftJoin;
        this.leftOrders = leftOrder;
        this.rightOrders = rightOrder;
        this.queueSize = MycatServer.getInstance().getConfig().getSystem().getJoinQueueSize();
        this.leftQueue = new FairLinkedBlockingDeque<LocalResult>(queueSize);
        this.rightQueue = new FairLinkedBlockingDeque<LocalResult>(queueSize);
        this.leftFieldPackets = new ArrayList<FieldPacket>();
        this.rightFieldPackets = new ArrayList<FieldPacket>();
        this.otherJoinOn = otherJoinOn;
    }

    @Override
    public HandlerType type() {
        return HandlerType.JOIN;
    }

    /**
     * Field EOF from one side: build that side's comparator. When both
     * sides have reported (the second CAS fails), forward the merged
     * left+right field list downstream and start the merge thread.
     */
    @Override
    public void fieldEofResponse(byte[] headernull, List<byte[]> fieldsnull, final List<FieldPacket> fieldPackets,
            byte[] eofnull, boolean isLeft, final BackendConnection conn) {
        if (this.pool == null)
            this.pool = MycatServer.getInstance().getBufferPool();
        if (isLeft) {
            leftFieldPackets = fieldPackets;
            leftCmptor = new RowDataComparator(leftFieldPackets, leftOrders, this.isAllPushDown(), this.type(),
                    conn.getCharset());
        } else {
            rightFieldPackets = fieldPackets;
            rightCmptor = new RowDataComparator(rightFieldPackets, rightOrders, this.isAllPushDown(), this.type(),
                    conn.getCharset());
        }
        if (!fieldSent.compareAndSet(false, true)) {
            this.charset = conn.getCharset();
            List<FieldPacket> newFieldPacket = new ArrayList<FieldPacket>();
            newFieldPacket.addAll(leftFieldPackets);
            newFieldPacket.addAll(rightFieldPackets);
            nextHandler.fieldEofResponse(null, null, newFieldPacket, null, this.isLeft, conn);
            otherJoinOnItem = makeOtherJoinOnItem(newFieldPacket, conn);
            startOwnThread(conn);
        }
    }

    // Builds the item that evaluates the extra (non-equi) join condition
    // against a concatenated left+right row; null when there is none.
    private Item makeOtherJoinOnItem(List<FieldPacket> rowpackets, BackendConnection conn) {
        this.joinRowFields = HandlerTool.createFields(rowpackets);
        if (otherJoinOn == null)
            return null;
        Item ret = HandlerTool.createItem(this.otherJoinOn, this.joinRowFields, 0, this.isAllPushDown(), this.type(),
                conn.getCharset());
        return ret;
    }

    /**
     * Queues one row on its side, grouping consecutive rows with equal
     * join keys. Per-side locks serialise concurrent backend callbacks.
     *
     * @return true to ask the sender to stop
     */
    @Override
    public boolean rowResponse(byte[] rownull, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
        logger.debug("rowresponse");
        if (terminate.get()) {
            return true;
        }
        try {
            if (isLeft) {
                leftLock.lock();
                try {
                    addRowToDeque(rowPacket, leftFieldPackets.size(), leftQueue, leftCmptor);
                } finally {
                    leftLock.unlock();
                }
            } else {
                rightLock.lock();
                try {
                    addRowToDeque(rowPacket, rightFieldPackets.size(), rightQueue, rightCmptor);
                } finally {
                    rightLock.unlock();
                }
            }
        } catch (InterruptedException e) {
            logger.error("join row response exception", e);
            return true;
        }
        return false;
    }

    @Override
    public void rowEofResponse(byte[] data, boolean isLeft, BackendConnection conn) {
        logger.debug("roweof");
        if (terminate.get()) {
            return;
        }
        // A zero-field row is the end-of-input marker on the queues.
        RowDataPacket eofRow = new RowDataPacket(0);
        try {
            if (isLeft) {
                logger.debug("row eof left");
                addRowToDeque(eofRow, leftFieldPackets.size(), leftQueue, leftCmptor);
            } else {
                logger.debug("row eof right");
                addRowToDeque(eofRow, rightFieldPackets.size(), rightQueue, rightCmptor);
            }
        } catch (InterruptedException e) {
            // was e.printStackTrace(): log through the handler's logger for
            // consistency with rowResponse instead of writing to stderr
            logger.error("join row eof exception", e);
        }
    }

    /**
     * The merge loop, run on this handler's own thread: repeatedly compares
     * the head groups of both queues and emits joined rows; for a LEFT JOIN
     * unmatched left groups are padded with NULL right columns.
     */
    @Override
    protected void ownThreadJob(Object... objects) {
        MySQLConnection conn = (MySQLConnection) objects[0];
        LocalResult leftLocal = null, rightLocal = null;
        try {
            Comparator<RowDataPacket> joinCmptor = new TwoTableComparator(leftFieldPackets, rightFieldPackets,
                    leftOrders, rightOrders, this.isAllPushDown(), this.type(), conn.getCharset());
            leftLocal = takeFirst(leftQueue);
            rightLocal = takeFirst(rightQueue);
            while (true) {
                if (terminate.get())
                    return;
                RowDataPacket leftRow = leftLocal.getLastRow();
                RowDataPacket rightRow = rightLocal.getLastRow();
                if (leftRow.fieldCount == 0) {
                    break; // left side exhausted
                }
                if (rightRow.fieldCount == 0) {
                    // Right side exhausted: pad remaining left groups for a
                    // LEFT JOIN, otherwise the join is finished.
                    if (isLeftJoin) {
                        if (connectLeftAndNull(leftLocal, conn))
                            break;
                        leftLocal = takeFirst(leftQueue);
                        continue;
                    } else {
                        break;
                    }
                }
                int rs = joinCmptor.compare(leftRow, rightRow);
                if (rs < 0) {
                    // Left key smaller: no right match can exist.
                    if (isLeftJoin) {
                        if (connectLeftAndNull(leftLocal, conn))
                            break;
                        leftLocal = takeFirst(leftQueue);
                        continue;
                    } else {
                        leftLocal.close();
                        leftLocal = takeFirst(leftQueue);
                    }
                } else if (rs > 0) {
                    // Right key smaller: skip the right group.
                    rightLocal.close();
                    rightLocal = takeFirst(rightQueue);
                } else {
                    // Keys equal: emit the product of the two groups.
                    if (connectLeftAndRight(leftLocal, rightLocal, conn))
                        break;
                    leftLocal = takeFirst(leftQueue);
                    rightLocal = takeFirst(rightQueue);
                }
            }
            nextHandler.rowEofResponse(null, isLeft, conn);
            HandlerTool.terminateHandlerTree(this);
        } catch (Exception e) {
            String msg = "join thread error, " + e.getLocalizedMessage();
            logger.error(msg, e);
            session.onQueryError(msg.getBytes());
        } finally {
            if (leftLocal != null)
                leftLocal.close();
            if (rightLocal != null)
                rightLocal.close();
        }
    }

    // Precondition: called only from the single merge thread. Waits until
    // the head group is known to be complete (it either carries the EOF
    // marker or a second group already exists behind it) before detaching.
    private LocalResult takeFirst(FairLinkedBlockingDeque<LocalResult> deque) throws InterruptedException {
        deque.waitUtilCount(1);
        LocalResult result = deque.peekFirst();
        RowDataPacket lastRow = result.getLastRow();
        if (lastRow.fieldCount == 0)
            return deque.takeFirst();
        else {
            deque.waitUtilCount(2);
            return deque.takeFirst();
        }
    }

    /**
     * Joins one left group with one right group sharing the same join key,
     * applying the extra non-equi condition when present. For a LEFT JOIN a
     * left row that matches no right row after filtering is emitted padded
     * with NULLs (@bug 1097).
     *
     * @return true if the next handler asked to stop, else false
     * @throws Exception
     */
    private boolean connectLeftAndRight(LocalResult leftRows, LocalResult rightRows, MySQLConnection conn)
            throws Exception {
        RowDataPacket leftRow = null;
        RowDataPacket rightRow = null;
        try {
            while ((leftRow = leftRows.next()) != null) {
                // @bug 1097
                int matchCount = 0;
                while ((rightRow = rightRows.next()) != null) {
                    RowDataPacket rowPacket = new RowDataPacket(leftFieldPackets.size() + rightFieldPackets.size());
                    for (byte[] value : leftRow.fieldValues) {
                        rowPacket.add(value);
                    }
                    for (byte[] value : rightRow.fieldValues) {
                        rowPacket.add(value);
                    }
                    if (otherJoinOnItem != null) {
                        HandlerTool.initFields(joinRowFields, rowPacket.fieldValues);
                        if (!otherJoinOnItem.valBool())
                            continue;
                    }
                    matchCount++;
                    if (nextHandler.rowResponse(null, rowPacket, isLeft, conn))
                        return true;
                }
                // @bug 1097
                // condition: exist otherOnItem and no row match other condition
                // send left row and null
                if (matchCount == 0 && isLeftJoin) {
                    RowDataPacket rowPacket = new RowDataPacket(leftFieldPackets.size() + rightFieldPackets.size());
                    for (byte[] value : leftRow.fieldValues) {
                        rowPacket.add(value);
                    }
                    for (int i = 0; i < rightFieldPackets.size(); i++) {
                        rowPacket.add(null);
                    }
                    if (nextHandler.rowResponse(null, rowPacket, isLeft, conn))
                        return true;
                }
                rightRows.reset();
            }
            return false;
        } finally {
            leftRows.close();
            rightRows.close();
        }
    }

    // Emits every row of a left group padded with NULL right columns
    // (LEFT JOIN with no matching right key).
    // Returns true if the next handler asked to stop.
    private boolean connectLeftAndNull(LocalResult leftRows, MySQLConnection conn) throws Exception {
        RowDataPacket leftRow = null;
        try {
            while ((leftRow = leftRows.next()) != null) {
                RowDataPacket rowPacket = new RowDataPacket(leftFieldPackets.size() + rightFieldPackets.size());
                for (byte[] value : leftRow.fieldValues) {
                    rowPacket.add(value);
                }
                for (int i = 0; i < rightFieldPackets.size(); i++) {
                    rowPacket.add(null);
                }
                if (nextHandler.rowResponse(null, rowPacket, isLeft, conn))
                    return true;
            }
            return false;
        } finally {
            leftRows.close();
        }
    }

    // Appends the row to the tail group when it shares that group's join
    // key, otherwise finishes the tail group and opens a new one. A
    // zero-field row (EOF marker) closes its group immediately.
    private void addRowToDeque(RowDataPacket row, int columnCount, FairLinkedBlockingDeque<LocalResult> deque,
            RowDataComparator cmp) throws InterruptedException {
        LocalResult localResult = deque.peekLast();
        if (localResult != null) {
            RowDataPacket lastRow = localResult.getLastRow();
            if (lastRow.fieldCount == 0) {
                // may be the EOF marker already added by terminateThread
                return;
            } else if (row.fieldCount > 0 && cmp.compare(lastRow, row) == 0) {
                localResult.add(row);
                return;
            } else {
                localResult.done();
            }
        }
        LocalResult newLocalResult = new UnSortedLocalResult(columnCount, pool, this.charset)
                .setMemSizeController(session.getJoinBufferMC());
        newLocalResult.add(row);
        if (row.fieldCount == 0)
            newLocalResult.done();
        deque.putLast(newLocalResult);
    }

    /**
     * only for terminate: force an EOF-marker group onto the deque,
     * replacing the tail if necessary, so the merge thread wakes up.
     *
     * @param row
     * @param columnCount
     * @param deque
     * @throws InterruptedException
     */
    private void addEndRowToDeque(RowDataPacket row, int columnCount, FairLinkedBlockingDeque<LocalResult> deque)
            throws InterruptedException {
        LocalResult newLocalResult = new UnSortedLocalResult(columnCount, pool, this.charset)
                .setMemSizeController(session.getJoinBufferMC());
        newLocalResult.add(row);
        newLocalResult.done();
        LocalResult localResult = deque.addOrReplaceLast(newLocalResult);
        if (localResult != null)
            localResult.close();
    }

    @Override
    protected void terminateThread() throws Exception {
        RowDataPacket eofRow = new RowDataPacket(0);
        addEndRowToDeque(eofRow, leftFieldPackets.size(), leftQueue);
        RowDataPacket eofRow2 = new RowDataPacket(0);
        addEndRowToDeque(eofRow2, rightFieldPackets.size(), rightQueue);
    }

    @Override
    protected void recycleResources() {
        clearDeque(this.leftQueue);
        clearDeque(this.rightQueue);
    }

    // Drains a queue and closes every pending group's resources.
    private void clearDeque(FairLinkedBlockingDeque<LocalResult> deque) {
        if (deque == null)
            return;
        LocalResult local = deque.poll();
        while (local != null) {
            local.close();
            local = deque.poll();
        }
    }
}

View File

@@ -0,0 +1,262 @@
package io.mycat.backend.mysql.nio.handler.query.impl.join;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.log4j.Logger;
import io.mycat.MycatServer;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.MySQLConnection;
import io.mycat.backend.mysql.nio.handler.query.OwnThreadDMLHandler;
import io.mycat.backend.mysql.nio.handler.util.HandlerTool;
import io.mycat.backend.mysql.nio.handler.util.RowDataComparator;
import io.mycat.backend.mysql.nio.handler.util.TwoTableComparator;
import io.mycat.backend.mysql.store.LocalResult;
import io.mycat.backend.mysql.store.UnSortedLocalResult;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.Order;
import io.mycat.server.NonBlockingSession;
import io.mycat.util.FairLinkedBlockingDeque;
/**
 * Anti-join (NOT IN) handler: both inputs arrive sorted on the compared
 * columns; left groups whose key never appears on the right side are
 * forwarded, all others are dropped. Only the left side's field packets
 * are sent downstream (the result has the left table's shape).
 */
public class NotInHandler extends OwnThreadDMLHandler {
    private static final Logger logger = Logger.getLogger(NotInHandler.class);

    // Per-key row groups from each input, in compare-column order.
    private FairLinkedBlockingDeque<LocalResult> leftQueue;
    private FairLinkedBlockingDeque<LocalResult> rightQueue;
    private List<Order> leftOrders;
    private List<Order> rightOrders;
    private List<FieldPacket> leftFieldPackets;
    private List<FieldPacket> rightFieldPackets;
    private BufferPool pool;
    private RowDataComparator leftCmptor;
    private RowDataComparator rightCmptor;
    // Set by the first side's field EOF; the second arrival (CAS fails)
    // knows both sides are ready and starts the worker thread.
    private AtomicBoolean fieldSent = new AtomicBoolean(false);
    private int queueSize;
    private String charset = "UTF-8";

    public NotInHandler(long id, NonBlockingSession session, List<Order> leftOrder, List<Order> rightOrder) {
        super(id, session);
        this.leftOrders = leftOrder;
        this.rightOrders = rightOrder;
        this.queueSize = MycatServer.getInstance().getConfig().getSystem().getJoinQueueSize();
        this.leftQueue = new FairLinkedBlockingDeque<LocalResult>(queueSize);
        this.rightQueue = new FairLinkedBlockingDeque<LocalResult>(queueSize);
        this.leftFieldPackets = new ArrayList<FieldPacket>();
        this.rightFieldPackets = new ArrayList<FieldPacket>();
    }

    @Override
    public HandlerType type() {
        return HandlerType.JOIN;
    }

    /**
     * Field EOF from one side: build that side's comparator. When both
     * sides have reported, forward only the LEFT field packets downstream
     * and start the worker thread.
     */
    @Override
    public void fieldEofResponse(byte[] headernull, List<byte[]> fieldsnull, final List<FieldPacket> fieldPackets,
            byte[] eofnull, boolean isLeft, final BackendConnection conn) {
        if (this.pool == null)
            this.pool = MycatServer.getInstance().getBufferPool();
        if (isLeft) {
            leftFieldPackets = fieldPackets;
            leftCmptor = new RowDataComparator(leftFieldPackets, leftOrders, this.isAllPushDown(), this.type(),
                    conn.getCharset());
        } else {
            rightFieldPackets = fieldPackets;
            rightCmptor = new RowDataComparator(rightFieldPackets, rightOrders, this.isAllPushDown(), this.type(),
                    conn.getCharset());
        }
        if (!fieldSent.compareAndSet(false, true)) {
            this.charset = conn.getCharset();
            nextHandler.fieldEofResponse(null, null, leftFieldPackets, null, this.isLeft, conn);
            startOwnThread(conn);
        }
    }

    /**
     * Queues one row on its side, grouping consecutive rows with equal
     * compare keys.
     *
     * @return true to ask the sender to stop
     */
    @Override
    public boolean rowResponse(byte[] rownull, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
        logger.debug("rowresponse");
        if (terminate.get()) {
            return true;
        }
        try {
            if (isLeft) {
                addRowToDeque(rowPacket, leftFieldPackets.size(), leftQueue, leftCmptor);
            } else {
                addRowToDeque(rowPacket, rightFieldPackets.size(), rightQueue, rightCmptor);
            }
        } catch (InterruptedException e) {
            logger.warn("not in row exception", e);
            return true;
        }
        return false;
    }

    @Override
    public void rowEofResponse(byte[] data, boolean isLeft, BackendConnection conn) {
        logger.info("roweof");
        if (terminate.get()) {
            return;
        }
        // A zero-field row is the end-of-input marker on the queues.
        RowDataPacket eofRow = new RowDataPacket(0);
        try {
            if (isLeft) {
                addRowToDeque(eofRow, leftFieldPackets.size(), leftQueue, leftCmptor);
            } else {
                addRowToDeque(eofRow, rightFieldPackets.size(), rightQueue, rightCmptor);
            }
        } catch (Exception e) {
            logger.warn("not in rowEof exception", e);
        }
    }

    /**
     * The compare loop, run on this handler's own thread: walks both sorted
     * group queues; a left group with no equal right key is forwarded,
     * equal keys drop the left group (NOT IN semantics).
     */
    @Override
    protected void ownThreadJob(Object... objects) {
        MySQLConnection conn = (MySQLConnection) objects[0];
        LocalResult leftLocal = null, rightLocal = null;
        try {
            Comparator<RowDataPacket> notInCmptor = new TwoTableComparator(leftFieldPackets, rightFieldPackets,
                    leftOrders, rightOrders, this.isAllPushDown(), this.type(), conn.getCharset());
            leftLocal = takeFirst(leftQueue);
            rightLocal = takeFirst(rightQueue);
            while (true) {
                // added for consistency with JoinHandler.ownThreadJob: stop
                // promptly instead of emitting a rowEof after termination
                if (terminate.get())
                    return;
                RowDataPacket leftRow = leftLocal.getLastRow();
                RowDataPacket rightRow = rightLocal.getLastRow();
                if (leftRow.fieldCount == 0) {
                    break; // left side exhausted
                }
                if (rightRow.fieldCount == 0) {
                    // Right exhausted: every remaining left key is "not in".
                    sendLeft(leftLocal, conn);
                    leftLocal.close();
                    leftLocal = takeFirst(leftQueue);
                    continue;
                }
                int rs = notInCmptor.compare(leftRow, rightRow);
                if (rs < 0) {
                    // Left key smaller: it cannot appear on the right.
                    sendLeft(leftLocal, conn);
                    leftLocal.close();
                    leftLocal = takeFirst(leftQueue);
                    continue;
                } else if (rs > 0) {
                    rightLocal.close();
                    rightLocal = takeFirst(rightQueue);
                } else {
                    // because not in, if equal left should move to next value
                    leftLocal.close();
                    rightLocal.close();
                    leftLocal = takeFirst(leftQueue);
                    rightLocal = takeFirst(rightQueue);
                }
            }
            nextHandler.rowEofResponse(null, isLeft, conn);
            HandlerTool.terminateHandlerTree(this);
        } catch (Exception e) {
            String msg = "notIn thread error, " + e.getLocalizedMessage();
            logger.warn(msg, e);
            session.onQueryError(msg.getBytes());
        } finally {
            if (leftLocal != null)
                leftLocal.close();
            if (rightLocal != null)
                rightLocal.close();
        }
    }

    // Precondition: called only from the single worker thread. Waits until
    // the head group is known to be complete (it either carries the EOF
    // marker or a second group exists behind it) before detaching it.
    private LocalResult takeFirst(FairLinkedBlockingDeque<LocalResult> deque) throws InterruptedException {
        deque.waitUtilCount(1);
        LocalResult result = deque.peekFirst();
        RowDataPacket lastRow = result.getLastRow();
        if (lastRow.fieldCount == 0)
            return deque.takeFirst();
        else {
            deque.waitUtilCount(2);
            return deque.takeFirst();
        }
    }

    // Forwards every row of a "not in" left group downstream.
    // NOTE(review): the next handler's stop request (boolean return of
    // rowResponse) is ignored here, unlike in JoinHandler — confirm intended.
    private void sendLeft(LocalResult leftRows, MySQLConnection conn) throws Exception {
        RowDataPacket leftRow = null;
        while ((leftRow = leftRows.next()) != null) {
            nextHandler.rowResponse(null, leftRow, isLeft, conn);
        }
    }

    // Appends the row to the tail group when it shares that group's key,
    // otherwise finishes the tail group and opens a new one. A zero-field
    // row (EOF marker) closes its group immediately.
    private void addRowToDeque(RowDataPacket row, int columnCount, FairLinkedBlockingDeque<LocalResult> deque,
            RowDataComparator cmp) throws InterruptedException {
        LocalResult localResult = deque.peekLast();
        if (localResult != null) {
            RowDataPacket lastRow = localResult.getLastRow();
            if (lastRow.fieldCount == 0) {
                // may be the EOF marker already added by terminateThread
                return;
            } else if (row.fieldCount > 0 && cmp.compare(lastRow, row) == 0) {
                localResult.add(row);
                return;
            } else {
                localResult.done();
            }
        }
        LocalResult newLocalResult = new UnSortedLocalResult(columnCount, pool, this.charset)
                .setMemSizeController(session.getJoinBufferMC());
        newLocalResult.add(row);
        if (row.fieldCount == 0)
            newLocalResult.done();
        deque.putLast(newLocalResult);
    }

    /**
     * only for terminate: force an EOF-marker group onto the deque,
     * replacing the tail if necessary, so the worker thread wakes up.
     *
     * @param row
     * @param columnCount
     * @param deque
     * @throws InterruptedException
     */
    private void addEndRowToDeque(RowDataPacket row, int columnCount, FairLinkedBlockingDeque<LocalResult> deque)
            throws InterruptedException {
        LocalResult newLocalResult = new UnSortedLocalResult(columnCount, pool, this.charset)
                .setMemSizeController(session.getJoinBufferMC());
        newLocalResult.add(row);
        newLocalResult.done();
        LocalResult localResult = deque.addOrReplaceLast(newLocalResult);
        if (localResult != null)
            localResult.close();
    }

    @Override
    protected void terminateThread() throws Exception {
        RowDataPacket eofRow = new RowDataPacket(0);
        addEndRowToDeque(eofRow, leftFieldPackets.size(), leftQueue);
        RowDataPacket eofRow2 = new RowDataPacket(0);
        addEndRowToDeque(eofRow2, rightFieldPackets.size(), rightQueue);
    }

    @Override
    protected void recycleResources() {
        clearDeque(this.leftQueue);
        clearDeque(this.rightQueue);
    }

    // Drains a queue and closes every pending group's resources.
    private void clearDeque(FairLinkedBlockingDeque<LocalResult> deque) {
        if (deque == null)
            return;
        LocalResult local = deque.poll();
        while (local != null) {
            local.close();
            local = deque.poll();
        }
    }
}

View File

@@ -8,6 +8,8 @@ import org.slf4j.LoggerFactory;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.MySQLConnection;
import io.mycat.backend.mysql.nio.handler.MultiNodeHandler;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.route.RouteResultsetNode;
import io.mycat.server.NonBlockingSession;
@@ -39,7 +41,7 @@ public abstract class AbstractCommitNodesHandler extends MultiNodeHandler imple
}
@Override
public void rowEofResponse(byte[] eof, BackendConnection conn) {
public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
LOGGER.error(new StringBuilder().append("unexpected packet for ")
.append(conn).append(" bound by ").append(session.getSource())
.append(": field's eof").toString());
@@ -51,17 +53,19 @@ public abstract class AbstractCommitNodesHandler extends MultiNodeHandler imple
}
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields, byte[] eof, BackendConnection conn) {
public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
boolean isLeft, BackendConnection conn) {
LOGGER.error(new StringBuilder().append("unexpected packet for ")
.append(conn).append(" bound by ").append(session.getSource())
.append(": field's eof").toString());
}
@Override
public void rowResponse(byte[] row, BackendConnection conn) {
public boolean rowResponse(byte[] row, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
LOGGER.error(new StringBuilder().append("unexpected packet for ")
.append(conn).append(" bound by ").append(session.getSource())
.append(": field's eof").toString());
return false;
}
@Override

View File

@@ -8,6 +8,8 @@ import org.slf4j.LoggerFactory;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.mysql.nio.MySQLConnection;
import io.mycat.backend.mysql.nio.handler.MultiNodeHandler;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.route.RouteResultsetNode;
import io.mycat.server.NonBlockingSession;
@@ -39,7 +41,7 @@ public abstract class AbstractRollbackNodesHandler extends MultiNodeHandler impl
}
@Override
public void rowEofResponse(byte[] eof, BackendConnection conn) {
public void rowEofResponse(byte[] eof, boolean isLeft, BackendConnection conn) {
LOGGER.error(new StringBuilder().append("unexpected packet for ")
.append(conn).append(" bound by ").append(session.getSource())
.append(": field's eof").toString());
@@ -51,17 +53,19 @@ public abstract class AbstractRollbackNodesHandler extends MultiNodeHandler impl
}
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields, byte[] eof, BackendConnection conn) {
public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPackets, byte[] eof,
boolean isLeft, BackendConnection conn) {
LOGGER.error(new StringBuilder().append("unexpected packet for ")
.append(conn).append(" bound by ").append(session.getSource())
.append(": field's eof").toString());
}
@Override
public void rowResponse(byte[] row, BackendConnection conn) {
public boolean rowResponse(byte[] row, RowDataPacket rowPacket, boolean isLeft, BackendConnection conn) {
LOGGER.error(new StringBuilder().append("unexpected packet for ")
.append(conn).append(" bound by ").append(session.getSource())
.append(": field's eof").toString());
return false;
}
@Override

View File

@@ -0,0 +1,337 @@
package io.mycat.backend.mysql.nio.handler.util;
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.Iterator;
import java.util.NoSuchElementException;
import io.mycat.util.MinHeap;
@SuppressWarnings("unchecked")
public class ArrayMinHeap<E> implements MinHeap<E> {
private static final int DEFAULT_INITIAL_CAPACITY = 3;
private Object[] heap;
private int size = 0;
private Comparator<E> comparator;
/** Creates a heap with the default initial capacity (3). */
public ArrayMinHeap(Comparator<E> comparator) {
    this(DEFAULT_INITIAL_CAPACITY, comparator);
}
/**
 * @param initialCapacity initial backing-array size, must be >= 1
 * @param comparator      ordering used for all heap operations
 * @throws IllegalArgumentException if initialCapacity < 1
 */
public ArrayMinHeap(int initialCapacity, Comparator<E> comparator) {
    if (initialCapacity < 1)
        throw new IllegalArgumentException();
    this.heap = new Object[initialCapacity];
    this.comparator = comparator;
}
/**
 * Linear scan for an element that compares equal (comparator result 0)
 * to e; returns the stored element, or null when absent or e is null.
 */
public E find(E e) {
    if (e == null)
        return null;
    int idx = 0;
    while (idx < size) {
        E candidate = (E) heap[idx];
        if (comparator.compare(e, candidate) == 0)
            return candidate;
        idx++;
    }
    return null;
}
/** Number of elements currently stored. */
@Override
public int size() {
    return this.size;
}
/** True when the heap holds no elements. */
@Override
public boolean isEmpty() {
    return size() == 0;
}
/** Membership test; delegates to a linear indexOf scan. */
@Override
public boolean contains(Object o) {
    return indexOf(o) != -1;
}
/**
 * Returns an iterator that visits elements in internal array order,
 * not in priority order.
 */
@Override
public Iterator<E> iterator() {
    return new Itr();
}
// Iterator over the backing array; the removal bookkeeping (forgetMeNot)
// appears adapted from java.util.PriorityQueue's iterator — verify.
private final class Itr implements Iterator<E> {
    /**
     * Index (into queue array) of element to be returned by subsequent call
     * to next.
     */
    private int cursor = 0;

    /**
     * Index of element returned by most recent call to next, unless that
     * element came from the forgetMeNot list. Set to -1 if element is
     * deleted by a call to remove.
     */
    private int lastRet = -1;

    /**
     * A queue of elements that were moved from the unvisited portion of the
     * heap into the visited portion as a result of "unlucky" element
     * removals during the iteration. (Unlucky element removals are those
     * that require a siftup instead of a siftdown.) We must visit all of
     * the elements in this list to complete the iteration. We do this after
     * we've completed the "normal" iteration.
     *
     * We expect that most iterations, even those involving removals, will
     * not need to store elements in this field.
     */
    private ArrayDeque<E> forgetMeNot = null;

    /**
     * Element returned by the most recent call to next iff that element was
     * drawn from the forgetMeNot list.
     */
    private E lastRetElt = null;

    public boolean hasNext() {
        return cursor < size
                || (forgetMeNot != null && !forgetMeNot.isEmpty());
    }

    public E next() {
        // Normal pass over the array first, then drain forgetMeNot.
        if (cursor < size)
            return (E) heap[lastRet = cursor++];
        if (forgetMeNot != null) {
            lastRet = -1;
            lastRetElt = forgetMeNot.poll();
            if (lastRetElt != null)
                return lastRetElt;
        }
        throw new NoSuchElementException();
    }

    public void remove() {
        if (lastRet != -1) {
            // removeAt returns the element that was moved into the visited
            // region (if any); it must be revisited later via forgetMeNot.
            E moved = ArrayMinHeap.this.removeAt(lastRet);
            lastRet = -1;
            if (moved == null)
                cursor--;
            else {
                if (forgetMeNot == null)
                    forgetMeNot = new ArrayDeque<E>();
                forgetMeNot.add(moved);
            }
        } else if (lastRetElt != null) {
            ArrayMinHeap.this.removeEq(lastRetElt);
            lastRetElt = null;
        } else {
            throw new IllegalStateException();
        }
    }
}
@Override
public Object[] toArray() {
return Arrays.copyOf(heap, size);
}
@Override
public <T> T[] toArray(T[] a) {
if (a.length < size)
// Make a new array of a's runtime type, but my contents:
return (T[]) Arrays.copyOf(heap, size, a.getClass());
System.arraycopy(heap, 0, a, 0, size);
if (a.length > size)
a[size] = null;
return a;
}
private void grow(int minCapacity) {
if (minCapacity < 0) // overflow
throw new OutOfMemoryError();
int oldCapacity = heap.length;
// Double size if small; else grow by 50%
int newCapacity = ((oldCapacity < 64) ? ((oldCapacity + 1) * 2)
: ((oldCapacity / 2) * 3));
if (newCapacity < 0) // overflow
newCapacity = Integer.MAX_VALUE;
if (newCapacity < minCapacity)
newCapacity = minCapacity;
heap = Arrays.copyOf(heap, newCapacity);
}
@Override
public boolean add(E e) {
return offer(e);
}
@Override
public void replaceTop(E e) {
if(size == 0)
return;
siftDown(0, e);
}
public boolean offer(E e) {
if (e == null)
throw new NullPointerException();
int i = size;
if (i >= heap.length)
grow(i + 1);
size = i + 1;
if (i == 0)
heap[0] = e;
else
siftUp(i, e);
return true;
}
private void siftUp(int k, E x) {
while (k > 0) {
int parant = (k - 1) >>> 1;
Object e = heap[parant];
if (comparator.compare(x, (E) e) >= 0)
break;
heap[k] = e;
k = parant;
}
heap[k] = x;
}
private int indexOf(Object o) {
if (o != null) {
for (int i = 0; i < size; i++)
if (o.equals(heap[i]))
return i;
}
return -1;
}
@Override
public E peak() {
if (size == 0)
return null;
return (E) heap[0];
}
@Override
public E poll() {
if (size == 0)
return null;
int s = --size;
E result = (E) heap[0];
E x = (E) heap[s];
heap[s] = null;
if (s != 0)
siftDown(0, x);
return result;
}
private E removeAt(int i) {
assert i >= 0 && i < size;
int s = --size;
if (s == i) // removed last element
heap[i] = null;
else {
E moved = (E) heap[s];
heap[s] = null;
siftDown(i, moved);
if (heap[i] == moved) {
siftUp(i, moved);
if (heap[i] != moved)
return moved;
}
}
return null;
}
private void siftDown(int k, E x) {
// the last element's parent index
int half = size >>> 1;
while (k < half) {
int child = (k << 1) + 1;
Object c = heap[child];
int right = child + 1;
if (right < size && comparator.compare((E) c, (E) heap[right]) > 0)
c = heap[child = right];
if (comparator.compare(x, (E) c) <= 0)
break;
heap[k] = c;
k = child;
}
heap[k] = x;
}
boolean removeEq(Object o) {
for (int i = 0; i < size; i++) {
if (o == heap[i]) {
removeAt(i);
return true;
}
}
return false;
}
@Override
public boolean remove(Object o) {
int i = indexOf(o);
if (i == -1)
return false;
else {
removeAt(i);
return true;
}
}
@Override
public boolean containsAll(Collection<?> c) {
Iterator<?> e = c.iterator();
while (e.hasNext())
if (!contains(e.next()))
return false;
return true;
}
@Override
public boolean addAll(Collection<? extends E> c) {
boolean modified = false;
Iterator<? extends E> e = c.iterator();
while (e.hasNext()) {
if (add(e.next()))
modified = true;
}
return modified;
}
@Override
public boolean removeAll(Collection<?> c) {
boolean modified = false;
Iterator<?> e = iterator();
while (e.hasNext()) {
if (c.contains(e.next())) {
e.remove();
modified = true;
}
}
return modified;
}
@Override
public boolean retainAll(Collection<?> c) {
boolean modified = false;
Iterator<E> e = iterator();
while (e.hasNext()) {
if (!c.contains(e.next())) {
e.remove();
modified = true;
}
}
return modified;
}
@Override
public void clear() {
while (poll() != null)
;
}
}

View File

@@ -0,0 +1,6 @@
package io.mycat.backend.mysql.nio.handler.util;
/**
 * A deferred piece of work to be invoked by a handler once some condition is
 * met (a callback hook). Implementations may throw; the caller decides how
 * the failure is handled.
 */
public interface CallBackHandler {
    /**
     * Executes the callback.
     *
     * @throws Exception if the callback body fails
     */
    void call() throws Exception;
}

View File

@@ -0,0 +1,339 @@
package io.mycat.backend.mysql.nio.handler.util;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.log4j.Logger;
import com.alibaba.druid.sql.ast.SQLOrderingSpecification;
import io.mycat.backend.mysql.nio.handler.builder.sqlvisitor.MysqlVisitor;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler.HandlerType;
import io.mycat.config.ErrorCode;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.plan.Order;
import io.mycat.plan.common.exception.MySQLOutPutException;
import io.mycat.plan.common.field.Field;
import io.mycat.plan.common.field.FieldUtil;
import io.mycat.plan.common.item.Item;
import io.mycat.plan.common.item.ItemField;
import io.mycat.plan.common.item.ItemInt;
import io.mycat.plan.common.item.ItemRef;
import io.mycat.plan.common.item.function.ItemFunc;
import io.mycat.plan.common.item.function.sumfunc.ItemSum;
/**
 * Static helpers shared by the query handler chain: terminating a handler
 * tree, converting field packets into {@link Field} holders, and building
 * {@link Item} evaluation trees that read their values out of those holders.
 */
public class HandlerTool {

    /**
     * Terminates the handler tree rooted at {@code node}: for every merge
     * branch, each handler from the branch head up to (but excluding) the root
     * is terminated, then the root itself. Any exception is logged and
     * swallowed (best-effort shutdown).
     */
    public static void terminateHandlerTree(final DMLResponseHandler node) {
        try {
            if (node == null)
                return;
            Set<DMLResponseHandler> merges = node.getMerges();
            for (DMLResponseHandler merge : merges) {
                DMLResponseHandler currentHandler = merge;
                while (currentHandler != node) {
                    currentHandler.terminate();
                    currentHandler = currentHandler.getNextHandler();
                }
            }
            node.terminate();
        } catch (Exception e) {
            Logger.getLogger(HandlerTool.class).error("terminate node exception:", e);
        }
    }

    /** Builds a {@link Field} value-holder from a field packet's metadata. */
    public static Field createField(FieldPacket fp) {
        return Field.getFieldItem(fp.name, fp.table, fp.type, fp.charsetIndex, (int) fp.length, fp.decimals,
                fp.flags);
    }

    /** Builds one {@link Field} per field packet, preserving order. */
    public static List<Field> createFields(List<FieldPacket> fps) {
        List<Field> ret = new ArrayList<Field>(fps.size());
        for (FieldPacket fp : fps) {
            ret.add(createField(fp));
        }
        return ret;
    }

    /**
     * Creates an Item whose internals point at objects inside {@code fields},
     * so that when a field's current value changes the item's computed value
     * changes with it.
     *
     * @param sel         the select item to rebuild
     * @param fields      value holders for the current row
     * @param startIndex  first field index to search from
     * @param allPushDown whether the whole expression was pushed down
     * @param type        the handler stage requesting the item
     * @param charset     connection charset
     * @return the rebuilt item, never null
     */
    public static Item createItem(Item sel, List<Field> fields, int startIndex, boolean allPushDown, HandlerType type,
            String charset) {
        Item ret = null;
        if (sel.basicConstItem())
            return sel;
        switch (sel.type()) {
        case FUNC_ITEM:
        case COND_ITEM:
            ItemFunc func = (ItemFunc) sel;
            if (func.getPushDownName() == null || func.getPushDownName().length() == 0) {
                // not pushed down: evaluate the function locally
                ret = createFunctionItem(func, fields, startIndex, allPushDown, type, charset);
            } else {
                // pushed down: read its already-computed column value
                ret = createFieldItem(func, fields, startIndex);
            }
            break;
        case SUM_FUNC_ITEM:
            ItemSum sumFunc = (ItemSum) sel;
            if (type != HandlerType.GROUPBY) {
                ret = createFieldItem(sumFunc, fields, startIndex);
            } else if (sumFunc.getPushDownName() == null || sumFunc.getPushDownName().length() == 0) {
                ret = createSumItem(sumFunc, fields, startIndex, allPushDown, type, charset);
            } else {
                ret = createPushDownGroupBy(sumFunc, fields, startIndex);
            }
            break;
        default:
            ret = createFieldItem(sel, fields, startIndex);
        }
        if (ret == null)
            throw new MySQLOutPutException(ErrorCode.ER_QUERYHANDLER, "", "item not found:" + sel);
        if (ret.getItemName() == null)
            ret.setItemName(sel.getPushDownName() == null ? sel.getItemName() : sel.getPushDownName());
        ret.fixFields();
        return ret;
    }

    /** Wraps {@code ref} in an {@link ItemRef} carrying table/field aliases. */
    public static Item createRefItem(Item ref, String tbAlias, String fieldAlias) {
        return new ItemRef(ref, tbAlias, fieldAlias);
    }

    /**
     * Loads the raw bytes of one row into the field value-holders.
     */
    public static void initFields(List<Field> fields, List<byte[]> bs) {
        FieldUtil.initFields(fields, bs);
    }

    /** Evaluates each item against the currently-loaded row and collects the bytes. */
    public static List<byte[]> getItemListBytes(List<Item> items) {
        List<byte[]> ret = new ArrayList<byte[]>(items.size());
        for (Item item : items) {
            ret.add(item.getRowPacketByte());
        }
        return ret;
    }

    public static ItemField createItemField(FieldPacket fp) {
        return new ItemField(createField(fp));
    }

    /*
     * ------------------------------- helper methods ------------------------
     */

    /**
     * Rebuilds a pushed-down aggregate from its partial results:
     * count(id) = SUM over partial count(id); sum(id) = SUM over partial
     * sum(id); avg(id) = SUM(partial sums) / SUM(partial counts); the
     * variance family additionally needs the partial variance.
     *
     * @param sumfun the aggregate function
     * @param fields value holders for the current row
     * @return the rebuilt aggregate item
     */
    protected static Item createPushDownGroupBy(ItemSum sumfun, List<Field> fields, int startIndex) {
        String funName = sumfun.funcName().toUpperCase();
        String colName = sumfun.getItemName();
        String pdName = sumfun.getPushDownName();
        Item ret = null;
        List<Item> args = new ArrayList<Item>();
        if (funName.equalsIgnoreCase("AVG")) {
            String colNameSum = colName.replace(funName + "(", "SUM(");
            String colNameCount = colName.replace(funName + "(", "COUNT(");
            Item sumfunSum = new ItemField(null, null, colNameSum);
            sumfunSum.setPushDownName(
                    pdName.replace(MysqlVisitor.getMadeAggAlias(funName), MysqlVisitor.getMadeAggAlias("SUM")));
            Item sumfunCount = new ItemField(null, null, colNameCount);
            sumfunCount.setPushDownName(
                    pdName.replace(MysqlVisitor.getMadeAggAlias(funName), MysqlVisitor.getMadeAggAlias("COUNT")));
            Item itemSum = createFieldItem(sumfunSum, fields, startIndex);
            Item itemCount = createFieldItem(sumfunCount, fields, startIndex);
            args.add(itemSum);
            args.add(itemCount);
        } else if (funName.equalsIgnoreCase("STD") || funName.equalsIgnoreCase("STDDEV_POP")
                || funName.equalsIgnoreCase("STDDEV_SAMP") || funName.equalsIgnoreCase("STDDEV")
                || funName.equalsIgnoreCase("VAR_POP") || funName.equalsIgnoreCase("VAR_SAMP")
                || funName.equalsIgnoreCase("VARIANCE")) {
            // variance push-down expects v[0]:count, v[1]:sum, v[2]:partial variance
            String colNameCount = colName.replace(funName + "(", "COUNT(");
            String colNameSum = colName.replace(funName + "(", "SUM(");
            String colNameVar = colName.replace(funName + "(", "VARIANCE(");
            Item sumfunCount = new ItemField(null, null, colNameCount);
            sumfunCount.setPushDownName(
                    pdName.replace(MysqlVisitor.getMadeAggAlias(funName), MysqlVisitor.getMadeAggAlias("COUNT")));
            Item sumfunSum = new ItemField(null, null, colNameSum);
            sumfunSum.setPushDownName(
                    pdName.replace(MysqlVisitor.getMadeAggAlias(funName), MysqlVisitor.getMadeAggAlias("SUM")));
            Item sumfunVar = new ItemField(null, null, colNameVar);
            sumfunVar.setPushDownName(
                    pdName.replace(MysqlVisitor.getMadeAggAlias(funName), MysqlVisitor.getMadeAggAlias("VARIANCE")));
            Item itemCount = createFieldItem(sumfunCount, fields, startIndex);
            Item itemSum = createFieldItem(sumfunSum, fields, startIndex);
            Item itemVar = createFieldItem(sumfunVar, fields, startIndex);
            args.add(itemCount);
            args.add(itemSum);
            args.add(itemVar);
        } else {
            Item subItem = createFieldItem(sumfun, fields, startIndex);
            args.add(subItem);
        }
        ret = sumfun.reStruct(args, true, fields);
        ret.setItemName(sumfun.getPushDownName() == null ? sumfun.getItemName() : sumfun.getPushDownName());
        return ret;
    }

    /** Rebuilds a (non-aggregate) function item, recursing into its arguments. */
    protected static ItemFunc createFunctionItem(ItemFunc f, List<Field> fields, int startIndex, boolean allPushDown,
            HandlerType type, String charset) {
        ItemFunc ret = null;
        List<Item> args = new ArrayList<Item>();
        for (int index = 0; index < f.getArgCount(); index++) {
            Item arg = f.arguments().get(index);
            Item newArg = null;
            if (arg.isWild())
                newArg = new ItemInt(0);
            else
                newArg = createItem(arg, fields, startIndex, allPushDown, type, charset);
            if (newArg == null)
                throw new RuntimeException("Function argument not found:" + arg);
            args.add(newArg);
        }
        ret = (ItemFunc) f.reStruct(args, allPushDown, fields);
        ret.setItemName(f.getPushDownName() == null ? f.getItemName() : f.getPushDownName());
        return ret;
    }

    /** Rebuilds an aggregate item that must be evaluated locally, recursing into its arguments. */
    private static ItemSum createSumItem(ItemSum f, List<Field> fields, int startIndex, boolean allPushDown,
            HandlerType type, String charset) {
        ItemSum ret = null;
        List<Item> args = new ArrayList<Item>();
        for (int index = 0; index < f.getArgCount(); index++) {
            Item arg = f.arguments().get(index);
            Item newArg = null;
            if (arg.isWild())
                newArg = new ItemInt(0);
            else
                newArg = createItem(arg, fields, startIndex, allPushDown, type, charset);
            if (newArg == null)
                throw new RuntimeException("Function argument not found:" + arg);
            args.add(newArg);
        }
        ret = (ItemSum) f.reStruct(args, allPushDown, fields);
        ret.setItemName(f.getPushDownName() == null ? f.getItemName() : f.getPushDownName());
        return ret;
    }

    /**
     * Resolves {@code col} to its field. Every column — function result or
     * plain column — is treated as an ordinary column here: only table name
     * and column name are compared.
     *
     * @throws MySQLOutPutException if no matching field exists
     */
    protected static ItemField createFieldItem(Item col, List<Field> fields, int startIndex) {
        int index = findField(col, fields, startIndex);
        if (index < 0)
            throw new MySQLOutPutException(ErrorCode.ER_QUERYHANDLER, "", "field not found:" + col);
        ItemField ret = new ItemField(fields.get(index));
        ret.setItemName(col.getPushDownName() == null ? col.getItemName() : col.getPushDownName());
        return ret;
    }

    /**
     * Finds the index of the field matching {@code sel}, scanning from
     * {@code startIndex} (inclusive). Returns -1 when not found.
     */
    public static int findField(Item sel, List<Field> fields, int startIndex) {
        String selName = (sel.getPushDownName() == null ? sel.getItemName() : sel.getPushDownName());
        selName = selName.trim();
        String tableName = sel.getTableName();
        for (int index = startIndex; index < fields.size(); index++) {
            Field field = fields.get(index);
            // a pushed-down '' literal arrives with field.name == null
            String colName2 = field.name == null ? null : field.name.trim();
            String tableName2 = field.table;
            // BUGFIX: the original called tableName.equals(tableName2) directly,
            // throwing NPE when an unqualified column (tableName == null) was
            // matched against a qualified field; use a null-safe comparison.
            // NOTE(review): this skips qualified fields for unqualified columns —
            // confirm that unqualified select items always arrive unqualified here.
            if (sel instanceof ItemField
                    && !(tableName == null ? tableName2 == null : tableName.equals(tableName2)))
                continue;
            if (selName.equalsIgnoreCase(colName2))
                return index;
        }
        return -1;
    }

    /**
     * Tests whether {@code name} is a call of function {@code func}
     * (case-insensitive prefix "func(").
     */
    public static boolean matchFunc(String name, String func) {
        Pattern pt = Pattern.compile("^" + func + "\\(.*", Pattern.CASE_INSENSITIVE);
        return pt.matcher(name).matches();
    }

    /**
     * Builds an ascending ORDER BY list out of DISTINCT columns.
     */
    public static List<Order> makeOrder(List<Item> sels) {
        List<Order> orders = new ArrayList<Order>(sels.size());
        for (Item sel : sels) {
            orders.add(new Order(sel, SQLOrderingSpecification.ASC));
        }
        return orders;
    }

    // @bug 1086: a query without GROUP BY must still emit a (no-row) result
    public static boolean needSendNoRow(List<Order> groupBys) {
        return groupBys == null || groupBys.isEmpty();
    }
}

View File

@@ -0,0 +1,42 @@
package io.mycat.backend.mysql.nio.handler.util;
import io.mycat.backend.mysql.nio.MySQLConnection;
import io.mycat.net.mysql.RowDataPacket;
/**
 * One element of the merge heap: a raw row, its parsed packet, and the
 * backend connection it came from. A special "null item" marks an exhausted
 * input stream.
 */
public class HeapItem {
    private final byte[] row;
    private final RowDataPacket rowPacket;
    private final MySQLConnection hashIndex;
    private boolean isNullItem = false;

    /**
     * @param row   raw row bytes (null for the end-of-stream marker)
     * @param rdp   parsed row packet
     * @param index backend connection this row was read from
     */
    public HeapItem(byte[] row, RowDataPacket rdp, MySQLConnection index) {
        this.row = row;
        this.rowPacket = rdp;
        this.hashIndex = index;
    }

    /** Factory for the end-of-stream marker item. */
    public static HeapItem NULLITEM() {
        HeapItem marker = new HeapItem(null, null, null);
        marker.isNullItem = true;
        return marker;
    }

    /** True iff this is the end-of-stream marker. */
    public boolean IsNullItem() {
        return row == null && isNullItem;
    }

    public MySQLConnection getIndex() {
        return hashIndex;
    }

    public byte[] getRowData() {
        return row;
    }

    public RowDataPacket getRowPacket() {
        return this.rowPacket;
    }
}

View File

@@ -0,0 +1,541 @@
package io.mycat.backend.mysql.nio.handler.util;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import io.mycat.util.MinHeap;
/**
 * A red-black-tree-backed min-heap ordered by an external {@link Comparator}
 * (structure follows {@code java.util.TreeMap}). {@link #iterator()} is
 * unsupported, so the {@code removeAll}/{@code retainAll} defaults will throw.
 * Not thread-safe.
 */
@SuppressWarnings("unchecked")
public class RBTMinHeap<E> implements MinHeap<E> {
    private static final boolean RED = false;
    private static final boolean BLACK = true;

    private RBTNode<E> root;
    private int size = 0;
    private Comparator<E> comparator;

    public RBTMinHeap(Comparator<E> comparator) {
        this.comparator = comparator;
    }

    /** Returns the stored element comparing equal to {@code e}, or null. */
    public E find(E e) {
        RBTNode<E> node = search(e);
        if (node == null)
            return null;
        return node.value;
    }

    @Override
    public int size() {
        return size;
    }

    @Override
    public boolean isEmpty() {
        return size() == 0;
    }

    @Override
    public boolean contains(Object o) {
        // BUGFIX: the original returned search(o) == null, i.e. true exactly
        // when the element was ABSENT.
        return search((E) o) != null;
    }

    /** Standard BST lookup by comparator; null when absent. */
    private RBTNode<E> search(E e) {
        RBTNode<E> t = root;
        while (t != null) {
            int cmp = comparator.compare(e, t.value);
            if (cmp < 0)
                t = t.left;
            else if (cmp > 0)
                t = t.right;
            else
                return t;
        }
        return null;
    }

    @Override
    public Iterator<E> iterator() {
        throw new RuntimeException("unsupport iterator in RBTMinHeap");
    }

    @Override
    public Object[] toArray() {
        Object[] obj = inOrder();
        return Arrays.copyOf(obj, size);
    }

    // in-order traversal yields elements in ascending comparator order
    private void inOrder(RBTNode<E> node, List<E> list) {
        if (node != null) {
            inOrder(node.left, list);
            list.add(node.getValue());
            inOrder(node.right, list);
        }
    }

    private Object[] inOrder() {
        List<E> list = new ArrayList<E>(size);
        inOrder(root, list);
        return list.toArray();
    }

    @Override
    public <T> T[] toArray(T[] a) {
        Object[] obj = inOrder();
        if (a.length < size)
            // Make a new array of a's runtime type, but my contents:
            return (T[]) Arrays.copyOf(obj, size, a.getClass());
        System.arraycopy(obj, 0, a, 0, size);
        if (a.length > size)
            a[size] = null;
        return a;
    }

    @Override
    public boolean add(E e) {
        size++;
        RBTNode<E> node = new RBTNode<E>(BLACK, e);
        insert(node);
        return true;
    }

    /** BST insertion followed by red-black rebalancing. */
    private void insert(RBTNode<E> node) {
        if (root == null) {
            root = node; // first node: stays BLACK, no fixup needed
            return;
        }
        int cmp;
        RBTNode<E> x = this.root;
        RBTNode<E> parent;
        do {
            parent = x;
            cmp = comparator.compare(node.getValue(), x.getValue());
            if (cmp < 0)
                x = x.left;
            else
                x = x.right;
        } while (x != null);
        node.parent = parent;
        if (cmp < 0)
            parent.left = node;
        else
            parent.right = node;
        fixAfterInsertion(node);
    }

    // CLR-style insertion fixup (mirrors java.util.TreeMap)
    private void fixAfterInsertion(RBTNode<E> x) {
        x.color = RED;
        while (x != null && x != root && x.parent.color == RED) {
            // parent is grandparent's left child
            if (parentOf(x) == leftOf(parentOf(parentOf(x)))) {
                RBTNode<E> y = rightOf(parentOf(parentOf(x)));
                // uncle's color is red
                if (colorOf(y) == RED) {
                    setColor(parentOf(x), BLACK);
                    setColor(y, BLACK);
                    setColor(parentOf(parentOf(x)), RED);
                    x = parentOf(parentOf(x));
                } else {
                    if (x == rightOf(parentOf(x))) {
                        x = parentOf(x);
                        rotateLeft(x);
                    }
                    setColor(parentOf(x), BLACK);
                    setColor(parentOf(parentOf(x)), RED);
                    rotateRight(parentOf(parentOf(x)));
                }
            } else {
                RBTNode<E> y = leftOf(parentOf(parentOf(x)));
                if (colorOf(y) == RED) {
                    setColor(parentOf(x), BLACK);
                    setColor(y, BLACK);
                    setColor(parentOf(parentOf(x)), RED);
                    x = parentOf(parentOf(x));
                } else {
                    if (x == leftOf(parentOf(x))) {
                        x = parentOf(x);
                        rotateRight(x);
                    }
                    setColor(parentOf(x), BLACK);
                    setColor(parentOf(parentOf(x)), RED);
                    rotateLeft(parentOf(parentOf(x)));
                }
            }
        }
        root.color = BLACK;
    }

    /**
     * <pre>
     *
     *      px                px
     *     /                 /
     *    x                 y
     *   / \  --(rotate left)-.  / \  #
     *  lx  y                x  ry
     *     / \              / \
     *    ly ry            lx ly
     * </pre>
     *
     * @param p
     */
    private void rotateLeft(RBTNode<E> p) {
        if (p != null) {
            RBTNode<E> r = p.right;
            p.right = r.left;
            if (r.left != null)
                r.left.parent = p;
            r.parent = p.parent;
            if (p.parent == null)
                root = r;
            else if (p.parent.left == p)
                p.parent.left = r;
            else
                p.parent.right = r;
            r.left = p;
            p.parent = r;
        }
    }

    /**
     * <pre>
     *
     *      py                py
     *     /                 /
     *    y                 x
     *   / \  --(rotate right)-.  / \  #
     *  x  ry                lx  y
     * / \                      / \  #
     * lx rx                   rx ry
     * </pre>
     *
     * @param p
     */
    private void rotateRight(RBTNode<E> p) {
        if (p != null) {
            RBTNode<E> l = p.left;
            p.left = l.right;
            if (l.right != null)
                l.right.parent = p;
            l.parent = p.parent;
            if (p.parent == null)
                root = l;
            else if (p.parent.right == p)
                p.parent.right = l;
            else
                p.parent.left = l;
            l.right = p;
            p.parent = l;
        }
    }

    // null nodes are BLACK by convention
    private boolean colorOf(RBTNode<E> node) {
        return (node == null ? BLACK : node.color);
    }

    private RBTNode<E> parentOf(RBTNode<E> node) {
        return (node == null ? null : node.parent);
    }

    private void setColor(RBTNode<E> node, boolean c) {
        if (node != null)
            node.color = c;
    }

    private RBTNode<E> leftOf(RBTNode<E> node) {
        return (node == null) ? null : node.left;
    }

    private RBTNode<E> rightOf(RBTNode<E> node) {
        return (node == null) ? null : node.right;
    }

    /**
     * Returns the minimum element without removing it, or null when empty.
     * (unused in the current version)
     */
    @Override
    public E peak() {
        RBTNode<E> minNode = findMin(root);
        if (minNode == null)
            return null;
        return minNode.value;
    }

    /**
     * Replaces the minimum element with {@code e}. Implemented as
     * delete-min + insert; needs optimization. (unused in the current version)
     */
    @Override
    public void replaceTop(E e) {
        // find minNode
        RBTNode<E> minNode = findMin(root);
        if (minNode == null)
            return;
        // delete minNode
        delete(minNode);
        // add minNode
        RBTNode<E> node = new RBTNode<E>(BLACK, e);
        insert(node);
    }

    /** Removes and returns the minimum element, or null when empty. */
    @Override
    public E poll() {
        RBTNode<E> minNode = findMin(root);
        if (minNode == null)
            return null;
        size--;
        E e = minNode.value;
        delete(minNode);
        return e;
    }

    // leftmost node of the subtree, or null
    private RBTNode<E> findMin(RBTNode<E> node) {
        if (node == null)
            return null;
        while (node.left != null) {
            node = node.left;
        }
        return node;
    }

    @Override
    public boolean remove(Object o) {
        // BUGFIX: the original decremented size and returned true even when the
        // element was absent, corrupting the size counter.
        RBTNode<E> node = search((E) o);
        if (node == null)
            return false;
        size--;
        delete(node);
        return true;
    }

    /**
     * find the minimum node which value >= t.value
     *
     * @param t
     * @return
     */
    private RBTNode<E> successor(RBTNode<E> t) {
        if (t == null)
            return null;
        if (t.right != null) {
            RBTNode<E> p = t.right;
            while (p.left != null)
                p = p.left;
            return p;
        }
        RBTNode<E> p = t.parent;
        RBTNode<E> ch = t;
        // only child is parent's left node can return parent
        while (p != null && ch == p.right) {
            ch = p;
            p = p.parent;
        }
        return p;
    }

    // TreeMap-style deletion; caller maintains the size counter
    private void delete(RBTNode<E> node) {
        // If strictly internal, copy successor's element to node and then make
        // p point to successor.
        if (node.left != null && node.right != null) {
            RBTNode<E> s = successor(node);
            node.value = s.value;
            node = s;
        } // node has 2 children
        // Start fixup at replacement node, if it exists.
        RBTNode<E> replacement = (node.left != null ? node.left : node.right);
        if (replacement != null) {
            // Link replacement to parent
            replacement.parent = node.parent;
            if (node.parent == null)
                root = replacement;
            else if (node == node.parent.left)
                node.parent.left = replacement;
            else
                node.parent.right = replacement;
            // Null out links so they are OK to use by fixAfterDeletion.
            node.left = node.right = node.parent = null;
            // Fix replacement
            if (node.color == BLACK)
                fixAfterDeletion(replacement);
        } else if (node.parent == null) { // return if we are the only node.
            root = null;
        } else { // No children. Use self as phantom replacement and unlink.
            if (node.color == BLACK)
                fixAfterDeletion(node);
            if (node.parent != null) {
                if (node == node.parent.left)
                    node.parent.left = null;
                else if (node == node.parent.right)
                    node.parent.right = null;
                node.parent = null;
            }
        }
    }

    // CLR-style deletion fixup (mirrors java.util.TreeMap)
    private void fixAfterDeletion(RBTNode<E> x) {
        while (x != root && colorOf(x) == BLACK) {
            if (x == leftOf(parentOf(x))) {
                RBTNode<E> sib = rightOf(parentOf(x));
                if (colorOf(sib) == RED) {
                    setColor(sib, BLACK);
                    setColor(parentOf(x), RED);
                    rotateLeft(parentOf(x));
                    sib = rightOf(parentOf(x));
                }
                if (colorOf(leftOf(sib)) == BLACK
                        && colorOf(rightOf(sib)) == BLACK) {
                    setColor(sib, RED);
                    x = parentOf(x);
                } else {
                    if (colorOf(rightOf(sib)) == BLACK) {
                        setColor(leftOf(sib), BLACK);
                        setColor(sib, RED);
                        rotateRight(sib);
                        sib = rightOf(parentOf(x));
                    }
                    setColor(sib, colorOf(parentOf(x)));
                    setColor(parentOf(x), BLACK);
                    setColor(rightOf(sib), BLACK);
                    rotateLeft(parentOf(x));
                    x = root;
                }
            } else { // symmetric
                RBTNode<E> sib = leftOf(parentOf(x));
                if (colorOf(sib) == RED) {
                    setColor(sib, BLACK);
                    setColor(parentOf(x), RED);
                    rotateRight(parentOf(x));
                    sib = leftOf(parentOf(x));
                }
                if (colorOf(rightOf(sib)) == BLACK
                        && colorOf(leftOf(sib)) == BLACK) {
                    setColor(sib, RED);
                    x = parentOf(x);
                } else {
                    if (colorOf(leftOf(sib)) == BLACK) {
                        setColor(rightOf(sib), BLACK);
                        setColor(sib, RED);
                        rotateLeft(sib);
                        sib = leftOf(parentOf(x));
                    }
                    setColor(sib, colorOf(parentOf(x)));
                    setColor(parentOf(x), BLACK);
                    setColor(leftOf(sib), BLACK);
                    rotateRight(parentOf(x));
                    x = root;
                }
            }
        }
        setColor(x, BLACK);
    }

    @Override
    public boolean containsAll(Collection<?> c) {
        Iterator<?> e = c.iterator();
        while (e.hasNext())
            if (!contains(e.next()))
                return false;
        return true;
    }

    @Override
    public boolean addAll(Collection<? extends E> c) {
        boolean modified = false;
        Iterator<? extends E> e = c.iterator();
        while (e.hasNext()) {
            if (add(e.next()))
                modified = true;
        }
        return modified;
    }

    // NOTE(review): relies on iterator(), which throws — effectively unsupported
    @Override
    public boolean removeAll(Collection<?> c) {
        boolean modified = false;
        Iterator<?> e = iterator();
        while (e.hasNext()) {
            if (c.contains(e.next())) {
                e.remove();
                modified = true;
            }
        }
        return modified;
    }

    // NOTE(review): relies on iterator(), which throws — effectively unsupported
    @Override
    public boolean retainAll(Collection<?> c) {
        boolean modified = false;
        Iterator<E> e = iterator();
        while (e.hasNext()) {
            if (!c.contains(e.next())) {
                e.remove();
                modified = true;
            }
        }
        return modified;
    }

    @Override
    public void clear() {
        destroy(root);
        root = null;
        size = 0;
    }

    // recursively unlinks the subtree to help the GC (fixed typo "destory")
    private void destroy(RBTNode<E> node) {
        if (node == null)
            return;
        if (node.left != null) {
            destroy(node.left);
            node.left = null;
        }
        if (node.right != null) {
            destroy(node.right);
            node.right = null;
        }
        node.parent = null;
        node.value = null;
    }

    /** Tree node: color flag plus parent/child links. */
    static class RBTNode<E> {
        private boolean color;
        private E value;
        private RBTNode<E> left;
        private RBTNode<E> right;
        private RBTNode<E> parent;

        public RBTNode(boolean color, E value) {
            this.color = color;
            this.value = value;
        }

        public E getValue() {
            return value;
        }
    }
}

View File

@@ -0,0 +1,112 @@
package io.mycat.backend.mysql.nio.handler.util;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import com.alibaba.druid.sql.ast.SQLOrderingSpecification;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler.HandlerType;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.Order;
import io.mycat.plan.common.field.Field;
import io.mycat.plan.common.item.Item;
/**
 * Comparator that orders raw row packets according to an ORDER BY item list:
 * each row's bytes are loaded into the shared field holders, the order-by
 * items are evaluated, and the resulting values are compared column by column.
 */
public class RowDataComparator implements Comparator<RowDataPacket> {
    private List<Field> sourceFields;
    private List<Item> cmpItems;
    private List<Field> cmpFields;
    // per-column direction flags; null/empty means "no ordering"
    private List<Boolean> ascs;

    // only for unit tests
    public RowDataComparator(List<Field> sourceFields, List<Item> cmpItems, List<Field> cmpFields, List<Boolean> ascs,
            String charset) {
        this.sourceFields = sourceFields;
        this.cmpItems = cmpItems;
        this.cmpFields = cmpFields;
        this.ascs = ascs;
    }

    /**
     * Builds the comparator from the result-set field packets and the ORDER BY
     * list; with a null/empty order list the comparator treats all rows equal.
     */
    public RowDataComparator(List<FieldPacket> fps, List<Order> orders, boolean allPushDown, HandlerType type,
            String charset) {
        sourceFields = HandlerTool.createFields(fps);
        if (orders != null && orders.size() > 0) {
            ascs = new ArrayList<Boolean>();
            cmpFields = new ArrayList<Field>();
            cmpItems = new ArrayList<Item>();
            for (Order order : orders) {
                Item cmpItem = HandlerTool.createItem(order.getItem(), sourceFields, 0, allPushDown, type, charset);
                cmpItems.add(cmpItem);
                FieldPacket tmpFp = new FieldPacket();
                cmpItem.makeField(tmpFp);
                Field cmpField = HandlerTool.createField(tmpFp);
                cmpFields.add(cmpField);
                ascs.add(order.getSortOrder() == SQLOrderingSpecification.ASC);
            }
        }
    }

    public int getSourceFieldCount() {
        return sourceFields.size();
    }

    /** Sorts the rows in place using this comparator. */
    public void sort(List<RowDataPacket> rows) {
        // BUGFIX: the original wrapped this in an anonymous comparator that
        // returned a constant -1 when there are no order-by columns. A constant
        // -1 violates the Comparator contract (sgn(compare(a,b)) must equal
        // -sgn(compare(b,a))) and can make TimSort throw. compare() below
        // already returns 0 in that case; Collections.sort is stable, so the
        // original row order is preserved, which was the stated intent.
        Collections.sort(rows, this);
    }

    /**
     * Compares two raw rows; with no order-by columns all rows compare equal
     * so the original order is kept.
     */
    @Override
    public int compare(RowDataPacket o1, RowDataPacket o2) {
        if (this.ascs != null && this.ascs.size() > 0) {
            return cmp(o1, o2, 0);
        }
        // nothing to compare: keep original order
        return 0;
    }

    // compares column `index`, falling through to the next column on ties
    private int cmp(RowDataPacket o1, RowDataPacket o2, int index) {
        HandlerTool.initFields(sourceFields, o1.fieldValues);
        List<byte[]> bo1 = HandlerTool.getItemListBytes(cmpItems);
        HandlerTool.initFields(sourceFields, o2.fieldValues);
        List<byte[]> bo2 = HandlerTool.getItemListBytes(cmpItems);
        boolean isAsc = ascs.get(index);
        Field field = cmpFields.get(index);
        byte[] b1 = bo1.get(index);
        byte[] b2 = bo2.get(index);
        int rs;
        if (isAsc) {
            rs = field.compare(b1, b2);
        } else {
            rs = field.compare(b2, b1);
        }
        if (rs != 0 || cmpFields.size() == (index + 1)) {
            return rs;
        } else {
            return cmp(o1, o2, index + 1);
        }
    }
}

View File

@@ -0,0 +1,89 @@
package io.mycat.backend.mysql.nio.handler.util;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import com.alibaba.druid.sql.ast.SQLOrderingSpecification;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler.HandlerType;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.Order;
import io.mycat.plan.common.field.Field;
import io.mycat.plan.common.item.Item;
import io.mycat.plan.common.item.function.operator.cmpfunc.util.ArgComparator;
/**
 * Comparator for join rows coming from two different tables. Unlike
 * RowDataComparator, the two compared columns may have different types
 * (e.g. an integer joined against a string), so each column pair gets its
 * own {@link ArgComparator}.
 */
public class TwoTableComparator implements Comparator<RowDataPacket> {
    /* holders for the raw left/right row values; refreshed before each evaluation */
    private List<Field> leftFields;
    private List<Field> rightFields;
    /* the compare items for the left and the right side */
    private List<Item> leftCmpItems;
    private List<Item> rightCmpItems;
    /* one comparator per column pair; sort direction is identical on both sides */
    private List<ArgComparator> cmptors;
    private List<Boolean> ascs;
    private HandlerType type;
    private boolean isAllPushDown;

    public TwoTableComparator(List<FieldPacket> fps1, List<FieldPacket> fps2, List<Order> leftOrders,
            List<Order> rightOrders, boolean isAllPushDown, HandlerType type, String charset) {
        this.isAllPushDown = isAllPushDown;
        this.type = type;
        this.leftFields = HandlerTool.createFields(fps1);
        this.rightFields = HandlerTool.createFields(fps2);
        this.ascs = new ArrayList<Boolean>();
        for (Order order : leftOrders) {
            this.ascs.add(order.getSortOrder() == SQLOrderingSpecification.ASC);
        }
        this.leftCmpItems = new ArrayList<Item>();
        this.rightCmpItems = new ArrayList<Item>();
        this.cmptors = new ArrayList<ArgComparator>();
        for (int i = 0; i < ascs.size(); i++) {
            Item lItem = HandlerTool.createItem(leftOrders.get(i).getItem(), leftFields, 0, this.isAllPushDown,
                    this.type, charset);
            leftCmpItems.add(lItem);
            Item rItem = HandlerTool.createItem(rightOrders.get(i).getItem(), rightFields, 0, this.isAllPushDown,
                    this.type, charset);
            rightCmpItems.add(rItem);
            ArgComparator colCmp = new ArgComparator(lItem, rItem);
            colCmp.setCmpFunc(null, lItem, rItem, false);
            cmptors.add(colCmp);
        }
    }

    @Override
    public int compare(RowDataPacket o1, RowDataPacket o2) {
        if (ascs == null || ascs.isEmpty()) // no join column: all rows rank equal
            return 0;
        // walk the order columns; first non-tie decides
        for (int i = 0; i < ascs.size(); i++) {
            HandlerTool.initFields(leftFields, o1.fieldValues);
            HandlerTool.initFields(rightFields, o2.fieldValues);
            int rs = cmptors.get(i).compare();
            if (!ascs.get(i))
                rs = -rs;
            if (rs != 0)
                return rs;
        }
        return 0;
    }
}

View File

@@ -0,0 +1,74 @@
package io.mycat.backend.mysql.store;
import io.mycat.backend.mysql.nio.handler.util.RowDataComparator;
import io.mycat.backend.mysql.store.diskbuffer.DistinctResultDiskBuffer;
import io.mycat.backend.mysql.store.result.ResultExternal;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.util.RBTreeList;
/**
 * A {@link LocalResult} that keeps only distinct input rows. Rows are stored in
 * an {@link RBTreeList} ordered by the distinct comparator, so duplicate lookup
 * is a tree search rather than a linear scan.
 *
 * @author chenzifei
 *
 */
public class DistinctLocalResult extends LocalResult {
    /** comparator over the distinct columns; also orders the backing tree list */
    private RowDataComparator distinctCmp;

    /**
     * @param initialCapacity initial capacity of the in-memory row container
     * @param fieldsCount     number of fields per row
     * @param pool            buffer pool used by the disk spill buffer
     * @param distinctCmp     distinct selectable comparator
     * @param charset         charset used to decode field values
     */
    public DistinctLocalResult(int initialCapacity, int fieldsCount, BufferPool pool, RowDataComparator distinctCmp,
            String charset) {
        super(initialCapacity, fieldsCount, pool, charset);
        this.distinctCmp = distinctCmp;
        // replace the plain ArrayList created by the superclass with a tree list
        this.rows = new RBTreeList<RowDataPacket>(initialCapacity, distinctCmp);
    }

    public DistinctLocalResult(BufferPool pool, int fieldsCount, RowDataComparator distinctCmp, String charset) {
        this(DEFAULT_INITIAL_CAPACITY, fieldsCount, pool, distinctCmp, charset);
    }

    @Override
    protected ResultExternal makeExternal() {
        return new DistinctResultDiskBuffer(pool, fieldsCount, distinctCmp, charset);
    }

    /**
     * Add a row unless an equal row (per distinctCmp) is already present.
     *
     * @param row the candidate row
     */
    @Override
    public void add(RowDataPacket row) {
        lock.lock();
        try {
            if (isClosed)
                return;
            // indexOf on the RBTreeList performs a comparator-based tree lookup
            boolean duplicate = rows.indexOf(row) >= 0;
            if (!duplicate) {
                super.add(row);
            }
        } finally {
            lock.unlock();
        }
    }

    @Override
    protected void doneOnlyMemory() {
        // rows is already kept sorted by the tree list; nothing to do
    }

    @Override
    protected void beforeFlushRows() {
        // rbtree.toarray() is sorted, so no extra sort is needed before flushing
    }
}

View File

@@ -0,0 +1,56 @@
package io.mycat.backend.mysql.store;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.log4j.Logger;
import io.mycat.MycatServer;
import io.mycat.memory.environment.Hardware;
/**
 * Global counter limiting how many memory-mapped temp-file segments may exist
 * at once. The cap is derived from physical memory: min(totalMem/2, freeMem)
 * in KB divided by the mapped segment size in KB.
 * NOTE(review): the public method name "increament" is a typo but is kept
 * because external callers depend on it.
 */
public class FileCounter {
    private static final Logger logger = Logger.getLogger(FileCounter.class);
    private static FileCounter fileCounter = new FileCounter();
    private final Lock lock;
    /** maximum number of mapped segments allowed (despite the name, a count, not a size) */
    private final int maxFileSize;
    /** number of segments currently accounted for; guarded by lock */
    private int currentNum;

    private FileCounter() {
        this.lock = new ReentrantLock();
        long totalMem = Hardware.getSizeOfPhysicalMemory();
        long freeMem = Hardware.getFreeSizeOfPhysicalMemoryForLinux();
        // budget: at most half of total physical memory, and no more than is free
        long currentMem = Math.min(totalMem / 2, freeMem);
        this.maxFileSize = (int)(currentMem / (MycatServer.getInstance().getConfig().getSystem().getMappedFileSize() / 1024));
        logger.info("current mem is " + currentMem + "kb. max file size is " + maxFileSize);
        this.currentNum = 0;
    }

    public static FileCounter getInstance() {
        return fileCounter;
    }

    /**
     * Try to reserve one more mapped segment.
     *
     * @return true if a slot was reserved, false if the cap is reached
     */
    public boolean increament() {
        lock.lock();
        try {
            boolean allowed = this.currentNum < maxFileSize;
            if (allowed) {
                this.currentNum++;
            }
            return allowed;
        } finally {
            lock.unlock();
        }
    }

    /**
     * Release one previously reserved segment slot.
     *
     * @return true if a slot was released, false if the counter was already zero
     */
    public boolean decrement() {
        lock.lock();
        try {
            boolean positive = this.currentNum > 0;
            if (positive) {
                this.currentNum--;
            }
            return positive;
        } finally {
            lock.unlock();
        }
    }
}

View File

@@ -0,0 +1,386 @@
package io.mycat.backend.mysql.store;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.util.ArrayList;
import java.util.List;
import org.apache.log4j.Logger;
import io.mycat.MycatServer;
import io.mycat.backend.mysql.store.fs.FilePath;
import io.mycat.backend.mysql.store.fs.FileUtils;
import io.mycat.config.ErrorCode;
import io.mycat.util.exception.TmpFileException;
/**
 * A temp-file abstraction that stripes its contents across a list of fixed-size
 * backing files (segments of {@code mappedFileSize} bytes each). All reads and
 * writes go through a single logical position {@code filePos}: the segment is
 * {@code filePos / mappedFileSize}, the offset inside it is
 * {@code filePos % mappedFileSize}. New segments are created on demand while
 * writing. Segments are memory-mapped ("nioMapped") while {@link FileCounter}
 * grants slots, and fall back to plain disk files ("nio") otherwise.
 * NOTE(review): apart from the synchronized lock methods, this class is not
 * thread-safe — confirm that callers serialize read/write access.
 */
public class FileStore {
    private static final String SUFFIX_TEMP_FILE = ".temp.db";
    private static Logger logger = Logger.getLogger(FileStore.class);
    /**
     * The file path name.
     */
    protected String name;
    // one entry per created segment, parallel lists: path string and open channel
    private List<String> fileNames;
    private List<FileChannel> files;
    // logical read/write position across all segments
    private long filePos;
    // highest position ever written (logical length of the store)
    private long fileLength;
    private final String mode;
    // size of one segment in bytes, taken from the system configuration
    private final int mappedFileSize;
    private List<FileLock> locks;
    /**
     * Create a new file using the given settings.
     *
     * @param handler
     *            the callback object
     * @param name
     *            the file name
     * @param mode
     *            the access mode ("r", "rw", "rws", "rwd")
     */
    public FileStore(String name, String mode) {
        this.name = name;
        this.fileNames = new ArrayList<String>();
        this.files = new ArrayList<FileChannel>();
        this.locks = new ArrayList<FileLock>();
        this.mode = mode;
        this.mappedFileSize = MycatServer.getInstance().getConfig().getSystem().getMappedFileSize();
        try {
            // eagerly create the first segment so the store is usable immediately
            createFile();
        } catch (IOException e) {
            throw TmpFileException.get(ErrorCode.ER_FILE_INIT, e, name);
        }
    }
    /**
     * Close the file.
     */
    public void close() {
        if (!this.files.isEmpty()) {
            for (FileChannel file : this.files) {
                try {
                    file.close();
                } catch (IOException e) {
                    logger.warn("close file error :", e);
                } finally {
                    // QUESTION_TODO if IOException,memory is release or not
                    // NOTE(review): decrement() is called once per segment even for
                    // segments created via the disk fallback (which never
                    // incremented the counter) — verify the counter stays balanced
                    FileCounter.getInstance().decrement();
                }
            }
            this.files.clear();
        }
    }
    /**
     * Close the file without throwing any exceptions. Exceptions are simply
     * ignored.
     */
    public void closeSilently() {
        try {
            close();
        } catch (Exception e) {
            // ignore
        }
    }
    // delete every created segment file; best effort, paths are cleared regardless
    public void delete() {
        if (!this.fileNames.isEmpty()) {
            try {
                for (int i = 0; i < this.fileNames.size(); i++) {
                    String fileName = fileNames.get(i);
                    FileUtils.tryDelete(fileName);
                }
            } finally {
                this.fileNames.clear();
            }
        }
    }
    /**
     * Close the file (ignoring exceptions) and delete the file.
     */
    public void closeAndDeleteSilently() {
        if (!this.files.isEmpty()) {
            closeSilently();
            delete();
        }
        name = null;
    }
    /**
     * Read a number of bytes without decrypting.
     *
     * @param b
     *            the target buffer
     * @param off
     *            the offset
     * @param len
     *            the number of bytes to read
     */
    protected void readFullyDirect(byte[] b, int off, int len) {
        readFully(b, off, len);
    }
    /**
     * Read a number of bytes.
     *
     * @param b
     *            the target buffer
     * @param off
     *            the offset
     * @param len
     *            the number of bytes to read
     */
    public void readFully(byte[] b, int off, int len) {
        ByteBuffer buffer = ByteBuffer.wrap(b, off, len);
        read(buffer);
    }
    // read from the current filePos into buffer, crossing segment boundaries
    // as needed; returns the number of bytes read
    private int read(ByteBuffer buffer) {
        int len = 0;
        try {
            do {
                int index = (int) (filePos / mappedFileSize);
                long offset = filePos % mappedFileSize;
                if (index > files.size() - 1)
                    throw TmpFileException.get(ErrorCode.ER_FILE_READ, name);
                files.get(index).position(offset);
                // NOTE(review): FileChannel.read may return -1 at end-of-stream;
                // that value would be added to len/filePos unchecked — confirm
                // callers never read past fileLength
                int r = files.get(index).read(buffer);
                len += r;
                filePos += r;
                if (filePos >= fileLength - 1)
                    break;
            } while (buffer.hasRemaining());
        } catch (IOException e) {
            throw TmpFileException.get(ErrorCode.ER_FILE_READ, e, name);
        }
        return len;
    }
    // read at most up to endPos; shrinks the buffer limit so a partial tape
    // region cannot read into the following tape
    public int read(ByteBuffer buffer, long endPos) {
        long remained = endPos - filePos;
        if (remained <= 0)
            return 0;
        if (remained < buffer.remaining()) {
            int newLimit = (int) (buffer.position() + remained);
            buffer.limit(newLimit);
        }
        return read(buffer);
    }
    /**
     * Go to the specified file location.
     *
     * @param pos
     *            the location
     */
    public void seek(long pos) {
        filePos = pos;
    }
    /**
     * Write a number of bytes without encrypting.
     *
     * @param b
     *            the source buffer
     * @param off
     *            the offset
     * @param len
     *            the number of bytes to write
     */
    protected void writeDirect(byte[] b, int off, int len) {
        write(b, off, len);
    }
    /**
     * Write a number of bytes.
     *
     * @param b
     *            the source buffer
     * @param off
     *            the offset
     * @param len
     *            the number of bytes to write
     */
    public void write(byte[] b, int off, int len) {
        write(ByteBuffer.wrap(b, off, len));
    }
    // write the buffer at the current filePos, creating a new segment whenever
    // the position crosses into an index that does not exist yet
    public void write(ByteBuffer buffer) {
        try {
            do {
                int index = (int) (filePos / mappedFileSize);
                if (index > files.size() - 1) {
                    createFile();
                }
                long offset = filePos % mappedFileSize;
                files.get(index).position(offset);
                int w = files.get(index).write(buffer);
                filePos += w;
            } while (buffer.remaining() > 0);
        } catch (IOException e) {
            closeFileSilently();
            throw TmpFileException.get(ErrorCode.ER_FILE_WRITE, e, name);
        }
        // the logical length only grows; seeking back and rewriting keeps it
        fileLength = Math.max(filePos, fileLength);
    }
    // create the next segment; prefer the scheme embedded in `name`
    // (e.g. "nioMapped:..."), falling back to a plain disk file when the
    // mapped-file quota is exhausted or mapping runs out of memory
    private void createFile() throws IOException {
        String newName = name;
        int index = newName.indexOf(':');
        String scheme = newName.substring(0, index);
        if (!FileCounter.getInstance().increament() && "nioMapped".equals(scheme)) {
            // quota exhausted: switch this segment to the disk-backed scheme
            newName = "nio:AresDisk";
        }
        try {
            FilePath path = FilePath.get(newName).createTempFile(SUFFIX_TEMP_FILE, true);
            this.fileNames.add(path.toString());
            this.files.add(path.open(mode));
        } catch (IOException e) {
            if (e.getCause() instanceof OutOfMemoryError) {
                logger.info("no memory to mapped file,change to disk file");
                // memory is used by other user
                FileCounter.getInstance().decrement();
                newName = "nio:AresDisk";
                FilePath path = FilePath.get(newName).createTempFile(SUFFIX_TEMP_FILE, true);
                this.files.add(path.open(mode));
                this.fileNames.add(path.toString());
            } else {
                logger.warn("create file error :", e);
                throw e;
            }
        }
    }
    /**
     * Get the file size in bytes.
     *
     * @return the file size
     */
    public long length() {
        return fileLength;
    }
    /**
     * Get the current location of the file pointer.
     *
     * @return the location
     */
    public long getFilePointer() {
        return filePos;
    }
    // force all segments to storage; on failure the channels are closed silently
    // before the failure is rethrown as a TmpFileException
    public void force(boolean metaData) {
        try {
            for (FileChannel file : this.files) {
                file.force(metaData);
            }
        } catch (IOException e) {
            closeFileSilently();
            throw TmpFileException.get(ErrorCode.ER_FILE_FORCE, e, name);
        }
    }
    /**
     * Call fsync. Depending on the operating system and hardware, this may or
     * may not in fact write the changes.
     */
    public void sync() {
        try {
            for (FileChannel file : this.files) {
                file.force(true);
            }
        } catch (IOException e) {
            closeFileSilently();
            throw TmpFileException.get(ErrorCode.ER_FILE_SYNC, e, name);
        }
    }
    /**
     * Close the file.
     */
    public void closeFile() throws IOException {
        for (FileChannel file : this.files) {
            file.close();
        }
    }
    /**
     * Just close the file, without setting the reference to null. This method
     * is called when writing failed. The reference is not set to null so that
     * there are no NullPointerExceptions later on.
     */
    private void closeFileSilently() {
        try {
            closeFile();
        } catch (IOException e) {
            // ignore
        }
    }
    /**
     * Re-open the file. The file pointer will be reset to the previous
     * location.
     */
    public void openFile() throws IOException {
        if (this.files.isEmpty()) {
            for (String fileName : fileNames) {
                this.files.add(FilePath.get(fileName).open(mode));
            }
        }
    }
    /**
     * Try to lock the file.
     *
     * @return true if successful
     */
    public synchronized boolean tryLock() {
        boolean isLocked = true;
        try {
            // all segments must be locked; on the first failure, release any
            // locks already obtained
            for (FileChannel file : this.files) {
                FileLock lock = file.tryLock();
                if (lock == null) {
                    isLocked = false;
                    break;
                }
                locks.add(lock);
            }
        } catch (Exception e) {
            // ignore OverlappingFileLockException
            return false;
        } finally {
            if (!isLocked) {
                for (FileLock lock : locks) {
                    try {
                        lock.release();
                    } catch (IOException e) {
                        // ignore
                    }
                }
            }
        }
        return isLocked;
    }
    /**
     * Release the file lock.
     */
    public synchronized void releaseLock() {
        if (!files.isEmpty() && !locks.isEmpty()) {
            for (FileLock lock : locks) {
                try {
                    lock.release();
                } catch (IOException e) {
                    // ignore
                }
            }
            locks.clear();
        }
    }
}

View File

@@ -0,0 +1,188 @@
package io.mycat.backend.mysql.store;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler.HandlerType;
import io.mycat.backend.mysql.nio.handler.query.impl.groupby.directgroupby.DGRowPacket;
import io.mycat.backend.mysql.nio.handler.util.HandlerTool;
import io.mycat.backend.mysql.nio.handler.util.RowDataComparator;
import io.mycat.backend.mysql.store.diskbuffer.GroupResultDiskBuffer;
import io.mycat.backend.mysql.store.result.ResultExternal;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.common.field.Field;
import io.mycat.plan.common.item.function.sumfunc.Aggregator.AggregatorType;
import io.mycat.plan.common.item.function.sumfunc.ItemSum;
import io.mycat.util.RBTreeList;
/**
 * A {@link LocalResult} specialized for GROUP BY. Like the distinct variant it
 * keeps one stored row per key (ordered by the group comparator), but when an
 * incoming row matches an existing group, the aggregate functions are merged
 * into the stored row instead of the row being discarded.
 */
public class GroupByLocalResult extends LocalResult {
    /** comparator over the group-by columns; also orders the backing RBTreeList */
    private RowDataComparator groupCmp;
    /**
     * the packets contains sums
     */
    private List<FieldPacket> fieldPackets;
    /** aggregate function templates declared by the query */
    private List<ItemSum> sumFunctions;
    private boolean isAllPushDown;
    /**
     * store the origin row fields,(already contains the item_sum fields in
     * rowpackets we should calculate the item_sums again when next() is
     * called!)
     */
    private final List<Field> fields;
    /** aggregator items instantiated from sumFunctions over {@link #fields} */
    private final List<ItemSum> sums;
    /**
     *
     * @param pool
     * @param fieldsCount
     *            fieldsCount contains sums
     * @param groupCmp
     * @param fieldPackets
     *            fieldPackets contains sums
     * @param sumFunctions
     * @param isAllPushDown
     */
    public GroupByLocalResult(BufferPool pool, int fieldsCount, RowDataComparator groupCmp,
            List<FieldPacket> fieldPackets, List<ItemSum> sumFunctions, boolean isAllPushDown, String charset) {
        this(DEFAULT_INITIAL_CAPACITY, fieldsCount, pool, groupCmp, fieldPackets, sumFunctions, isAllPushDown, charset);
    }
    public GroupByLocalResult(int initialCapacity, int fieldsCount, BufferPool pool, RowDataComparator groupCmp,
            List<FieldPacket> fieldPackets, List<ItemSum> sumFunctions, boolean isAllPushDown, String charset) {
        super(initialCapacity, fieldsCount, pool, charset);
        this.groupCmp = groupCmp;
        this.fieldPackets = fieldPackets;
        this.sumFunctions = sumFunctions;
        this.isAllPushDown = isAllPushDown;
        // tree list ordered by the group comparator so lookups are O(log n)
        this.rows = new RBTreeList<RowDataPacket>(initialCapacity, groupCmp);
        /* init item_sums */
        this.fields = HandlerTool.createFields(fieldPackets);
        this.sums = new ArrayList<ItemSum>();
        for (ItemSum sumFunc : sumFunctions) {
            ItemSum sum = (ItemSum) (HandlerTool.createItem(sumFunc, this.fields, 0, this.isAllPushDown,
                    HandlerType.GROUPBY, charset));
            this.sums.add(sum);
        }
        prepare_sum_aggregators(this.sums, true);
    }
    /* should group sumfunctions when find a row in rows */
    @Override
    public void add(RowDataPacket row) {
        lock.lock();
        try {
            if (isClosed)
                return;
            // comparator-based lookup: >= 0 means this group key already exists
            int index = rows.indexOf(row);
            int increSize = 0;
            if (index >= 0)// find
            {
                // merge the new row's aggregate state into the stored group row;
                // memory accounting tracks only the size delta of the stored row
                RowDataPacket oldRow = rows.get(index);
                int oldRowSizeBefore = getRowMemory(oldRow);
                onFoundRow(oldRow, row);
                int oldRowSizeAfter = getRowMemory(oldRow);
                increSize = oldRowSizeAfter - oldRowSizeBefore;
            } else {
                // first row of a new group: initialize its aggregate state
                onFirstGroupRow(row);
                rows.add(row);
                rowCount++;
                increSize = getRowMemory(row);
            }
            currentMemory += increSize;
            boolean needFlush = false;
            if (bufferMC != null) {
                if (bufferMC.addSize(increSize) != true) {
                    needFlush = true;
                }
            }
            // NOTE(review): when bufferMC is set, the local maxMemory threshold
            // is never consulted — the shared controller alone decides flushing
            else if (!needFlush && currentMemory > maxMemory) {
                needFlush = true;
            }
            if (needFlush) {
                if (external == null)
                    external = makeExternal();
                addRowsToDisk();
            }
        } finally {
            lock.unlock();
        }
    }
    @Override
    protected ResultExternal makeExternal() {
        return new GroupResultDiskBuffer(pool, fieldsCount, groupCmp, fieldPackets, sumFunctions, isAllPushDown,
                charset);
    }
    @Override
    protected void doneOnlyMemory() {
        Collections.sort(rows, this.groupCmp);
    }
    @Override
    protected void beforeFlushRows() {
        // rbtree.toarray() is sorted,so do not need to sort again
    }
    // merge `row` into `oldRow`: restore the aggregators from oldRow's
    // transfer objects, fold in row's values, then write the updated transfer
    // objects back into oldRow
    protected void onFoundRow(RowDataPacket oldRow, RowDataPacket row) {
        // we need to calculate group by
        init_sum_functions(this.sums, oldRow);
        update_sum_func(this.sums, row);
        for (int i = 0; i < this.sums.size(); i++) {
            ItemSum sum = this.sums.get(i);
            Object b = sum.getTransAggObj();
            int transSize = sum.getTransSize();
            ((DGRowPacket) oldRow).setSumTran(i, b, transSize);
        }
    }
    // initialize aggregate state for the first row of a group and store the
    // transfer objects inside the row itself (DGRowPacket slots)
    protected void onFirstGroupRow(RowDataPacket row) {
        // we need to calculate group by
        init_sum_functions(this.sums, row);
        for (int i = 0; i < this.sums.size(); i++) {
            ItemSum sum = this.sums.get(i);
            Object b = sum.getTransAggObj();
            int transSize = sum.getTransSize();
            ((DGRowPacket) row).setSumTran(i, b, transSize);
        }
    }
    /**
     * see Sql_executor.cc
     *
     * @return
     */
    protected void prepare_sum_aggregators(List<ItemSum> funcs, boolean need_distinct) {
        for (ItemSum func : funcs) {
            func.setAggregator(need_distinct && func.has_with_distinct()
                    ? AggregatorType.DISTINCT_AGGREGATOR : AggregatorType.SIMPLE_AGGREGATOR,
                    null);
        }
    }
    // reset each aggregator from the transfer object carried by `row`, then add
    // the row's own values
    protected void init_sum_functions(List<ItemSum> funcs, RowDataPacket row) {
        for (int i = 0; i < funcs.size(); i++) {
            ItemSum sum = funcs.get(i);
            Object transObj = ((DGRowPacket) row).getSumTran(i);
            sum.resetAndAdd(row, transObj);
        }
    }
    // fold `row` (and its transfer object) into the current aggregator state
    protected void update_sum_func(List<ItemSum> funcs, RowDataPacket row) {
        for (int index = 0; index < funcs.size(); index++) {
            ItemSum sum = funcs.get(index);
            Object transObj = ((DGRowPacket) row).getSumTran(index);
            sum.aggregatorAdd(row, transObj);
        }
    }
}

View File

@@ -0,0 +1,221 @@
package io.mycat.backend.mysql.store;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import io.mycat.backend.mysql.store.memalloc.MemSizeController;
import io.mycat.backend.mysql.store.result.ResultExternal;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.common.external.ResultStore;
/**
 * Base class for a row store that buffers rows in memory and spills them to a
 * {@link ResultExternal} disk buffer when the memory budget is exceeded. The
 * budget is either the shared {@link MemSizeController} ({@code bufferMC}, when
 * set) or the local {@code maxMemory} threshold. Subclasses decide how the disk
 * buffer is built and how rows are ordered/deduplicated.
 * All public operations are guarded by {@code lock}.
 */
public abstract class LocalResult implements ResultStore {
    protected static final int DEFAULT_INITIAL_CAPACITY = 1024;
    protected final int fieldsCount;
    /** local in-memory budget in bytes; only consulted when bufferMC is null */
    protected int maxMemory = 262144;
    protected BufferPool pool;
    /** in-memory rows not yet spilled; subclasses may replace with a sorted list */
    protected List<RowDataPacket> rows;
    /** disk spill buffer; created lazily on the first flush */
    protected ResultExternal external;
    /** cursor index for next() and total number of rows added */
    protected int rowId, rowCount;
    /** estimated bytes currently held in `rows` */
    protected int currentMemory;
    protected RowDataPacket currentRow;
    protected RowDataPacket lastRow;
    protected boolean isClosed;
    protected Lock lock;
    /* @bug 1208 */
    protected String charset = "UTF-8";
    /** optional shared memory-size controller; overrides maxMemory when set */
    protected MemSizeController bufferMC;

    public LocalResult(int initialCapacity, int fieldsCount, BufferPool pool, String charset) {
        this.rows = new ArrayList<RowDataPacket>(initialCapacity);
        this.fieldsCount = fieldsCount;
        this.pool = pool;
        init();
        this.isClosed = false;
        this.lock = new ReentrantLock();
        this.charset = charset;
    }

    /**
     * add a row into localresult
     *
     * @param row the row to store
     */
    public void add(RowDataPacket row) {
        lock.lock();
        try {
            if (this.isClosed)
                return;
            lastRow = row;
            rows.add(row);
            rowCount++;
            int increSize = getRowMemory(row);
            currentMemory += increSize;
            boolean needFlush;
            if (bufferMC != null) {
                // shared controller decides; the local maxMemory threshold is
                // deliberately not consulted in this case
                needFlush = !bufferMC.addSize(increSize);
            } else {
                needFlush = currentMemory > maxMemory;
            }
            if (needFlush) {
                if (external == null)
                    external = makeExternal();
                addRowsToDisk();
            }
        } finally {
            lock.unlock();
        }
    }

    /** @return the disk buffer implementation used when memory overflows */
    protected abstract ResultExternal makeExternal();

    public RowDataPacket currentRow() {
        return currentRow;
    }

    public RowDataPacket getLastRow() {
        return lastRow;
    }

    public int getRowCount() {
        return rowCount;
    }

    public int getRowId() {
        return rowId;
    }

    /**
     * Advance the cursor.
     *
     * @return next row, or null when exhausted or closed
     */
    public RowDataPacket next() {
        lock.lock();
        try {
            if (this.isClosed)
                return null;
            if (++rowId < rowCount) {
                // once a spill happened, all rows are served from disk
                if (external != null) {
                    currentRow = external.next();
                } else {
                    currentRow = rows.get(rowId);
                }
            } else {
                currentRow = null;
            }
            return currentRow;
        } finally {
            lock.unlock();
        }
    }

    /**
     * This method is called after all rows have been added.
     */
    public void done() {
        lock.lock();
        try {
            if (this.isClosed)
                return;
            if (external == null)
                doneOnlyMemory();
            else {
                // flush any in-memory remainder so disk holds the complete set
                if (!rows.isEmpty())
                    addRowsToDisk();
                external.done();
            }
            reset();
        } finally {
            lock.unlock();
        }
    }

    /** finalize the pure in-memory case (e.g. sort `rows`) */
    protected abstract void doneOnlyMemory();

    /** rewind the cursor so iteration can start from the first row */
    public void reset() {
        lock.lock();
        try {
            rowId = -1;
            if (external != null) {
                external.reset();
            }
        } finally {
            lock.unlock();
        }
    }

    @Override
    public void close() {
        lock.lock();
        try {
            if (this.isClosed)
                return;
            this.isClosed = true;
            rows.clear();
            // give the accounted memory back to the shared controller
            if (bufferMC != null)
                bufferMC.subSize(currentMemory);
            rows = null;
            if (external != null) {
                external.close();
                external = null;
            }
        } finally {
            lock.unlock();
        }
    }

    @Override
    public void clear() {
        lock.lock();
        try {
            rows.clear();
            if (bufferMC != null)
                bufferMC.subSize(currentMemory);
            init();
            if (external != null) {
                external.close();
                external = null;
            }
        } finally {
            lock.unlock();
        }
    }

    /** spill the in-memory rows to the disk buffer and reset memory accounting */
    protected final void addRowsToDisk() {
        beforeFlushRows();
        rowCount = external.addRows(rows);
        rows.clear();
        if (bufferMC != null)
            bufferMC.subSize(currentMemory);
        currentMemory = 0;
    }

    /**
     * job to do before flush rows into disk
     */
    protected abstract void beforeFlushRows();

    /** @return estimated memory footprint of a row (its packet size) */
    protected int getRowMemory(RowDataPacket row) {
        return row.calcPacketSize();
    }

    private void init() {
        this.rowId = -1;
        this.rowCount = 0;
        this.currentMemory = 0;
        this.currentRow = null;
        this.lastRow = null;
    }

    public LocalResult setMemSizeController(MemSizeController bufferMC) {
        this.bufferMC = bufferMC;
        return this;
    }
}

View File

@@ -0,0 +1,39 @@
package io.mycat.backend.mysql.store;
import java.util.Collections;
import io.mycat.backend.mysql.nio.handler.util.RowDataComparator;
import io.mycat.backend.mysql.store.diskbuffer.SortedResultDiskBuffer;
import io.mycat.backend.mysql.store.result.ResultExternal;
import io.mycat.buffer.BufferPool;
/**
 * A {@link LocalResult} whose rows are delivered in sorted order: rows are
 * sorted before every disk flush and once more when the result stays fully
 * in memory.
 */
public class SortedLocalResult extends LocalResult {
    /** comparator defining the output order */
    protected RowDataComparator rowcmp;

    public SortedLocalResult(int initialCapacity, int fieldsCount, BufferPool pool, RowDataComparator rowcmp,
            String charset) {
        super(initialCapacity, fieldsCount, pool, charset);
        this.rowcmp = rowcmp;
    }

    public SortedLocalResult(BufferPool pool, int fieldsCount, RowDataComparator rowcmp, String charset) {
        this(DEFAULT_INITIAL_CAPACITY, fieldsCount, pool, rowcmp, charset);
    }

    @Override
    protected ResultExternal makeExternal() {
        return new SortedResultDiskBuffer(pool, fieldsCount, rowcmp, charset);
    }

    @Override
    protected void doneOnlyMemory() {
        // no spill happened: a single in-memory sort yields the final order
        Collections.sort(rows, this.rowcmp);
    }

    @Override
    protected void beforeFlushRows() {
        // each flushed tape must be sorted so the disk buffer can merge them
        Collections.sort(rows, this.rowcmp);
    }
}

View File

@@ -0,0 +1,32 @@
package io.mycat.backend.mysql.store;
import io.mycat.backend.mysql.store.diskbuffer.UnSortedResultDiskBuffer;
import io.mycat.backend.mysql.store.result.ResultExternal;
import io.mycat.buffer.BufferPool;
/**
 * A {@link LocalResult} that preserves insertion order: no sorting is done
 * either in memory or before spilling to disk.
 */
public class UnSortedLocalResult extends LocalResult {
    public UnSortedLocalResult(int initialCapacity, int fieldsCount, BufferPool pool, String charset) {
        super(initialCapacity, fieldsCount, pool, charset);
    }

    public UnSortedLocalResult(int fieldsCount, BufferPool pool, String charset) {
        this(DEFAULT_INITIAL_CAPACITY, fieldsCount, pool, charset);
    }

    @Override
    protected ResultExternal makeExternal() {
        return new UnSortedResultDiskBuffer(pool, fieldsCount, charset);
    }

    @Override
    protected void doneOnlyMemory() {
        // insertion order is the output order; nothing to finalize
    }

    @Override
    protected void beforeFlushRows() {
        // rows are flushed as-is, in arrival order
    }
}

View File

@@ -0,0 +1,72 @@
package io.mycat.backend.mysql.store.diskbuffer;
import io.mycat.backend.mysql.nio.handler.util.RBTMinHeap;
import io.mycat.backend.mysql.nio.handler.util.RowDataComparator;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.RowDataPacket;
/**
 * Disk result buffer that yields each distinct row exactly once. It reuses the
 * sorted tape-merge machinery of {@link SortedResultDiskBuffer}, but while
 * refilling the merge heap it drops rows that compare equal to an item already
 * in the heap (subclasses may merge them instead via {@link #onFoundRow}).
 *
 * @author chenzifei
 *
 */
public class DistinctResultDiskBuffer extends SortedResultDiskBuffer {
    /**
     *
     * @param pool
     * @param columnCount
     * @param cmp
     *            distinct selectable compator
     */
    public DistinctResultDiskBuffer(BufferPool pool, int columnCount, RowDataComparator cmp, String charset) {
        super(pool, columnCount, cmp, charset);
    }

    @Override
    public RowDataPacket next() {
        if (heap.isEmpty())
            return null;
        // take the smallest item, then refill the heap from the tape it came from
        TapeItem top = heap.poll();
        addToHeap(top.tape);
        return top.row;
    }

    /**
     * Pull rows from the tape until one that is not already in the heap is
     * found (that one is added), or the tape is exhausted. Duplicates are
     * reported to {@link #onFoundRow} and skipped.
     *
     * @param tape the tape to refill from
     */
    protected void addToHeap(ResultDiskTape tape) {
        for (RowDataPacket row = tape.nextRow(); row != null; row = tape.nextRow()) {
            TapeItem candidate = new TapeItem(row, tape);
            TapeItem existing = heap.find(candidate);
            if (existing == null) {
                heap.add(candidate);
                return;
            }
            // equal row already pending: let subclasses merge, then keep reading
            onFoundRow(existing.row, row);
        }
    }

    /** hook for subclasses (e.g. group-by) to merge a duplicate into the kept row */
    protected void onFoundRow(RowDataPacket oldRow, RowDataPacket row) {
    }

    @Override
    protected void resetHeap() {
        if (heap == null) {
            this.heap = new RBTMinHeap<TapeItem>(this.heapCmp);
        }
        heap.clear();
        // seed the heap with one (distinct) candidate from every tape
        for (ResultDiskTape tape : tapes) {
            addToHeap(tape);
        }
    }
}

View File

@@ -0,0 +1,148 @@
package io.mycat.backend.mysql.store.diskbuffer;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.SerializationUtils;
import org.apache.log4j.Logger;
import io.mycat.backend.mysql.nio.handler.query.DMLResponseHandler.HandlerType;
import io.mycat.backend.mysql.nio.handler.query.impl.groupby.directgroupby.DGRowPacket;
import io.mycat.backend.mysql.nio.handler.util.HandlerTool;
import io.mycat.backend.mysql.nio.handler.util.RowDataComparator;
import io.mycat.backend.mysql.store.FileStore;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.FieldPacket;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.plan.common.field.Field;
import io.mycat.plan.common.item.function.sumfunc.Aggregator.AggregatorType;
import io.mycat.plan.common.item.function.sumfunc.ItemSum;
/**
 * the disk buffer which need to group by all the tapes value of it
 *
 * @author chenzifei
 *
 */
public class GroupResultDiskBuffer extends DistinctResultDiskBuffer {
    private final Logger logger = Logger.getLogger(GroupResultDiskBuffer.class);
    /**
     * store the origin row fields,(already contains the item_sum fields in
     * rowpackets we should calculate the item_sums again when next() is
     * called!)
     */
    private final List<Field> fields;
    /** aggregator items instantiated from the query's sum functions */
    private final List<ItemSum> sums;
    /**
     *
     * @param pool
     * @param columnCount
     * @param cmp
     *            group by cmptor
     * @param packets
     *            packets which already contain sum_function's fieldpacket,
     *            sum_packets are put in the front
     * @param sumFunctions
     */
    public GroupResultDiskBuffer(BufferPool pool, int fieldsCount, RowDataComparator cmp, List<FieldPacket> packets,
            List<ItemSum> sumFunctions, boolean isAllPushDown, String charset) {
        super(pool, fieldsCount, cmp, charset);
        this.fields = HandlerTool.createFields(packets);
        this.sums = new ArrayList<ItemSum>();
        for (ItemSum sumFunc : sumFunctions) {
            ItemSum sum = (ItemSum) (HandlerTool.createItem(sumFunc, this.fields, 0, isAllPushDown,
                    HandlerType.GROUPBY, charset));
            this.sums.add(sum);
        }
        logger.info("prepare_sum_aggregators");
        prepare_sum_aggregators(this.sums, true);
    }
    @Override
    protected ResultDiskTape makeResultDiskTape() {
        // tapes must deserialize the leading sum-transfer slots in addition to
        // the plain field values
        return new GroupResultDiskTape(pool, file, columnCount, sums.size());
    }
    // merge `row` into `oldRow`: restore aggregators from oldRow's transfer
    // objects, fold in row, write the updated transfer objects back
    @Override
    protected void onFoundRow(RowDataPacket oldRow, RowDataPacket row) {
        // we need to calculate group by
        init_sum_functions(this.sums, oldRow);
        update_sum_func(this.sums, row);
        for (int i = 0; i < this.sums.size(); i++) {
            ItemSum sum = this.sums.get(i);
            Object b = sum.getTransAggObj();
            int transSize = sum.getTransSize();
            ((DGRowPacket) oldRow).setSumTran(i, b, transSize);
        }
    }
    /**
     * see Sql_executor.cc
     *
     * @return
     */
    protected void prepare_sum_aggregators(List<ItemSum> funcs, boolean need_distinct) {
        for (ItemSum func : funcs) {
            func.setAggregator(need_distinct && func.has_with_distinct()
                    ? AggregatorType.DISTINCT_AGGREGATOR : AggregatorType.SIMPLE_AGGREGATOR,
                    null);
        }
    }
    // reset each aggregator from the transfer object carried by `row`, then add
    // the row's own values
    protected void init_sum_functions(List<ItemSum> funcs, RowDataPacket row) {
        for (int i = 0; i < funcs.size(); i++) {
            ItemSum sum = funcs.get(i);
            Object transObj = ((DGRowPacket) row).getSumTran(i);
            sum.resetAndAdd(row, transObj);
        }
    }
    // fold `row` (and its transfer object) into the current aggregator state
    protected void update_sum_func(List<ItemSum> funcs, RowDataPacket row) {
        for (int index = 0; index < funcs.size(); index++) {
            ItemSum sum = funcs.get(index);
            Object transObj = ((DGRowPacket) row).getSumTran(index);
            sum.aggregatorAdd(row, transObj);
        }
    }
    /**
     * A tape that, unlike the plain ResultDiskTape, also carries the serialized
     * aggregate (sum) transfer values in the first {@code sumSize} columns of
     * each stored row; they are deserialized back into DGRowPacket slots.
     *
     * @author zhangyaohua
     * @CreateTime 2015年5月20日
     */
    static class GroupResultDiskTape extends ResultDiskTape {
        private final int orgFieldCount;
        private final int sumSize;
        public GroupResultDiskTape(BufferPool pool, FileStore file, int fieldCount, int sumSize) {
            // the on-disk row has sumSize extra leading columns
            super(pool, file, sumSize + fieldCount);
            this.orgFieldCount = fieldCount;
            this.sumSize = sumSize;
        }
        @Override
        public RowDataPacket nextRow() {
            RowDataPacket rp = super.nextRow();
            if (rp == null)
                return null;
            else {
                DGRowPacket newRow = new DGRowPacket(orgFieldCount, sumSize);
                // first sumSize columns: serialized aggregate transfer objects
                for (int index = 0; index < sumSize; index++) {
                    byte[] b = rp.getValue(index);
                    if (b != null) {
                        Object obj = SerializationUtils.deserialize(b);
                        newRow.setSumTran(index, obj, b.length);
                    }
                }
                // remaining columns: the original field values
                for (int index = sumSize; index < this.fieldCount; index++) {
                    newRow.add(rp.getValue(index));
                }
                return newRow;
            }
        }
    }
}

View File

@@ -0,0 +1,217 @@
package io.mycat.backend.mysql.store.diskbuffer;
import java.nio.ByteBuffer;
import io.mycat.backend.mysql.store.FileStore;
import io.mycat.backend.mysql.store.result.ResultExternal;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.util.exception.NotSupportException;
/**
 * a buffer used to store large amount of data on disk or virtual memory mapped
 * on disk
 *
 * @author chenzifei
 *
 */
public abstract class ResultDiskBuffer implements ResultExternal {
    protected final int columnCount;
    protected final BufferPool pool;
    /** staging buffer that batches serialized rows before they hit the file */
    protected ByteBuffer writeBuffer;
    /** backing temp-file store (mapped memory first, plain disk as fallback) */
    protected FileStore file;
    protected int rowCount = 0;
    /* @bug 1208 */
    protected String charset = "UTF-8";
    public ResultDiskBuffer(BufferPool pool, int columnCount, String charset) {
        this.pool = pool;
        this.columnCount = columnCount;
        this.writeBuffer = pool.allocate();
        this.file = new FileStore("nioMapped:AresMemory", "rw");
        if (charset != null)
            this.charset = charset;
    }
    @Override
    public void done() {
        // writing finished: rewind so reading starts from the beginning
        this.file.seek(0);
    }
    @Override
    public int removeRow(RowDataPacket row) {
        throw new NotSupportException("unsupportted remove row");
    }
    @Override
    public boolean contains(RowDataPacket row) {
        throw new NotSupportException("unsupportted contains");
    }
    @Override
    public int addRow(RowDataPacket row) {
        throw new NotSupportException("unsupportted addRow");
    }
    @Override
    public ResultExternal createShallowCopy() {
        throw new NotSupportException("unsupportted createShallowCopy");
    }
    @Override
    public void close() {
        if (file != null)
            file.closeAndDeleteSilently();
        file = null;
        pool.recycle(writeBuffer);
    }
    // append src to buffer, flushing the buffer to the file whenever it fills;
    // returns the (possibly partially filled) buffer for further writes
    protected ByteBuffer writeToBuffer(byte[] src, ByteBuffer buffer) {
        int offset = 0;
        int len = src.length;
        int remaining = buffer.remaining();
        while (len > 0) {
            if (remaining >= len) {
                buffer.put(src, offset, len);
                break;
            } else {
                buffer.put(src, offset, remaining);
                buffer.flip();
                file.write(buffer);
                buffer.clear();
                offset += remaining;
                len -= remaining;
                remaining = buffer.remaining();
                continue;
            }
        }
        return buffer;
    }
    /** a row together with the tape it was read from, used as the heap element */
    static class TapeItem {
        RowDataPacket row;
        ResultDiskTape tape;
        public TapeItem(RowDataPacket row, ResultDiskTape tape) {
            this.row = row;
            this.tape = tape;
        }
    }
    /**
     * Represents a virtual disk tape for the merge sort algorithm. Each virtual
     * disk tape is a region of the temp file.
     */
    static class ResultDiskTape {
        BufferPool pool;
        FileStore file;
        int fieldCount;
        // next position in the file to read from
        long filePos;
        // [start, end) delimit this tape's region of the temp file
        long start;
        long end;
        // logical read progress inside the region
        long pos;
        // offset of the first unconsumed byte inside readBuffer
        int readBufferOffset;
        ByteBuffer readBuffer;
        RowDataPacket currentRow;
        public ResultDiskTape(BufferPool pool, FileStore file, int fieldCount) {
            this.pool = pool;
            this.file = file;
            this.fieldCount = fieldCount;
            this.readBuffer = pool.allocate();
        }
        public boolean isEnd() {
            return isReadAll() && this.currentRow == null;
        }
        // read and decode the next row packet, or null when the tape is exhausted
        public RowDataPacket nextRow() {
            if (isReadAll())
                return null;
            byte[] row = getRow();
            RowDataPacket currentRow = new RowDataPacket(fieldCount);
            currentRow.read(row);
            return currentRow;
        }
        private boolean isReadAll() {
            return this.end == this.pos;
        }
        // pull more bytes from the file into readBuffer, never past `end`
        private void readIntoBuffer() {
            file.seek(filePos);
            filePos += file.read(readBuffer, end);
        }
        // extract one complete packet (4-byte header + payload) from readBuffer,
        // refilling/compacting/growing the buffer as required
        private byte[] getRow() {
            int offset = readBufferOffset, length = 0, position = readBuffer.position();
            length = getPacketLength(readBuffer, offset);
            // loop until the header is readable (length != -1) and the whole
            // packet body is inside the buffer
            while (length == -1 || position < offset + length) {
                if (!readBuffer.hasRemaining()) {
                    checkReadBuffer(offset);
                }
                // read new data to buffer
                readIntoBuffer();
                // get new offset for buffer compact
                offset = readBufferOffset;
                position = readBuffer.position();
                if (length == -1) {
                    length = getPacketLength(readBuffer, offset);
                }
            }
            readBuffer.position(offset);
            byte[] data = new byte[length];
            readBuffer.get(data, 0, length);
            offset += length;
            pos += length;
            if (position == offset) {
                // buffer fully consumed: reset it and prefetch the next chunk
                if (readBufferOffset != 0) {
                    readBufferOffset = 0;
                }
                readBuffer.clear();
                readIntoBuffer();
            } else {
                // leave the unconsumed tail in place for the next getRow() call
                readBufferOffset = offset;
                readBuffer.position(position);
            }
            return data;
        }
        // MySQL-style packet header: 3-byte little-endian payload length; the
        // returned value includes the 4 header bytes. -1 means header incomplete.
        private int getPacketLength(ByteBuffer buffer, int offset) {
            if (buffer.position() < offset + 4) {
                return -1;
            } else {
                int length = buffer.get(offset) & 0xff;
                length |= (buffer.get(++offset) & 0xff) << 8;
                length |= (buffer.get(++offset) & 0xff) << 16;
                return length + 4;
            }
        }
        private void checkReadBuffer(int offset) {
            // if offset is 0,then expend buffer; else set offset to 0,compact
            // buffer
            if (offset == 0) {
                if (readBuffer.capacity() >= Integer.MAX_VALUE) {
                    throw new IllegalArgumentException("Packet size over the limit.");
                }
                // NOTE(review): `size` is an int, so `size > Integer.MAX_VALUE`
                // is always false; a capacity >= 2^30 would overflow negative on
                // the shift and break allocate() — confirm buffers stay small
                int size = readBuffer.capacity() << 1;
                size = (size > Integer.MAX_VALUE) ? Integer.MAX_VALUE : size;
                ByteBuffer newBuffer = ByteBuffer.allocate(size);
                readBuffer.position(offset);
                newBuffer.put(readBuffer);
                pool.recycle(readBuffer);
                readBuffer = newBuffer;
            } else {
                readBuffer.position(offset);
                readBuffer.compact();
                readBufferOffset = 0;
            }
        }
    }
}

View File

@@ -0,0 +1,142 @@
package io.mycat.backend.mysql.store.diskbuffer;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import org.apache.log4j.Logger;
import io.mycat.backend.mysql.nio.handler.util.ArrayMinHeap;
import io.mycat.backend.mysql.nio.handler.util.RowDataComparator;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.util.MinHeap;
import io.mycat.util.TimeUtil;
/**
* sort need diskbuffer, when done() is called,users use next() to get the
* result rows which have been sorted already
*
* @author chenzifei
*
*/
public class SortedResultDiskBuffer extends ResultDiskBuffer {
private final Logger logger = Logger.getLogger(SortedResultDiskBuffer.class);
/**
* the tapes to store data, which is sorted each, so we can use minheap to
* sort them
*/
protected final ArrayList<ResultDiskTape> tapes;
/**
* the sort cmptor
*/
private final RowDataComparator comparator;
/**
* the heap used for sorting the sorted tapes
*/
protected MinHeap<TapeItem> heap;
protected Comparator<TapeItem> heapCmp;
public SortedResultDiskBuffer(BufferPool pool, int columnCount, RowDataComparator cmp, String charset) {
super(pool, columnCount, charset);
tapes = new ArrayList<ResultDiskTape>();
this.comparator = cmp;
this.heapCmp = new Comparator<TapeItem>() {
@Override
public int compare(TapeItem o1, TapeItem o2) {
RowDataPacket row1 = o1.row;
RowDataPacket row2 = o2.row;
if (row1 == null || row2 == null) {
if (row1 == row2)
return 0;
if (row1 == null)
return -1;
return 1;
}
return comparator.compare(row1, row2);
}
};
}
@Override
public final int TapeCount() {
return tapes.size();
}
@Override
public final int addRows(List<RowDataPacket> rows) {
/**
* we should make rows sorted first, then write them into file
*/
if (logger.isDebugEnabled()) {
logger.debug(" convert list to array start:" + TimeUtil.currentTimeMillis());
}
RowDataPacket[] rowArray = new RowDataPacket[rows.size()];
rows.toArray(rowArray);
long start = file.getFilePointer();
for (RowDataPacket row : rowArray) {
byte[] b = row.toBytes();
writeBuffer = writeToBuffer(b, writeBuffer);
}
// help for gc
rowArray = null;
writeBuffer.flip();
file.write(writeBuffer);
writeBuffer.clear();
/* make a new tape */
ResultDiskTape tape = makeResultDiskTape();
tape.start = start;
tape.filePos = start;
tape.end = file.getFilePointer();
tapes.add(tape);
rowCount += rows.size();
if (logger.isDebugEnabled()) {
logger.debug("write rows to disk end:" + TimeUtil.currentTimeMillis());
}
return rowCount;
}
/**
* to override by group by
*
* @return
*/
protected ResultDiskTape makeResultDiskTape() {
return new ResultDiskTape(pool, file, columnCount);
}
@Override
public RowDataPacket next() {
if (heap.isEmpty())
return null;
TapeItem tapeItem = heap.poll();
RowDataPacket newRow = tapeItem.tape.nextRow();
if (newRow != null) {
heap.add(new TapeItem(newRow, tapeItem.tape));
}
return tapeItem.row;
}
@Override
public final void reset() {
for (ResultDiskTape tape : tapes) {
tape.filePos = tape.start;
tape.pos = tape.start;
tape.readBufferOffset = 0;
tape.readBuffer.clear();
}
resetHeap();
}
protected void resetHeap() {
if (heap == null)
heap = new ArrayMinHeap<TapeItem>(tapes.size(), this.heapCmp);
heap.clear();
// init heap
for (int i = 0; i < tapes.size(); i++) {
heap.add(new TapeItem(tapes.get(i).nextRow(), tapes.get(i)));
}
}
}

View File

@@ -0,0 +1,68 @@
package io.mycat.backend.mysql.store.diskbuffer;
import java.util.List;
import org.apache.log4j.Logger;
import io.mycat.buffer.BufferPool;
import io.mycat.net.mysql.RowDataPacket;
import io.mycat.util.TimeUtil;
/**
 * Disk buffer that needs no sorting: incoming rows are appended directly to a
 * single tape and read back in insertion order.
 *
 * @author chenzifei
 */
public class UnSortedResultDiskBuffer extends ResultDiskBuffer {
    private final Logger logger = Logger.getLogger(UnSortedResultDiskBuffer.class);

    /** the single tape holding all unsorted data */
    private final ResultDiskTape mainTape;

    public UnSortedResultDiskBuffer(BufferPool pool, int columnCount, String charset) {
        super(pool, columnCount, charset);
        mainTape = new ResultDiskTape(pool, file, columnCount);
    }

    @Override
    public int TapeCount() {
        return 1;
    }

    @Override
    public int addRows(List<RowDataPacket> rows) {
        if (logger.isDebugEnabled()) {
            logger.debug("addRows start:" + TimeUtil.currentTimeMillis());
        }
        // serialize every packet into the write buffer, then flush the batch
        for (RowDataPacket packet : rows) {
            writeBuffer = writeToBuffer(packet.toBytes(), writeBuffer);
        }
        writeBuffer.flip();
        file.write(writeBuffer);
        writeBuffer.clear();
        // the tape now extends to the new end of the file
        mainTape.end = file.getFilePointer();
        rowCount += rows.size();
        if (logger.isDebugEnabled()) {
            logger.debug("write rows to disk end:" + TimeUtil.currentTimeMillis());
        }
        return rowCount;
    }

    @Override
    public void reset() {
        // rewind the tape to its beginning and drop any buffered bytes
        mainTape.pos = mainTape.start;
        mainTape.filePos = mainTape.start;
        mainTape.readBufferOffset = 0;
        mainTape.readBuffer.clear();
    }

    @Override
    public RowDataPacket next() {
        file.seek(mainTape.pos);
        return mainTape.nextRow();
    }
}

View File

@@ -0,0 +1,69 @@
package io.mycat.backend.mysql.store.fs;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
/**
 * Skeleton FileChannel implementation: positional read/write are emulated on
 * top of the sequential operations, and every optional operation (locking,
 * mapping, scatter/gather, transfer) fails with UnsupportedOperationException.
 *
 * @author zhangyaohua
 * @CreateTime 2014-8-21
 */
public abstract class FileBase extends FileChannel {

    /**
     * Positional read emulated by seek / read / seek-back; synchronized so the
     * temporary reposition is not observable by concurrent callers.
     */
    @Override
    public synchronized int read(ByteBuffer dst, long position) throws IOException {
        long saved = position();
        position(position);
        int count = read(dst);
        position(saved);
        return count;
    }

    /** Positional write emulated the same way as {@link #read(ByteBuffer, long)}. */
    @Override
    public synchronized int write(ByteBuffer src, long position) throws IOException {
        long saved = position();
        position(position);
        int count = write(src);
        position(saved);
        return count;
    }

    // ---- optional operations: not supported by this implementation ----

    @Override
    public FileLock lock(long position, long size, boolean shared) throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public FileLock tryLock(long position, long size, boolean shared) throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public MappedByteBuffer map(MapMode mode, long position, long size) throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public long read(ByteBuffer[] dsts, int offset, int length) throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public long write(ByteBuffer[] srcs, int offset, int length) throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public long transferFrom(ReadableByteChannel src, long position, long count) throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public long transferTo(long position, long count, WritableByteChannel target) throws IOException {
        throw new UnsupportedOperationException();
    }
}

View File

@@ -0,0 +1,317 @@
package io.mycat.backend.mysql.store.fs;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.channels.FileChannel;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
/**
* A path to a file. It similar to the Java 7 <code>java.nio.file.Path</code>,
* but simpler, and works with older versions of Java. It also implements the
* relevant methods found in <code>java.nio.file.FileSystem</code> and
* <code>FileSystems</code>
*
* @author zhangyaohua
* @createTime 2013-11-11
*/
public abstract class FilePath {
private static FilePath defaultProvider;
private static Map<String, FilePath> providers;
/**
* The prefix for temporary files.
*/
private static String tempRandom;
private static long tempSequence;
/**
* The complete path (which may be absolute or relative, depending on the
* file system).
*/
protected String name;
/**
* Get the file path object for the given path. Windows-style '\' is
* replaced with '/'.
*
* @param path
* the path
* @return the file path object
*/
public static FilePath get(String path) {
path = path.replace('\\', '/');
int index = path.indexOf(':');
registerDefaultProviders();
if (index < 2) {
// use the default provider if no prefix or
// only a single character (drive name)
return defaultProvider.getPath(path);
}
String scheme = path.substring(0, index);
FilePath p = providers.get(scheme);
if (p == null) {
// provider not found - use the default
p = defaultProvider;
}
return p.getPath(path);
// return p;
}
private static void registerDefaultProviders() {
if (providers == null || defaultProvider == null) {
Map<String, FilePath> map = Collections.synchronizedMap(new HashMap<String, FilePath>());
for (String c : new String[] { "com.actionsky.ares.partition.store.fs.FilePathDisk",
"com.actionsky.ares.partition.store.fs.FilePathNio",
"com.actionsky.ares.partition.store.fs.FilePathNioMapped" }) {
try {
FilePath p = (FilePath) Class.forName(c).newInstance();
map.put(p.getScheme(), p);
if (defaultProvider == null) {
defaultProvider = p;
}
} catch (Exception e) {
// ignore - the files may be excluded in purpose
}
}
providers = map;
}
}
/**
* Register a file provider.
*
* @param provider
* the file provider
*/
public static void register(FilePath provider) {
registerDefaultProviders();
providers.put(provider.getScheme(), provider);
}
/**
* Unregister a file provider.
*
* @param provider
* the file provider
*/
public static void unregister(FilePath provider) {
registerDefaultProviders();
providers.remove(provider.getScheme());
}
/**
* Get the size of a file in bytes
*
* @return the size in bytes
*/
public abstract long size();
/**
* Rename a file if this is allowed.
*
* @param newName
* the new fully qualified file name
*/
public abstract void moveTo(FilePath newName);
/**
* Create a new file.
*
* @return true if creating was successful
*/
public abstract boolean createFile();
/**
* Checks if a file exists.
*
* @return true if it exists
*/
public abstract boolean exists();
/**
* Delete a file or directory if it exists. Directories may only be deleted
* if they are empty.
*/
public abstract void delete();
/**
* List the files and directories in the given directory.
*
* @return the list of fully qualified file names
*/
public abstract List<FilePath> newDirectoryStream();
/**
* Normalize a file name.
*
* @return the normalized file name
*/
public abstract FilePath toRealPath();
/**
* Get the parent directory of a file or directory.
*
* @return the parent directory name
*/
public abstract FilePath getParent();
/**
* Check if it is a file or a directory.
*
* @return true if it is a directory
*/
public abstract boolean isDirectory();
/**
* Check if the file name includes a path.
*
* @return if the file name is absolute
*/
public abstract boolean isAbsolute();
/**
* Get the last modified date of a file
*
* @return the last modified date
*/
public abstract long lastModified();
/**
* Check if the file is writable.
*
* @return if the file is writable
*/
public abstract boolean canWrite();
/**
* Create a directory (all required parent directories already exist).
*/
public abstract void createDirectory();
/**
* Get the file or directory name (the last element of the path).
*
* @return the last element of the path
*/
public String getName() {
int idx = Math.max(name.indexOf(':'), name.lastIndexOf('/'));
return idx < 0 ? name : name.substring(idx + 1);
}
/**
* Create an output stream to write into the file.
*
* @param append
* if true, the file will grow, if false, the file will be
* truncated first
* @return the output stream
*/
public abstract OutputStream newOutputStream(boolean append) throws IOException;
/**
* Open a random access file object.
*
* @param mode
* the access mode. Supported are r, rw, rws, rwd
* @return the file object
*/
public abstract FileChannel open(String mode) throws IOException;
/**
* Create an input stream to read from the file.
*
* @return the input stream
*/
public abstract InputStream newInputStream() throws IOException;
/**
* Disable the ability to write.
*
* @return true if the call was successful
*/
public abstract boolean setReadOnly();
/**
* Create a new temporary file.
*
* @param suffix
* the suffix
* @param deleteOnExit
* if the file should be deleted when the virtual machine exists
* @return the name of the created file
*/
public FilePath createTempFile(String suffix, boolean deleteOnExit) throws IOException {
while (true) {
FilePath p = getPath(name + getNextTempFileNamePart(false) + suffix);
if (p.exists()) {
// in theory, the random number could collide
getNextTempFileNamePart(true);
continue;
}
return p;
}
}
/**
* Get the next temporary file name part (the part in the middle).
*
* @param newRandom
* if the random part of the filename should change
* @return the file name part
*/
protected static synchronized String getNextTempFileNamePart(boolean newRandom) {
if (newRandom || tempRandom == null) {
tempRandom = new Random().nextInt(Integer.MAX_VALUE) + ".";
}
return tempRandom + tempSequence++;
}
/**
* Get the string representation. The returned string can be used to
* construct a new object.
*
* @return the path as a string
*/
@Override
public String toString() {
return name;
}
/**
* Get the scheme (prefix) for this file provider. This is similar to
* <code>java.nio.file.spi.FileSystemProvider.getScheme</code>.
*
* @return the scheme
*/
public abstract String getScheme();
/**
* Convert a file to a path. This is similar to
* <code>java.nio.file.spi.FileSystemProvider.getPath</code>, but may return
* an object even if the scheme doesn't match in case of the the default
* file provider.
*
* @param path
* the path
* @return the file path object
*/
public abstract FilePath getPath(String path);
/**
* Get the unwrapped file name (without wrapper prefixes if wrapping /
* delegating file systems are used).
*
* @return the unwrapped path
*/
public FilePath unwrap() {
return this;
}
}

View File

@@ -0,0 +1,405 @@
package io.mycat.backend.mysql.store.fs;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.net.URL;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.NonWritableChannelException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;
import io.mycat.config.ErrorCode;
import io.mycat.util.exception.TmpFileException;
/**
 * Disk based FilePath implementation (scheme "file") built on java.io.File
 * and RandomAccessFile.
 *
 * @author zhangyaohua
 * @CreateTime 2014-9-8
 */
public class FilePathDisk extends FilePath {
    private static final String CLASSPATH_PREFIX = "classpath:";

    @Override
    public FileChannel open(String mode) throws IOException {
        FileDisk f;
        try {
            f = new FileDisk(name, mode);
        } catch (IOException e) {
            // the open may have failed because of file handles held by
            // unreferenced streams; force finalization and retry once
            freeMemoryAndFinalize();
            try {
                f = new FileDisk(name, mode);
            } catch (IOException e2) {
                throw e;
            }
        }
        return f;
    }

    @Override
    public String getScheme() {
        return "file";
    }

    @Override
    public FilePath getPath(String path) {
        FilePathDisk p = new FilePathDisk();
        p.name = translateFileName(path);
        return p;
    }

    @Override
    public long size() {
        return new File(name).length();
    }

    /**
     * Translate the file name to the native format. This will replace '\' with
     * '/'.
     *
     * @param fileName
     *            the file name
     * @return the native file name
     */
    protected String translateFileName(String fileName) {
        fileName = fileName.replace('\\', '/');
        if (fileName.startsWith("file:")) {
            fileName = fileName.substring("file:".length());
        }
        return fileName;
    }

    @Override
    public void moveTo(FilePath newName) {
        File oldFile = new File(name);
        File newFile = new File(newName.name);
        if (oldFile.getAbsolutePath().equals(newFile.getAbsolutePath())) {
            return;
        }
        if (!oldFile.exists()) {
            throw TmpFileException.get(ErrorCode.ER_FILE_RENAME, name + " (not found)", newName.name);
        }
        if (newFile.exists()) {
            throw TmpFileException.get(ErrorCode.ER_FILE_RENAME, name, newName + " (exists)");
        }
        // renameTo can fail transiently (e.g. concurrent access); retry briefly
        for (int i = 0; i < 2; i++) {
            boolean ok = oldFile.renameTo(newFile);
            if (ok) {
                return;
            }
            wait(i);
        }
        throw TmpFileException.get(ErrorCode.ER_FILE_RENAME, new String[] { name, newName.name });
    }

    /** Back off before retrying a failed file-system operation. */
    protected void wait(int i) {
        // NOTE(review): callers currently retry at most twice, so this gc
        // trigger is unreachable — confirm the intended retry count
        if (i == 8) {
            System.gc();
        }
        // sleep at most 256 ms
        long sleep = Math.min(256, i * i);
        LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(sleep));
    }

    @Override
    public boolean createFile() {
        File file = new File(name);
        for (int i = 0; i < 2; i++) {
            try {
                return file.createNewFile();
            } catch (IOException e) {
                // 'access denied' is really a concurrent access problem
                wait(i);
            }
        }
        return false;
    }

    @Override
    public boolean exists() {
        return new File(name).exists();
    }

    @Override
    public void delete() {
        File file = new File(name);
        for (int i = 0; i < 2; i++) {
            boolean ok = file.delete();
            if (ok || !file.exists()) {
                return;
            }
            wait(i);
        }
        throw TmpFileException.get(ErrorCode.ER_FILE_DELETE, name);
    }

    @Override
    public List<FilePath> newDirectoryStream() {
        ArrayList<FilePath> list = new ArrayList<FilePath>();
        File f = new File(name);
        try {
            String[] files = f.list();
            if (files != null) {
                String base = f.getCanonicalPath();
                // the canonical path carries no trailing separator, so one must
                // be inserted; the previous "base + files[i]" produced broken
                // children like "/tmp/dirchild"
                for (int i = 0, len = files.length; i < len; i++) {
                    list.add(getPath(base + "/" + files[i]));
                }
            }
            return list;
        } catch (IOException e) {
            throw TmpFileException.convertIOException(ErrorCode.ER_IO_EXCEPTION, e, name);
        }
    }

    @Override
    public boolean canWrite() {
        return canWriteInternal(new File(name));
    }

    @Override
    public boolean setReadOnly() {
        File f = new File(name);
        return f.setReadOnly();
    }

    @Override
    public FilePath toRealPath() {
        try {
            String fileName = new File(name).getCanonicalPath();
            return getPath(fileName);
        } catch (IOException e) {
            throw TmpFileException.convertIOException(ErrorCode.ER_IO_EXCEPTION, e, name);
        }
    }

    @Override
    public FilePath getParent() {
        String p = new File(name).getParent();
        return p == null ? null : getPath(p);
    }

    @Override
    public boolean isDirectory() {
        return new File(name).isDirectory();
    }

    @Override
    public boolean isAbsolute() {
        return new File(name).isAbsolute();
    }

    @Override
    public long lastModified() {
        return new File(name).lastModified();
    }

    private static boolean canWriteInternal(File file) {
        try {
            if (!file.canWrite()) {
                return false;
            }
        } catch (Exception e) {
            // workaround for GAE which throws a
            // java.security.AccessControlException
            return false;
        }
        // File.canWrite() does not respect windows user permissions,
        // so we must try to open it using the mode "rw".
        // See also http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4420020
        RandomAccessFile r = null;
        try {
            r = new RandomAccessFile(file, "rw");
            return true;
        } catch (FileNotFoundException e) {
            return false;
        } finally {
            if (r != null) {
                try {
                    r.close();
                } catch (IOException e) {
                    // ignore
                }
            }
        }
    }

    @Override
    public void createDirectory() {
        File dir = new File(name);
        for (int i = 0; i < 2; i++) {
            if (dir.exists()) {
                if (dir.isDirectory()) {
                    return;
                }
                throw TmpFileException.get(ErrorCode.ER_FILE_CREATE, name + " (a file with this name is already exists)");
            } else if (dir.mkdir()) {
                return;
            }
            wait(i);
        }
        throw TmpFileException.get(ErrorCode.ER_FILE_CREATE, name);
    }

    @Override
    public OutputStream newOutputStream(boolean append) throws IOException {
        try {
            File file = new File(name);
            File parent = file.getParentFile();
            if (parent != null) {
                FileUtils.createDirectories(parent.getAbsolutePath());
            }
            FileOutputStream out = new FileOutputStream(name, append);
            return out;
        } catch (IOException e) {
            freeMemoryAndFinalize();
            // retry once, keeping the requested append mode; the previous
            // retry dropped it and silently truncated the file
            return new FileOutputStream(name, append);
        }
    }

    @Override
    public InputStream newInputStream() throws IOException {
        int index = name.indexOf(':');
        if (index > 1 && index < 20) {
            // if the ':' is in position 1, a windows file access is assumed:
            // C:.. or D:, and if the ':' is not at the beginning, assume its a
            // file name with a colon
            if (name.startsWith(CLASSPATH_PREFIX)) {
                String fileName = name.substring(CLASSPATH_PREFIX.length());
                if (!fileName.startsWith("/")) {
                    fileName = "/" + fileName;
                }
                InputStream in = getClass().getResourceAsStream(fileName);
                if (in == null) {
                    in = Thread.currentThread().getContextClassLoader().getResourceAsStream(fileName);
                }
                if (in == null) {
                    throw new FileNotFoundException("resource " + fileName);
                }
                return in;
            }
            // otherwise an URL is assumed
            URL url = new URL(name);
            InputStream in = url.openStream();
            return in;
        }
        FileInputStream in = new FileInputStream(name);
        return in;
    }

    /**
     * Call the garbage collection and run finalization. This close all files
     * that were not closed, and are no longer referenced.
     */
    static void freeMemoryAndFinalize() {
        Runtime rt = Runtime.getRuntime();
        long mem = rt.freeMemory();
        for (int i = 0; i < 16; i++) {
            rt.gc();
            long now = rt.freeMemory();
            rt.runFinalization();
            if (now == mem) {
                break;
            }
            mem = now;
        }
    }

    @Override
    public FilePath createTempFile(String suffix, boolean deleteOnExit) throws IOException {
        // ensure the parent directory of the temp file exists first
        String fileName = name + ".";
        File dir = new File(fileName).getAbsoluteFile().getParentFile();
        FileUtils.createDirectories(dir.getAbsolutePath());
        return super.createTempFile(suffix, deleteOnExit);
    }

    /**
     * Uses java.io.RandomAccessFile to access a file.
     */
    class FileDisk extends FileBase {
        private final RandomAccessFile file;
        private final String name;
        private final boolean readOnly;

        FileDisk(String fileName, String mode) throws FileNotFoundException {
            this.file = new RandomAccessFile(fileName, mode);
            this.name = fileName;
            this.readOnly = mode.equals("r");
        }

        @Override
        public void force(boolean metaData) throws IOException {
            file.getChannel().force(metaData);
        }

        @Override
        public FileChannel truncate(long newLength) throws IOException {
            // compatibility with JDK FileChannel#truncate
            if (readOnly) {
                throw new NonWritableChannelException();
            }
            if (newLength < file.length()) {
                file.setLength(newLength);
            }
            return this;
        }

        @Override
        public synchronized FileLock tryLock(long position, long size, boolean shared) throws IOException {
            return file.getChannel().tryLock(position, size, shared);
        }

        @Override
        public void implCloseChannel() throws IOException {
            file.close();
        }

        @Override
        public long position() throws IOException {
            return file.getFilePointer();
        }

        @Override
        public long size() throws IOException {
            return file.length();
        }

        @Override
        public int read(ByteBuffer dst) throws IOException {
            int len = file.read(dst.array(), dst.arrayOffset() + dst.position(), dst.remaining());
            if (len > 0) {
                dst.position(dst.position() + len);
            }
            return len;
        }

        @Override
        public FileChannel position(long pos) throws IOException {
            file.seek(pos);
            return this;
        }

        @Override
        public int write(ByteBuffer src) throws IOException {
            int len = src.remaining();
            file.write(src.array(), src.arrayOffset() + src.position(), len);
            src.position(src.position() + len);
            return len;
        }

        @Override
        public String toString() {
            return name;
        }
    }
}

View File

@@ -0,0 +1,167 @@
package io.mycat.backend.mysql.store.fs;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.NonWritableChannelException;
import io.mycat.MycatServer;
/**
 * <pre>
 * This file system stores files on disk and uses java.nio to access the files.
 * </pre>
 *
 * @author zhangyaohua
 * @CreateTime 2014-8-21
 */
public class FilePathNio extends FilePathWrapper {

    @Override
    public FileChannel open(String mode) throws IOException {
        // strip the "nio:" scheme prefix to obtain the on-disk file name
        String fileName = name.substring(getScheme().length() + 1);
        return new FileNio(fileName, mode);
    }

    @Override
    public String getScheme() {
        return "nio";
    }
}
/**
 * File which uses NIO FileChannel.
 */
class FileNio extends FileBase {
    private final String name;
    private final FileChannel channel;
    // NOTE(review): referenced only in the constructor; the channel keeps its
    // own reference to the RandomAccessFile, so this field exists for clarity
    private RandomAccessFile file;
    // logical size of the writable region, taken from the configured mapped
    // file size — reads and writes are clamped to [0, fileLength)
    private long fileLength;
    // mirror of the channel position, used for the region-boundary checks
    private long pos;

    FileNio(String fileName, String mode) throws IOException {
        this.name = fileName;
        this.file = new RandomAccessFile(fileName, mode);
        this.channel = file.getChannel();
        this.fileLength = MycatServer.getInstance().getConfig().getSystem().getMappedFileSize();
        this.pos = 0;
    }

    @Override
    public synchronized void implCloseChannel() throws IOException {
        channel.close();
    }

    @Override
    public long position() throws IOException {
        return channel.position();
    }

    @Override
    public long size() throws IOException {
        return channel.size();
    }

    /** Read up to the logical region end; returns -1 at end of region. */
    @Override
    public synchronized int read(ByteBuffer dst) throws IOException {
        int len = dst.remaining();
        if (len == 0) {
            return 0;
        }
        len = (int) Math.min(len, fileLength - pos);
        if (len <= 0) {
            return -1;
        }
        int limit = dst.limit();
        dst.limit(dst.position() + len);
        channel.read(dst);
        pos += len;
        dst.limit(limit);
        return len;
    }

    @Override
    public FileChannel position(long pos) throws IOException {
        channel.position(pos);
        // keep the mirror as a long: the previous "(int) pos" cast silently
        // truncated offsets at or beyond 2 GB
        this.pos = pos;
        return this;
    }

    @Override
    public synchronized int read(ByteBuffer dst, long position) throws IOException {
        return channel.read(dst, position);
    }

    @Override
    public synchronized int write(ByteBuffer src, long position) throws IOException {
        return channel.write(src, position);
    }

    @Override
    public synchronized FileChannel truncate(long newLength) throws IOException {
        try {
            long size = channel.size();
            if (newLength < size) {
                long pos = channel.position();
                channel.truncate(newLength);
                long newPos = channel.position();
                if (pos < newLength) {
                    // position should stay
                    // in theory, this should not be needed
                    if (newPos != pos) {
                        channel.position(pos);
                        this.pos = pos;
                    }
                } else if (newPos > newLength) {
                    // looks like a bug in this FileChannel implementation, as
                    // the documentation says the position needs to be changed
                    channel.position(newLength);
                    this.pos = newLength;
                }
            }
            return this;
        } catch (NonWritableChannelException e) {
            throw new IOException("read only");
        }
    }

    @Override
    public void force(boolean metaData) throws IOException {
        channel.force(metaData);
    }

    /** Write within the logical region; a write crossing the end is clipped. */
    @Override
    public synchronized int write(ByteBuffer src) throws IOException {
        try {
            int len;
            if (fileLength < pos + src.remaining()) {
                // clip the write at the region boundary
                int length = (int) (fileLength - pos);
                int limit = src.limit();
                src.limit(length);
                len = channel.write(src);
                src.limit(limit);
                pos += len;
                return len;
            } else {
                len = channel.write(src);
                pos += len;
                return len;
            }
        } catch (NonWritableChannelException e) {
            throw new IOException("read only");
        }
    }

    @Override
    public synchronized FileLock tryLock(long position, long size, boolean shared) throws IOException {
        return channel.tryLock(position, size, shared);
    }

    @Override
    public String toString() {
        return "nio:" + name;
    }
}

View File

@@ -0,0 +1,280 @@
package io.mycat.backend.mysql.store.fs;
import java.io.EOFException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.lang.ref.WeakReference;
import java.lang.reflect.Method;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import org.apache.log4j.Logger;
import io.mycat.MycatServer;
import sun.nio.ch.DirectBuffer;
/**
 * File path provider backed by memory-mapped NIO files.
 *
 * @author zhangyaohua
 * @CreateTime 2014-8-21
 */
public class FilePathNioMapped extends FilePathWrapper {

    @Override
    public FileChannel open(String mode) throws IOException {
        // drop the "nioMapped:" prefix before opening the underlying file
        String fileName = name.substring(getScheme().length() + 1);
        return new FileNioMapped(fileName, mode);
    }

    @Override
    public String getScheme() {
        return "nioMapped";
    }
}
/**
* Uses memory mapped files. The file size is limited to 2 GB.
*/
class FileNioMapped extends FileBase {
private static Logger logger = Logger.getLogger(FileNioMapped.class);
private static final long GC_TIMEOUT_MS = 10000;
private final String name;
private final MapMode mode;
private RandomAccessFile file;
private MappedByteBuffer mapped;
private long fileLength;
private boolean NIO_LOAD_MAPPED = false;
/**
* The position within the file. Can't use the position of the mapped buffer
* because it doesn't support seeking past the end of the file.
*/
private int pos;
FileNioMapped(String fileName, String mode) throws IOException {
if ("r".equals(mode)) {
this.mode = MapMode.READ_ONLY;
} else {
this.mode = MapMode.READ_WRITE;
}
this.name = fileName;
file = new RandomAccessFile(fileName, mode);
try {
reMap();
} catch (IOException e) {
if (file != null) {
file.close();
file = null;
}
throw e;
}
}
private void unMap() throws IOException {
if (mapped == null) {
return;
}
// first write all data
// mapped.force();
// need to dispose old direct buffer, see bug
// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4724038
boolean useSystemGc = true;
try {
Method cleanerMethod = mapped.getClass().getMethod("cleaner");
cleanerMethod.setAccessible(true);
Object cleaner = cleanerMethod.invoke(mapped);
if (cleaner != null) {
Method clearMethod = cleaner.getClass().getMethod("clean");
clearMethod.invoke(cleaner);
}
useSystemGc = false;
} catch (Throwable e) {
logger.warn("unmap byteBuffer error", e);
// useSystemGc is already true
} finally {
mapped = null;
}
if (useSystemGc) {
WeakReference<MappedByteBuffer> bufferWeakRef = new WeakReference<MappedByteBuffer>(mapped);
mapped = null;
long start = System.currentTimeMillis();
while (bufferWeakRef.get() != null) {
if (System.currentTimeMillis() - start > GC_TIMEOUT_MS) {
throw new IOException(
"Timeout (" + GC_TIMEOUT_MS + " ms) reached while trying to GC mapped buffer");
}
System.gc();
Thread.yield();
}
}
}
/**
* Re-map byte buffer into memory, called when file size has changed or file
* was created.
*/
private void reMap() throws IOException {
int oldPos = 0;
if (mapped != null) {
oldPos = pos;
unMap();
}
fileLength = file.length();
if (fileLength == 0) {
// fileLength = 1024*1024* 1024;
fileLength = MycatServer.getInstance().getConfig().getSystem().getMappedFileSize();
}
checkFileSizeLimit(fileLength);
// maps new MappedByteBuffer; the old one is disposed during GC
mapped = file.getChannel().map(mode, 0, fileLength);
int limit = mapped.limit();
int capacity = mapped.capacity();
if (limit < fileLength || capacity < fileLength) {
throw new IOException("Unable to map: length=" + limit + " capacity=" + capacity + " length=" + fileLength);
}
if (NIO_LOAD_MAPPED) {
mapped.load();
}
this.pos = Math.min(oldPos, (int) fileLength);
}
private static void checkFileSizeLimit(long length) throws IOException {
if (length > Integer.MAX_VALUE) {
throw new IOException("File over 2GB is not supported yet when using this file system");
}
}
@Override
public synchronized void implCloseChannel() throws IOException {
if (file != null) {
unMap();
file.close();
file = null;
}
}
@Override
public long position() {
return pos;
}
@Override
public String toString() {
return "nioMapped:" + name;
}
@Override
public synchronized long size() throws IOException {
return fileLength;
}
@Override
public synchronized int read(ByteBuffer dst) throws IOException {
try {
int len = dst.remaining();
if (len == 0) {
return 0;
}
len = (int) Math.min(len, fileLength - pos);
if (len <= 0) {
return -1;
}
mapped.position(pos);
if (dst instanceof DirectBuffer) {
byte[] temp = new byte[len];
mapped.get(temp, 0, len);
dst.put(temp);
} else {
mapped.get(dst.array(), dst.arrayOffset() + dst.position(), len);
dst.position(dst.position() + len);
}
pos += len;
return len;
} catch (IllegalArgumentException e) {
EOFException e2 = new EOFException("EOF");
e2.initCause(e);
throw e2;
} catch (BufferUnderflowException e) {
EOFException e2 = new EOFException("EOF");
e2.initCause(e);
throw e2;
}
}
@Override
public FileChannel position(long pos) throws IOException {
checkFileSizeLimit(pos);
this.pos = (int) pos;
return this;
}
/**
 * Shrinks the file to {@code newLength}. Growing is not supported here: a
 * request larger than the current size is silently ignored, matching the
 * "don't expand" policy of {@code write}.
 *
 * @param newLength the desired length
 * @return this channel
 */
@Override
public synchronized FileChannel truncate(long newLength) throws IOException {
    if (newLength < size()) {
        setFileLength(newLength);
    }
    return this;
}
/**
 * Changes the underlying file length and re-maps the buffer.
 * On Windows the OS refuses to resize a file that still has a user-mapped
 * section open, and the old mapping is only released when the buffer is
 * garbage collected; so on that specific failure this retries up to 17
 * times, prodding the collector between attempts.
 *
 * @param newLength the new file length (must fit in an {@code int})
 * @throws IOException if the resize keeps failing or the length is too big
 */
public synchronized void setFileLength(long newLength) throws IOException {
    checkFileSizeLimit(newLength);
    int oldPos = pos;
    unMap();
    for (int i = 0;; i++) {
        try {
            file.setLength(newLength);
            break;
        } catch (IOException e) {
            // only the Windows "user-mapped section open" error is retried
            if (i > 16 || e.toString().indexOf("user-mapped section open") < 0) {
                throw e;
            }
        }
        System.gc(); // encourage release of the old mapping before retrying
    }
    reMap();
    // keep the position inside the (possibly shrunk) file
    pos = (int) Math.min(newLength, oldPos);
}
/**
 * Flushes the mapped region and the file descriptor to the storage device.
 *
 * @param metaData ignored — both data and metadata are always synced
 */
@Override
public synchronized void force(boolean metaData) throws IOException {
    mapped.force();
    file.getFD().sync();
}
/**
 * Writes from {@code src} at the current position. The mapping is never
 * expanded ("don't expand"): when the request runs past the mapped
 * capacity only the bytes that fit are written and the shorter count is
 * returned, so callers must check the return value.
 *
 * @param src the buffer to drain
 * @return the number of bytes actually written (may be less than
 *         {@code src.remaining()})
 */
@Override
public synchronized int write(ByteBuffer src) throws IOException {
    // BUGFIX: position the mapping at 'pos' before writing. read() does
    // this, but the original write() relied on the mapping's leftover
    // position, which goes stale after position(long) or a partial op.
    mapped.position(pos);
    int len = src.remaining();
    if (mapped.capacity() < pos + len) {
        // partial write: clamp to the remaining mapped capacity
        int offset = src.position();
        int length = mapped.capacity() - pos;
        if (src instanceof DirectBuffer) {
            // a direct buffer has no backing array; stage through the heap
            byte[] temp = new byte[length];
            src.get(temp, 0, length);
            mapped.put(temp, 0, length);
        } else {
            mapped.put(src.array(), offset, length);
        }
        src.position(offset + length);
        pos += length;
        return length;
    } else {
        mapped.put(src);
        pos += len;
        return len;
    }
}
/**
 * Delegates locking to the underlying file's real channel; the mapped
 * buffer itself has no locking support.
 */
@Override
public synchronized FileLock tryLock(long position, long size, boolean shared) throws IOException {
    return file.getChannel().tryLock(position, size, shared);
}
}

View File

@@ -0,0 +1,158 @@
package io.mycat.backend.mysql.store.fs;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.channels.FileChannel;
import java.util.List;
/**
 * Base class for file-path implementations that wrap another
 * {@link FilePath}, adding a scheme prefix to the name (e.g. "scheme:name").
 * Subclasses override only the operations they want to intercept; everything
 * else is forwarded to the wrapped base path.
 */
public abstract class FilePathWrapper extends FilePath {

    /** The underlying path all operations delegate to. */
    private FilePath base;

    @Override
    public FilePathWrapper getPath(String path) {
        return create(path, unwrap(path));
    }

    /**
     * Create a wrapped path instance for the given base path.
     *
     * @param base
     *            the base path
     * @return the wrapped path
     */
    public FilePathWrapper wrap(FilePath base) {
        return base == null ? null : create(getPrefix() + base.name, base);
    }

    @Override
    public FilePath unwrap() {
        return unwrap(name);
    }

    // Instantiates the concrete subclass reflectively so every wrapper type
    // can share this factory. NOTE(review): requires a public no-arg
    // constructor; Class.newInstance() is deprecated on newer JDKs.
    private FilePathWrapper create(String path, FilePath base) {
        try {
            FilePathWrapper p = getClass().newInstance();
            p.name = path;
            p.base = base;
            return p;
        } catch (Exception e) {
            throw new IllegalArgumentException("Path: " + path, e);
        }
    }

    /** @return the scheme prefix this wrapper adds, e.g. "scheme:" */
    protected String getPrefix() {
        return getScheme() + ":";
    }

    /**
     * Get the base path for the given wrapped path.
     *
     * @param path
     *            the path including the scheme prefix
     * @return the base file path
     */
    protected FilePath unwrap(String path) {
        return FilePath.get(path.substring(getScheme().length() + 1));
    }

    protected FilePath getBase() {
        return base;
    }

    // ---- plain delegation to the wrapped path from here on ----

    @Override
    public boolean canWrite() {
        return base.canWrite();
    }

    @Override
    public void createDirectory() {
        base.createDirectory();
    }

    @Override
    public boolean createFile() {
        return base.createFile();
    }

    @Override
    public void delete() {
        base.delete();
    }

    @Override
    public boolean exists() {
        return base.exists();
    }

    @Override
    public FilePath getParent() {
        return wrap(base.getParent());
    }

    @Override
    public boolean isAbsolute() {
        return base.isAbsolute();
    }

    @Override
    public boolean isDirectory() {
        return base.isDirectory();
    }

    @Override
    public long lastModified() {
        return base.lastModified();
    }

    @Override
    public FilePath toRealPath() {
        return wrap(base.toRealPath());
    }

    @Override
    public List<FilePath> newDirectoryStream() {
        List<FilePath> list = base.newDirectoryStream();
        // re-wrap each entry so callers keep seeing scheme-prefixed paths
        for (int i = 0, len = list.size(); i < len; i++) {
            list.set(i, wrap(list.get(i)));
        }
        return list;
    }

    @Override
    public void moveTo(FilePath newName) {
        // assumes newName is a wrapper of the same scheme — TODO confirm;
        // a plain FilePath here would throw ClassCastException
        base.moveTo(((FilePathWrapper) newName).base);
    }

    @Override
    public InputStream newInputStream() throws IOException {
        return base.newInputStream();
    }

    @Override
    public OutputStream newOutputStream(boolean append) throws IOException {
        return base.newOutputStream(append);
    }

    @Override
    public FileChannel open(String mode) throws IOException {
        return base.open(mode);
    }

    @Override
    public boolean setReadOnly() {
        return base.setReadOnly();
    }

    @Override
    public long size() {
        return base.size();
    }

    @Override
    public FilePath createTempFile(String suffix, boolean deleteOnExit) throws IOException {
        return wrap(base.createTempFile(suffix, deleteOnExit));
    }
}

View File

@@ -0,0 +1,376 @@
package io.mycat.backend.mysql.store.fs;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.List;
/**
* @author zhangyaohua
* @createTime 2013-11-11
*/
/**
 * Static convenience wrappers around the pluggable {@link FilePath}
 * abstraction. Each method mirrors a Java 7 {@code java.nio.file} operation
 * but takes plain file-name strings, resolving them via
 * {@code FilePath.get()} so the backing file system is selected by the
 * name's scheme prefix.
 *
 * @author zhangyaohua
 * @createTime 2013-11-11
 */
public class FileUtils {
    // NOTE(review): static utility class — consider adding a private
    // constructor to prevent instantiation.

    /**
     * Checks if a file exists. This method is similar to Java 7
     * <code>java.nio.file.Path.exists</code>.
     *
     * @param fileName
     *            the file name
     * @return true if it exists
     */
    public static boolean exists(String fileName) {
        return FilePath.get(fileName).exists();
    }

    /**
     * Create a directory (all required parent directories must already exist).
     * This method is similar to Java 7
     * <code>java.nio.file.Path.createDirectory</code>.
     *
     * @param directoryName
     *            the directory name
     */
    public static void createDirectory(String directoryName) {
        FilePath.get(directoryName).createDirectory();
    }

    /**
     * Create a new file. This method is similar to Java 7
     * <code>java.nio.file.Path.createFile</code>, but returns false instead of
     * throwing a exception if the file already existed.
     *
     * @param fileName
     *            the file name
     * @return true if creating was successful
     */
    public static boolean createFile(String fileName) {
        return FilePath.get(fileName).createFile();
    }

    /**
     * Delete a file or directory if it exists. Directories may only be deleted
     * if they are empty. This method is similar to Java 7
     * <code>java.nio.file.Path.deleteIfExists</code>.
     *
     * @param path
     *            the file or directory name
     */
    public static void delete(String path) {
        FilePath.get(path).delete();
    }

    /**
     * Get the canonical file or directory name. This method is similar to Java
     * 7 <code>java.nio.file.Path.toRealPath</code>.
     *
     * @param fileName
     *            the file name
     * @return the normalized file name
     */
    public static String toRealPath(String fileName) {
        return FilePath.get(fileName).toRealPath().toString();
    }

    /**
     * Get the parent directory of a file or directory. This method returns null
     * if there is no parent. This method is similar to Java 7
     * <code>java.nio.file.Path.getParent</code>.
     *
     * @param fileName
     *            the file or directory name
     * @return the parent directory name
     */
    public static String getParent(String fileName) {
        FilePath p = FilePath.get(fileName).getParent();
        return p == null ? null : p.toString();
    }

    /**
     * Check if the file name includes a path. This method is similar to Java 7
     * <code>java.nio.file.Path.isAbsolute</code>.
     *
     * @param fileName
     *            the file name
     * @return if the file name is absolute
     */
    public static boolean isAbsolute(String fileName) {
        return FilePath.get(fileName).isAbsolute();
    }

    /**
     * Rename a file if this is allowed. This method is similar to Java 7
     * <code>java.nio.file.Path.moveTo</code>.
     *
     * @param oldName
     *            the old fully qualified file name
     * @param newName
     *            the new fully qualified file name
     */
    public static void moveTo(String oldName, String newName) {
        FilePath.get(oldName).moveTo(FilePath.get(newName));
    }

    /**
     * Get the file or directory name (the last element of the path). This
     * method is similar to Java 7 <code>java.nio.file.Path.getName</code>.
     *
     * @param path
     *            the directory and file name
     * @return just the file name
     */
    public static String getName(String path) {
        return FilePath.get(path).getName();
    }

    /**
     * List the files and directories in the given directory. This method is
     * similar to Java 7 <code>java.nio.file.Path.newDirectoryStream</code>.
     *
     * @param path
     *            the directory
     * @return the list of fully qualified file names
     */
    public static List<String> newDirectoryStream(String path) {
        List<FilePath> list = FilePath.get(path).newDirectoryStream();
        int len = list.size();
        List<String> result = new ArrayList<String>(len);
        for (int i = 0; i < len; i++) {
            result.add(list.get(i).toString());
        }
        return result;
    }

    /**
     * Get the last modified date of a file. This method is similar to Java 7
     * <code>java.nio.file.attribute.Attributes.
     * readBasicFileAttributes(file).lastModified().toMillis()</code>
     *
     * @param fileName
     *            the file name
     * @return the last modified date
     */
    public static long lastModified(String fileName) {
        return FilePath.get(fileName).lastModified();
    }

    /**
     * Get the size of a file in bytes. This method is similar to Java 7
     * <code>java.nio.file.attribute.Attributes.
     * readBasicFileAttributes(file).size()</code>
     *
     * @param fileName
     *            the file name
     * @return the size in bytes
     */
    public static long size(String fileName) {
        return FilePath.get(fileName).size();
    }

    /**
     * Check if it is a file or a directory. This method is similar to Java 7
     * <code>java.nio.file.attribute.Attributes.
     * readBasicFileAttributes(file).isDirectory()</code>
     *
     * @param fileName
     *            the file or directory name
     * @return true if it is a directory
     */
    public static boolean isDirectory(String fileName) {
        return FilePath.get(fileName).isDirectory();
    }

    /**
     * Open a random access file object. This method is similar to Java 7
     * <code>java.nio.channels.FileChannel.open</code>.
     *
     * @param fileName
     *            the file name
     * @param mode
     *            the access mode. Supported are r, rw, rws, rwd
     * @return the file object
     */
    public static FileChannel open(String fileName, String mode) throws IOException {
        return FilePath.get(fileName).open(mode);
    }

    /**
     * Create an input stream to read from the file. This method is similar to
     * Java 7 <code>java.nio.file.Path.newInputStream</code>.
     *
     * @param fileName
     *            the file name
     * @return the input stream
     */
    public static InputStream newInputStream(String fileName) throws IOException {
        return FilePath.get(fileName).newInputStream();
    }

    /**
     * Create an output stream to write into the file. This method is similar to
     * Java 7 <code>java.nio.file.Path.newOutputStream</code>.
     *
     * @param fileName
     *            the file name
     * @param append
     *            if true, the file will grow, if false, the file will be
     *            truncated first
     * @return the output stream
     */
    public static OutputStream newOutputStream(String fileName, boolean append) throws IOException {
        return FilePath.get(fileName).newOutputStream(append);
    }

    /**
     * Check if the file is writable. This method is similar to Java 7
     * <code>java.nio.file.Path.checkAccess(AccessMode.WRITE)</code>
     *
     * @param fileName
     *            the file name
     * @return if the file is writable
     */
    public static boolean canWrite(String fileName) {
        return FilePath.get(fileName).canWrite();
    }

    // special methods =======================================

    /**
     * Disable the ability to write. The file can still be deleted afterwards.
     *
     * @param fileName
     *            the file name
     * @return true if the call was successful
     */
    public static boolean setReadOnly(String fileName) {
        return FilePath.get(fileName).setReadOnly();
    }

    /**
     * Get the unwrapped file name (without wrapper prefixes if wrapping /
     * delegating file systems are used).
     *
     * @param fileName
     *            the file name
     * @return the unwrapped file name
     */
    public static String unwrap(String fileName) {
        return FilePath.get(fileName).unwrap().toString();
    }

    // utility methods =======================================

    /**
     * Delete a directory or file and all subdirectories and files.
     *
     * @param path
     *            the path
     * @param tryOnly
     *            whether errors should be ignored
     */
    public static void deleteRecursive(String path, boolean tryOnly) {
        if (exists(path)) {
            if (isDirectory(path)) {
                // delete children first; directories may only be removed empty
                for (String s : newDirectoryStream(path)) {
                    deleteRecursive(s, tryOnly);
                }
            }
            if (tryOnly) {
                tryDelete(path);
            } else {
                delete(path);
            }
        }
    }

    /**
     * Create the directory and all required parent directories.
     *
     * @param dir
     *            the directory name
     */
    public static void createDirectories(String dir) {
        if (dir != null) {
            if (exists(dir)) {
                if (!isDirectory(dir)) {
                    // a non-directory already occupies the name:
                    // this will fail
                    createDirectory(dir);
                }
            } else {
                String parent = getParent(dir);
                createDirectories(parent);
                createDirectory(dir);
            }
        }
    }

    /**
     * Try to delete a file (ignore errors).
     *
     * @param fileName
     *            the file name
     * @return true if it worked
     */
    public static boolean tryDelete(String fileName) {
        try {
            FilePath.get(fileName).delete();
            return true;
        } catch (Exception e) {
            return false;
        }
    }

    /**
     * Create a new temporary file.
     *
     * @param prefix
     *            the prefix of the file name (including directory name if
     *            required)
     * @param suffix
     *            the suffix
     * @param deleteOnExit
     *            if the file should be deleted when the virtual machine exits
     * @return the name of the created file
     */
    public static String createTempFile(String prefix, String suffix, boolean deleteOnExit) throws IOException {
        return FilePath.get(prefix).createTempFile(suffix, deleteOnExit).toString();
    }

    /**
     * Fully read from the file. This will read all remaining bytes, or throw an
     * EOFException if not successful.
     *
     * @param channel
     *            the file channel
     * @param dst
     *            the byte buffer
     */
    public static void readFully(FileChannel channel, ByteBuffer dst) throws IOException {
        // do/while: even a full buffer triggers one (zero-byte) read
        do {
            int r = channel.read(dst);
            if (r < 0) {
                throw new EOFException();
            }
        } while (dst.remaining() > 0);
    }

    /**
     * Fully write to the file. This will write all remaining bytes.
     *
     * @param channel
     *            the file channel
     * @param src
     *            the byte buffer
     */
    public static void writeFully(FileChannel channel, ByteBuffer src) throws IOException {
        do {
            channel.write(src);
        } while (src.remaining() > 0);
    }
}

View File

@@ -0,0 +1,55 @@
package io.mycat.backend.mysql.store.memalloc;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Tracks memory consumption against a fixed budget.
 * {@link #addSize(long)} always records the requested bytes and reports
 * whether the controller still has headroom; once the remaining space drops
 * below a small reserve the caller is expected to flush to disk.
 *
 * @author chenzifei
 * @CreateTime 2016年1月19日
 */
public class MemSizeController {
    // The controller is considered exhausted once less than this many
    // bytes of headroom remain below maxSize.
    private static long minLeft = 32;
    // Bytes currently accounted for.
    private AtomicLong size;
    private long maxSize;

    public MemSizeController(long maxSize) {
        this.size = new AtomicLong();
        this.maxSize = maxSize;
    }

    /**
     * Accounts {@code incre} additional bytes (always recorded, even when
     * the budget is exceeded).
     *
     * @param incre number of bytes to add
     * @return true while the budget still has headroom; false means
     *         "not ok, need flush to disk"
     */
    public boolean addSize(long incre) {
        while (true) {
            long before = size.get();
            long after = before + incre;
            if (size.compareAndSet(before, after)) {
                // headroom check folded into a single boolean expression
                return after + minLeft < maxSize;
            }
            // CAS lost the race; re-read and retry
        }
    }

    /**
     * Releases {@code decre} previously accounted bytes.
     *
     * @param decre number of bytes to subtract; going below zero indicates
     *            unbalanced accounting and throws
     */
    public void subSize(long decre) {
        while (true) {
            long before = size.get();
            long after = before - decre;
            if (after < 0) {
                throw new RuntimeException("unexpected!");
            }
            if (size.compareAndSet(before, after)) {
                return;
            }
        }
    }
}

View File

@@ -0,0 +1,80 @@
package io.mycat.backend.mysql.store.result;
import java.util.List;
import io.mycat.net.mysql.RowDataPacket;
/**
 * Abstraction over a result set held outside the heap (for example spilled
 * to a temporary file), supporting sequential reads, appends, removals and
 * membership checks of {@link RowDataPacket} rows.
 */
public interface ResultExternal {

    /**
     * Reset the current position of this object.
     */
    void reset();

    /**
     * Get the next row from the result.
     *
     * @return the next row or null
     */
    RowDataPacket next();

    /**
     * Add a row to this object.
     *
     * @param row
     *            the row to add
     * @return the new number of rows in this object
     */
    int addRow(RowDataPacket row);

    /**
     * Add a number of rows to the result.
     *
     * @param rows
     *            the list of rows to add
     * @return the new number of rows in this object
     */
    int addRows(List<RowDataPacket> rows);

    /**
     * This method is called after all rows have been added.
     */
    void done();

    /**
     * Close this object and delete the temporary file.
     */
    void close();

    /**
     * Remove the row with the given values from this object if such a row
     * exists.
     *
     * @param row
     *            the row
     * @return the new row count
     */
    int removeRow(RowDataPacket row);

    /**
     * Check if the given row exists in this object.
     *
     * @param row
     *            the row
     * @return true if it exists
     */
    boolean contains(RowDataPacket row);

    /**
     * Create a shallow copy of this object if possible.
     *
     * @return the shallow copy, or null
     */
    ResultExternal createShallowCopy();

    /**
     * count tapes split by resultExternal
     * NOTE(review): name violates the lowerCamelCase convention (should be
     * tapeCount); renaming would break all implementers, so it is kept.
     *
     * @return tape's count
     */
    int TapeCount();
}

View File

@@ -11,6 +11,7 @@ import java.util.concurrent.ConcurrentHashMap;
* @time 12:19 2016/5/23
*/
public interface BufferPool {
ByteBuffer allocate();
public ByteBuffer allocate(int size);
public void recycle(ByteBuffer theBuf);
public long capacity();

View File

@@ -1,214 +0,0 @@
package io.mycat.buffer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Buffer pool modelled after Netty's arena design, adapted for MyCat's
 * memory buffering strategy. ByteBufferArena owns all chunk lists, which
 * are bucketed by usage percentage.
 *
 * @author Hash Zhang
 * @version 1.0
 * @time 17:19 2016/5/17
 * @see @https://github.com/netty/netty
 */
public class ByteBufferArena implements BufferPool {
    private static final Logger LOGGER = LoggerFactory.getLogger(ByteBufferChunkList.class);

    /** Chunk lists bucketed by usage percentage; q[0] holds the emptiest chunks. */
    private final ByteBufferChunkList q[];
    private final AtomicInteger chunkCount = new AtomicInteger(0);
    /** Failed full sweeps; the pool expands once this passes FAIL_THRESHOLD. */
    private final AtomicInteger failCount = new AtomicInteger(0);
    private static final int FAIL_THRESHOLD = 1000;
    private final int pageSize;
    private final int chunkSize;
    private final AtomicLong capacity;
    private final AtomicLong size;
    /** Per-thread count of pool operations (diagnostics only). */
    private final ConcurrentHashMap<Thread, Integer> sharedOptsCount;
    /** Maps thread id -> direct-buffer bytes currently attributed to that thread. */
    private final ConcurrentHashMap<Long, Long> memoryUsage;
    private final int conReadBuferChunk;

    public ByteBufferArena(int chunkSize, int pageSize, int chunkCount, int conReadBuferChunk) {
        this.chunkSize = chunkSize;
        this.pageSize = pageSize;
        this.chunkCount.set(chunkCount);
        this.conReadBuferChunk = conReadBuferChunk;
        // only q[0] starts with chunks; the others fill as usage migrates
        q = new ByteBufferChunkList[6];
        q[5] = new ByteBufferChunkList(100, Integer.MAX_VALUE, chunkSize, pageSize, 0);
        q[4] = new ByteBufferChunkList(75, 100, chunkSize, pageSize, 0);
        q[3] = new ByteBufferChunkList(50, 100, chunkSize, pageSize, 0);
        q[2] = new ByteBufferChunkList(25, 75, chunkSize, pageSize, 0);
        q[1] = new ByteBufferChunkList(1, 50, chunkSize, pageSize, 0);
        q[0] = new ByteBufferChunkList(Integer.MIN_VALUE, 25, chunkSize, pageSize, chunkCount);
        q[0].nextList = q[1];
        q[1].nextList = q[2];
        q[2].nextList = q[3];
        q[3].nextList = q[4];
        q[4].nextList = q[5];
        q[5].nextList = null;
        q[5].prevList = q[4];
        q[4].prevList = q[3];
        q[3].prevList = q[2];
        q[2].prevList = q[1];
        q[1].prevList = q[0];
        q[0].prevList = null;
        // NOTE(review): 6 * chunkCount overstates the initial capacity since
        // only q[0] is populated with chunkCount chunks — verify intent
        capacity = new AtomicLong(6 * chunkCount * chunkSize);
        size = new AtomicLong(6 * chunkCount * chunkSize);
        sharedOptsCount = new ConcurrentHashMap<>();
        memoryUsage = new ConcurrentHashMap<>();
    }

    /**
     * Allocates a buffer of the pool's default chunk size; overload added for
     * consistency with DirectByteBufferPool and the BufferPool interface.
     */
    public ByteBuffer allocate() {
        return allocate(chunkSize);
    }

    /**
     * Sweeps the chunk lists until an allocation succeeds, expanding the
     * pool after FAIL_THRESHOLD failed full sweeps.
     *
     * @param reqCapacity requested capacity in bytes
     * @return the allocated buffer (never null; loops until satisfied)
     */
    @Override
    public ByteBuffer allocate(int reqCapacity) {
        ByteBuffer byteBuffer = null;
        int i = 0, count = 0;
        while (byteBuffer == null) {
            if (i > 5) {
                // one full sweep failed; count it and expand when persistent
                i = 0;
                count = failCount.incrementAndGet();
                if (count > FAIL_THRESHOLD) {
                    expand();
                }
            }
            byteBuffer = q[i].allocate(reqCapacity);
            i++;
        }
        capacity.addAndGet(-reqCapacity);
        final Thread thread = Thread.currentThread();
        final long threadId = thread.getId();
        Long used = memoryUsage.get(threadId);
        memoryUsage.put(threadId, used == null ? (long) reqCapacity : used + reqCapacity);
        // BUGFIX: the original used contains(), which on ConcurrentHashMap
        // searches *values* and thus never matched a Thread key, so the
        // counter never incremented.
        if (sharedOptsCount.containsKey(thread)) {
            int currentCount = sharedOptsCount.get(thread);
            currentCount++;
            sharedOptsCount.put(thread, currentCount);
        } else {
            sharedOptsCount.put(thread, 0);
        }
        return byteBuffer;
    }

    // Adds one fresh chunk to the emptiest list and resets the fail counter.
    private void expand() {
        LOGGER.warn("Current Buffer Size is not enough! Expanding Byte buffer!");
        ByteBufferChunk byteBufferChunk = new ByteBufferChunk(pageSize, chunkSize);
        q[0].byteBufferChunks.add(byteBufferChunk);
        failCount.set(0);
    }

    /**
     * Returns a buffer to whichever chunk list owns it; logs and ignores
     * buffers that this arena does not maintain.
     */
    @Override
    public void recycle(ByteBuffer byteBuffer) {
        final long freed = byteBuffer != null ? byteBuffer.capacity() : 0;
        int i;
        for (i = 0; i < 6; i++) {
            if (q[i].free(byteBuffer)) {
                break;
            }
        }
        if (i > 5) {
            LOGGER.warn("This ByteBuffer is not maintained in ByteBufferArena!");
            return;
        }
        final Thread thread = Thread.currentThread();
        final long threadId = thread.getId();
        Long used = memoryUsage.get(threadId);
        if (used != null) {
            memoryUsage.put(threadId, used - freed);
        }
        // BUGFIX: containsKey, not contains (see allocate)
        if (sharedOptsCount.containsKey(thread)) {
            int currentCount = sharedOptsCount.get(thread);
            currentCount--;
            sharedOptsCount.put(thread, currentCount);
        } else {
            sharedOptsCount.put(thread, 0);
        }
        capacity.addAndGet(byteBuffer.capacity());
    }

    // Debug helper: dumps every chunk list to stdout.
    private void printList() {
        for (int i = 0; i < 6; i++) {
            System.out.println(i + ":" + q[i].byteBufferChunks.toString());
        }
    }

    @Override
    public long capacity() {
        return capacity.get();
    }

    @Override
    public long size() {
        return size.get();
    }

    @Override
    public int getConReadBuferChunk() {
        return conReadBuferChunk;
    }

    @Override
    public int getSharedOptsCount() {
        // BUGFIX: ConcurrentHashMap.values() is a Collection, not a Set;
        // the original cast threw ClassCastException at runtime.
        int count = 0;
        for (int i : sharedOptsCount.values()) {
            count += i;
        }
        return count;
    }

    /**
     * Here pageSize plays the role of DirectByteBufferPool's chunk size.
     *
     * @return the page size used as the externally visible chunk size
     */
    @Override
    public int getChunkSize() {
        return pageSize;
    }

    @Override
    public ConcurrentHashMap<Long, Long> getNetDirectMemoryUsage() {
        return memoryUsage;
    }

    @Override
    public BufferArray allocateArray() {
        return new BufferArray(this);
    }
}

View File

@@ -65,7 +65,9 @@ public class DirectByteBufferPool implements BufferPool{
}
return null;
}
public ByteBuffer allocate() {
return allocate(chunkSize);
}
public ByteBuffer allocate(int size) {
final int theChunkCount = size / chunkSize + (size % chunkSize == 0 ? 0 : 1);
int selectedPage = prevAllocatedPage.incrementAndGet() % allPages.length;

View File

@@ -42,6 +42,19 @@ public interface ErrorCode {
public static final int ERR_MULTI_NODE_FAILED = 3011;
public static final int ERR_WRONG_USED = 3012;
public static final int ERR_FOUND_EXCEPION = 3344;
public static final int ER_HANDLE_DATA = 4002;
public static final int ER_OPTIMIZER = 4004;
public static final int ER_QUERYHANDLER = 4005;
public static final int ER_NO_VALID_CONNECTION = 5004;
public static final int ER_FILE_INIT = 5301;
public static final int ER_FILE_FORCE = 5302;
public static final int ER_FILE_SYNC = 5303;
public static final int ER_FILE_READ = 5304;
public static final int ER_FILE_WRITE = 5305;
public static final int ER_FILE_RENAME = 5306;
public static final int ER_FILE_DELETE = 5307;
public static final int ER_IO_EXCEPTION = 5308;
public static final int ER_FILE_CREATE = 5313;
// mysql error code
public static final int ER_HASHCHK = 1000;
public static final int ER_NISAMCHK = 1001;

View File

@@ -47,6 +47,7 @@ import io.mycat.config.model.DataHostConfig;
import io.mycat.config.model.DataNodeConfig;
import io.mycat.config.model.SchemaConfig;
import io.mycat.config.model.TableConfig;
import io.mycat.config.model.TableConfig.TableTypeEnum;
import io.mycat.config.model.TableConfigMap;
import io.mycat.config.model.rule.TableRuleConfig;
import io.mycat.config.util.ConfigException;
@@ -150,7 +151,6 @@ public class XMLSchemaLoader implements SchemaLoader {
//读取各个属性
String name = schemaElement.getAttribute("name");
String dataNode = schemaElement.getAttribute("dataNode");
String checkSQLSchemaStr = schemaElement.getAttribute("checkSQLschema");
String sqlMaxLimitStr = schemaElement.getAttribute("sqlMaxLimit");
String lowerCaseStr = schemaElement.getAttribute("lowerCase");
int sqlMaxLimit = -1;
@@ -186,7 +186,7 @@ public class XMLSchemaLoader implements SchemaLoader {
"schema " + name + " didn't config tables,so you must set dataNode property!");
}
SchemaConfig schemaConfig = new SchemaConfig(name, dataNode,
tables, sqlMaxLimit, !"false".equalsIgnoreCase(checkSQLSchemaStr), lowerCase);
tables, sqlMaxLimit, lowerCase);
schemas.put(name, schemaConfig);
}
}
@@ -312,9 +312,9 @@ public class XMLSchemaLoader implements SchemaLoader {
}
//记录type是否为global
String tableTypeStr = tableElement.hasAttribute("type") ? tableElement.getAttribute("type") : null;
int tableType = TableConfig.TYPE_GLOBAL_DEFAULT;
TableTypeEnum tableType = TableTypeEnum.TYPE_DEFAULT;
if ("global".equalsIgnoreCase(tableTypeStr)) {
tableType = TableConfig.TYPE_GLOBAL_TABLE;
tableType = TableTypeEnum.TYPE_GLOBAL_TABLE;
}
//记录dataNode就是分布在哪些dataNode上
String dataNode = tableElement.getAttribute("dataNode");
@@ -342,8 +342,6 @@ public class XMLSchemaLoader implements SchemaLoader {
if (distTableDns) {
dataNode = dataNode.substring(distPrex.length(), dataNode.length() - 1);
}
//分表功能
String subTables = tableElement.getAttribute("subTables");
for (int j = 0; j < tableNames.length; j++) {
@@ -353,7 +351,7 @@ public class XMLSchemaLoader implements SchemaLoader {
TableConfig table = new TableConfig(tableName, primaryKey,
autoIncrement, needAddLimit, tableType, dataNode,
(tableRule != null) ? tableRule.getRule() : null,
ruleRequired, null, false, null, null,subTables);
ruleRequired, null, false, null, null);
checkDataNodeExists(table.getDataNodes());
// 检查分片表分片规则配置是否合法
@@ -441,14 +439,13 @@ public class XMLSchemaLoader implements SchemaLoader {
if (childTbElement.hasAttribute("needAddLimit")) {
needAddLimit = Boolean.parseBoolean(childTbElement.getAttribute("needAddLimit"));
}
String subTables = childTbElement.getAttribute("subTables");
//子表join键和对应的parent的键父子表通过这个关联
String joinKey = childTbElement.getAttribute("joinKey").toUpperCase();
String parentKey = childTbElement.getAttribute("parentKey").toUpperCase();
TableConfig table = new TableConfig(cdTbName, primaryKey,
autoIncrement, needAddLimit,
TableConfig.TYPE_GLOBAL_DEFAULT, dataNodes, null, false, parentTable, true,
joinKey, parentKey, subTables);
TableTypeEnum.TYPE_DEFAULT, dataNodes, null, false, parentTable, true,
joinKey, parentKey);
if (tables.containsKey(table.getName())) {
throw new ConfigException("table " + table.getName() + " duplicated!");

View File

@@ -0,0 +1,54 @@
package io.mycat.config.model;
public class ERTable {
private final String table;
private final String column;
private final String schema;
public ERTable(String schema, String table, String column) {
if (schema == null)
throw new IllegalArgumentException("ERTable's schema can't be null");
this.schema = schema;
if (table == null)
throw new IllegalArgumentException("ERTable's tableName can't be null");
this.table = table;
if (column == null)
throw new IllegalArgumentException("ERTable's column can't be null");
this.column = column;
}
public String getTable() {
return table;
}
public String getColumn() {
return column;
}
public String getSchema() {
return schema;
}
@Override
public int hashCode() {
final int constant = 37;
int hash = 17;
hash += constant * (schema == null ? 0 : schema.toLowerCase().hashCode());
hash += constant * (table == null ? 0 : table.toLowerCase().hashCode());
hash += constant * (column == null ? 0 : column.toLowerCase().hashCode());
return hash;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj instanceof ERTable) {
ERTable erTable = (ERTable) obj;
return this.schema.equalsIgnoreCase(erTable.getSchema())
&& this.table.equalsIgnoreCase(erTable.getTable())
&& this.column.equalsIgnoreCase(erTable.getColumn());
}
return false;
}
}

View File

@@ -45,7 +45,6 @@ public class SchemaConfig {
* prevent memory problem when return a large result set
*/
private final int defaultMaxLimit;
private final boolean checkSQLSchema;
private final int lowerCase;
/**
@@ -56,11 +55,9 @@ public class SchemaConfig {
private final String[] allDataNodeStrArr;
public SchemaConfig(String name, String dataNode,
Map<String, TableConfig> tables, int defaultMaxLimit,
boolean checkSQLschema, int lowerCase) {
Map<String, TableConfig> tables, int defaultMaxLimit, int lowerCase) {
this.name = name;
this.dataNode = dataNode;
this.checkSQLSchema = checkSQLschema;
this.tables = tables;
this.defaultMaxLimit = defaultMaxLimit;
this.lowerCase = lowerCase;
@@ -81,9 +78,9 @@ public class SchemaConfig {
}
}
public boolean isCheckSQLSchema() {
return checkSQLSchema;
}
// public boolean isCheckSQLSchema() {
// return checkSQLSchema;
// }
public int getDefaultMaxLimit() {
return defaultMaxLimit;

View File

@@ -80,9 +80,15 @@ public final class SystemConfig {
private static final String DEFAULT_TRANSACTION_BASE_DIR = "txlogs";
private static final String DEFAULT_TRANSACTION_BASE_NAME = "mycat-tx";
private static final int DEFAULT_TRANSACTION_ROTATE_SIZE = 16;
private final static long CHECKTABLECONSISTENCYPERIOD = 30 * 60 * 1000;
private static final long CHECKTABLECONSISTENCYPERIOD = 30 * 60 * 1000;
// 全局表一致性检测任务默认24小时调度一次
private static final long DEFAULT_GLOBAL_TABLE_CHECK_PERIOD = 24 * 60 * 60 * 1000L;
private static final int DEFAULT_MERGE_QUEUE_SIZE = 1024;
private static final int DEFAULT_ORDERBY_QUEUE_SIZE = 1024;
private static final int DEFAULT_JOIN_QUEUE_SIZE = 1024;
private static final int DEFAULT_NESTLOOP_ROWS_SIZE = 2000;
private static final int DEFAULT_NESTLOOP_CONN_SIZE = 4;
private static final int DEFAULT_MAPPEDFILE_SIZE = 1024 * 1024 * 64;
private int processorBufferPoolType = 0;
private int processorBufferLocalPercent;
@@ -197,6 +203,13 @@ public final class SystemConfig {
private String transactionLogBaseDir;
private String transactionLogBaseName;
private int transactionRatateSize;
private int mergeQueueSize;
private int orderByQueueSize;
private int joinQueueSize;
private int nestLoopRowsSize;
private int nestLoopConnSize;
private int mappedFileSize;
/**
* 排序时,内存不够时,将已经排序的结果集
* 写入到临时目录
@@ -250,6 +263,12 @@ public final class SystemConfig {
this.transactionLogBaseDir = SystemConfig.getHomePath()+File.separatorChar+DEFAULT_TRANSACTION_BASE_DIR;
this.transactionLogBaseName = DEFAULT_TRANSACTION_BASE_NAME;
this.transactionRatateSize = DEFAULT_TRANSACTION_ROTATE_SIZE;
this.mergeQueueSize = DEFAULT_MERGE_QUEUE_SIZE;
this.orderByQueueSize = DEFAULT_ORDERBY_QUEUE_SIZE;
this.joinQueueSize = DEFAULT_JOIN_QUEUE_SIZE;
this.nestLoopRowsSize = DEFAULT_NESTLOOP_ROWS_SIZE;
this.nestLoopConnSize = DEFAULT_NESTLOOP_CONN_SIZE;
this.mappedFileSize = DEFAULT_MAPPEDFILE_SIZE;
}
public int getTransactionRatateSize() {
@@ -949,6 +968,45 @@ public final class SystemConfig {
public void setUseHandshakeV10(int useHandshakeV10) {
this.useHandshakeV10 = useHandshakeV10;
}
public int getNestLoopRowsSize() {
return nestLoopRowsSize;
}
public void setNestLoopRowsSize(int nestLoopRowsSize) {
this.nestLoopRowsSize = nestLoopRowsSize;
}
public int getJoinQueueSize() {
return joinQueueSize;
}
public void setJoinQueueSize(int joinQueueSize) {
this.joinQueueSize = joinQueueSize;
}
public int getMergeQueueSize() {
return mergeQueueSize;
}
public void setMergeQueueSize(int mergeQueueSize) {
this.mergeQueueSize = mergeQueueSize;
}
public int getMappedFileSize() {
return mappedFileSize;
}
public void setMappedFileSize(int mappedFileSize) {
this.mappedFileSize = mappedFileSize;
}
public int getNestLoopConnSize() {
return nestLoopConnSize;
}
public void setNestLoopConnSize(int nestLoopConnSize) {
this.nestLoopConnSize = nestLoopConnSize;
}
public int getOrderByQueueSize() {
return orderByQueueSize;
}
public void setOrderByQueueSize(int orderByQueueSize) {
this.orderByQueueSize = orderByQueueSize;
}
}

View File

@@ -38,15 +38,17 @@ import io.mycat.util.SplitUtil;
* @author mycat
*/
public class TableConfig {
public static final int TYPE_GLOBAL_TABLE = 1;
public static final int TYPE_GLOBAL_DEFAULT = 0;
public enum TableTypeEnum{
TYPE_DEFAULT, TYPE_GLOBAL_TABLE
}
// public static final int TYPE_GLOBAL_TABLE = 1;
// public static final int TYPE_GLOBAL_DEFAULT = 0;
private final String name;
private final String primaryKey;
private final boolean autoIncrement;
private final boolean needAddLimit;
private final int tableType;
private final TableTypeEnum tableType;
private final ArrayList<String> dataNodes;
private final ArrayList<String> distTables;
private final RuleConfig rule;
private final String partitionColumn;
private final boolean ruleRequired;
@@ -66,10 +68,10 @@ public class TableConfig {
private ReentrantReadWriteLock reentrantReadWriteLock = new ReentrantReadWriteLock(false);
public TableConfig(String name, String primaryKey, boolean autoIncrement,boolean needAddLimit, int tableType,
public TableConfig(String name, String primaryKey, boolean autoIncrement,boolean needAddLimit, TableTypeEnum tableType,
String dataNode, RuleConfig rule, boolean ruleRequired,
TableConfig parentTC, boolean isChildTable, String joinKey,
String parentKey,String subTables) {
String parentKey) {
if (name == null) {
throw new IllegalArgumentException("table name is null");
} else if (dataNode == null) {
@@ -94,19 +96,6 @@ public class TableConfig {
dataNodes.add(dn);
}
if(subTables!=null && !subTables.equals("")){
String sTables[] = SplitUtil.split(subTables, ',', '$', '-');
if (sTables == null || sTables.length <= 0) {
throw new IllegalArgumentException("invalid table subTables");
}
this.distTables = new ArrayList<String>(sTables.length);
for (String table : sTables) {
distTables.add(table);
}
}else{
this.distTables = new ArrayList<String>();
}
this.rule = rule;
this.partitionColumn = (rule == null) ? null : rule.getColumn();
partionKeyIsPrimaryKey=(partitionColumn==null)?primaryKey==null:partitionColumn.equals(primaryKey);
@@ -145,7 +134,7 @@ public class TableConfig {
}
public boolean isGlobalTable() {
return this.tableType == TableConfig.TYPE_GLOBAL_TABLE;
return this.tableType == TableTypeEnum.TYPE_GLOBAL_TABLE;
}
public String genLocateRootParentSQL() {
@@ -187,7 +176,7 @@ public class TableConfig {
return partitionColumn;
}
public int getTableType() {
public TableTypeEnum getTableType() {
return tableType;
}
@@ -254,17 +243,6 @@ public class TableConfig {
return partionKeyIsPrimaryKey;
}
public ArrayList<String> getDistTables() {
return this.distTables;
}
public boolean isDistTable(){
if(this.distTables!=null && !this.distTables.isEmpty() ){
return true;
}
return false;
}
public List<SQLTableElement> getTableElementList() {
return tableElementList;
}

View File

@@ -18,9 +18,6 @@
package io.mycat.memory.environment;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
@@ -28,6 +25,9 @@ import java.io.InputStreamReader;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Convenience class to extract hardware specifics of the computer executing this class
*/
@@ -38,6 +38,7 @@ public class Hardware {
private static final String LINUX_MEMORY_INFO_PATH = "/proc/meminfo";
private static final Pattern LINUX_MEMORY_REGEX = Pattern.compile("^MemTotal:\\s*(\\d+)\\s+kB$");
private static final Pattern LINUX__FREE_MEMORY_REGEX = Pattern.compile("^MemFree:\\s*(\\d+)\\s+kB$");
@@ -110,6 +111,37 @@ public class Hardware {
return -1;
}
}
/**
 * Returns the size of the free physical memory in bytes on a Linux-based
 * operating system, parsed from the "MemFree:" line of /proc/meminfo.
 *
 * @return the size of the free physical memory in bytes or <code>-1</code> if
 * the size could not be determined
 */
public static long getFreeSizeOfPhysicalMemoryForLinux() {
    // try-with-resources closes the reader on every exit path
    try (BufferedReader lineReader = new BufferedReader(new FileReader(LINUX_MEMORY_INFO_PATH))) {
        String line;
        while ((line = lineReader.readLine()) != null) {
            Matcher matcher = LINUX__FREE_MEMORY_REGEX.matcher(line);
            if (matcher.matches()) {
                // group(1) holds the MemFree value, reported by the kernel in kB
                String totalMemory = matcher.group(1);
                return Long.parseLong(totalMemory) * 1024L; // Convert from kilobyte to byte
            }
        }
        // expected line did not come
        // NOTE(review): the log text says "physical memory" but this method reads free memory — consider rewording
        LOG.error("Cannot determine the size of the physical memory for Linux host (using '/proc/meminfo'). Unexpected format.");
        return -1;
    }
    catch (NumberFormatException e) {
        LOG.error("Cannot determine the size of the physical memory for Linux host (using '/proc/meminfo'). Unexpected format.");
        return -1;
    }
    catch (Throwable t) {
        // catch-all keeps hardware probing best-effort; failure is reported as "unknown" (-1)
        LOG.error("Cannot determine the size of the physical memory for Linux host (using '/proc/meminfo'): " + t.getMessage(), t);
        return -1;
    }
}
/**
* Returns the size of the physical memory in bytes on a Mac OS-based

View File

@@ -7,6 +7,9 @@ import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mycat.backend.mysql.BufferUtil;
import io.mycat.config.Fields;
import io.mycat.memory.unsafe.row.UnsafeRow;
@@ -14,9 +17,6 @@ import io.mycat.net.FrontendConnection;
import io.mycat.util.ByteUtil;
import io.mycat.util.DateUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* ProtocolBinary::ResultsetRow:
* row of a binary resultset (COM_STMT_EXECUTE)
@@ -254,7 +254,7 @@ public class BinaryRowDataPacket extends MySQLPacket {
bb = conn.getProcessor().getBufferPool().allocate(totalSize);
BufferUtil.writeUB3(bb, calcPacketSize());
BufferUtil.writeUB3(bb, size);
bb.put(packetId);
bb.put(packetHeader); // packet header [00]
bb.put(nullBitMap); // NULL-Bitmap

View File

@@ -25,6 +25,7 @@ package io.mycat.net.mysql;
import java.nio.ByteBuffer;
import io.mycat.MycatServer;
import io.mycat.backend.mysql.BufferUtil;
import io.mycat.backend.mysql.MySQLMessage;
import io.mycat.buffer.BufferArray;
@@ -95,5 +96,18 @@ public class EOFPacket extends MySQLPacket {
BufferUtil.writeUB2(buffer, warningCount);
BufferUtil.writeUB2(buffer, status);
}
/**
 * Serializes this EOF packet into a standalone byte array:
 * 3-byte payload length + 1-byte packet id, then the payload
 * (0xFE marker, 2-byte warning count, 2-byte status flags).
 * A pooled buffer is borrowed for assembly and recycled before returning.
 */
public byte[] toBytes() {
    int size = calcPacketSize();
    ByteBuffer buffer = MycatServer.getInstance().getBufferPool().allocate(size + packetHeaderSize);
    BufferUtil.writeUB3(buffer, size);
    buffer.put(packetId);
    buffer.put(fieldCount);
    BufferUtil.writeUB2(buffer, warningCount);
    BufferUtil.writeUB2(buffer, status);
    buffer.flip();
    byte[] data = new byte[buffer.limit()];
    buffer.get(data);
    // return the pooled buffer; 'data' is an independent copy
    MycatServer.getInstance().getBufferPool().recycle(buffer);
    return data;
}
}

View File

@@ -94,8 +94,8 @@ public class ErrorPacket extends MySQLPacket {
return data;
}
public byte[] toBytes() {
ByteBuffer buffer = ByteBuffer.allocate(calcPacketSize()+4);
int size = calcPacketSize();
ByteBuffer buffer = MycatServer.getInstance().getBufferPool().allocate(size + packetHeaderSize);
BufferUtil.writeUB3(buffer, size);
buffer.put(packetId);
buffer.put(fieldCount);

View File

@@ -141,9 +141,9 @@ public class OkPacket extends MySQLPacket {
}
public byte[] toBytes() {
int totalSize = calcPacketSize() + packetHeaderSize;
ByteBuffer buffer = MycatServer.getInstance().getBufferPool().allocate(totalSize);
BufferUtil.writeUB3(buffer, calcPacketSize());
int size = calcPacketSize();
ByteBuffer buffer = MycatServer.getInstance().getBufferPool().allocate(size + packetHeaderSize);
BufferUtil.writeUB3(buffer, size);
buffer.put(packetId);
buffer.put(fieldCount);
BufferUtil.writeLength(buffer, affectedRows);

View File

@@ -27,6 +27,7 @@ import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import io.mycat.MycatServer;
import io.mycat.backend.mysql.BufferUtil;
import io.mycat.backend.mysql.MySQLMessage;
import io.mycat.buffer.BufferArray;
@@ -55,8 +56,8 @@ import io.mycat.net.FrontendConnection;
* @author mycat
*/
public class RowDataPacket extends MySQLPacket {
private static final byte NULL_MARK = (byte) 251;
private static final byte EMPTY_MARK = (byte) 0;
protected static final byte NULL_MARK = (byte) 251;
protected static final byte EMPTY_MARK = (byte) 0;
public byte[] value;
public int fieldCount;
@@ -75,7 +76,15 @@ public class RowDataPacket extends MySQLPacket {
//这里应该修改field
fieldCount=fieldCount+add;
}
/**
 * Appends all given field values (raw column bytes; null = SQL NULL) to this row.
 * NOTE(review): fieldCount is NOT updated here, yet toBytes()/write iterate only
 * fieldCount entries — confirm callers keep fieldCount in sync (cf. addFieldCount).
 */
public void addAll(List<byte[]> values) {
    fieldValues.addAll(values);
}
/** Returns the raw bytes of the index-th field (may be null for SQL NULL). */
public byte[] getValue(int index) {
    return fieldValues.get(index);
}
/** Replaces the raw bytes of the index-th field in place. */
public void setValue(int index, byte[] value) {
    fieldValues.set(index, value);
}
public void read(byte[] data) {
value = data;
MySQLMessage mm = new MySQLMessage(data);
@@ -147,5 +156,25 @@ public class RowDataPacket extends MySQLPacket {
}
}
}
/**
 * Serializes this text-protocol row into a standalone byte array:
 * 3-byte payload length + 1-byte packet id, then each of the first
 * fieldCount values as length-encoded bytes (0xFB marks NULL, a single
 * 0x00 marks an empty string). A pooled buffer is borrowed and recycled.
 */
public byte[] toBytes() {
    int size = calcPacketSize();
    ByteBuffer buffer = MycatServer.getInstance().getBufferPool().allocate(size + packetHeaderSize);
    BufferUtil.writeUB3(buffer, size);
    buffer.put(packetId);
    for (int i = 0; i < fieldCount; i++) {
        byte[] fv = fieldValues.get(i);
        if (fv == null) {
            buffer.put(RowDataPacket.NULL_MARK);
        } else if (fv.length == 0) {
            buffer.put(RowDataPacket.EMPTY_MARK);
        } else {
            BufferUtil.writeWithLength(buffer, fv);
        }
    }
    buffer.flip();
    byte[] data = new byte[buffer.limit()];
    buffer.get(data);
    // return the pooled buffer; 'data' is an independent copy
    MycatServer.getInstance().getBufferPool().recycle(buffer);
    return data;
}
}

View File

@@ -0,0 +1,48 @@
package io.mycat.plan;
import java.util.Objects;

import org.apache.commons.lang.StringUtils;
/**
 * A (table, name) key identifying a column during plan-tree name resolution.
 * The table part is compared case-sensitively, the column name
 * case-insensitively (MySQL column names are case-insensitive).
 * Fields are intentionally mutable: PlanNode.setUpInnerFields() rewrites
 * table/name after construction — do not use as a map key after mutation.
 */
public class NamedField {
    // owning table (or alias); compared case-sensitively
    public String table;
    // column name; compared case-insensitively
    public String name;
    // the plan node this field belongs to (not part of equals/hashCode)
    public final PlanNode planNode;

    public NamedField(PlanNode tableNode) {
        this(null, null, tableNode);
    }

    public NamedField(String table, String name, PlanNode planNode) {
        this.table = table;
        this.name = name;
        this.planNode = planNode;
    }

    @Override
    public int hashCode() {
        // name is lower-cased to stay consistent with the case-insensitive equals;
        // note toLowerCase() vs equalsIgnoreCase() can diverge for some locales (e.g. Turkish dotless i)
        int result = (table == null) ? 0 : table.hashCode();
        result = 31 * result + ((name == null) ? 0 : name.toLowerCase().hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this)
            return true;
        if (!(obj instanceof NamedField))
            return false;
        NamedField other = (NamedField) obj;
        // table: case-sensitive, null-safe; name: case-insensitive, null-safe
        return Objects.equals(table, other.table)
                && (name == null ? other.name == null : name.equalsIgnoreCase(other.name));
    }

    @Override
    public String toString() {
        return "table:" + table + ",name:" + name;
    }
}

View File

@@ -0,0 +1,48 @@
/**
*
*/
package io.mycat.plan;
import com.alibaba.druid.sql.ast.SQLOrderingSpecification;
import io.mycat.plan.common.item.Item;
/**
 * One ORDER BY / GROUP BY entry: an expression plus its sort direction.
 * The single-argument constructor defaults the direction to ascending.
 */
public class Order {
    private Item item;
    private SQLOrderingSpecification sortOrder;

    public Order(Item item) {
        this(item, SQLOrderingSpecification.ASC);
    }

    public Order(Item item, SQLOrderingSpecification sortOrder) {
        this.item = item;
        this.sortOrder = sortOrder;
    }

    /** Structural copy: clones the expression, keeps the same direction. */
    public Order copy() {
        return new Order(item.cloneStruct(), sortOrder);
    }

    public Item getItem() {
        return item;
    }

    public void setItem(Item newItem) {
        this.item = newItem;
    }

    public SQLOrderingSpecification getSortOrder() {
        return sortOrder;
    }

    public void setSortOrder(SQLOrderingSpecification newSortOrder) {
        this.sortOrder = newSortOrder;
    }

    @Override
    public String toString() {
        return "order by " + item.toString() + " " + sortOrder;
    }
}

View File

@@ -0,0 +1,685 @@
package io.mycat.plan;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import org.apache.log4j.Logger;
import com.alibaba.druid.sql.ast.SQLOrderingSpecification;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import io.mycat.config.ErrorCode;
import io.mycat.plan.common.context.NameResolutionContext;
import io.mycat.plan.common.context.ReferContext;
import io.mycat.plan.common.exception.MySQLOutPutException;
import io.mycat.plan.common.item.Item;
import io.mycat.plan.common.item.Item.ItemType;
import io.mycat.plan.common.item.ItemField;
import io.mycat.plan.common.item.function.sumfunc.ItemSum;
import io.mycat.plan.common.item.subquery.ItemSubselect;
import io.mycat.plan.node.JoinNode;
import io.mycat.plan.node.TableNode;
/**
 * Base class of all query-plan nodes (table scan, join, merge, sub-query, view).
 * A node owns its select list, WHERE/HAVING filters, GROUP BY / ORDER BY lists,
 * limit range and children. After the tree is built, setUpFields() resolves
 * column names bottom-up and setUpRefers() records which columns each child
 * must produce.
 */
public abstract class PlanNode {
    private static final Logger logger = Logger.getLogger(PlanNode.class);

    public enum PlanNodeType {
        NONAME, TABLE, JOIN, MERGE, QUERY, VIEW
    }

    /** Concrete node type; used instead of instanceof chains throughout the optimizer. */
    public abstract PlanNodeType type();

    protected String sql;

    private boolean isDistinct = false;

    /**
     * Columns of the select list.
     */
    protected List<Item> columnsSelected = new ArrayList<Item>();

    /**
     * Order-by items explicitly given by the query; insertion order must be preserved.
     */
    protected List<Order> orderBys = new LinkedList<Order>();

    /**
     * Group-by items explicitly given by the query; insertion order must be preserved.
     */
    protected List<Order> groups = new LinkedList<Order>();

    /**
     * HAVING condition.
     */
    protected Item havingFilter;

    /**
     * Parent node; e.g. a correlated sub-query depends on field information of its parent.
     * http://dev.mysql.com/doc/refman/5.0/en/correlated-subqueries.html
     */
    private PlanNode parent;

    /**
     * Child nodes.
     */
    protected List<PlanNode> children = new ArrayList<PlanNode>();

    /**
     * Limit offset; -1 means "not set".
     */
    protected long limitFrom = -1;

    /**
     * Limit end; -1 means "not set".
     */
    protected long limitTo = -1;

    /**
     * filter in where
     */
    protected Item whereFilter = null;

    /**
     * Alias of this node, used during joins etc. to tell which subNode a row came from.
     */
    protected String alias;

    /**
     * When a sub-query carries aliases both inside and outside, the inner one is
     * subAlias and the outer one is alias. This distinguishes the two aliases in
     * e.g. "select * from (select * from test1 t1) t", which the original tddl
     * model could not. When a table node has this set, the alias is pushed down
     * together with the statement.
     */
    protected String subAlias;

    /**
     * Whether this node is a sub-query.
     */
    protected boolean subQuery;

    protected boolean exsitView = false;

    /**
     * Aggregate functions collected in this node.
     */
    public HashSet<ItemSum> sumFuncs = new HashSet<ItemSum>();

    /**
     * Sub-selects collected in this node.
     */
    public List<ItemSubselect> subSelects = new ArrayList<ItemSubselect>();

    protected List<TableNode> referedTableNodes = new ArrayList<TableNode>();

    // inner field -> child field
    protected Map<NamedField, NamedField> innerFields = new LinkedHashMap<NamedField, NamedField>();

    protected Map<NamedField, Item> outerFields = new LinkedHashMap<NamedField, Item>();

    protected NameResolutionContext nameContext;

    protected ReferContext referContext;

    protected PlanNode() {
        nameContext = new NameResolutionContext();
        referContext = new ReferContext();
        nameContext.setPlanNode(this);
        referContext.setPlanNode(this);
    }

    /**
     * All referred columns, stored as childnode -> selectable list.
     * The loader makes get() create an empty list on first access per child.
     */
    protected LoadingCache<PlanNode, List<Item>> columnsReferedCache = CacheBuilder.newBuilder()
            .build(new CacheLoader<PlanNode, List<Item>>() {
                @Override
                public List<Item> load(PlanNode tn) {
                    return new ArrayList<Item>();
                }
            });

    private List<Item> columnsReferList = new ArrayList<Item>();

    private boolean existUnPushDownGroup = false;

    /**
     * Whether this node can use the global-table optimization.
     * TableNode: true iff its table is a global table; MergeNode: always false;
     * QueryNode: true iff its child is; JoinNode: true iff at most one child
     * is a non-global table.
     */
    protected boolean isGlobaled = false;

    // number of non-global tables involved in this node
    protected int unGlobalTableCount = 0;

    protected List<Item> nestLoopFilters = null;

    public abstract String getPureName();

    /* height of this node in the plan tree */
    public abstract int getHeight();

    /** Name used to qualify this node's columns: alias, else subAlias, else pure name. */
    public final String getCombinedName() {
        if (this.getAlias() != null) {
            return this.getAlias();
        }
        if (this.getSubAlias() != null) {
            return this.getSubAlias();
        }
        return this.getPureName();
    }

    /** Returns the first child, or null if this node has none. */
    public PlanNode getChild() {
        if (children.isEmpty())
            return null;
        else
            return children.get(0);
    }

    /** Appends a child and wires its parent pointer to this node. */
    public void addChild(PlanNode childNode) {
        childNode.setParent(this);
        this.children.add(childNode);
    }

    public PlanNode select(List<Item> columnSelected) {
        this.columnsSelected = columnSelected;
        return this;
    }

    public PlanNode groupBy(Item c, SQLOrderingSpecification sortOrder) {
        Order order = new Order(c, sortOrder);
        this.groups.add(order);
        return this;
    }

    /** Adds an order-by entry unless an equal one is already present. */
    public PlanNode orderBy(Item c, SQLOrderingSpecification sortOrder) {
        Order order = new Order(c, sortOrder);
        if (!this.orderBys.contains(order)) {
            this.orderBys.add(order);
        }
        return this;
    }

    /**
     * Resolves names in all clauses, bottom-up: children first (via
     * setUpInnerFields), then select / where / group by / having / order by.
     * Clears previously collected aggregate functions first.
     */
    public void setUpFields() {
        sumFuncs.clear();
        setUpInnerFields();
        setUpSelects();
        setUpWhere();
        setUpGroupBy();
        setUpHaving();
        setUpOrderBy();
    }

    // column refered start
    /**
     * Records, per child, which columns this node actually references.
     * For a push-down node, where/join conditions are skipped because they
     * travel down with the pushed statement.
     */
    public void setUpRefers(boolean isPushDownNode) {
        sumFuncs.clear();
        referContext.setPushDownNode(isPushDownNode);
        // select
        for (Item sel : columnsSelected) {
            setUpItemRefer(sel);
        }
        if (type() == PlanNodeType.JOIN) {
            JoinNode jn = (JoinNode) this;
            if (!isPushDownNode) {
                for (Item bf : jn.getJoinFilter())
                    setUpItemRefer(bf);
                setUpItemRefer(jn.getOtherJoinOnFilter());
            }
        }
        // where: a push-down node needs no where condition here
        if (!isPushDownNode) {
            setUpItemRefer(whereFilter);
        }
        // group by
        for (Order groupBy : groups) {
            setUpItemRefer(groupBy.getItem());
        }
        // having
        setUpItemRefer(havingFilter);
        // order by
        for (Order orderBy : orderBys) {
            setUpItemRefer(orderBy.getItem());
        }
        // make list
        for (List<Item> selSet : columnsReferedCache.asMap().values()) {
            columnsReferList.addAll(selSet);
        }
    }

    // ==================== helper method =================

    public abstract PlanNode copy();

    /**
     * Copies this node's own state (aliases, clauses, limits, sql, flags) into
     * {@code to}; clause items are cloned so the copy is structurally independent.
     */
    protected final void copySelfTo(PlanNode to) {
        to.setAlias(this.alias);
        to.setSubAlias(this.subAlias);
        to.setDistinct(this.isDistinct);
        for (Item selected : this.getColumnsSelected()) {
            Item copySel = selected.cloneItem();
            copySel.setItemName(selected.getItemName());
            to.columnsSelected.add(copySel);
        }
        for (Order groupBy : this.getGroupBys()) {
            to.groups.add(groupBy.copy());
        }
        for (Order orderBy : this.getOrderBys()) {
            to.orderBys.add(orderBy.copy());
        }
        to.whereFilter = this.whereFilter == null ? null : this.whereFilter.cloneItem();
        to.havingFilter = this.havingFilter == null ? null : havingFilter.cloneItem();
        to.setLimitFrom(this.limitFrom);
        to.setLimitTo(this.limitTo);
        to.setSql(this.getSql());
        to.setSubQuery(subQuery);
        to.setUnGlobalTableCount(unGlobalTableCount);
    }

    /**
     * Rebuilds innerFields from the children's outer fields, re-qualifying each
     * with the child's alias (or this node's subAlias when present).
     */
    protected void setUpInnerFields() {
        innerFields.clear();
        for (PlanNode child : children) {
            child.setUpFields();
            for (NamedField coutField : child.outerFields.keySet()) {
                NamedField tmpField = new NamedField(coutField.planNode);
                tmpField.table = child.getAlias() == null ? coutField.table : child.getAlias();
                // a view may also carry a subAlias
                if (subAlias != null && subAlias.length() != 0)
                    tmpField.table = subAlias;
                tmpField.name = coutField.name;
                innerFields.put(tmpField, coutField);
            }
        }
    }

    /**
     * Resolves the select list (expanding '*' first) and builds outerFields,
     * the columns this node exposes to its parent.
     */
    protected void setUpSelects() {
        if (columnsSelected.isEmpty()) {
            columnsSelected.add(new ItemField(null, null, "*"));
        }
        boolean withWild = false;
        for (Item sel : columnsSelected) {
            if (sel.isWild())
                withWild = true;
        }
        if (withWild)
            dealStarColumn();
        outerFields.clear();
        nameContext.setFindInSelect(false);
        nameContext.setSelectFirst(false);
        for (Item sel : columnsSelected) {
            setUpItem(sel);
            NamedField field = makeOutNamedField(sel);
            if (outerFields.containsKey(field) && getParent() != null)
                throw new MySQLOutPutException(ErrorCode.ER_OPTIMIZER, "", "duplicate field");
            outerFields.put(field, sel);
        }
    }

    private void setUpWhere() {
        nameContext.setFindInSelect(false);
        nameContext.setSelectFirst(false);
        whereFilter = setUpItem(whereFilter);
    }

    /**
     * Resolves group-by items; a bare integer N refers to the N-th select column
     * (1-based), as in MySQL.
     */
    private void setUpGroupBy() {
        nameContext.setFindInSelect(true);
        nameContext.setSelectFirst(false);
        for (Order order : groups) {
            Item item = order.getItem();
            if (item.type() == ItemType.INT_ITEM) {
                int index = item.valInt().intValue();
                if (index >= 1 && index <= getColumnsSelected().size())
                    order.setItem(getColumnsSelected().get(index - 1));
                else
                    throw new MySQLOutPutException(ErrorCode.ER_OPTIMIZER, "",
                            "Unknown column '" + index + "' in group statement");
            } else {
                order.setItem(setUpItem(item));
            }
        }
    }

    private void setUpHaving() {
        nameContext.setFindInSelect(true);
        nameContext.setSelectFirst(true);
        havingFilter = setUpItem(havingFilter);
    }

    /**
     * Resolves order-by items; a bare integer N refers to the N-th select column
     * (1-based), as in MySQL.
     */
    private void setUpOrderBy() {
        nameContext.setFindInSelect(true);
        nameContext.setSelectFirst(true);
        for (Order order : orderBys) {
            Item item = order.getItem();
            if (item.type() == ItemType.INT_ITEM) {
                int index = item.valInt().intValue();
                if (index >= 1 && index <= getColumnsSelected().size())
                    order.setItem(getColumnsSelected().get(index - 1));
                else
                    throw new MySQLOutPutException(ErrorCode.ER_OPTIMIZER, "",
                            "Unknown column '" + index + "' in order statement");
            } else {
                order.setItem(setUpItem(item));
            }
        }
    }

    /**
     * Expands '*' and 'tbl.*' select items into explicit column references
     * based on innerFields. Relies on innerFields keeping columns of the same
     * table contiguous (LinkedHashMap insertion order).
     */
    protected void dealStarColumn() {
        List<Item> newSels = new ArrayList<Item>();
        for (Item selItem : columnsSelected) {
            if (selItem.isWild()) {
                ItemField wildField = (ItemField) selItem;
                if (wildField.tableName == null || wildField.tableName.length() == 0) {
                    for (NamedField field : innerFields.keySet()) {
                        ItemField col = new ItemField(null, field.table, field.name);
                        newSels.add(col);
                    }
                } else {
                    String selTable = wildField.tableName;
                    boolean found = false;
                    for (NamedField field : innerFields.keySet()) {
                        if (selTable != null && selTable.equals(field.table)
                                || (selTable == null && field.table == null)) {
                            ItemField col = new ItemField(null, field.table, field.name);
                            newSels.add(col);
                            found = true;
                        } else if (found) {
                            // a.* -> a.id,a.id1,b.id : stop once b.id is reached
                            break;
                        }
                    }
                    if (!found) {
                        throw new MySQLOutPutException(ErrorCode.ER_OPTIMIZER, "",
                                "child table " + selTable + " not exist!");
                    }
                }
            } else {
                newSels.add(selItem);
            }
        }
        columnsSelected = newSels;
    }

    /** Builds the outward-visible NamedField for a select item, applying subAlias and item alias. */
    private NamedField makeOutNamedField(Item sel) {
        NamedField tmpField = new NamedField(sel.getTableName(), sel.getItemName(), this);
        if (subAlias != null)
            tmpField.table = subAlias;
        if (tmpField.table == null)// maybe function
            tmpField.table = getPureName();
        if (sel.getAlias() != null)
            tmpField.name = sel.getAlias();
        return tmpField;
    }

    /** Resolves a single item against this node's name context; null-safe. */
    protected Item setUpItem(Item sel) {
        if (sel == null)
            return null;
        return sel.fixFields(nameContext);
    }

    private void setUpItemRefer(Item sel) {
        if (sel != null)
            sel.fixRefer(referContext);
    }

    // --------------------------getter&setter---------------------------

    public long getLimitFrom() {
        return limitFrom;
    }

    public PlanNode setLimitFrom(long limitFrom) {
        this.limitFrom = limitFrom;
        return this;
    }

    public long getLimitTo() {
        return limitTo;
    }

    public PlanNode setLimitTo(long limitTo) {
        this.limitTo = limitTo;
        return this;
    }

    public PlanNode limit(long i, long j) {
        this.setLimitFrom(i);
        this.setLimitTo(j);
        return this;
    }

    public Item getWhereFilter() {
        return whereFilter;
    }

    public PlanNode query(Item whereFilter) {
        this.whereFilter = whereFilter;
        return this;
    }

    public String getAlias() {
        return alias;
    }

    public PlanNode alias(String alias) {
        this.alias = alias;
        return this;
    }

    public PlanNode subAlias(String alias) {
        this.subAlias = alias;
        return this;
    }

    public List<Order> getGroupBys() {
        return this.groups;
    }

    public PlanNode setGroupBys(List<Order> groups) {
        this.groups = groups;
        return this;
    }

    public List<Order> getOrderBys() {
        return orderBys;
    }

    public void setOrderBys(List<Order> orderBys) {
        this.orderBys = orderBys;
    }

    public List<PlanNode> getChildren() {
        return this.children;
    }

    public List<Item> getColumnsSelected() {
        return columnsSelected;
    }

    public PlanNode setColumnsSelected(List<Item> columnsSelected) {
        this.columnsSelected = columnsSelected;
        return this;
    }

    public Map<PlanNode, List<Item>> getColumnsReferedMap() {
        return this.columnsReferedCache.asMap();
    }

    public void addSelToReferedMap(PlanNode tn, Item sel) {
        // so that identical sels in the refer map share the same column name
        try {
            this.columnsReferedCache.get(tn).add(sel);
        } catch (ExecutionException e) {
            logger.warn("columnsReferedCache error", e);
        }
    }

    public List<Item> getColumnsRefered() {
        return this.columnsReferList;
    }

    /**
     * Sets the alias, at table level.
     */
    public PlanNode setAlias(String string) {
        this.alias(string);
        return this;
    }

    /**
     * Sets the sub-alias (inner alias of a sub-query), at table level.
     */
    public PlanNode setSubAlias(String string) {
        this.subAlias(string);
        return this;
    }

    public String getSubAlias() {
        return subAlias;
    }

    public boolean isSubQuery() {
        return subQuery;
    }

    public PlanNode setSubQuery(boolean subQuery) {
        this.subQuery = subQuery;
        return this;
    }

    public PlanNode having(Item havingFilter) {
        this.havingFilter = havingFilter;
        return this;
    }

    public Item getHavingFilter() {
        return this.havingFilter;
    }

    public void setWhereFilter(Item whereFilter) {
        this.whereFilter = whereFilter;
    }

    public boolean isGlobaled() {
        return isGlobaled;
    }

    public void setGlobaled(boolean isGlobaled) {
        this.isGlobaled = isGlobaled;
    }

    public int getUnGlobalTableCount() {
        return unGlobalTableCount;
    }

    public void setUnGlobalTableCount(int unGlobalTableCount) {
        this.unGlobalTableCount = unGlobalTableCount;
    }

    /* table nodes referred under this node */
    public List<TableNode> getReferedTableNodes() {
        return referedTableNodes;
    }

    public PlanNode getParent() {
        return parent;
    }

    /** Sets the parent and propagates referred tables / view flag upwards. */
    public void setParent(PlanNode parent) {
        this.parent = parent;
        if (parent != null) {
            parent.referedTableNodes.addAll(referedTableNodes);
            parent.exsitView |= this.exsitView;
        }
    }

    /**
     * @return the innerFields
     */
    public Map<NamedField, NamedField> getInnerFields() {
        return innerFields;
    }

    /**
     * @return the outerFields
     */
    public Map<NamedField, Item> getOuterFields() {
        return outerFields;
    }

    /**
     * @return the exsitView
     */
    public boolean isExsitView() {
        return exsitView;
    }

    /**
     * @param exsitView
     *            the exsitView to set
     */
    public void setExsitView(boolean exsitView) {
        this.exsitView = exsitView;
    }

    public boolean existUnPushDownGroup() {
        return existUnPushDownGroup;
    }

    public void setExistUnPushDownGroup(boolean existUnPushDownGroup) {
        this.existUnPushDownGroup = existUnPushDownGroup;
    }

    /**
     * @return the isDistinct
     */
    public boolean isDistinct() {
        return isDistinct;
    }

    /**
     * @param isDistinct
     *            the isDistinct to set
     */
    public void setDistinct(boolean isDistinct) {
        this.isDistinct = isDistinct;
    }

    public String getSql() {
        return sql;
    }

    public void setSql(String sql) {
        this.sql = sql;
    }

    /**
     * @return the strategyFilters
     */
    public List<Item> getNestLoopFilters() {
        return nestLoopFilters;
    }

    /**
     * @param nestLoopFilters
     *            the strategyFilters to set
     */
    public void setNestLoopFilters(List<Item> nestLoopFilters) {
        this.nestLoopFilters = nestLoopFilters;
    }

    @Override
    public final String toString() {
        return this.toString(0);
    }

    /**
     * show visualable plan in tree
     *
     * @param level
     * @return
     */
    public abstract String toString(int level);
}

View File

@@ -0,0 +1,14 @@
package io.mycat.plan.common;
/**
 * Target data types supported by SQL CAST()/CONVERT().
 * NOTE: declaration order (ordinal values) may be relied upon by callers — do not reorder.
 */
public enum CastTarget {
    ITEM_CAST_BINARY,
    ITEM_CAST_CHAR,
    ITEM_CAST_DATE,
    ITEM_CAST_DATETIME,
    ITEM_CAST_DECIMAL,
    // JSON is intentionally absent (not supported)
    ITEM_CAST_NCHAR,
    ITEM_CAST_SIGNED_INT,
    ITEM_CAST_TIME,
    ITEM_CAST_UNSIGNED_INT
}

View File

@@ -0,0 +1,12 @@
/**
*
*/
package io.mycat.plan.common;
/**
 * Parsed type specification of a CAST expression: the target type plus the
 * optional length and decimal precision, where -1 means "not specified".
 */
public class CastType {
    // target data type of the cast
    public CastTarget target;
    // declared length, e.g. CHAR(20); -1 when absent
    public int length = -1;
    // declared decimal digits, e.g. DECIMAL(10,2); -1 when absent
    public int dec = -1;
}

View File

@@ -0,0 +1,68 @@
package io.mycat.plan.common;
/**
 * Latin1 character-classification helpers modelled on MySQL's ctype tables.
 * Each table entry is a bit mask of the _MY_* flags below; the table is
 * indexed by (char + 1), matching MySQL's 257-entry layout where index 0
 * is a filler byte.
 */
public class Ctype {
    public static final int _MY_U = 01; /* Upper case */
    public static final int _MY_L = 02; /* Lower case */
    public static final int _MY_NMR = 04; /* Numeral (digit) */
    public static final int _MY_SPC = 010; /* Spacing character */
    public static final int _MY_PNT = 020; /* Punctuation */
    public static final int _MY_CTR = 040; /* Control character */
    public static final int _MY_B = 0100; /* Blank */
    public static final int _MY_X = 0200; /* heXadecimal digit */

    // MySQL's latin1 classification table (values > 127 need the (byte) cast)
    private static final byte ctype_latin1[] = { 0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 40, 40, 40, 40, 40, 32, 32, 32, 32,
            32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 72, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
            16, 16, 16, (byte) 132, (byte) 132, (byte) 132, (byte) 132, (byte) 132, (byte) 132, (byte) 132, (byte) 132,
            (byte) 132, (byte) 132, 16, 16, 16, 16, 16, 16, 16, (byte) 129, (byte) 129, (byte) 129, (byte) 129,
            (byte) 129, (byte) 129, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 16, 16, 16, 16, 16, 16,
            (byte) 130, (byte) 130, (byte) 130, (byte) 130, (byte) 130, (byte) 130, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
            2, 2, 2, 2, 2, 2, 2, 2, 16, 16, 16, 16, 32, 16, 0, 16, 2, 16, 16, 16, 16, 16, 16, 1, 16, 1, 0, 1, 0, 0, 16,
            16, 16, 16, 16, 16, 16, 16, 16, 2, 16, 2, 0, 2, 1, 72, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
            16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 16, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
            2, 2, 2, 2, 2, 2, 2, 16, 2, 2, 2, 2, 2, 2, 2, 2 };

    /**
     * Table lookup shared by the classification predicates.
     * Characters outside the latin1 range (> 0xFF) carry no flags; previously
     * they caused an ArrayIndexOutOfBoundsException.
     */
    private static boolean hasFlag(char c, int flag) {
        if (c > 0xFF) {
            return false;
        }
        return (ctype_latin1[c + 1] & flag) != 0;
    }

    /**
     * Returns true when {@code c} is a digit. Currently latin1 only.
     */
    public static boolean isDigit(char c) {
        return hasFlag(c, _MY_NMR);
    }

    /** Returns true when {@code c} is an upper- or lower-case letter (latin1). */
    public static boolean my_isalpha(char c) {
        return hasFlag(c, _MY_U | _MY_L);
    }

    /** Returns true when {@code c} is a spacing character (latin1). */
    public static boolean spaceChar(char c) {
        return hasFlag(c, _MY_SPC);
    }

    /** Returns true when {@code c} is punctuation (latin1). */
    public static boolean isPunct(char c) {
        return hasFlag(c, _MY_PNT);
    }

    /**
     * Compares css[sbegin, sbegin+slen) with cst[tbegin, tbegin+tlen),
     * cf. my_strnncoll_simple in MySQL's ctype-simple.c.
     *
     * @param css    first char array
     * @param sbegin start offset into {@code css}
     * @param slen   number of chars taken from {@code css}
     * @param cst    second char array
     * @param tbegin start offset into {@code cst}
     * @param tlen   number of chars taken from {@code cst}
     * @return negative, zero or positive, as {@link String#compareTo}
     */
    public static int my_strnncoll(char[] css, int sbegin, int slen, char[] cst, int tbegin, int tlen) {
        return new String(css, sbegin, slen).compareTo(new String(cst, tbegin, tlen));
    }
}

Some files were not shown because too many files have changed in this diff Show More