findbugs:change for style

This commit is contained in:
yanhuqing666
2017-08-13 13:06:59 +08:00
parent 28d45519ce
commit 83f12a03bf
56 changed files with 456 additions and 573 deletions

View File

@@ -1,72 +1,125 @@
<FindBugsFilter>
<Match>
<Class name="io.mycat.meta.protocol.StructureMeta" />
</Match>
<Match>
<Class name="io.mycat.meta.protocol.StructureMeta$ColumnMeta" />
</Match>
<Match>
<Class name="io.mycat.meta.protocol.StructureMeta$IndexMeta" />
</Match>
<Match>
<Class name="io.mycat.meta.protocol.StructureMeta$TableMeta" />
</Match>
<Match>
<Bug category="MALICIOUS_CODE,I18N,STYLE,CORRECTNESS" />
</Match>
<!--
MALICIOUS_CODE set ARRAY
I18N STRING DECODE
-->
<!--MALICIOUS_CODE 90 -->
<!-- I18N 147-->
<!-- STYLE 146-->
<!-- PERFORMANCE start: 24 -->
<Bug category="PERFORMANCE" />
<Bug pattern="UUF_UNUSED_FIELD,URF_UNREAD_FIELD,DM_GC" />
<!-- PERFORMANCE end -->
<!-- CORRECTNESS start num:1,other 12 need to debug -->
<Match>
<Bug category="CORRECTNESS" />
<Class name="io.mycat.plan.node.JoinNode" />
<Method name="setRightNode" />
<Bug pattern="NP_NULL_PARAM_DEREF_ALL_TARGETS_DANGEROUS" />
</Match>
<!-- CORRECTNESS end -->
<!-- MT_CORRECTNESS start num:7 -->
<Match>
<Bug category="MT_CORRECTNESS" />
<Class name="io.mycat.MycatServer" />
<Method name="genXATXID" />
<Bug pattern="JLM_JSR166_UTILCONCURRENT_MONITORENTER" />
</Match>
<Match>
<Bug category="MT_CORRECTNESS" />
<Class name="io.mycat.backend.mysql.xa.recovery.impl.FileSystemRepository" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
<Match>
<Bug category="MT_CORRECTNESS" />
<Class name="io.mycat.server.handler.ServerLoadDataInfileHandler" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
<Match>
<Bug category="MT_CORRECTNESS" />
<Class name="io.mycat.memory.unsafe.map.BytesToBytesMap$MapIterator" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
<!-- MT_CORRECTNESS end -->
<!-- BAD_PRACTICE start ,num:9-->
<Match>
<Bug category="BAD_PRACTICE" />
<Bug pattern="RR_NOT_CHECKED,RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
</Match>
<!-- BAD_PRACTICE end-->
<!-- protocol:ignore -->
<Match>
<Class name="io.mycat.meta.protocol.StructureMeta"/>
</Match>
<Match>
<Class name="io.mycat.meta.protocol.StructureMeta$ColumnMeta"/>
</Match>
<Match>
<Class name="io.mycat.meta.protocol.StructureMeta$IndexMeta"/>
</Match>
<Match>
<Class name="io.mycat.meta.protocol.StructureMeta$TableMeta"/>
</Match>
<Match>
<Class name="io.mycat.meta.protocol.StructureMeta$ColumnMeta$Builder"/>
</Match>
<Match>
<Class name="io.mycat.meta.protocol.StructureMeta$IndexMeta$Builder"/>
</Match>
<!-- protocol:ignore -->
<!-- need refactor -->
<Match>
<Class name="io.mycat.sqlengine.mpp.UnsafeRowGrouper"/>
</Match>
<Match>
<Bug category="MALICIOUS_CODE,CORRECTNESS"/>
</Match>
<!--MALICIOUS_CODE 90 -->
<!-- I18N new String(byte[]) UTF-8 OR EVENT SET? -->
<Match>
<Bug category="I18N"/>
<Bug pattern="DM_DEFAULT_ENCODING"/>
</Match>
<!-- STYLE start: 50+ -->
<!-- switch without default:TODO-->
<Match>
<Bug category="STYLE"/>
<Bug pattern="SF_SWITCH_NO_DEFAULT"/>
</Match>
<!-- always throw new exception-->
<Match>
<Bug category="STYLE"/>
<Bug pattern="REC_CATCH_EXCEPTION"/>
</Match>
<!-- feature -->
<Match>
<Bug category="STYLE"/>
<Bug pattern="DB_DUPLICATE_BRANCHES"/>
</Match>
<Match>
<Bug category="STYLE"/>
<Class name="io.mycat.plan.common.time.MyTime"/>
<Method name="extract_date_time"/>
<Bug pattern="SF_SWITCH_FALLTHROUGH"/>
</Match>
<Match>
<Bug category="STYLE"/>
<Class name="io.mycat.memory.unsafe.utils.sort.TimSort"/>
<Method name="binarySort"/>
<Bug pattern="SF_SWITCH_FALLTHROUGH"/>
</Match>
<Match>
<Bug category="STYLE"/>
<Class name="io.mycat.net.mysql.BinaryRowDataPacket"/>
<Method name="read"/>
<Bug pattern="NP_LOAD_OF_KNOWN_NULL_VALUE"/>
</Match>
<!-- STYLE end -->
<!-- PERFORMANCE start: 24 -->
<Match>
<Bug category="PERFORMANCE"/>
<Bug pattern="UUF_UNUSED_FIELD,URF_UNREAD_FIELD,DM_GC"/>
</Match>
<!-- PERFORMANCE end -->
<!-- CORRECTNESS start num:1,other 12 need to debug -->
<Match>
<Bug category="CORRECTNESS"/>
<Class name="io.mycat.plan.node.JoinNode"/>
<Method name="setRightNode"/>
<Bug pattern="NP_NULL_PARAM_DEREF_ALL_TARGETS_DANGEROUS"/>
</Match>
<!-- CORRECTNESS end -->
<!-- MT_CORRECTNESS start num:7 -->
<Match>
<Bug category="MT_CORRECTNESS"/>
<Class name="io.mycat.MycatServer"/>
<Method name="genXATXID"/>
<Bug pattern="JLM_JSR166_UTILCONCURRENT_MONITORENTER"/>
</Match>
<Match>
<Bug category="MT_CORRECTNESS"/>
<Class name="io.mycat.backend.mysql.xa.recovery.impl.FileSystemRepository"/>
<Bug pattern="IS2_INCONSISTENT_SYNC"/>
</Match>
<Match>
<Bug category="MT_CORRECTNESS"/>
<Class name="io.mycat.server.handler.ServerLoadDataInfileHandler"/>
<Bug pattern="IS2_INCONSISTENT_SYNC"/>
</Match>
<Match>
<Bug category="MT_CORRECTNESS"/>
<Class name="io.mycat.memory.unsafe.map.BytesToBytesMap$MapIterator"/>
<Bug pattern="IS2_INCONSISTENT_SYNC"/>
</Match>
<!-- MT_CORRECTNESS end -->
<!-- BAD_PRACTICE start ,num:9-->
<Match>
<Bug category="BAD_PRACTICE"/>
<Bug pattern="RR_NOT_CHECKED,RV_RETURN_VALUE_IGNORED_BAD_PRACTICE"/>
</Match>
<!-- BAD_PRACTICE end-->
</FindBugsFilter>
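
For context, a minimal standalone sketch (not taken from this repository) of two of the patterns the filter above suppresses: a switch without a default branch (SF_SWITCH_NO_DEFAULT, excluded under STYLE) and the platform-default-charset decoding behind the I18N/DM_DEFAULT_ENCODING exclusion.

import java.nio.charset.StandardCharsets;

public class FilterExamples {
    // SF_SWITCH_NO_DEFAULT: a switch statement with no default branch.
    static String describe(int fieldCount) {
        String kind = "unknown";
        switch (fieldCount) {
            case 0x00: kind = "OK"; break;
            case 0xFF: kind = "ERROR"; break;
            // no default branch here: FindBugs reports SF_SWITCH_NO_DEFAULT
        }
        return kind;
    }

    // DM_DEFAULT_ENCODING: new String(byte[]) would use the platform default
    // charset; passing an explicit charset is the usual fix.
    static String decode(byte[] raw) {
        return new String(raw, StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        System.out.println(describe(0x00));
        System.out.println(decode("hello".getBytes(StandardCharsets.UTF_8)));
    }
}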

View File

@@ -160,9 +160,8 @@ public class MySQLDataSource extends PhysicalDatasource {
case OkPacket.FIELD_COUNT:
break;
case ErrorPacket.FIELD_COUNT:
ErrorPacket err = new ErrorPacket();
err.read(bin2);
isConnected = false;
break;
case EOFPacket.FIELD_COUNT:
// send the 323 authentication response packet
Reply323Packet r323 = new Reply323Packet();

View File

@@ -27,10 +27,6 @@ import io.mycat.MycatServer;
import io.mycat.backend.BackendConnection;
import io.mycat.backend.datasource.PhysicalDBNode;
import io.mycat.backend.mysql.nio.handler.transaction.AutoTxOperation;
import io.mycat.backend.mysql.nio.handler.transaction.normal.NormalAutoCommitNodesHandler;
import io.mycat.backend.mysql.nio.handler.transaction.normal.NormalAutoRollbackNodesHandler;
import io.mycat.backend.mysql.nio.handler.transaction.xa.XAAutoCommitNodesHandler;
import io.mycat.backend.mysql.nio.handler.transaction.xa.XAAutoRollbackNodesHandler;
import io.mycat.config.ErrorCode;
import io.mycat.config.MycatConfig;
import io.mycat.log.transaction.TxnLogHelper;
@@ -291,9 +287,6 @@ public class MultiNodeDdlHandler extends MultiNodeHandler {
@Override
public void fieldEofResponse(byte[] header, List<byte[]> fields, List<FieldPacket> fieldPacketsnull, byte[] eof,
boolean isLeft, BackendConnection conn) {
for (int i = 0, len = fields.size(); i < len; ++i) {
byte[] field = fields.get(i);
}
}
@Override
public boolean rowResponse(final byte[] row, RowDataPacket rowPacketnull, boolean isLeft, BackendConnection conn) {

View File

@@ -13,7 +13,7 @@ import io.mycat.route.RouteResultset;
import io.mycat.server.NonBlockingSession;
import io.mycat.server.ServerConnection;
import io.mycat.server.response.ShowTables;
import io.mycat.server.util.ShowCreateStmtInfo;
import io.mycat.server.response.ShowCreateStmtInfo;
import io.mycat.util.StringUtil;
import java.util.ArrayList;

View File

@@ -83,7 +83,7 @@ public class HandlerTool {
*/
public static Item createItem(Item sel, List<Field> fields, int startIndex, boolean allPushDown, HandlerType type,
String charset) {
Item ret = null;
Item ret;
if (sel.basicConstItem())
return sel;
Item.ItemType i = sel.type();
@@ -109,10 +109,6 @@ public class HandlerTool {
} else {
ret = createFieldItem(sel, fields, startIndex);
}
if (ret == null)
throw new MySQLOutPutException(ErrorCode.ER_QUERYHANDLER, "", "item not found:" + sel);
if (ret.getItemName() == null)
ret.setItemName(sel.getPushDownName() == null ? sel.getItemName() : sel.getPushDownName());
ret.fixFields();
return ret;
}

View File

@@ -198,8 +198,7 @@ public abstract class ResultDiskBuffer implements ResultExternal {
if (readBuffer.capacity() >= Integer.MAX_VALUE) {
throw new IllegalArgumentException("Packet size over the limit.");
}
int size = readBuffer.capacity() << 1;
size = (size > Integer.MAX_VALUE) ? Integer.MAX_VALUE : size;
int size = readBuffer.capacity() > (Integer.MAX_VALUE >>1) ? Integer.MAX_VALUE : readBuffer.capacity() << 1;
ByteBuffer newBuffer = ByteBuffer.allocate(size);
readBuffer.position(offset);
newBuffer.put(readBuffer);
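
The old check compared an int against Integer.MAX_VALUE after doubling, which can never be true (the value wraps around instead); a minimal sketch, with hypothetical names, of the overflow-safe doubling the new line uses:

import java.nio.ByteBuffer;

public class GrowBuffer {
    // Overflow-safe doubling: test against MAX_VALUE / 2 before shifting left,
    // because the doubled int itself can never exceed Integer.MAX_VALUE.
    static ByteBuffer grow(ByteBuffer readBuffer, int offset) {
        int capacity = readBuffer.capacity();
        int size = capacity > (Integer.MAX_VALUE >> 1) ? Integer.MAX_VALUE : capacity << 1;
        ByteBuffer newBuffer = ByteBuffer.allocate(size);
        readBuffer.position(offset);
        newBuffer.put(readBuffer);
        return newBuffer;
    }

    public static void main(String[] args) {
        ByteBuffer small = ByteBuffer.allocate(16);
        System.out.println(grow(small, 0).capacity()); // prints 32
    }
}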

View File

@@ -51,15 +51,12 @@ public class DirectByteBufferPool implements BufferPool{
int oldCapacity = buffer.capacity();
int newCapacity = oldCapacity << 1;
ByteBuffer newBuffer = allocate(newCapacity);
if(newBuffer != null){
int newPosition = buffer.position();
buffer.flip();
newBuffer.put(buffer);
newBuffer.position(newPosition);
recycle(buffer);
return newBuffer;
}
return null;
int newPosition = buffer.position();
buffer.flip();
newBuffer.put(buffer);
newBuffer.position(newPosition);
recycle(buffer);
return newBuffer;
}
public ByteBuffer allocate() {
return allocate(chunkSize);

View File

@@ -20,21 +20,21 @@ public class LevelDBCachePooFactory extends CachePoolFactory {
@Override
public CachePool createCachePool(String poolName, int cacheSize,
int expireSeconds) {
Options options = new Options();
options.cacheSize(cacheSize * 1048576);// cacheSize is in MB
options.createIfMissing(true);
DB db =null;
String filePath = "leveldb\\"+poolName;
try {
db=factory.open(new File(filePath), options);
// Use the db in here....
} catch (IOException e) {
logger.info("factory try to open file "+filePath+" failed ");
// Make sure you close the db to shutdown the
// database and avoid resource leaks.
// db.close();
}
return new LevelDBPool(poolName,db,cacheSize);
Options options = new Options();
options.cacheSize(1048576L * cacheSize);// cacheSize is in MB
options.createIfMissing(true);
DB db = null;
String filePath = "leveldb\\" + poolName;
try {
db = factory.open(new File(filePath), options);
// Use the db in here....
} catch (IOException e) {
logger.info("factory try to open file " + filePath + " failed ");
// Make sure you close the db to shutdown the
// database and avoid resource leaks.
// db.close();
}
return new LevelDBPool(poolName, db, cacheSize);
}
}
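
The only functional change here is 1048576L * cacheSize: multiplying two ints and then assigning to a wider type still overflows at 32 bits, so the widening has to happen before the multiplication. The same fix recurs below in DailyRotateLogStore and in the RadixSort/UnsafeSortDataFormat offset arithmetic. A minimal sketch:

public class CacheSizeMath {
    public static void main(String[] args) {
        int cacheSizeMb = 3000; // hypothetical 3000 MB cache
        long wrong = cacheSizeMb * 1048576;  // int * int overflows, then widens: -1149239296
        long right = 1048576L * cacheSizeMb; // long operand forces 64-bit math: 3145728000
        System.out.println(wrong + " vs " + right);
    }
}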

View File

@@ -51,7 +51,7 @@ public class MycatPrivileges implements FrontendPrivileges {
private static final Logger ALARM = LoggerFactory.getLogger("alarm");
private static boolean check = false;
private boolean check = false;
private final static ThreadLocal<WallProvider> contextLocal = new ThreadLocal<WallProvider>();
public static MycatPrivileges instance() {

View File

@@ -32,9 +32,9 @@ public abstract class Versions {
public static final byte PROTOCOL_VERSION = 10;
/** server version **/
public static byte[] SERVER_VERSION = "5.6.29-mycat-2.17.08.0-dev-20170811003734".getBytes();
public static byte[] SERVER_VERSION = "5.6.29-mycat-2.17.08.0-dev-20170813130155".getBytes();
public static byte[] VERSION_COMMENT = "MyCat Server (OpenCloundDB)".getBytes();
public static String ANNOTATION_NAME = "mycat:";
public static final String ANNOTATION_NAME = "mycat:";
public static final String ROOT_PREFIX = "mycat";
public static void setServerVersion(String version) {

View File

@@ -34,7 +34,7 @@ public abstract class Versions {
/** server version **/
public static byte[] SERVER_VERSION = "@server-version@".getBytes();
public static byte[] VERSION_COMMENT = "@version-comment@".getBytes();
public static String ANNOTATION_NAME = "@annotation-name@";
public static final String ANNOTATION_NAME = "@annotation-name@";
public static final String ROOT_PREFIX = "@root_prefix@";
public static void setServerVersion(String version) {

View File

@@ -50,7 +50,7 @@ public final class FirewallConfig {
private WallConfig wallConfig = new WallConfig();
private static WallProvider provider ;
private WallProvider provider ;
public FirewallConfig() { }
@@ -114,10 +114,6 @@ public final class FirewallConfig {
}
return false ;
}
public static void setProvider(WallProvider provider) {
FirewallConfig.provider = provider;
}
public void setWallConfig(WallConfig wallConfig) {
this.wallConfig = wallConfig;

View File

@@ -64,13 +64,7 @@ public class UserPrivilegesConfig {
}
public TablePrivilege getTablePrivilege(String tableName) {
TablePrivilege tablePrivilege = tablePrivileges.get( tableName );
if ( tablePrivilege == null ) {
tablePrivilege = new TablePrivilege();
tablePrivilege.setName(tableName);
tablePrivilege.setDml(dml);
}
return tablePrivilege;
return tablePrivileges.get( tableName );
}
}

View File

@@ -39,7 +39,7 @@ public class DailyRotateLogStore {
this.suffix = suffix;
this.fileName = this.prefix + "." + suffix;
this.mode = "rw";
this.maxFileSize = rolateSize * 1024 * 1024;
this.maxFileSize = 1024L * 1024 * rolateSize;
this.nextCheckTime = System.currentTimeMillis() - 1;
this.cal = Calendar.getInstance();
this.dateFormat = new SimpleDateFormat("yyyy-MM-dd");

View File

@@ -64,7 +64,7 @@ public final class ConfFileHandler {
header.packetId = ++packetId;
fields[i] = PacketUtil.getField("DATA", Fields.FIELD_TYPE_VAR_STRING);
fields[i++].packetId = ++packetId;
fields[i].packetId = ++packetId;
eof.packetId = ++packetId;
}
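
The removed i++ on the last field assignment was a dead store: i is never read again afterwards, which FindBugs reports as a dead local store. The same cleanup repeats in the Show*/Select* response classes below. A minimal sketch of the pattern, with hypothetical names:

public class HeaderFields {
    public static void main(String[] args) {
        String[] fields = new String[2];
        byte packetId = 0;
        int i = 0;
        fields[i++] = "field-" + (++packetId); // increment still needed: i is read below
        fields[i] = "field-" + (++packetId);   // last use of i, so no i++ here
        System.out.println(fields[0] + ", " + fields[1]);
    }
}
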
@@ -297,20 +297,20 @@ public final class ConfFileHandler {
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm");
try {
int i = 1;
File[] file = new File(SystemConfig.getHomePath(), "conf")
.listFiles();
for (File f : file) {
if (f.isFile()) {
RowDataPacket row = new RowDataPacket(FIELD_COUNT);
row.add(StringUtil.encode(
(i++) + " : " + f.getName() + " time:"
+ df.format(new Date(f.lastModified())),
c.getCharset()));
row.packetId = ++packetId;
buffer = row.write(buffer, c,true);
File[] file = new File(SystemConfig.getHomePath(), "conf").listFiles();
if (file != null) {
for (File f : file) {
if (f.isFile()) {
RowDataPacket row = new RowDataPacket(FIELD_COUNT);
row.add(StringUtil.encode(
(i++) + " : " + f.getName() + " time:"
+ df.format(new Date(f.lastModified())),
c.getCharset()));
row.packetId = ++packetId;
buffer = row.write(buffer, c, true);
}
}
}
bufINf.buffer = buffer;
bufINf.packetId = packetId;
return bufINf;
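
The added if (file != null) guard matters because File.listFiles() returns null, not an empty array, when the path is not a directory or an I/O error occurs; a minimal standalone sketch of the same guard:

import java.io.File;

public class ListConfFiles {
    public static void main(String[] args) {
        // listFiles() may return null, so guard before iterating.
        File[] files = new File(System.getProperty("java.io.tmpdir")).listFiles();
        if (files != null) {
            for (File f : files) {
                if (f.isFile()) {
                    System.out.println(f.getName());
                }
            }
        }
    }
}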

View File

@@ -43,10 +43,8 @@ import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public final class ShowServerLog {
@@ -62,7 +60,7 @@ public final class ShowServerLog {
header.packetId = ++packetId;
fields[i] = PacketUtil.getField("LOG", Fields.FIELD_TYPE_VAR_STRING);
fields[i++].packetId = ++packetId;
fields[i].packetId = ++packetId;
eof.packetId = ++packetId;
}
@@ -191,13 +189,14 @@ public final class ShowServerLog {
private static PackageBufINf showLogSum(ManagerConnection c,
ByteBuffer buffer, byte packetId) {
PackageBufINf bufINf = new PackageBufINf();
File[] logFiles = new File(SystemConfig.getHomePath(), "logs")
.listFiles();
File[] logFiles = new File(SystemConfig.getHomePath(), "logs").listFiles();
StringBuilder fileNames = new StringBuilder();
for (File f : logFiles) {
if (f.isFile()) {
fileNames.append(" " );
fileNames.append(f.getName());
if (logFiles != null) {
for (File f : logFiles) {
if (f.isFile()) {
fileNames.append(" ");
fileNames.append(f.getName());
}
}
}
@@ -429,12 +428,6 @@ public final class ShowServerLog {
}
throw new Exception();
}
public static void main(String[] args){
Map x = getCondPair("log @@file = mysql.log limit = rowLimit key = 'keyWord' regex = regexStr");
return ;
}
}
class PackageBufINf {

View File

@@ -118,9 +118,6 @@ public final class ShowDataSource {
byte packetId = eof.packetId;
MycatConfig conf = MycatServer.getInstance().getConfig();
Map<String, List<PhysicalDatasource>> dataSources = new HashMap<String, List<PhysicalDatasource>>();
if (null != name) {
PhysicalDBNode dn = conf.getDataNodes().get(name);
for(PhysicalDatasource w:dn.getDbPool().getAllDataSources()){

View File

@@ -56,7 +56,7 @@ public final class ShowDatabase {
header.packetId = ++packetId;
fields[i] = PacketUtil.getField("DATABASE", Fields.FIELD_TYPE_VAR_STRING);
fields[i++].packetId = ++packetId;
fields[i].packetId = ++packetId;
eof.packetId = ++packetId;
}
@@ -78,7 +78,7 @@ public final class ShowDatabase {
// write rows
byte packetId = eof.packetId;
Map<String, SchemaConfig> schemas = MycatServer.getInstance().getConfig().getSchemas();
for (String name : new TreeSet<String>(schemas.keySet())) {
for (String name : new TreeSet<>(schemas.keySet())) {
RowDataPacket row = new RowDataPacket(FIELD_COUNT);
row.add(StringUtil.encode(name, c.getCharset()));
row.packetId = ++packetId;

View File

@@ -136,7 +136,7 @@ public class ShowSysLog {
while ((line=in.readLine()) != null && i<numLines) {
lines[end-i] = line;
i++;
}
}
numLines = start + i;
} catch (FileNotFoundException ex) {

View File

@@ -53,7 +53,7 @@ public final class ShowTime {
header.packetId = ++packetId;
fields[i] = PacketUtil.getField("TIMESTAMP", Fields.FIELD_TYPE_LONGLONG);
fields[i++].packetId = ++packetId;
fields[i].packetId = ++packetId;
eof.packetId = ++packetId;
}

View File

@@ -51,7 +51,7 @@ public final class ShowVersion {
header.packetId = ++packetId;
fields[i] = PacketUtil.getField("VERSION", Fields.FIELD_TYPE_STRING);
fields[i++].packetId = ++packetId;
fields[i].packetId = ++packetId;
eof.packetId = ++packetId;
}

View File

@@ -38,7 +38,6 @@ public class DiskRowWriter extends OutputStream {
private FileChannel channel = null;
private OutputStream bs = null;
private FileOutputStream fos = null;
private TimeTrackingOutputStream ts = null;
private SerializationStream objOut = null;
private boolean initialized = false;
private boolean hasBeenClosed = false;
@@ -106,9 +105,8 @@ public class DiskRowWriter extends OutputStream {
}
fos = new FileOutputStream(file,true);
ts = new TimeTrackingOutputStream(/**writeMetrics,*/ fos);
channel = fos.getChannel();
bs = new BufferedOutputStream(ts,bufferSize);
bs = new BufferedOutputStream(fos,bufferSize);
objOut = serializerInstance.serializeStream(bs);
initialized = true;
@@ -136,7 +134,6 @@ public class DiskRowWriter extends OutputStream {
channel = null;
bs = null;
fos = null;
ts = null;
objOut = null;
initialized = false;
hasBeenClosed = true;
@@ -199,18 +196,7 @@ public class DiskRowWriter extends OutputStream {
}
}
/**
* Writes a key-value pair.
*/
private void write(Object key, Object value) throws IOException {
if (!initialized) {
open();
}
objOut.writeKey(key);
objOut.writeValue(value);
recordWritten();
}
@Override
public void write(int b){
throw new UnsupportedOperationException();

View File

@@ -20,11 +20,4 @@ public abstract class SerializationStream{
public abstract void flush();
public abstract void close();
public <T> SerializationStream writeAll(Iterator<T> iter){
while (iter.hasNext()) {
writeObject(iter.next());
}
return this;
}
}

View File

@@ -1,50 +0,0 @@
package io.mycat.memory.unsafe.storage;
import java.io.IOException;
import java.io.OutputStream;
/**
* Intercepts write calls and tracks total time spent writing in order to update shuffle write
* metrics. Not thread safe.
*/
public final class TimeTrackingOutputStream extends OutputStream {
/**private final ShuffleWriteMetrics writeMetrics;*/
private final OutputStream outputStream;
public TimeTrackingOutputStream(OutputStream outputStream) {
this.outputStream = outputStream;
}
@Override
public void write(int b) throws IOException {
final long startTime = System.nanoTime();
outputStream.write(b);
}
@Override
public void write(byte[] b) throws IOException {
final long startTime = System.nanoTime();
outputStream.write(b);
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
final long startTime = System.nanoTime();
outputStream.write(b, off, len);
}
@Override
public void flush() throws IOException {
final long startTime = System.nanoTime();
outputStream.flush();
}
@Override
public void close() throws IOException {
final long startTime = System.nanoTime();
outputStream.close();
}
}

View File

@@ -25,8 +25,6 @@ import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
@@ -274,19 +272,19 @@ public class JavaUtils {
String unit = null;
if (size >= 2*TB) {
value = size/TB;
value = (double)size/TB;
unit = "TB";
} else if (size >= 2*GB) {
value = size/GB;
value = (double)size/GB;
unit = "GB";
} else if (size >= 2*MB) {
value = size/MB;
value = (double)size/MB;
unit = "MB";
} else if (size >= 2*KB) {
value = size/KB;
value = (double)size/KB;
unit = "KB";
} else {
value = size;
value = (double)size;
unit = "B";
}
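
The (double) casts matter because size/TB with two long operands is integer division, so the fractional part is dropped before the result is assigned to a double; a minimal sketch:

public class ByteUnits {
    public static void main(String[] args) {
        final long KB = 1L << 10;
        long size = 3 * KB + 512;            // 3.5 KB
        double truncated = size / KB;        // integer division first: 3.0
        double exact = (double) size / KB;   // 3.5
        System.out.println(truncated + " vs " + exact);
    }
}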

View File

@@ -23,232 +23,228 @@ import io.mycat.memory.unsafe.array.LongArray;
public class RadixSort {
/**
* Sorts a given array of longs using least-significant-digit radix sort. This routine assumes
* you have extra space at the end of the array at least equal to the number of records. The
* sort is destructive and may relocate the data positioned within the array.
*
* @param array array of long elements followed by at least that many empty slots.
* @param numRecords number of data records in the array.
* @param startByteIndex the first byte (in range [0, 7]) to sort each long by, counting from the
* least significant byte.
* @param endByteIndex the last byte (in range [0, 7]) to sort each long by, counting from the
* least significant byte. Must be greater than startByteIndex.
* @param desc whether this is a descending (binary-order) sort.
* @param signed whether this is a signed (two's complement) sort.
*
* @return The starting index of the sorted data within the given array. We return this instead
* of always copying the data back to position zero for efficiency.
*/
public static int sort(
LongArray array, int numRecords, int startByteIndex, int endByteIndex,
boolean desc, boolean signed) {
assert startByteIndex >= 0 : "startByteIndex (" + startByteIndex + ") should >= 0";
assert endByteIndex <= 7 : "endByteIndex (" + endByteIndex + ") should <= 7";
assert endByteIndex > startByteIndex;
assert numRecords * 2 <= array.size();
int inIndex = 0;
int outIndex = numRecords;
if (numRecords > 0) {
long[][] counts = getCounts(array, numRecords, startByteIndex, endByteIndex);
for (int i = startByteIndex; i <= endByteIndex; i++) {
if (counts[i] != null) {
sortAtByte(
array, numRecords, counts[i], i, inIndex, outIndex,
desc, signed && i == endByteIndex);
int tmp = inIndex;
inIndex = outIndex;
outIndex = tmp;
}
}
}
return inIndex;
}
/**
* Sorts a given array of longs using least-significant-digit radix sort. This routine assumes
* you have extra space at the end of the array at least equal to the number of records. The
* sort is destructive and may relocate the data positioned within the array.
*
* @param array array of long elements followed by at least that many empty slots.
* @param numRecords number of data records in the array.
* @param startByteIndex the first byte (in range [0, 7]) to sort each long by, counting from the
* least significant byte.
* @param endByteIndex the last byte (in range [0, 7]) to sort each long by, counting from the
* least significant byte. Must be greater than startByteIndex.
* @param desc whether this is a descending (binary-order) sort.
* @param signed whether this is a signed (two's complement) sort.
* @return The starting index of the sorted data within the given array. We return this instead
* of always copying the data back to position zero for efficiency.
*/
public static int sort(
LongArray array, int numRecords, int startByteIndex, int endByteIndex,
boolean desc, boolean signed) {
assert startByteIndex >= 0 : "startByteIndex (" + startByteIndex + ") should >= 0";
assert endByteIndex <= 7 : "endByteIndex (" + endByteIndex + ") should <= 7";
assert endByteIndex > startByteIndex;
assert numRecords * 2L <= array.size();
int inIndex = 0;
int outIndex = numRecords;
if (numRecords > 0) {
long[][] counts = getCounts(array, numRecords, startByteIndex, endByteIndex);
for (int i = startByteIndex; i <= endByteIndex; i++) {
if (counts[i] != null) {
sortAtByte(
array, numRecords, counts[i], i, inIndex, outIndex,
desc, signed && i == endByteIndex);
int tmp = inIndex;
inIndex = outIndex;
outIndex = tmp;
}
}
}
return inIndex;
}
/**
* Performs a partial sort by copying data into destination offsets for each byte value at the
* specified byte offset.
*
* @param array array to partially sort.
* @param numRecords number of data records in the array.
* @param counts counts for each byte value. This routine destructively modifies this array.
* @param byteIdx the byte in a long to sort at, counting from the least significant byte.
* @param inIndex the starting index in the array where input data is located.
* @param outIndex the starting index where sorted output data should be written.
* @param desc whether this is a descending (binary-order) sort.
* @param signed whether this is a signed (two's complement) sort (only applies to last byte).
*/
private static void sortAtByte(
LongArray array, int numRecords, long[] counts, int byteIdx, int inIndex, int outIndex,
boolean desc, boolean signed) {
assert counts.length == 256;
long[] offsets = transformCountsToOffsets(
counts, numRecords, array.getBaseOffset() + outIndex * 8, 8, desc, signed);
Object baseObject = array.getBaseObject();
long baseOffset = array.getBaseOffset() + inIndex * 8;
long maxOffset = baseOffset + numRecords * 8;
for (long offset = baseOffset; offset < maxOffset; offset += 8) {
long value = Platform.getLong(baseObject, offset);
int bucket = (int)((value >>> (byteIdx * 8)) & 0xff);
Platform.putLong(baseObject, offsets[bucket], value);
offsets[bucket] += 8;
}
}
/**
* Performs a partial sort by copying data into destination offsets for each byte value at the
* specified byte offset.
*
* @param array array to partially sort.
* @param numRecords number of data records in the array.
* @param counts counts for each byte value. This routine destructively modifies this array.
* @param byteIdx the byte in a long to sort at, counting from the least significant byte.
* @param inIndex the starting index in the array where input data is located.
* @param outIndex the starting index where sorted output data should be written.
* @param desc whether this is a descending (binary-order) sort.
* @param signed whether this is a signed (two's complement) sort (only applies to last byte).
*/
private static void sortAtByte(
LongArray array, int numRecords, long[] counts, int byteIdx, int inIndex, int outIndex,
boolean desc, boolean signed) {
assert counts.length == 256;
long[] offsets = transformCountsToOffsets(
counts, numRecords, array.getBaseOffset() + outIndex * 8L, 8, desc, signed);
Object baseObject = array.getBaseObject();
long baseOffset = array.getBaseOffset() + inIndex * 8L;
long maxOffset = baseOffset + numRecords * 8L;
for (long offset = baseOffset; offset < maxOffset; offset += 8) {
long value = Platform.getLong(baseObject, offset);
int bucket = (int) ((value >>> (byteIdx * 8)) & 0xff);
Platform.putLong(baseObject, offsets[bucket], value);
offsets[bucket] += 8;
}
}
/**
* Computes a value histogram for each byte in the given array.
*
* @param array array to count records in.
* @param numRecords number of data records in the array.
* @param startByteIndex the first byte to compute counts for (the prior are skipped).
* @param endByteIndex the last byte to compute counts for.
*
* @return an array of eight 256-byte count arrays, one for each byte starting from the least
* significant byte. If the byte does not need sorting the array will be null.
*/
private static long[][] getCounts(
LongArray array, int numRecords, int startByteIndex, int endByteIndex) {
long[][] counts = new long[8][];
// Optimization: do a fast pre-pass to determine which byte indices we can skip for sorting.
// If all the byte values at a particular index are the same we don't need to count it.
long bitwiseMax = 0;
long bitwiseMin = -1L;
long maxOffset = array.getBaseOffset() + numRecords * 8;
Object baseObject = array.getBaseObject();
for (long offset = array.getBaseOffset(); offset < maxOffset; offset += 8) {
long value = Platform.getLong(baseObject, offset);
bitwiseMax |= value;
bitwiseMin &= value;
}
long bitsChanged = bitwiseMin ^ bitwiseMax;
// Compute counts for each byte index.
for (int i = startByteIndex; i <= endByteIndex; i++) {
if (((bitsChanged >>> (i * 8)) & 0xff) != 0) {
counts[i] = new long[256];
// TODO(ekl) consider computing all the counts in one pass.
for (long offset = array.getBaseOffset(); offset < maxOffset; offset += 8) {
counts[i][(int)((Platform.getLong(baseObject, offset) >>> (i * 8)) & 0xff)]++;
}
}
}
return counts;
}
/**
* Computes a value histogram for each byte in the given array.
*
* @param array array to count records in.
* @param numRecords number of data records in the array.
* @param startByteIndex the first byte to compute counts for (the prior are skipped).
* @param endByteIndex the last byte to compute counts for.
* @return an array of eight 256-byte count arrays, one for each byte starting from the least
* significant byte. If the byte does not need sorting the array will be null.
*/
private static long[][] getCounts(LongArray array, int numRecords, int startByteIndex, int endByteIndex) {
long[][] counts = new long[8][];
// Optimization: do a fast pre-pass to determine which byte indices we can skip for sorting.
// If all the byte values at a particular index are the same we don't need to count it.
long bitwiseMax = 0;
long bitwiseMin = -1L;
long maxOffset = array.getBaseOffset() + 8L * numRecords;
Object baseObject = array.getBaseObject();
for (long offset = array.getBaseOffset(); offset < maxOffset; offset += 8) {
long value = Platform.getLong(baseObject, offset);
bitwiseMax |= value;
bitwiseMin &= value;
}
long bitsChanged = bitwiseMin ^ bitwiseMax;
// Compute counts for each byte index.
for (int i = startByteIndex; i <= endByteIndex; i++) {
if (((bitsChanged >>> (i * 8)) & 0xff) != 0) {
counts[i] = new long[256];
// TODO(ekl) consider computing all the counts in one pass.
for (long offset = array.getBaseOffset(); offset < maxOffset; offset += 8) {
counts[i][(int) ((Platform.getLong(baseObject, offset) >>> (i * 8)) & 0xff)]++;
}
}
}
return counts;
}
/**
* Transforms counts into the proper unsafe output offsets for the sort type.
*
* @param counts counts for each byte value. This routine destructively modifies this array.
* @param numRecords number of data records in the original data array.
* @param outputOffset output offset in bytes from the base array object.
* @param bytesPerRecord size of each record (8 for plain sort, 16 for key-prefix sort).
* @param desc whether this is a descending (binary-order) sort.
* @param signed whether this is a signed (two's complement) sort.
*
* @return the input counts array.
*/
private static long[] transformCountsToOffsets(
long[] counts, int numRecords, long outputOffset, int bytesPerRecord,
boolean desc, boolean signed) {
assert counts.length == 256;
int start = signed ? 128 : 0; // output the negative records first (values 129-255).
if (desc) {
int pos = numRecords;
for (int i = start; i < start + 256; i++) {
pos -= counts[i & 0xff];
counts[i & 0xff] = outputOffset + pos * bytesPerRecord;
}
} else {
int pos = 0;
for (int i = start; i < start + 256; i++) {
long tmp = counts[i & 0xff];
counts[i & 0xff] = outputOffset + pos * bytesPerRecord;
pos += tmp;
}
}
return counts;
}
/**
* Transforms counts into the proper unsafe output offsets for the sort type.
*
* @param counts counts for each byte value. This routine destructively modifies this array.
* @param numRecords number of data records in the original data array.
* @param outputOffset output offset in bytes from the base array object.
* @param bytesPerRecord size of each record (8 for plain sort, 16 for key-prefix sort).
* @param desc whether this is a descending (binary-order) sort.
* @param signed whether this is a signed (two's complement) sort.
* @return the input counts array.
*/
private static long[] transformCountsToOffsets(
long[] counts, int numRecords, long outputOffset, int bytesPerRecord,
boolean desc, boolean signed) {
assert counts.length == 256;
int start = signed ? 128 : 0; // output the negative records first (values 129-255).
if (desc) {
long pos = numRecords;
for (int i = start; i < start + 256; i++) {
pos -= counts[i & 0xff];
counts[i & 0xff] = outputOffset + pos * bytesPerRecord;
}
} else {
long pos = 0;
for (int i = start; i < start + 256; i++) {
long tmp = counts[i & 0xff];
counts[i & 0xff] = outputOffset + pos * bytesPerRecord;
pos += tmp;
}
}
return counts;
}
/**
* Specialization of sort() for key-prefix arrays. In this type of array, each record consists
* of two longs, only the second of which is sorted on.
*/
public static int sortKeyPrefixArray(
LongArray array,
int numRecords,
int startByteIndex,
int endByteIndex,
boolean desc,
boolean signed) {
assert startByteIndex >= 0 : "startByteIndex (" + startByteIndex + ") should >= 0";
assert endByteIndex <= 7 : "endByteIndex (" + endByteIndex + ") should <= 7";
assert endByteIndex > startByteIndex;
assert numRecords * 4 <= array.size();
int inIndex = 0;
int outIndex = numRecords * 2;
if (numRecords > 0) {
long[][] counts = getKeyPrefixArrayCounts(array, numRecords, startByteIndex, endByteIndex);
for (int i = startByteIndex; i <= endByteIndex; i++) {
if (counts[i] != null) {
sortKeyPrefixArrayAtByte(
array, numRecords, counts[i], i, inIndex, outIndex,
desc, signed && i == endByteIndex);
int tmp = inIndex;
inIndex = outIndex;
outIndex = tmp;
}
}
}
return inIndex;
}
/**
* Specialization of sort() for key-prefix arrays. In this type of array, each record consists
* of two longs, only the second of which is sorted on.
*/
public static int sortKeyPrefixArray(
LongArray array,
int numRecords,
int startByteIndex,
int endByteIndex,
boolean desc,
boolean signed) {
assert startByteIndex >= 0 : "startByteIndex (" + startByteIndex + ") should >= 0";
assert endByteIndex <= 7 : "endByteIndex (" + endByteIndex + ") should <= 7";
assert endByteIndex > startByteIndex;
assert numRecords * 4L <= array.size();
int inIndex = 0;
int outIndex = numRecords * 2;
if (numRecords > 0) {
long[][] counts = getKeyPrefixArrayCounts(array, numRecords, startByteIndex, endByteIndex);
for (int i = startByteIndex; i <= endByteIndex; i++) {
if (counts[i] != null) {
sortKeyPrefixArrayAtByte(
array, numRecords, counts[i], i, inIndex, outIndex,
desc, signed && i == endByteIndex);
int tmp = inIndex;
inIndex = outIndex;
outIndex = tmp;
}
}
}
return inIndex;
}
/**
* Specialization of getCounts() for key-prefix arrays. We could probably combine this with
* getCounts with some added parameters but that seems to hurt in benchmarks.
*/
private static long[][] getKeyPrefixArrayCounts(
LongArray array, int numRecords, int startByteIndex, int endByteIndex) {
long[][] counts = new long[8][];
long bitwiseMax = 0;
long bitwiseMin = -1L;
long limit = array.getBaseOffset() + numRecords * 16;
Object baseObject = array.getBaseObject();
for (long offset = array.getBaseOffset(); offset < limit; offset += 16) {
long value = Platform.getLong(baseObject, offset + 8);
bitwiseMax |= value;
bitwiseMin &= value;
}
long bitsChanged = bitwiseMin ^ bitwiseMax;
for (int i = startByteIndex; i <= endByteIndex; i++) {
if (((bitsChanged >>> (i * 8)) & 0xff) != 0) {
counts[i] = new long[256];
for (long offset = array.getBaseOffset(); offset < limit; offset += 16) {
counts[i][(int)((Platform.getLong(baseObject, offset + 8) >>> (i * 8)) & 0xff)]++;
}
}
}
return counts;
}
/**
* Specialization of getCounts() for key-prefix arrays. We could probably combine this with
* getCounts with some added parameters but that seems to hurt in benchmarks.
*/
private static long[][] getKeyPrefixArrayCounts(
LongArray array, int numRecords, int startByteIndex, int endByteIndex) {
long[][] counts = new long[8][];
long bitwiseMax = 0;
long bitwiseMin = -1L;
long limit = array.getBaseOffset() + numRecords * 16L;
Object baseObject = array.getBaseObject();
for (long offset = array.getBaseOffset(); offset < limit; offset += 16) {
long value = Platform.getLong(baseObject, offset + 8);
bitwiseMax |= value;
bitwiseMin &= value;
}
long bitsChanged = bitwiseMin ^ bitwiseMax;
for (int i = startByteIndex; i <= endByteIndex; i++) {
if (((bitsChanged >>> (i * 8)) & 0xff) != 0) {
counts[i] = new long[256];
for (long offset = array.getBaseOffset(); offset < limit; offset += 16) {
counts[i][(int) ((Platform.getLong(baseObject, offset + 8) >>> (i * 8)) & 0xff)]++;
}
}
}
return counts;
}
/**
* Specialization of sortAtByte() for key-prefix arrays.
*/
private static void sortKeyPrefixArrayAtByte(
LongArray array, int numRecords, long[] counts, int byteIdx, int inIndex, int outIndex,
boolean desc, boolean signed) {
assert counts.length == 256;
long[] offsets = transformCountsToOffsets(
counts, numRecords, array.getBaseOffset() + outIndex * 8, 16, desc, signed);
Object baseObject = array.getBaseObject();
long baseOffset = array.getBaseOffset() + inIndex * 8;
long maxOffset = baseOffset + numRecords * 16;
for (long offset = baseOffset; offset < maxOffset; offset += 16) {
long key = Platform.getLong(baseObject, offset);
long prefix = Platform.getLong(baseObject, offset + 8);
int bucket = (int)((prefix >>> (byteIdx * 8)) & 0xff);
long dest = offsets[bucket];
Platform.putLong(baseObject, dest, key);
Platform.putLong(baseObject, dest + 8, prefix);
offsets[bucket] += 16;
}
}
/**
* Specialization of sortAtByte() for key-prefix arrays.
*/
private static void sortKeyPrefixArrayAtByte(
LongArray array, int numRecords, long[] counts, int byteIdx, int inIndex, int outIndex,
boolean desc, boolean signed) {
assert counts.length == 256;
long[] offsets = transformCountsToOffsets(
counts, numRecords, array.getBaseOffset() + outIndex * 8L, 16, desc, signed);
Object baseObject = array.getBaseObject();
long baseOffset = array.getBaseOffset() + inIndex * 8L;
long maxOffset = baseOffset + numRecords * 16L;
for (long offset = baseOffset; offset < maxOffset; offset += 16) {
long key = Platform.getLong(baseObject, offset);
long prefix = Platform.getLong(baseObject, offset + 8);
int bucket = (int) ((prefix >>> (byteIdx * 8)) & 0xff);
long dest = offsets[bucket];
Platform.putLong(baseObject, dest, key);
Platform.putLong(baseObject, dest + 8, prefix);
offsets[bucket] += 16;
}
}
}

View File

@@ -75,10 +75,10 @@ public final class UnsafeSortDataFormat
public void copyRange(LongArray src, int srcPos, LongArray dst, int dstPos, int length) {
Platform.copyMemory(
src.getBaseObject(),
src.getBaseOffset() + srcPos * 16,
src.getBaseOffset() + srcPos * 16L,
dst.getBaseObject(),
dst.getBaseOffset() + dstPos * 16,
length * 16);
dst.getBaseOffset() + dstPos * 16L,
length * 16L);
}
@Override

View File

@@ -23,7 +23,7 @@ public class MetaHelper {
public static enum INDEX_TYPE{
PRI,UNI,MUL
}
public static String PRIMARY ="PRIMARY";
public static final String PRIMARY ="PRIMARY";
public static TableMeta initTableMeta(String table, SQLCreateTableStatement createStment, long timeStamp) {
TableMeta.Builder tmBuilder = TableMeta.newBuilder();
tmBuilder.setTableName(table);

View File

@@ -350,7 +350,7 @@ public class MySQLcom {
public static String getFullString(String charsetName, byte[] buff) throws UnsupportedEncodingException {
if (buff == null || charsetName == null)
return null;
if ((charsetName != null) && (Charset.isSupported(charsetName))) {
if (Charset.isSupported(charsetName)) {
return new String(buff, charsetName);
} else {
String msg = "unsupported character set :" + charsetName;

View File

@@ -352,50 +352,44 @@ public abstract class Item {
* @retval >0 In error.
*/
public TypeConversionStatus saveInField(Field field, boolean noConversions) {
TypeConversionStatus error = null;
try {
if (resultType() == ItemResult.STRING_RESULT) {
String result = valStr();
if (nullValue) {
field.setPtr(null);
error = TypeConversionStatus.TYPE_OK;
return error;
return TypeConversionStatus.TYPE_OK;
}
field.setPtr(result.getBytes(charset()));
} else if (resultType() == ItemResult.REAL_RESULT && field.resultType() == ItemResult.STRING_RESULT) {
BigDecimal nr = valReal();
if (nullValue) {
field.setPtr(null);
error = TypeConversionStatus.TYPE_OK;
return error;
return TypeConversionStatus.TYPE_OK;
}
field.setPtr(nr.toString().getBytes());
} else if (resultType() == ItemResult.REAL_RESULT) {
BigDecimal nr = valReal();
if (nullValue) {
field.setPtr(null);
error = TypeConversionStatus.TYPE_OK;
return error;
return TypeConversionStatus.TYPE_OK;
}
field.setPtr(nr.toString().getBytes());
} else if (resultType() == ItemResult.DECIMAL_RESULT) {
BigDecimal value = valDecimal();
if (nullValue) {
field.setPtr(null);
error = TypeConversionStatus.TYPE_OK;
return error;
return TypeConversionStatus.TYPE_OK;
}
field.setPtr(value.toString().getBytes());
} else {
BigInteger nr = valInt();
if (nullValue) {
field.setPtr(null);
error = TypeConversionStatus.TYPE_OK;
return error;
return TypeConversionStatus.TYPE_OK;
}
field.setPtr(nr.toString().getBytes());
}
return error != null ? error : TypeConversionStatus.TYPE_ERR_BAD_VALUE;
return TypeConversionStatus.TYPE_ERR_BAD_VALUE;
} catch (Exception e) {
return TypeConversionStatus.TYPE_ERR_BAD_VALUE;
}

View File

@@ -45,7 +45,7 @@ public class ItemCharTypecast extends ItemStrFunc {
return null;
}
nullValue = false;
if (cast_length != -1 && cast_length < res.length()) {
if (cast_length < res.length()) {
res = res.substring(0, cast_length);
}
if(charSetName != null){

View File

@@ -82,7 +82,7 @@ public class AggregatorDistinct extends Aggregator {
item_sum.clear();
if (distinctRows != null) {
distinctRows.done();
if (distinctRows != null && !endup_done) {
if (!endup_done) {
use_distinct_values = true;
RowDataPacket row = null;
while ((row = distinctRows.next()) != null) {

View File

@@ -54,7 +54,7 @@ public class TableNode extends PlanNode {
}
this.referedTableNodes.add(this);
this.tableMeta = MycatServer.getInstance().getTmManager().getSyncTableMeta(this.schema, this.tableName);
boolean isGlobaled = tableConfig != null && (tableConfig.getTableType() == TableTypeEnum.TYPE_GLOBAL_TABLE);
boolean isGlobaled = (tableConfig.getTableType() == TableTypeEnum.TYPE_GLOBAL_TABLE);
if (!isGlobaled) {
this.unGlobalTableCount = 1;
}

View File

@@ -407,7 +407,7 @@ public class ERJoinChooser {
}
for (PlanNode child : jn.getChildren()) {
if (!isUnit(child) & child.type().equals(PlanNodeType.JOIN)) {
if ((!isUnit(child)) && (child.type().equals(PlanNodeType.JOIN))) {
// a join b on a.id=b.id and a.id+b.id=10 join c on
// a.id=c.id, pull a.id+b.id up to this level
JoinNode jnChild = (JoinNode) child;

View File

@@ -490,6 +490,7 @@ public class MySQLItemVisitor extends MySqlASTVisitorAdapter {
break;
case "STDDEV":
item = new ItemSumStd(args, 0, false, null);
break;
}
}
@Override
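
The added break keeps the "STDDEV" case from falling through into whatever follows it (the SF_SWITCH_FALLTHROUGH pattern excluded elsewhere in the filter); a minimal sketch, with hypothetical names:

public class SwitchFallThrough {
    static String pick(String func) {
        String item;
        switch (func) {
            case "STD":
                item = "std";
                break;
            case "STDDEV":
                item = "stddev";
                break; // previously missing: control fell through to the next branch
            default:
                item = "unsupported";
        }
        return item;
    }

    public static void main(String[] args) {
        System.out.println(pick("STDDEV")); // prints stddev
    }
}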

View File

@@ -31,7 +31,7 @@ public abstract class AbstractRouteStrategy implements RouteStrategy {
}
if (schema == null) {
rrs = routeNormalSqlWithAST(schema, origSQL, rrs, charset, cachePool, sc);
rrs = routeNormalSqlWithAST(null, origSQL, rrs, charset, cachePool, sc);
} else {
if(sqlType==ServerParse.SHOW){
rrs.setStatement(origSQL);

View File

@@ -107,7 +107,6 @@ public final class ManagerParseHeartbeat {
if( (c7 == 'N' || c7 == 'n') && (c8 == 'A' || c8 == 'a') && (c9 == 'M' || c9 == 'm')
&& (c10 == 'E' || c10 == 'e')){
offset = ManagerParseShow.trim(++offset,s);
char x = s.charAt(offset);
if(s.charAt(offset) == '='){
offset = ManagerParseShow.trim(++offset,s);
String name = s.substring(offset).trim();

View File

@@ -30,7 +30,7 @@ public class IncrSequenceMySQLHandler implements SequenceHandler {
private static final String SEQUENCE_DB_PROPS = "sequence_db_conf.properties";
protected static final String errSeqResult = "-999999999,null";
protected static Map<String, String> latestErrors = new ConcurrentHashMap<String, String>();
protected static final Map<String, String> latestErrors = new ConcurrentHashMap<String, String>();
private final FetchMySQLSequnceHandler mysqlSeqFetcher = new FetchMySQLSequnceHandler();
private static final IncrSequenceMySQLHandler instance = new IncrSequenceMySQLHandler();

View File

@@ -11,7 +11,7 @@ public class IncrSequenceTimeHandler implements SequenceHandler {
private static final String SEQUENCE_TIME_PROPS = "sequence_time_conf.properties";
private static final IncrSequenceTimeHandler instance = new IncrSequenceTimeHandler();
private static IdWorker workey = new IdWorker(1,1);
private IdWorker workey;
public static IncrSequenceTimeHandler getInstance() {
@@ -134,10 +134,4 @@ public class IncrSequenceTimeHandler implements SequenceHandler {
}
}
public static void main(String[] args) {
int i;
for (i = 0; i < 10; i++) {
System.out.println(workey.nextId());
}
}
}

View File

@@ -63,8 +63,7 @@ public class RouterUtil {
return stmt;
}
int strtPos = 0;
int index = 0;
boolean flag = false;
boolean flag;
int firstE = forCmpStmt.indexOf("'");
int endE = forCmpStmt.lastIndexOf("'");
StringBuilder result = new StringBuilder();
@@ -80,19 +79,17 @@ public class RouterUtil {
flag = false;
}
if (flag) {
index = indx2;
result.append(stmt.substring(strtPos, index));
strtPos = index + maySchema2.length();
if (index > firstE && index < endE && countChar(stmt, index) % 2 == 1) {
result.append(stmt.substring(index, strtPos));
result.append(stmt.substring(strtPos, indx2));
strtPos = indx2 + maySchema2.length();
if (indx2 > firstE && indx2 < endE && countChar(stmt, indx2) % 2 != 0) {
result.append(stmt.substring(indx2, strtPos));
}
indx2 = forCmpStmt.indexOf(maySchema2, strtPos);
} else {
index = indx1;
result.append(stmt.substring(strtPos, index));
strtPos = index + maySchema1.length();
if (index > firstE && index < endE && countChar(stmt, index) % 2 == 1) {
result.append(stmt.substring(index, strtPos));
result.append(stmt.substring(strtPos, indx1));
strtPos = indx1 + maySchema1.length();
if (indx1 > firstE && indx1 < endE && countChar(stmt, indx1) % 2 !=0) {
result.append(stmt.substring(indx1, strtPos));
}
indx1 = forCmpStmt.indexOf(maySchema1, strtPos);
}

View File

@@ -900,17 +900,15 @@ public final class ServerLoadDataInfileHandler implements LoadDataInfileHandler
return;
}
File[] fileList = fileDirToDel.listFiles();
for (int i = 0; i < fileList.length; i++)
{
File file = fileList[i];
if (file.isFile()&&file.exists())
{
boolean delete = file.delete();
} else if (file.isDirectory())
{
deleteFile(file.getAbsolutePath());
file.delete();
if (fileList != null) {
for (int i = 0; i < fileList.length; i++) {
File file = fileList[i];
if (file.isFile() && file.exists()) {
boolean delete = file.delete();
} else if (file.isDirectory()) {
deleteFile(file.getAbsolutePath());
file.delete();
}
}
}
fileDirToDel.delete();

View File

@@ -26,7 +26,7 @@ package io.mycat.server.parser;
import io.mycat.route.parser.util.ParseUtil;
import io.mycat.server.response.ShowColumns;
import io.mycat.server.response.ShowIndex;
import io.mycat.server.util.ShowCreateStmtInfo;
import io.mycat.server.response.ShowCreateStmtInfo;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

View File

@@ -47,7 +47,7 @@ public class SelectDatabase {
byte packetId = 0;
header.packetId = ++packetId;
fields[i] = PacketUtil.getField("DATABASE()", Fields.FIELD_TYPE_VAR_STRING);
fields[i++].packetId = ++packetId;
fields[i].packetId = ++packetId;
eof.packetId = ++packetId;
}

View File

@@ -49,7 +49,7 @@ public class SelectTxReadOnly {
byte packetId = 0;
header.packetId = ++packetId;
fields[i] = PacketUtil.getField("@@session.tx_read_only", Fields.FIELD_TYPE_LONG);
fields[i++].packetId = ++packetId;
fields[i].packetId = ++packetId;
eof.packetId = ++packetId;
}

View File

@@ -51,7 +51,7 @@ public class SelectUser {
byte packetId = 0;
header.packetId = ++packetId;
fields[i] = PacketUtil.getField("USER()", Fields.FIELD_TYPE_VAR_STRING);
fields[i++].packetId = ++packetId;
fields[i].packetId = ++packetId;
eof.packetId = ++packetId;
}

View File

@@ -48,7 +48,7 @@ public class SelectVersion {
byte packetId = 0;
header.packetId = ++packetId;
fields[i] = PacketUtil.getField("VERSION()", Fields.FIELD_TYPE_VAR_STRING);
fields[i++].packetId = ++packetId;
fields[i].packetId = ++packetId;
eof.packetId = ++packetId;
}

View File

@@ -48,7 +48,7 @@ public class SelectVersionComment {
byte packetId = 0;
header.packetId = ++packetId;
fields[i] = PacketUtil.getField("@@VERSION_COMMENT", Fields.FIELD_TYPE_VAR_STRING);
fields[i++].packetId = ++packetId;
fields[i].packetId = ++packetId;
eof.packetId = ++packetId;
}

View File

@@ -48,7 +48,7 @@ public class SessionIncrement {
byte packetId = 0;
header.packetId = ++packetId;
fields[i] = PacketUtil.getField("@@session.auto_increment_increment", Fields.FIELD_TYPE_LONG);
fields[i++].packetId = ++packetId;
fields[i].packetId = ++packetId;
eof.packetId = ++packetId;
}

View File

@@ -49,7 +49,7 @@ public class SessionIsolation {
byte packetId = 0;
header.packetId = ++packetId;
fields[i] = PacketUtil.getField("@@session.tx_isolation", Fields.FIELD_TYPE_STRING);
fields[i++].packetId = ++packetId;
fields[i].packetId = ++packetId;
eof.packetId = ++packetId;
}

View File

@@ -1,4 +1,4 @@
package io.mycat.server.util;
package io.mycat.server.response;
import com.alibaba.druid.sql.ast.SQLExpr;
import com.alibaba.druid.sql.ast.SQLStatement;
@@ -19,7 +19,7 @@ public class ShowCreateStmtInfo {
"(\\s+(from|in)\\s+([a-zA-Z_0-9]+))?" +
"((\\s+(like)\\s+'((. *)*)'\\s*)|(\\s+(where)\\s+((. *)*)\\s*))?" +
"\\s*$";
public static Pattern pattern = Pattern.compile(TABLE_PAT, Pattern.CASE_INSENSITIVE);
public static final Pattern pattern = Pattern.compile(TABLE_PAT, Pattern.CASE_INSENSITIVE);
private final boolean isFull;
private final String schema;
private final String cond;

View File

@@ -54,7 +54,7 @@ public class ShowDatabases {
byte packetId = 0;
header.packetId = ++packetId;
fields[i] = PacketUtil.getField("DATABASE", Fields.FIELD_TYPE_VAR_STRING);
fields[i++].packetId = ++packetId;
fields[i].packetId = ++packetId;
eof.packetId = ++packetId;
}

View File

@@ -23,7 +23,7 @@ public class ShowIndex {
"(\\s+(from|in)\\s+([a-zA-Z_0-9]+))?" +
"(\\s+(where)\\s+((. *)*)\\s*)?" +
"\\s*$";
public static Pattern pattern = Pattern.compile(INDEX_PAT, Pattern.CASE_INSENSITIVE);
public static final Pattern pattern = Pattern.compile(INDEX_PAT, Pattern.CASE_INSENSITIVE);
public static void response(ServerConnection c, String stmt){
try {
String table;

View File

@@ -24,7 +24,6 @@ import io.mycat.route.RouteResultset;
import io.mycat.route.util.RouterUtil;
import io.mycat.server.ServerConnection;
import io.mycat.server.parser.ServerParse;
import io.mycat.server.util.ShowCreateStmtInfo;
import io.mycat.util.StringUtil;
import java.nio.ByteBuffer;

View File

@@ -114,7 +114,7 @@ public class KVPathUtil {
return getSequencesPath() + SEPARATOR + "leader" ;
}
//depth:3,sequences path:base_path/sequences/common
public static String SEQUENCE_COMMON = "common";
public static final String SEQUENCE_COMMON = "common";
public static String getSequencesCommonPath() {
return getSequencesPath() + SEPARATOR + SEQUENCE_COMMON + SEPARATOR;
}

View File

@@ -657,45 +657,6 @@ public final class MysqlDefs {
mysqlToJavaType(FIELD_TYPE_GEOMETRY)));
}
static final void appendJdbcTypeMappingQuery(StringBuffer buf,
String mysqlTypeColumnName) {
buf.append("CASE ");
Map<String, Integer> typesMap = new HashMap<String, Integer>();
typesMap.putAll(mysqlToJdbcTypesMap);
typesMap.put("BINARY", Integer.valueOf(Types.BINARY));
typesMap.put("VARBINARY", Integer.valueOf(Types.VARBINARY));
Iterator<String> mysqlTypes = typesMap.keySet().iterator();
for(Map.Entry<String, Integer> entry:typesMap.entrySet()) {
String mysqlTypeName = entry.getKey();
buf.append(" WHEN ");
buf.append(mysqlTypeColumnName);
buf.append("='");
buf.append(mysqlTypeName);
buf.append("' THEN ");
buf.append(entry.getValue());
if (mysqlTypeName.equalsIgnoreCase("DOUBLE")
|| mysqlTypeName.equalsIgnoreCase("FLOAT")
|| mysqlTypeName.equalsIgnoreCase("DECIMAL")
|| mysqlTypeName.equalsIgnoreCase("NUMERIC")) {
buf.append(" WHEN ");
buf.append(mysqlTypeColumnName);
buf.append("='");
buf.append(mysqlTypeName);
buf.append(" unsigned' THEN ");
buf.append(entry.getValue());
}
}
buf.append(" ELSE ");
buf.append(Types.OTHER);
buf.append(" END ");
}
public static final String SQL_STATE_BASE_TABLE_NOT_FOUND = "S0002"; //$NON-NLS-1$
public static final String SQL_STATE_BASE_TABLE_OR_VIEW_ALREADY_EXISTS = "S0001"; //$NON-NLS-1$

View File

@@ -1,5 +1,5 @@
BuildTime 2017-08-10 16:37:30
GitVersion 4d67c810942844ea92fd1db5d37838b242921754
BuildTime 2017-08-13 05:01:29
GitVersion 167ff28fb4c8332098acb199118939a916d10641
MavenVersion 2.17.08.0-dev
GitUrl https://github.com/MyCATApache/Mycat-Server.git
MyCatSite http://www.mycat.org.cn