mirror of
https://github.com/actiontech/dble.git
findbugs:change for BAD_PRACTICE
findbugs-exclude.xml (new file, 27 lines)
@@ -0,0 +1,27 @@
+<FindBugsFilter>
+
+    <Match>
+        <Class name="io.mycat.meta.protocol.StructureMeta" />
+    </Match>
+    <Match>
+        <Class name="io.mycat.meta.protocol.StructureMeta$ColumnMeta" />
+    </Match>
+    <Match>
+        <Class name="io.mycat.meta.protocol.StructureMeta$IndexMeta" />
+    </Match>
+    <Match>
+        <Class name="io.mycat.meta.protocol.StructureMeta$TableMeta" />
+    </Match>
+    <Match>
+        <Bug category="MALICIOUS_CODE,I18N,MT_CORRECTNESS,STYLE,PERFORMANCE,CORRECTNESS" />
+    </Match>
+    <Match>
+        <Bug category="BAD_PRACTICE" />
+        <Bug pattern="RR_NOT_CHECKED,RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
+    </Match>
+    <!--
+    MALICIOUS_CODE set ARRAT
+    I18N STRING DECODE
+    -->
+
+</FindBugsFilter>
pom.xml (14 changed lines)
@@ -673,6 +673,18 @@
         </plugins>
     </build>

+    <reporting>
+        <plugins>
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>findbugs-maven-plugin</artifactId>
+                <configuration>
+                    <excludeFilterFile>findbugs-exclude.xml</excludeFilterFile>
+                    <findbugsXmlOutput>true</findbugsXmlOutput>
+                    <xmlOutput>true</xmlOutput>
+                </configuration>
+            </plugin>
+        </plugins>
+    </reporting>
+
 </project>
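Note: a FindBugs exclude filter suppresses any bug matching one of its Match clauses, and clauses inside a single Match are ANDed. So the last Match above only suppresses the RR_NOT_CHECKED and RV_RETURN_VALUE_IGNORED_BAD_PRACTICE patterns within BAD_PRACTICE, leaving the rest of that category reported; the reporting section wires the filter into findbugs-maven-plugin via excludeFilterFile. A minimal Java sketch (class name and path are hypothetical) of the kind of code RV_RETURN_VALUE_IGNORED_BAD_PRACTICE flags, and the checked form this commit adopts elsewhere:

    import java.io.File;
    import java.io.IOException;

    public class MkdirExample {
        static void prepare(File dir) throws IOException {
            dir.mkdirs(); // return value silently ignored: flagged by FindBugs

            // checked variant, as in the MycatServer hunk below
            if (!dir.exists() && !dir.mkdirs()) {
                throw new IOException("mkdir " + dir.getAbsolutePath() + " error");
            }
        }
    }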
@@ -339,7 +339,7 @@ public class MycatServer {
         totalNetWorkBufferSize = bufferPoolPageSize * bufferPoolPageNumber;
         if(totalNetWorkBufferSize> Platform.getMaxDirectMemory()){
             LOGGER .error("Direct BufferPool size lager than MaxDirectMemory");
-            java.lang.System.exit(-1);
+            throw new IOException("Direct BufferPool size lager than MaxDirectMemory");
         }
         bufferPool = new DirectByteBufferPool(bufferPoolPageSize,bufferPoolChunkSize, bufferPoolPageNumber);

@@ -644,7 +644,9 @@ public class MycatServer {

         File parent = file.getParentFile();
         if (parent != null && !parent.exists()) {
-            parent.mkdirs();
+            if(!parent.mkdirs()) {
+                throw new IOException("mkdir " + parent.getAbsolutePath() + " error");
+            }
         }

         fileOut = new FileOutputStream(file);
@@ -1,16 +1,16 @@
 package io.mycat.backend.mysql.nio.handler.query;

-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.log4j.Logger;
-
 import io.mycat.backend.BackendConnection;
 import io.mycat.net.mysql.FieldPacket;
 import io.mycat.server.NonBlockingSession;
-import io.mycat.util.ConcurrentHashSet;
+import org.apache.log4j.Logger;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;

 public abstract class BaseDMLHandler implements DMLResponseHandler {
     private static Logger logger = Logger.getLogger(BaseDMLHandler.class);
@@ -34,7 +34,7 @@ public abstract class BaseDMLHandler implements DMLResponseHandler {
     public BaseDMLHandler(long id, NonBlockingSession session) {
         this.id = id;
         this.session = session;
-        this.merges = new ConcurrentHashSet<DMLResponseHandler>();
+        this.merges = Collections.newSetFromMap(new ConcurrentHashMap<DMLResponseHandler, Boolean>());
     }

     @Override
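The merges change replaces the project's hand-rolled ConcurrentHashSet (deleted near the end of this commit) with the standard JDK idiom: Collections.newSetFromMap over a ConcurrentHashMap yields a thread-safe Set view with no custom class. A self-contained sketch:

    import java.util.Collections;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class ConcurrentSetExample {
        public static void main(String[] args) {
            // Thread-safe Set backed by a ConcurrentHashMap
            Set<String> merges = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
            merges.add("handler-1");
            System.out.println(merges.contains("handler-1")); // true
        }
    }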
@@ -232,7 +232,7 @@ public class MultiNodeMergeHandler extends OwnThreadDMLHandler {
             if (terminate.get())
                 return;
             HeapItem top = heap.peak();
-            if (top.IsNullItem()) {
+            if (top.isNullItem()) {
                 heap.poll();
             } else {
                 BlockingQueue<HeapItem> topitemQueue = queues.get(top.getIndex());
@@ -7,16 +7,16 @@ public class HeapItem {
     private byte[] row;
     private RowDataPacket rowPacket;
     private MySQLConnection hashIndex;
-    private boolean isNullItem = false;
+    private boolean isNull = false;

     public static HeapItem NULLITEM() {
         HeapItem NULLITEM = new HeapItem(null, null, null);
-        NULLITEM.isNullItem = true;
+        NULLITEM.isNull = true;
         return NULLITEM;
     }

-    public boolean IsNullItem() {
-        if (row == null && isNullItem == true)
+    public boolean isNullItem() {
+        if (row == null && isNull == true)
             return true;
         return false;
     }
@@ -78,7 +78,7 @@ public class TwoTableComparator implements Comparator<RowDataPacket> {
         if (isAsc) {
             rs = cmptor.compare();
         } else {
-            rs = -cmptor.compare();
+            rs = -1* cmptor.compare();
         }
         if (rs != 0 || ascs.size() == (i + 1)) {
             return rs;
@@ -60,7 +60,7 @@ public class SortedResultDiskBuffer extends ResultDiskBuffer {
     }

     @Override
-    public final int TapeCount() {
+    public final int tapeCount() {
         return tapes.size();
     }
@@ -27,7 +27,7 @@ public class UnSortedResultDiskBuffer extends ResultDiskBuffer {
     }

     @Override
-    public int TapeCount() {
+    public int tapeCount() {
         return 1;
     }
@@ -76,5 +76,5 @@ public interface ResultExternal {
      *
      * @return tape's count
      */
-    int TapeCount();
+    int tapeCount();
 }
(deleted file: io.mycat.backend.mysql.xa.LogFileLock)
@@ -1,77 +0,0 @@
-package io.mycat.backend.mysql.xa;
-
-import io.mycat.backend.mysql.xa.recovery.LogException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.nio.channels.FileLock;
-import java.nio.channels.OverlappingFileLockException;
-
-/**
- * Created by zhangchao on 2016/10/17.
- */
-public class LogFileLock {
-    public static final Logger logger = LoggerFactory
-            .getLogger(LogFileLock.class);
-    private static final String FILE_SEPARATOR = String.valueOf(File.separatorChar);
-    private File lockfileToPreventDoubleStartup_;
-    private FileOutputStream lockfilestream_ = null;
-    private FileLock lock_ = null;
-
-    private String dir;
-
-    private String fileName;
-
-    public LogFileLock(String dir, String fileName) {
-        if(!dir.endsWith(FILE_SEPARATOR)) {
-            dir += FILE_SEPARATOR;
-        }
-        this.dir = dir;
-        this.fileName = fileName;
-    }
-
-    public void acquireLock() throws LogException {
-        try {
-            File parent = new File(dir);
-            if(!parent.exists()) {
-                parent.mkdir();
-            }
-            lockfileToPreventDoubleStartup_ = new File(dir, fileName + ".lck");
-            lockfilestream_ = new FileOutputStream(lockfileToPreventDoubleStartup_);
-            lock_ = lockfilestream_.getChannel().tryLock();
-            lockfileToPreventDoubleStartup_.deleteOnExit();
-        } catch (OverlappingFileLockException failedToGetLock) {
-            // happens on windows
-            lock_ = null;
-        } catch (IOException failedToGetLock) {
-            // happens on windows
-            lock_ = null;
-        }
-        if (lock_ == null) {
-            logger.error("ERROR: the specified log seems to be in use already: " + fileName + " in " + dir + ". Make sure that no other instance is running, or kill any pending process if needed.");
-            throw new LogException("Log already in use? " + fileName + " in "+ dir);
-        }
-    }
-
-    public void releaseLock() {
-        try {
-            if (lock_ != null) {
-                lock_.release();
-            }
-            if (lockfilestream_ != null)
-                lockfilestream_.close();
-        } catch (IOException e) {
-            logger.warn("Error releasing file lock: " + e.getMessage());
-        } finally {
-            lock_ = null;
-        }
-
-        if (lockfileToPreventDoubleStartup_ != null) {
-            lockfileToPreventDoubleStartup_.delete();
-            lockfileToPreventDoubleStartup_ = null;
-        }
-    }
-}
@@ -2,17 +2,21 @@ package io.mycat.cache.impl;

 import java.io.File;
+import java.io.IOException;

 import static org.iq80.leveldb.impl.Iq80DBFactory.factory;

 import io.mycat.cache.CacheService;
 import org.iq80.leveldb.DB;
 import org.iq80.leveldb.Options;

 import io.mycat.cache.CachePool;
 import io.mycat.cache.CachePoolFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 public class LevelDBCachePooFactory extends CachePoolFactory {

+    private static final Logger logger = LoggerFactory.getLogger(LevelDBCachePooFactory.class);
     @Override
     public CachePool createCachePool(String poolName, int cacheSize,
             int expireSeconds) {
@@ -20,11 +24,13 @@ public class LevelDBCachePooFactory extends CachePoolFactory {
         options.cacheSize(cacheSize * 1048576); //cacheSize in MB
         options.createIfMissing(true);
         DB db =null;
+        String filePath = "leveldb\\"+poolName;
         try {
-            db=factory.open(new File("leveldb\\"+poolName), options);
+            db=factory.open(new File(filePath), options);
             // Use the db in here....
-        } catch (Exception e) {
+        } catch (IOException e) {
+            logger.info("factory try to open file "+filePath+" failed ");
             // Make sure you close the db to shutdown the
             // database and avoid resource leaks.
             // db.close();
         }
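The commented-out db.close() hints at the open/close lifecycle; a hedged sketch of how an iq80 LevelDB store is normally opened and released (path and keys are illustrative, not from the commit):

    import static org.iq80.leveldb.impl.Iq80DBFactory.factory;

    import java.io.File;
    import java.io.IOException;

    import org.iq80.leveldb.DB;
    import org.iq80.leveldb.Options;

    public class LevelDbLifecycle {
        public static void main(String[] args) throws IOException {
            Options options = new Options().createIfMissing(true);
            DB db = factory.open(new File("leveldb/example"), options); // may throw IOException
            try {
                db.put("key".getBytes(), "value".getBytes());
            } finally {
                db.close(); // release the store to avoid resource leaks
            }
        }
    }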
@@ -104,11 +104,11 @@ public class MycatPrivileges implements FrontendPrivileges {
     public Boolean isReadOnly(String user) {
         MycatConfig conf = MycatServer.getInstance().getConfig();
         UserConfig uc = conf.getUsers().get(user);
+        Boolean result = null;
         if (uc != null) {
-            return uc.isReadOnly();
-        } else {
-            return null;
+            result= uc.isReadOnly();
         }
+        return result;
     }

     @Override
@@ -32,8 +32,8 @@ public abstract class Versions {
     public static final byte PROTOCOL_VERSION = 10;

     /** server version **/
-    public static byte[] SERVER_VERSION = "5.6.29-mycat-2.xx.xx.x-20170703165824".getBytes();
-    public static byte[] VERSION_COMMENT = "Mycat Server".getBytes();
+    public static byte[] SERVER_VERSION = "5.6.29-mycat-2.17.08.0-dev-20170810110228".getBytes();
+    public static byte[] VERSION_COMMENT = "MyCat Server (OpenCloundDB)".getBytes();
     public static String ANNOTATION_NAME = "mycat:";
     public static final String ROOT_PREFIX = "mycat";
@@ -39,7 +39,7 @@ public enum ZkParamCfg {

     ;

-    private ZkParamCfg(String key) {
+    ZkParamCfg(String key) {
         this.key = key;
     }
@@ -49,8 +49,5 @@ public enum ZkParamCfg {
         return key;
     }

-    public void setKey(String key) {
-        this.key = key;
-    }
-
 }
@@ -27,7 +27,7 @@ public enum ParseParamEnum {
      */
     private String key;

-    private ParseParamEnum(String key) {
+    ParseParamEnum(String key) {
         this.key = key;
     }
@@ -35,8 +35,4 @@ public enum ParseParamEnum {
         return key;
     }

-    public void setKey(String key) {
-        this.key = key;
-    }
 }
@@ -238,7 +238,7 @@ public final class ReloadConfig {
         config.reload(newUsers, newSchemas, newDataNodes, newDataHosts, newErRelations, newFirewall, true);

         /* 2.4. dispose of the old resources */
-        LOGGER.info("1. clear old backend connection(size): " + NIOProcessor.backends_old.size());
+        LOGGER.info("1.clear old backend connection(size): " + NIOProcessor.backends_old.size());

         // clear the old connections moved out by the previous reload
         Iterator<BackendConnection> iter = NIOProcessor.backends_old.iterator();
@@ -265,7 +265,7 @@ public final class ReloadConfig {
             }
         }
     }
-    LOGGER.info("2、to be recycled old backend connection(size): " + NIOProcessor.backends_old.size());
+    LOGGER.info("2.to be recycled old backend connection(size): " + NIOProcessor.backends_old.size());


     // clear the cache
@@ -129,7 +129,18 @@ public final class ShowDataNode {
                 keys.addAll(sc.getAllDataNodes());
             }
         }
-        Collections.sort(keys, new Comparators<String>());
+        Collections.sort(keys, new Comparator<String>(){
+            @Override
+            public int compare(String o1, String o2) {
+                Pair<String, Integer> p1 = PairUtil.splitIndex(o1, '[', ']');
+                Pair<String, Integer> p2 = PairUtil.splitIndex(o2, '[', ']');
+                if (p1.getKey().compareTo(p2.getKey()) == 0) {
+                    return p1.getValue() - p2.getValue();
+                } else {
+                    return p1.getKey().compareTo(p2.getKey());
+                }
+            }
+        });
         for (String key : keys) {
             RowDataPacket row = getRow(dataNodes.get(key), c.getCharset());
             row.packetId = ++packetId;
@@ -173,17 +184,5 @@ public final class ShowDataNode {
         return row;
     }

-    private static final class Comparators<T> implements Comparator<String> {
-        @Override
-        public int compare(String s1, String s2) {
-            Pair<String, Integer> p1 = PairUtil.splitIndex(s1, '[', ']');
-            Pair<String, Integer> p2 = PairUtil.splitIndex(s2, '[', ']');
-            if (p1.getKey().compareTo(p2.getKey()) == 0) {
-                return p1.getValue() - p2.getValue();
-            } else {
-                return p1.getKey().compareTo(p2.getKey());
-            }
-        }
-    }
 }
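The removed Comparators<T> helper declared a type parameter it never used, which is presumably what FindBugs objected to; the inline anonymous Comparator is behaviorally identical. On Java 8+ the same ordering could also be expressed with comparator combinators; a self-contained sketch under that assumption (the name/index parsing stands in for PairUtil.splitIndex):

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    public class DataNodeOrdering {
        // Stand-ins for PairUtil.splitIndex(s, '[', ']') from the diff
        static String name(String s) { return s.substring(0, s.indexOf('[')); }
        static int index(String s) { return Integer.parseInt(s.substring(s.indexOf('[') + 1, s.indexOf(']'))); }

        public static void main(String[] args) {
            List<String> keys = Arrays.asList("dn[2]", "dn[0]", "an[1]");
            keys.sort(Comparator.comparing(DataNodeOrdering::name)
                    .thenComparingInt(DataNodeOrdering::index));
            System.out.println(keys); // [an[1], dn[0], dn[2]]
        }
    }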
@@ -67,9 +67,6 @@ public class DataNodeFileManager {

     subDirsPerLocalDir = conf.getInt("server.diskStore.subDirectories", 64);
     localDirs = createLocalDirs(conf);
-    if (localDirs.isEmpty()) {
-        System.exit(-1);
-    }
     subDirs = new ConcurrentHashMap<Integer,ArrayList<File>>(localDirs.size());

@@ -151,6 +148,9 @@ public class DataNodeFileManager {
         }
     }

+    if (dirs.isEmpty()) {
+        throw new RuntimeException("can't createLocalDirs in " + rootDirs);
+    }
     return dirs;
 }
@@ -1,6 +1,7 @@
 package io.mycat.memory.unsafe.utils.sort;

 import java.util.Iterator;
+import java.util.NoSuchElementException;

 /**
  * Created by zagnix 2016/6/6.
@@ -13,7 +14,7 @@ public class AbstractScalaRowIterator<T> implements Iterator<T> {

     @Override
     public T next() {
-        return null;
+        throw new NoSuchElementException();
     }

     @Override
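Returning null from Iterator.next() violates the interface contract, which FindBugs reports (the IT_NO_SUCH_ELEMENT family): an exhausted iterator must throw NoSuchElementException. A minimal well-formed iterator for reference:

    import java.util.Iterator;
    import java.util.NoSuchElementException;

    // Always-empty Iterator honoring the contract the hunk above restores
    public class EmptyIterator<T> implements Iterator<T> {
        @Override
        public boolean hasNext() {
            return false;
        }

        @Override
        public T next() {
            throw new NoSuchElementException(); // never return null here
        }
    }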
@@ -328,7 +328,10 @@ public abstract class FrontendConnection extends AbstractConnection {

         // execute the query
         if (queryHandler != null) {
-            queryHandler.setReadOnly(privileges.isReadOnly(user));
+            Boolean result = privileges.isReadOnly(user);
+            if (result != null) {
+                queryHandler.setReadOnly(result);
+            }
             queryHandler.query(sql);
         } else {
             writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "Query unsupported!");
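This guard pairs with the MycatPrivileges hunk above: isReadOnly(user) can now return null for an unknown user, and the caller keeps the handler's default (ServerQueryHandler initializes readOnly to true later in this commit) instead of propagating null. A tiny illustration of why nullable Booleans need this care (names are hypothetical):

    public class UnboxingPitfall {
        static void setReadOnly(boolean readOnly) { /* primitive parameter */ }

        public static void main(String[] args) {
            Boolean result = null; // e.g. user not found
            // setReadOnly(result); // would throw NullPointerException on auto-unboxing
            if (result != null) {
                setReadOnly(result); // safe: unbox only when non-null
            }
        }
    }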
@@ -38,27 +38,6 @@ public class FieldDate extends FieldTemporaWithDate {
         return isNull() ? BigInteger.ZERO : BigInteger.valueOf(MyTime.TIME_to_ulonglong_date(ltime));
     }

-    @Override
-    public int compare(byte[] v1, byte[] v2) {
-        if (v1 == null && v2 == null)
-            return 0;
-        else if (v1 == null) {
-            return -1;
-        } else if (v2 == null) {
-            return 1;
-        } else
-            try {
-                String sval1 = MySQLcom.getFullString(charsetName, v1);
-                String sval2 = MySQLcom.getFullString(charsetName, v2);
-                MySQLTime ltime1 = new MySQLTime();
-                MySQLTime ltime2 = new MySQLTime();
-                MyTime.str_to_datetime_with_warn(sval1, ltime1, MyTime.TIME_FUZZY_DATE);
-                MyTime.str_to_datetime_with_warn(sval2, ltime2, MyTime.TIME_FUZZY_DATE);
-                return ltime1.compareTo(ltime2);
-            } catch (Exception e) {
-                logger.info("String to biginteger exception!", e);
-                return -1;
-            }
-    }
-
 }
@@ -38,27 +38,4 @@ public class FieldDatetime extends FieldTemporalWithDateAndTime {
         return isNull() ? BigInteger.ZERO : BigInteger.valueOf(MyTime.TIME_to_ulonglong_datetime(ltime));
     }

-    @Override
-    public int compare(byte[] v1, byte[] v2) {
-        if (v1 == null && v2 == null)
-            return 0;
-        else if (v1 == null) {
-            return -1;
-        } else if (v2 == null) {
-            return 1;
-        } else
-            try {
-                String sval1 = MySQLcom.getFullString(charsetName, v1);
-                String sval2 = MySQLcom.getFullString(charsetName, v2);
-                MySQLTime ltime1 = new MySQLTime();
-                MySQLTime ltime2 = new MySQLTime();
-                MyTime.str_to_datetime_with_warn(sval1, ltime1, MyTime.TIME_FUZZY_DATE);
-                MyTime.str_to_datetime_with_warn(sval2, ltime2, MyTime.TIME_FUZZY_DATE);
-                return ltime1.compareTo(ltime2);
-            } catch (Exception e) {
-                logger.info("String to biginteger exception!", e);
-                return -1;
-            }
-    }
-
 }
@@ -1,5 +1,6 @@
 package io.mycat.plan.common.field.temporal;

+import io.mycat.plan.common.MySQLcom;
 import io.mycat.plan.common.time.MySQLTime;
 import io.mycat.plan.common.time.MyTime;
@@ -34,5 +35,26 @@ public abstract class FieldTemporaWithDate extends FieldTemporal {
         internalJob();
         return isNull() ? true : getDate(time, MyTime.TIME_FUZZY_DATE);
     }
+
+    @Override
+    public int compare(byte[] v1, byte[] v2) {
+        if (v1 == null && v2 == null)
+            return 0;
+        else if (v1 == null) {
+            return -1;
+        } else if (v2 == null) {
+            return 1;
+        } else
+            try {
+                String sval1 = MySQLcom.getFullString(charsetName, v1);
+                String sval2 = MySQLcom.getFullString(charsetName, v2);
+                MySQLTime ltime1 = new MySQLTime();
+                MySQLTime ltime2 = new MySQLTime();
+                MyTime.str_to_datetime_with_warn(sval1, ltime1, MyTime.TIME_FUZZY_DATE);
+                MyTime.str_to_datetime_with_warn(sval2, ltime2, MyTime.TIME_FUZZY_DATE);
+                return ltime1.getCompareResult(ltime2);
+            } catch (Exception e) {
+                logger.info("String to biginteger exception!", e);
+                return -1;
+            }
+    }
 }
@@ -48,7 +48,7 @@ public abstract class FieldTemporal extends Field {
         this.internalJob();
         other2.internalJob();
         MySQLTime ltime2 = other2.ltime;
-        return ltime.compareTo(ltime2);
+        return ltime.getCompareResult(ltime2);
     }

 }
@@ -61,7 +61,7 @@ public class FieldTime extends FieldTemporal {
             MySQLTime ltime2 = new MySQLTime();
             MyTime.str_to_time_with_warn(sval1, ltime1);
             MyTime.str_to_time_with_warn(sval2, ltime2);
-            return ltime1.compareTo(ltime2);
+            return ltime1.getCompareResult(ltime2);
         } catch (Exception e) {
             logger.info("String to biginteger exception!", e);
             return -1;
@@ -37,27 +37,5 @@ public class FieldTimestamp extends FieldTemporalWithDateAndTime {
         MyTime.str_to_datetime_with_warn(ptr_str, ltime, MyTime.TIME_FUZZY_DATE);
     }

-    @Override
-    public int compare(byte[] v1, byte[] v2) {
-        if (v1 == null && v2 == null)
-            return 0;
-        else if (v1 == null) {
-            return -1;
-        } else if (v2 == null) {
-            return 1;
-        } else
-            try {
-                String sval1 = MySQLcom.getFullString(charsetName, v1);
-                String sval2 = MySQLcom.getFullString(charsetName, v2);
-                MySQLTime ltime1 = new MySQLTime();
-                MySQLTime ltime2 = new MySQLTime();
-                MyTime.str_to_datetime_with_warn(sval1, ltime1, MyTime.TIME_FUZZY_DATE);
-                MyTime.str_to_datetime_with_warn(sval2, ltime2, MyTime.TIME_FUZZY_DATE);
-                return ltime1.compareTo(ltime2);
-            } catch (Exception e) {
-                logger.info("String to biginteger exception!", e);
-                return -1;
-            }
-    }
-
 }
@@ -20,12 +20,6 @@ public class ItemInt extends ItemNum {
         maxLength = String.valueOf(value).length();
     }

-    public ItemInt(Integer value) {
-        this.value = BigInteger.valueOf(value);
-        fixed = true;
-        maxLength = String.valueOf(value).length();
-    }
-
     @Override
     public ItemType type() {
         return ItemType.INT_ITEM;
@@ -96,7 +90,7 @@ public class ItemInt extends ItemNum {
         return true;
     if (obj == null)
         return false;
-    if (!(obj instanceof ItemFloat))
+    if (!(obj instanceof ItemInt))
         return false;
     ItemInt other = (ItemInt) obj;
     if (value == null || other.value == null)
@@ -99,7 +99,7 @@ public class ItemString extends ItemBasicConstant {
         return true;
     if (obj == null)
         return false;
-    if (!(obj instanceof ItemFloat))
+    if (!(obj instanceof ItemString))
         return false;
     ItemString other = (ItemString) obj;
     if (value == null || other.value == null)
@@ -207,7 +207,7 @@ public abstract class ItemFuncMinMax extends ItemFunc {
                 String res2 = args.get(i).valStr();
                 if (res2 != null) {
                     int cmp = res.compareTo(res2);
-                    if ((cmp_sign < 0 ? cmp : -cmp) < 0)
+                    if ((cmp_sign < 0 ? cmp : -1 * cmp) < 0)
                         res = res2;
                 }
             }
@@ -433,7 +433,7 @@ public class ArgComparator {
         res1 = ac.a.valStr();
         res2 = ac.b.valStr();
         if (res1 == null || res2 == null)
-            return (res1 == res2) ? 1 : 0;
+            return (res1 == null && res2 == null) ? 1 : 0;
         return (res1.compareTo(res2) == 0) ? 1 : 0;
     }
 }
@@ -446,7 +446,7 @@ public class ArgComparator {
         res1 = ac.a.valStr();
         res2 = ac.b.valStr();
         if (res1 == null || res2 == null)
-            return (res1 == res2) ? 1 : 0;
+            return (res1 == null && res2 == null) ? 1 : 0;
         return MySQLcom.memcmp(res1.getBytes(), res2.getBytes(), Math.min(res1.length(), res2.length())) == 0 ? 1
                 : 0;
     }
@@ -33,7 +33,7 @@ public abstract class Aggregator {
         SIMPLE_AGGREGATOR, DISTINCT_AGGREGATOR
     };

-    public abstract AggregatorType Aggrtype();
+    public abstract AggregatorType aggrType();

     /**
      * Called before adding the first row. Allocates and sets up the internal
@@ -21,7 +21,7 @@ public class AggregatorDistinct extends Aggregator {
     }

     @Override
-    public AggregatorType Aggrtype() {
+    public AggregatorType aggrType() {
         return AggregatorType.DISTINCT_AGGREGATOR;
     }
@@ -13,7 +13,7 @@ public class AggregatorSimple extends Aggregator {
     }

     @Override
-    public AggregatorType Aggrtype() {
+    public AggregatorType aggrType() {
         return AggregatorType.SIMPLE_AGGREGATOR;
     }
@@ -220,7 +220,7 @@ public abstract class ItemSum extends ItemResultField {
      * times during query optimization. In this case, the type may change,
      * so we delete the old aggregator, and create a new one.
      */
-    if (aggr != null && aggregator == aggr.Aggrtype()) {
+    if (aggr != null && aggregator == aggr.aggrType()) {
         aggr.clear();
         return FALSE;
     }
@@ -32,8 +32,8 @@ public class ItemAllanySubselect extends ItemSubselect {
     }

     @Override
-    public subSelectType substype() {
-        return isAll ? subSelectType.ALL_SUBS : subSelectType.ANY_SUBS;
+    public SubSelectType substype() {
+        return isAll ? SubSelectType.ALL_SUBS : SubSelectType.ANY_SUBS;
     }

     @Override
@@ -27,8 +27,8 @@ public class ItemSinglerowSubselect extends ItemSubselect {
     }

     @Override
-    public subSelectType substype() {
-        return subSelectType.SINGLEROW_SUBS;
+    public SubSelectType substype() {
+        return SubSelectType.SINGLEROW_SUBS;
     }

     @Override
@@ -19,12 +19,12 @@ public abstract class ItemSubselect extends ItemResultField {
     protected SQLSelectQuery query;
     private String currentDb;
     private PlanNode planNode;
-    public enum subSelectType {
+    public enum SubSelectType {
         UNKNOWN_SUBS, SINGLEROW_SUBS, EXISTS_SUBS, IN_SUBS, ALL_SUBS, ANY_SUBS
     };

-    public subSelectType substype() {
-        return subSelectType.UNKNOWN_SUBS;
+    public SubSelectType substype() {
+        return SubSelectType.UNKNOWN_SUBS;
     }

     public ItemSubselect(String currentDb, SQLSelectQuery query) {
@@ -48,7 +48,7 @@ public class MySQLTime implements Serializable {
         second_part = (cal.getTimeInMillis() % 1000) * 1000;
     }

-    public int compareTo(MySQLTime other) {
+    public int getCompareResult(MySQLTime other) {
         if (other == null)
             return 1;
         long lt1 = MyTime.TIME_to_longlong_datetime_packed(this);
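Renaming compareTo to getCompareResult sidesteps the FindBugs warning about a compareTo(MySQLTime) method on a class that never declares Comparable<MySQLTime>. The conventional alternative, kept here only as a hedged sketch, is to implement the interface so the method carries its standard contract:

    // Sketch of the alternative fix (not the one this commit chose)
    public class PackedTime implements Comparable<PackedTime> {
        private final long packed;

        public PackedTime(long packed) { this.packed = packed; }

        @Override
        public int compareTo(PackedTime other) {
            return Long.compare(this.packed, other.packed);
        }
    }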
@@ -509,7 +509,12 @@ public class ERJoinChooser {
         tn = null;
         cm = null;
     }

+    @Override
+    public int hashCode() {
+        int hash = this.key.getTableName().hashCode();
+        hash = hash * 31 + this.key.getItemName().toLowerCase().hashCode();
+        return hash;
+    }
     @Override
     public boolean equals(Object o) {
         if (o == null)
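Adding hashCode beside an existing equals addresses the FindBugs HE_EQUALS_NO_HASHCODE rule: objects that compare equal must produce equal hash codes, or HashMap/HashSet lookups silently break. A minimal sketch of the paired overrides, mirroring the case-insensitive item name used above (class name is hypothetical):

    import java.util.Objects;

    public class TableColumnKey {
        private final String tableName;
        private final String itemName;

        public TableColumnKey(String tableName, String itemName) {
            this.tableName = tableName;
            this.itemName = itemName;
        }

        @Override
        public boolean equals(Object o) {
            if (!(o instanceof TableColumnKey)) return false;
            TableColumnKey other = (TableColumnKey) o;
            return tableName.equals(other.tableName)
                    && itemName.equalsIgnoreCase(other.itemName);
        }

        @Override
        public int hashCode() {
            // must agree with equals: case-insensitive on itemName
            return Objects.hash(tableName, itemName.toLowerCase());
        }
    }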
@@ -45,7 +45,7 @@ public final class RouteResultset implements Serializable {
     private String table;
     private final int sqlType;
     private RouteResultsetNode[] nodes; // route result nodes
-    private SQLStatement sqlStatement;
+    private transient SQLStatement sqlStatement;

     private boolean needOptimizer;
     private int limitStart;
@@ -26,6 +26,7 @@ package io.mycat.route.function;
 import java.io.BufferedReader;
 import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.io.Serializable;
 import java.util.LinkedList;

 import io.mycat.config.model.rule.RuleAlgorithm;
@@ -191,7 +192,7 @@ public class AutoPartitionByLong extends AbstractPartitionAlgorithm implements R
         this.defaultNode = defaultNode;
     }

-    static class LongRange {
+    static class LongRange implements Serializable {
         public final int nodeIndx;
         public final long valueStart;
         public final long valueEnd;
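Marking nested value types Serializable (and, in the next hunks, non-serializable helpers like ThreadLocal as transient) answers FindBugs' serialization rules (the SE_BAD_FIELD family) for rule classes that may be serialized. A minimal sketch of the pattern (class names are hypothetical):

    import java.io.Serializable;
    import java.text.SimpleDateFormat;

    public class PartitionRule implements Serializable {
        private static final long serialVersionUID = 1L;

        // Non-serializable helper state: transient, rebuilt after deserialization
        private transient ThreadLocal<SimpleDateFormat> formatter;

        // Nested value type carried by the rule is itself Serializable
        static class LongRange implements Serializable {
            private static final long serialVersionUID = 1L;
            final int nodeIndex;
            final long valueStart;
            final long valueEnd;

            LongRange(int nodeIndex, long valueStart, long valueEnd) {
                this.nodeIndex = nodeIndex;
                this.valueStart = valueStart;
                this.valueEnd = valueEnd;
            }
        }
    }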
@@ -34,7 +34,7 @@ public class PartitionByDate extends AbstractPartitionAlgorithm implements RuleA
     private long endDate;
     private int nCount;
     private int defaultNode = -1;
-    private ThreadLocal<SimpleDateFormat> formatter;
+    private transient ThreadLocal<SimpleDateFormat> formatter;

     private static final long oneDay = 86400000;
@@ -26,6 +26,7 @@ package io.mycat.route.function;
 import java.io.BufferedReader;
 import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.io.Serializable;
 import java.util.LinkedList;
 import java.util.HashSet;
 import java.util.regex.Pattern;
@@ -229,7 +230,7 @@ public class PartitionByPattern extends AbstractPartitionAlgorithm implements Ru
         }
     }

-    static class LongRange {
+    static class LongRange implements Serializable{
         public final int nodeIndx;
         public final long valueStart;
         public final long valueEnd;
@@ -96,7 +96,6 @@ public class DruidAlterTableParser extends DefaultDruidParser {
             rrs.setSrcStatement(sql);
             sql = RouterUtil.removeSchema(sql, schemaInfo.schema);
             rrs.setStatement(sql);
-            rrs.setSqlStatement(alterTable);
         }
         RouterUtil.routeToDDLNode(schemaInfo, rrs);
         return schemaInfo.schemaConfig;
@@ -61,7 +61,6 @@ public class DruidCreateTableParser extends DefaultDruidParser {
             rrs.setSrcStatement(sql);
             sql = RouterUtil.removeSchema(sql, schemaInfo.schema);
             rrs.setStatement(sql);
-            rrs.setSqlStatement(createStmt);
         }
         try {
             RouterUtil.routeToDDLNode(schemaInfo, rrs);
@@ -155,7 +155,7 @@ public class DistributedSequenceHandler extends LeaderSelectorListenerAdapter im
                 client.create().creatingParentContainersIfNeeded().forPath(INSTANCE_PATH);
             }
         } catch (Exception e) {
-            // do nothing
+            throw new RuntimeException("create instance path " + INSTANCE_PATH + "error", e);
         }
         this.leaderSelector = new LeaderSelector(client, KVPathUtil.getSequencesLeaderPath(), this);
         this.leaderSelector.autoRequeue();
@@ -25,12 +25,14 @@ package io.mycat.route.util;

 import io.mycat.util.StringUtil;

+import java.io.Serializable;
+
 /**
  * data partition utility
  *
  * @author mycat
  */
-public final class PartitionUtil {
+public final class PartitionUtil implements Serializable {

     // max partition length: defines the data-segment distribution; when the modulus is a power of two, x % 2^n == x & (2^n - 1) is used to optimize performance.
     private static final int MAX_PARTITION_LENGTH = 2880;
@@ -52,7 +52,7 @@ public class ServerQueryHandler implements FrontendQueryHandler {
             .getLogger(ServerQueryHandler.class);

     private final ServerConnection source;
-    protected Boolean readOnly;
+    protected Boolean readOnly = true;

     public void setReadOnly(Boolean readOnly) {
         this.readOnly = readOnly;
@@ -182,7 +182,7 @@ public class RowDataPacketSorter {
         case ColMeta.COL_TYPE_DATETIME:
         case ColMeta.COL_TYPE_NEWDATE:
         case ColMeta.COL_TYPE_BIT:
-            // return BytesTools.compareTo(left,right);
+            // return BytesTools.getCompareResult(left,right);
             return ByteUtil.compareNumberByte(left, right);
         case ColMeta.COL_TYPE_VAR_STRING:
         case ColMeta.COL_TYPE_STRING:
@@ -76,7 +76,13 @@ public class SqlFrequency implements Comparable<SqlFrequency>{
         long para2 = o.lastTime - lastTime;
         return para == 0L ? (int)(para2 == 0L ? o.allExecuteTime - allExecuteTime : para2) : (int)para ;
     }

+    @Override
+    public int hashCode() {
+        long hash = count.get();
+        hash = hash * 31 + lastTime;
+        hash = hash * 31 + allExecuteTime;
+        return (int)hash;
+    }
     @Override
     public boolean equals(Object obj) {
         if(obj instanceof SqlFrequency) {
@@ -112,6 +112,13 @@ public class TableStat implements Comparable<TableStat> {
         return para == 0? (para2 == 0? o.getTable().hashCode() - getTable().hashCode() :(int) para2) : (int)para ;
     }

+    @Override
+    public int hashCode() {
+        long hash = getCount();
+        hash = hash * 31 + getLastExecuteTime();
+        hash = hash * 31 + getTable().hashCode();
+        return (int) hash;
+    }
     @Override
     public boolean equals(Object obj) {
         if(obj instanceof TableStat) {
@@ -109,7 +109,7 @@ public class TableStatAnalyzer implements QueryResultListener {
         return list;
     }

-    public void ClearTable() {
+    public void clearTable() {
         tableStatMap.clear();
     }
@@ -91,7 +91,10 @@ public class UserSqlLastStat {
         long st1 = o == null ? 0 : o.getStartTime();
         return (int) ( st1 - this.getStartTime());
     }

+    @Override
+    public int hashCode() {
+        return (int) getStartTime();
+    }
     @Override
     public boolean equals(Object obj) {
         if(obj instanceof SqlLast) {
(deleted file: io.mycat.util.ConcurrentHashSet)
@@ -1,59 +0,0 @@
-package io.mycat.util;
-
-import java.io.Serializable;
-import java.util.AbstractSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-public class ConcurrentHashSet<E> extends AbstractSet<E> implements Set<E>, Serializable {
-
-
-    private static final long serialVersionUID = 8966440120855782809L;
-
-    private transient Map<E, Object> map;
-
-    // Dummy value to associate with an Object in the backing Map
-    private static final Object PRESENT = new Object();
-
-    public ConcurrentHashSet() {
-        this.map = new ConcurrentHashMap<E, Object>();
-    }
-
-    @Override
-    public boolean isEmpty() {
-        return map.isEmpty();
-    }
-
-    @Override
-    public boolean contains(Object o) {
-        return map.containsKey(o);
-    }
-
-    @Override
-    public boolean add(E e) {
-        return map.put(e, PRESENT) == null;
-    }
-
-    @Override
-    public boolean remove(Object o) {
-        return map.remove(o) == PRESENT;
-    }
-
-    @Override
-    public void clear() {
-        map.clear();
-    }
-
-    @Override
-    public Iterator<E> iterator() {
-        return map.keySet().iterator();
-    }
-
-    @Override
-    public int size() {
-        return map.size();
-    }
-
-}
@@ -53,7 +53,7 @@ public class FastByteOperations

     public static int compareUnsigned(byte[] b1, int s1, int l1, ByteBuffer b2)
     {
-        return -BestHolder.BEST.compare(b2, b1, s1, l1);
+        return -1 *BestHolder.BEST.compare(b2, b1, s1, l1);
     }

     public static int compareUnsigned(ByteBuffer b1, ByteBuffer b2)
@@ -1,6 +1,6 @@
-BuildTime 2017-07-18 07:46:30
-GitVersion a5d0d6c5e8ebec7a06e30261c7f940195c39807d
-MavenVersion 2.17.07.0-dev
+BuildTime 2017-08-10 03:02:24
+GitVersion 6963e4a1993f45c3054c30f9e1761e5ed193e8ba
+MavenVersion 2.17.08.0-dev
 GitUrl https://github.com/MyCATApache/Mycat-Server.git
 MyCatSite http://www.mycat.org.cn
 QQGroup 106088787