feat: Add management commands dble_table series (#2037)

* feat: Add management commands

dble_sharding_node, dble_schema, dble_thread_usage

* refactor: Modify variable name

* feat: Add management commands dble_table series

* perf: judge empty

* perf: judge empty

* fix: Add missing null/empty-value checks
This commit is contained in:
LUA
2020-08-17 19:50:36 +08:00
committed by GitHub
parent bf525ef26f
commit 34626b96f9
11 changed files with 504 additions and 2 deletions
@@ -28,6 +28,7 @@ import java.io.InputStream;
import java.lang.reflect.InvocationTargetException;
import java.util.*;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicInteger;
import static com.actiontech.dble.backend.datasource.check.GlobalCheckJob.GLOBAL_TABLE_CHECK_DEFAULT;
import static com.actiontech.dble.backend.datasource.check.GlobalCheckJob.GLOBAL_TABLE_CHECK_DEFAULT_CRON;
@@ -44,6 +45,7 @@ public class XMLShardingLoader {
private final Map<String, AbstractPartitionAlgorithm> functions;
private final boolean lowerCaseNames;
private ProblemReporter problemReporter;
private AtomicInteger tableIndex = new AtomicInteger();
public XMLShardingLoader(String shardingFile, boolean lowerCaseNames, ProblemReporter problemReporter) {
this.functions = new HashMap<>();
@@ -59,6 +61,10 @@ public class XMLShardingLoader {
this(null, lowerCaseNames, problemReporter);
}
/**
 * Allocates the next sequential table id for this loader.
 * Backed by an {@link AtomicInteger}, so ids stay unique even under
 * concurrent registration; the first id handed out is 1.
 *
 * @return the newly allocated table id
 */
public int addTableIndex() {
return tableIndex.incrementAndGet();
}
/**
 * Returns the sharding-node configs keyed by node name; an immutable empty
 * map is returned when none are configured, so callers never see null.
 * NOTE(review): the cast mirrors the field's declared type, which is not
 * visible in this hunk — presumably a wider map type; confirm the cast is
 * still required.
 *
 * @return map of node name to {@link ShardingNodeConfig}; possibly empty
 */
public Map<String, ShardingNodeConfig> getShardingNode() {
return (Map<String, ShardingNodeConfig>) (shardingNode.isEmpty() ? Collections.emptyMap() : shardingNode);
}
@@ -294,6 +300,7 @@ public class XMLShardingLoader {
if (tables.containsKey(table.getName())) {
throw new ConfigException("table " + tableName + " duplicated!");
}
table.setId(addTableIndex());
tables.put(table.getName(), table);
}
// child table must know its unique father
@@ -353,6 +360,7 @@ public class XMLShardingLoader {
if (tables.containsKey(table.getName())) {
throw new ConfigException("table " + tableName + " duplicated!");
}
table.setId(addTableIndex());
tables.put(table.getName(), table);
}
}
@@ -394,6 +402,7 @@ public class XMLShardingLoader {
if (tables.containsKey(table.getName())) {
throw new ConfigException("table " + tableName + " duplicated!");
}
table.setId(addTableIndex());
tables.put(table.getName(), table);
}
}
@@ -433,6 +442,7 @@ public class XMLShardingLoader {
if (tables.containsKey(table.getName())) {
throw new ConfigException("table " + table.getName() + " duplicated!");
}
table.setId(addTableIndex());
tables.put(table.getName(), table);
//child table may also have children
processChildTables(tables, table, lstShardingNode, childTbElement, isLowerCaseNames, schemaMaxLimit);
@@ -141,6 +141,9 @@ public class SchemaConfig {
/**
 * Exposes the live table-config map of this schema (not a defensive copy).
 *
 * @return table name to config map; mutations are visible to this schema
 */
public Map<String, BaseTableConfig> getTables() {
return tables;
}
/**
 * Looks up a single table configuration by name.
 *
 * @param tableName the table name as stored in the map (case handling
 *                  depends on how the map was populated — see the
 *                  lower-case handling elsewhere in this class)
 * @return the matching config, or {@code null} if the schema has no such table
 */
public BaseTableConfig getTable(String tableName) {
return tables.get(tableName);
}
private Map<String, BaseTableConfig> getLowerCaseTables() {
Map<String, BaseTableConfig> newTables = new HashMap<>();
@@ -9,6 +9,7 @@ import java.util.List;
public abstract class BaseTableConfig {
private int id;
protected final String name;
protected final int maxLimit;
protected final List<String> shardingNodes;
@@ -19,6 +20,13 @@ public abstract class BaseTableConfig {
this.shardingNodes = shardingNodes;
}
/** @return the loader-assigned sequential table id (0 until assigned) */
public int getId() {
return id;
}
/**
 * Sets the sequential table id; assigned by XMLShardingLoader when the
 * table is registered.
 *
 * @param id the new id
 */
public void setId(int id) {
this.id = id;
}
public String getName() {
return name;
@@ -63,6 +63,7 @@ public class ProxyMetaManager {
private volatile Repository repository = null;
private AtomicInteger version = new AtomicInteger(0);
private long timestamp;
private AtomicInteger tableIndex = new AtomicInteger();
public ProxyMetaManager() {
this.catalogs = new ConcurrentHashMap<>();
@@ -79,6 +80,7 @@ public class ProxyMetaManager {
this.metaCount = origin.metaCount;
this.repository = origin.repository;
this.version = origin.version;
this.tableIndex = origin.tableIndex;
for (Map.Entry<String, SchemaMeta> entry : origin.catalogs.entrySet()) {
catalogs.put(entry.getKey(), entry.getValue().metaCopy());
}
@@ -198,6 +200,7 @@ public class ProxyMetaManager {
String tbName = tm.getTableName();
SchemaMeta schemaMeta = catalogs.get(schema);
if (schemaMeta != null) {
tm.setId(tableIndex.incrementAndGet());
schemaMeta.addTableMeta(tbName, tm);
}
}
@@ -4,6 +4,8 @@ import java.util.List;
public final class TableMeta {
private int id;
private String schemaName;
private String tableName;
private long version;
private String createSql;
@@ -19,6 +21,24 @@ public final class TableMeta {
version = newVersion;
}
/**
 * Lightweight constructor used by the manager tables to carry only
 * id/schema/name; version, createSql and columns are left unset.
 *
 * @param id         sequential table id
 * @param schemaName owning schema name
 * @param tableName  table name
 */
public TableMeta(int id, String schemaName, String tableName) {
this.id = id;
this.schemaName = schemaName;
this.tableName = tableName;
}
/** @return the owning schema's name (may be null on legacy construction paths) */
public String getSchemaName() {
return schemaName;
}
/** @return the sequential id (assigned via setId when the meta is registered) */
public int getId() {
return id;
}
/**
 * Sets the sequential id — presumably called when the meta is added to a
 * schema (see the tableIndex counter in ProxyMetaManager).
 *
 * @param id the new id
 */
public void setId(int id) {
this.id = id;
}
/** @return the table name */
public String getTableName() {
return tableName;
}
@@ -61,8 +81,8 @@ public final class TableMeta {
}
TableMeta temp = (TableMeta) other;
return tableName.equals(temp.getTableName()) && version == temp.getVersion() &&
createSql.equals(temp.getCreateSql()) && columns.equals(temp.getColumns());
return id == temp.getId() && schemaName.equals(temp.getSchemaName()) && tableName.equals(temp.getTableName()) &&
version == temp.getVersion() && createSql.equals(temp.getCreateSql()) && columns.equals(temp.getColumns());
}
@Override
@@ -71,6 +91,10 @@ public final class TableMeta {
result = 31 * result + (int) (version ^ (version >>> 32));
result = 31 * result + createSql.hashCode();
result = 31 * result + columns.hashCode();
result = 31 * result + Integer.hashCode(id);
if (null != schemaName) {
result = 31 * result + schemaName.hashCode();
}
return result;
}
@@ -42,6 +42,11 @@ public final class ManagerSchemaInfo {
registerTable(new DbleReloadStatus());
registerTable(new DbleXaSession());
registerTable(new DbleDdlLock());
registerTable(new DbleTable());
registerTable(new DbleGlobalTable());
registerTable(new DbleShardingTable());
registerTable(new DbleChildTable());
registerTable(new DbleTableShardingNode());
}
@@ -0,0 +1,56 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.services.manager.information.tables;
import com.actiontech.dble.config.Fields;
import com.actiontech.dble.meta.ColumnMeta;
import com.actiontech.dble.services.manager.information.ManagerBaseTable;
import java.util.LinkedHashMap;
import java.util.List;
/**
 * Manager information table {@code dble_child_table}: one row per child
 * table declared in sharding.xml, exposing the parent link and the
 * join/parent columns used to co-locate the child with its parent.
 */
public class DbleChildTable extends ManagerBaseTable {
    private static final String TABLE_NAME = "dble_child_table";
    private static final String COLUMN_ID = "id";
    private static final String COLUMN_PARENT_ID = "parent_id";
    private static final String COLUMN_INCREMENT_COLUMN = "increment_column";
    private static final String COLUMN_JOIN_COLUMN = "join_column";
    // NOTE(review): looks like a typo for "parent_column"; kept unchanged
    // because the string is the externally visible column name.
    private static final String COLUMN_PAREN_COLUMN = "paren_column";

    public DbleChildTable() {
        super(TABLE_NAME, 5);
    }

    @Override
    protected void initColumnAndType() {
        define(COLUMN_ID, new ColumnMeta(COLUMN_ID, "varchar(64)", false, true), Fields.FIELD_TYPE_VAR_STRING);
        define(COLUMN_PARENT_ID, new ColumnMeta(COLUMN_PARENT_ID, "int(11)", false), Fields.FIELD_TYPE_LONG);
        define(COLUMN_INCREMENT_COLUMN, new ColumnMeta(COLUMN_INCREMENT_COLUMN, "varchar(64)", true), Fields.FIELD_TYPE_VAR_STRING);
        define(COLUMN_JOIN_COLUMN, new ColumnMeta(COLUMN_JOIN_COLUMN, "varchar(64)", false), Fields.FIELD_TYPE_VAR_STRING);
        define(COLUMN_PAREN_COLUMN, new ColumnMeta(COLUMN_PAREN_COLUMN, "varchar(64)", false), Fields.FIELD_TYPE_VAR_STRING);
    }

    // Registers a column's meta and its wire type in one step.
    private void define(String name, ColumnMeta meta, int fieldType) {
        columns.put(name, meta);
        columnsType.put(name, fieldType);
    }

    @Override
    protected List<LinkedHashMap<String, String>> getRows() {
        // Delegates to the shared dble_table row builder, restricted to CHILD tables.
        return DbleTable.getTableByType(DbleTable.TableType.CHILD);
    }
}
@@ -0,0 +1,51 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.services.manager.information.tables;
import com.actiontech.dble.config.Fields;
import com.actiontech.dble.meta.ColumnMeta;
import com.actiontech.dble.services.manager.information.ManagerBaseTable;
import java.util.LinkedHashMap;
import java.util.List;
/**
 * Manager information table {@code dble_global_table}: one row per global
 * table declared in sharding.xml, exposing its consistency-check settings.
 */
public class DbleGlobalTable extends ManagerBaseTable {
    private static final String TABLE_NAME = "dble_global_table";
    private static final String COLUMN_ID = "id";
    private static final String COLUMN_CHECK = "check";
    private static final String COLUMN_CHECK_CLASS = "check_class";
    private static final String COLUMN_CRON = "cron";

    public DbleGlobalTable() {
        super(TABLE_NAME, 4);
    }

    @Override
    protected void initColumnAndType() {
        define(COLUMN_ID, new ColumnMeta(COLUMN_ID, "varchar(64)", false, true), Fields.FIELD_TYPE_VAR_STRING);
        define(COLUMN_CHECK, new ColumnMeta(COLUMN_CHECK, "varchar(5)", false), Fields.FIELD_TYPE_VAR_STRING);
        define(COLUMN_CHECK_CLASS, new ColumnMeta(COLUMN_CHECK_CLASS, "varchar(64)", true), Fields.FIELD_TYPE_VAR_STRING);
        define(COLUMN_CRON, new ColumnMeta(COLUMN_CRON, "varchar(32)", true), Fields.FIELD_TYPE_VAR_STRING);
    }

    // Registers a column's meta and its wire type in one step.
    private void define(String name, ColumnMeta meta, int fieldType) {
        columns.put(name, meta);
        columnsType.put(name, fieldType);
    }

    @Override
    protected List<LinkedHashMap<String, String>> getRows() {
        // Delegates to the shared dble_table row builder, restricted to GLOBAL tables.
        return DbleTable.getTableByType(DbleTable.TableType.GLOBAL);
    }
}
@@ -0,0 +1,56 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.services.manager.information.tables;
import com.actiontech.dble.config.Fields;
import com.actiontech.dble.meta.ColumnMeta;
import com.actiontech.dble.services.manager.information.ManagerBaseTable;
import java.util.LinkedHashMap;
import java.util.List;
/**
 * Manager information table {@code dble_sharding_table}: one row per sharding
 * table declared in sharding.xml, exposing its sharding column and algorithm.
 */
public class DbleShardingTable extends ManagerBaseTable {
    private static final String TABLE_NAME = "dble_sharding_table";
    private static final String COLUMN_ID = "id";
    private static final String COLUMN_INCREMENT_COLUMN = "increment_column";
    private static final String COLUMN_SHARDING_COLUMN = "sharding_column";
    private static final String COLUMN_SQL_REQUIRED_SHARDING = "sql_required_sharding";
    private static final String COLUMN_ALGORITHM_NAME = "algorithm_name";

    public DbleShardingTable() {
        super(TABLE_NAME, 5);
    }

    @Override
    protected void initColumnAndType() {
        define(COLUMN_ID, new ColumnMeta(COLUMN_ID, "varchar(64)", false, true), Fields.FIELD_TYPE_VAR_STRING);
        define(COLUMN_INCREMENT_COLUMN, new ColumnMeta(COLUMN_INCREMENT_COLUMN, "varchar(64)", true), Fields.FIELD_TYPE_VAR_STRING);
        define(COLUMN_SHARDING_COLUMN, new ColumnMeta(COLUMN_SHARDING_COLUMN, "varchar(64)", false), Fields.FIELD_TYPE_VAR_STRING);
        define(COLUMN_SQL_REQUIRED_SHARDING, new ColumnMeta(COLUMN_SQL_REQUIRED_SHARDING, "varchar(5)", false), Fields.FIELD_TYPE_VAR_STRING);
        define(COLUMN_ALGORITHM_NAME, new ColumnMeta(COLUMN_ALGORITHM_NAME, "varchar(32)", false), Fields.FIELD_TYPE_VAR_STRING);
    }

    // Registers a column's meta and its wire type in one step.
    private void define(String name, ColumnMeta meta, int fieldType) {
        columns.put(name, meta);
        columnsType.put(name, fieldType);
    }

    @Override
    protected List<LinkedHashMap<String, String>> getRows() {
        // Delegates to the shared dble_table row builder, restricted to SHARDING tables.
        return DbleTable.getTableByType(DbleTable.TableType.SHARDING);
    }
}
@@ -0,0 +1,217 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.services.manager.information.tables;
import com.actiontech.dble.DbleServer;
import com.actiontech.dble.config.Fields;
import com.actiontech.dble.config.model.sharding.SchemaConfig;
import com.actiontech.dble.config.model.sharding.table.*;
import com.actiontech.dble.meta.ColumnMeta;
import com.actiontech.dble.meta.SchemaMeta;
import com.actiontech.dble.meta.TableMeta;
import com.actiontech.dble.services.manager.information.ManagerBaseTable;
import com.actiontech.dble.singleton.ProxyMeta;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.util.*;
/**
 * Manager information table {@code dble_table}: one row for every table dble
 * knows about. Tables declared in sharding.xml get ids prefixed with
 * {@link #PREFIX_CONFIG}; tables only discovered through fetched metadata get
 * ids prefixed with {@link #PREFIX_METADATA}. The specialised views
 * (dble_global_table, dble_sharding_table, dble_child_table) reuse
 * {@link #getTableByType(TableType)} with a non-null type.
 */
public class DbleTable extends ManagerBaseTable {
    private static final String TABLE_NAME = "dble_table";
    public static final String COLUMN_ID = "id";
    public static final String COLUMN_NAME = "name";
    public static final String COLUMN_SCHEMA = "schema";
    private static final String COLUMN_MAX_LIMIT = "max_limit";
    public static final String COLUMN_TYPE = "type";
    // Columns emitted only when rendering the dble_global_table view.
    private static final String COLUMN_GLOBAL_CHECK = "check";
    private static final String COLUMN_GLOBAL_CHECK_CLASS = "check_class";
    private static final String COLUMN_GLOBAL_CRON = "cron";
    // Columns emitted only when rendering the dble_sharding_table view.
    private static final String COLUMN_SHARDING_INCREMENT_COLUMN = "increment_column";
    private static final String COLUMN_SHARDING_COLUMN = "sharding_column";
    private static final String COLUMN_SHARDING_SQL_REQUIRED_SHARDING = "sql_required_sharding";
    private static final String COLUMN_SHARDING_ALGORITHM_NAME = "algorithm_name";
    // Columns emitted only when rendering the dble_child_table view.
    private static final String COLUMN_CHILD_PARENT_ID = "parent_id";
    private static final String COLUMN_CHILD_INCREMENT_COLUMN = "increment_column";
    private static final String COLUMN_CHILD_JOIN_COLUMN = "join_column";
    // NOTE(review): looks like a typo for "parent_column"; kept because it is
    // the externally visible column name.
    private static final String COLUMN_CHILD_PAREN_COLUMN = "paren_column";
    public static final String PREFIX_CONFIG = "C";
    public static final String PREFIX_METADATA = "M";

    public DbleTable() {
        super(TABLE_NAME, 5);
    }

    @Override
    protected void initColumnAndType() {
        columns.put(COLUMN_ID, new ColumnMeta(COLUMN_ID, "varchar(64)", false, true));
        columnsType.put(COLUMN_ID, Fields.FIELD_TYPE_VAR_STRING);
        columns.put(COLUMN_NAME, new ColumnMeta(COLUMN_NAME, "varchar(64)", false));
        columnsType.put(COLUMN_NAME, Fields.FIELD_TYPE_VAR_STRING);
        columns.put(COLUMN_SCHEMA, new ColumnMeta(COLUMN_SCHEMA, "varchar(64)", false));
        columnsType.put(COLUMN_SCHEMA, Fields.FIELD_TYPE_VAR_STRING);
        columns.put(COLUMN_MAX_LIMIT, new ColumnMeta(COLUMN_MAX_LIMIT, "int(11)", true));
        columnsType.put(COLUMN_MAX_LIMIT, Fields.FIELD_TYPE_LONG);
        columns.put(COLUMN_TYPE, new ColumnMeta(COLUMN_TYPE, "varchar(10)", false));
        columnsType.put(COLUMN_TYPE, Fields.FIELD_TYPE_VAR_STRING);
    }

    @Override
    protected List<LinkedHashMap<String, String>> getRows() {
        return getTableByType(null);
    }

    /**
     * Builds the rows for this table or one of its specialised views.
     *
     * @param tableType when non-null, only tables of this type are returned
     *                  and each row carries that view's type-specific columns;
     *                  when null, every configured table plus all
     *                  metadata-only tables are returned with the generic
     *                  columns
     * @return ordered row maps (schemas in name order, tables by id)
     */
    protected static List<LinkedHashMap<String, String>> getTableByType(TableType tableType) {
        List<LinkedHashMap<String, String>> rowList = Lists.newLinkedList();
        // "<schema>-<table>" keys already emitted; HashSet replaces the former
        // List so duplicate checks are O(1).
        Set<String> seen = new HashSet<>();
        //config
        TreeMap<String, SchemaConfig> schemaMap = new TreeMap<>(DbleServer.getInstance().getConfig().getSchemas());
        for (Map.Entry<String, SchemaConfig> schemaEntry : schemaMap.entrySet()) {
            SchemaConfig schemaConfig = schemaEntry.getValue();
            schemaConfig.getTables().entrySet().stream().
                    filter(t -> (null == tableType || tableType.equals(distinguishType(t.getValue()))) &&
                            !seen.contains(schemaConfig.getName() + "-" + t.getKey())).
                    sorted((a, b) -> Integer.compare(a.getValue().getId(), b.getValue().getId())).
                    forEach(t -> {
                        BaseTableConfig tableConfig = t.getValue();
                        rowList.add(initMap(schemaConfig, tableConfig, tableType, null, schemaMap));
                        seen.add(schemaConfig.getName() + "-" + tableConfig.getName());
                    });
        }
        if (null != tableType) {
            return rowList;
        }
        //metadata-no sharding: tables present in metadata but absent from config
        List<TableMeta> metaOnly = new ArrayList<>();
        Map<String, SchemaMeta> schemaMetaMap = ProxyMeta.getInstance().getTmManager().getCatalogs();
        for (Map.Entry<String, SchemaMeta> metaEntry : schemaMetaMap.entrySet()) {
            String schemaName = metaEntry.getKey();
            metaEntry.getValue().getTableMetas().entrySet().stream().
                    filter(p -> !seen.contains(schemaName + "-" + p.getKey())).
                    forEach(p -> metaOnly.add(new TableMeta(p.getValue().getId(), schemaName, p.getKey())));
        }
        metaOnly.sort(Comparator.comparingInt(TableMeta::getId));
        for (TableMeta tableMeta : metaOnly) {
            rowList.add(initMap(null, null, null, tableMeta, null));
            seen.add(tableMeta.getSchemaName() + "-" + tableMeta.getTableName());
        }
        return rowList;
    }

    /**
     * Builds one row from exactly one source: config
     * ({@code schemaConfig} + {@code baseTableConfig}) or metadata
     * ({@code tableMeta}).
     *
     * @param schemaConfig    owning schema; null for metadata rows
     * @param baseTableConfig table config; null for metadata rows
     * @param tableType       non-null selects a specialised sub-table layout
     * @param tableMeta       metadata source; null for config rows
     * @param schemaMap       all schemas, used to resolve a child's parent id
     * @return ordered column-name to value map for the row
     */
    private static LinkedHashMap<String, String> initMap(SchemaConfig schemaConfig, BaseTableConfig baseTableConfig, TableType tableType, TableMeta tableMeta, TreeMap<String, SchemaConfig> schemaMap) {
        LinkedHashMap<String, String> map = Maps.newLinkedHashMap();
        if (null == tableMeta && null != baseTableConfig && null != schemaConfig) {
            //config
            map.put(COLUMN_ID, PREFIX_CONFIG + baseTableConfig.getId());
            switch (tableType == null ? TableType.NO_SHARDING : tableType) {
                case GLOBAL:
                    GlobalTableConfig globalTableConfig = (GlobalTableConfig) baseTableConfig;
                    map.put(COLUMN_GLOBAL_CHECK, String.valueOf(globalTableConfig.isGlobalCheck()));
                    map.put(COLUMN_GLOBAL_CHECK_CLASS, globalTableConfig.getCheckClass());
                    map.put(COLUMN_GLOBAL_CRON, globalTableConfig.getCron());
                    break;
                case SHARDING:
                    ShardingTableConfig shardingTableConfig = (ShardingTableConfig) baseTableConfig;
                    map.put(COLUMN_SHARDING_INCREMENT_COLUMN, shardingTableConfig.getIncrementColumn());
                    map.put(COLUMN_SHARDING_COLUMN, shardingTableConfig.getShardingColumn());
                    map.put(COLUMN_SHARDING_SQL_REQUIRED_SHARDING, String.valueOf(shardingTableConfig.isSqlRequiredSharding()));
                    map.put(COLUMN_SHARDING_ALGORITHM_NAME, shardingTableConfig.getFunction().getName());
                    break;
                case CHILD:
                    ChildTableConfig childTableConfig = (ChildTableConfig) baseTableConfig;
                    BaseTableConfig parentTableConfig = findBySchemaATable(schemaMap, schemaConfig.getName(), childTableConfig.getParentTC().getName());
                    map.put(COLUMN_CHILD_PARENT_ID, null != parentTableConfig ? PREFIX_CONFIG + parentTableConfig.getId() : null);
                    map.put(COLUMN_CHILD_INCREMENT_COLUMN, childTableConfig.getIncrementColumn());
                    map.put(COLUMN_CHILD_JOIN_COLUMN, childTableConfig.getJoinColumn());
                    map.put(COLUMN_CHILD_PAREN_COLUMN, childTableConfig.getParentColumn());
                    break;
                case NO_SHARDING:
                case SINGLE:
                    // Generic dble_table layout (the tableType == null path).
                    map.put(COLUMN_NAME, baseTableConfig.getName());
                    map.put(COLUMN_SCHEMA, schemaConfig.getName());
                    map.put(COLUMN_MAX_LIMIT, String.valueOf(baseTableConfig.getMaxLimit()));
                    map.put(COLUMN_TYPE, String.valueOf(distinguishType(baseTableConfig)));
                    break;
                default:
                    break;
            }
        } else if (null != tableMeta) {
            //metadata
            map.put(COLUMN_ID, PREFIX_METADATA + tableMeta.getId());
            map.put(COLUMN_NAME, tableMeta.getTableName());
            map.put(COLUMN_SCHEMA, tableMeta.getSchemaName());
            // Guard: a schema seen in metadata may be absent from the current
            // config; the previous code would have thrown a NullPointerException.
            SchemaConfig metaSchema = DbleServer.getInstance().getConfig().getSchemas().get(tableMeta.getSchemaName());
            BaseTableConfig tableConfig = null == metaSchema ? null : metaSchema.getTable(tableMeta.getTableName());
            map.put(COLUMN_TYPE, String.valueOf(distinguishType(tableConfig)));
        }
        return map;
    }

    /**
     * Resolves a table config by schema and table name, or returns null when
     * either is missing. Fixes the previous implementation, which called
     * Optional.get() on a possibly empty Optional and therefore threw
     * NoSuchElementException (making its own null checks unreachable) instead
     * of returning null.
     */
    private static BaseTableConfig findBySchemaATable(TreeMap<String, SchemaConfig> schemaMap, String schemaName, String tableName) {
        SchemaConfig schemaConfig = schemaMap.get(schemaName);
        if (null == schemaConfig) {
            return null;
        }
        return schemaConfig.getTables().get(tableName);
    }

    /**
     * Maps a table config to its logical type; a null config (table known only
     * from metadata) counts as NO_SHARDING.
     */
    protected static TableType distinguishType(BaseTableConfig tableConfig) {
        if (tableConfig == null) {
            return TableType.NO_SHARDING;
        } else if (tableConfig instanceof GlobalTableConfig) {
            return TableType.GLOBAL;
        } else if (tableConfig instanceof ChildTableConfig) {
            return TableType.CHILD;
        } else if (tableConfig instanceof SingleTableConfig) {
            return TableType.SINGLE;
        } else {
            return TableType.SHARDING;
        }
    }

    public enum TableType {
        GLOBAL, SHARDING, CHILD, NO_SHARDING, SINGLE
    }
}
@@ -0,0 +1,69 @@
/*
* Copyright (C) 2016-2020 ActionTech.
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
*/
package com.actiontech.dble.services.manager.information.tables;
import com.actiontech.dble.DbleServer;
import com.actiontech.dble.config.Fields;
import com.actiontech.dble.config.model.sharding.SchemaConfig;
import com.actiontech.dble.config.model.sharding.table.BaseTableConfig;
import com.actiontech.dble.meta.ColumnMeta;
import com.actiontech.dble.services.manager.information.ManagerBaseTable;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Manager information table {@code dble_table_sharding_node}: one row per
 * (table, sharding node) pair, with the node's position within the table's
 * sharding-node list.
 */
public class DbleTableShardingNode extends ManagerBaseTable {
    private static final String TABLE_NAME = "dble_table_sharding_node";
    private static final String COLUMN_ID = "id";
    private static final String COLUMN_SHARDING_NODE = "sharding_node";
    private static final String COLUMN_ORDER = "order";

    public DbleTableShardingNode() {
        super(TABLE_NAME, 3);
    }

    @Override
    protected void initColumnAndType() {
        columns.put(COLUMN_ID, new ColumnMeta(COLUMN_ID, "varchar(64)", false, true));
        columnsType.put(COLUMN_ID, Fields.FIELD_TYPE_VAR_STRING);
        columns.put(COLUMN_SHARDING_NODE, new ColumnMeta(COLUMN_SHARDING_NODE, "varchar(32)", false));
        columnsType.put(COLUMN_SHARDING_NODE, Fields.FIELD_TYPE_VAR_STRING);
        columns.put(COLUMN_ORDER, new ColumnMeta(COLUMN_ORDER, "int(11)", false));
        columnsType.put(COLUMN_ORDER, Fields.FIELD_TYPE_LONG);
    }

    @Override
    protected List<LinkedHashMap<String, String>> getRows() {
        List<LinkedHashMap<String, String>> rowList = Lists.newLinkedList();
        List<String> emitted = Lists.newArrayList();
        TreeMap<String, SchemaConfig> schemaMap = new TreeMap<>(DbleServer.getInstance().getConfig().getSchemas());
        for (SchemaConfig schemaConfig : schemaMap.values()) {
            // Tables of a schema are listed in ascending config id order.
            List<BaseTableConfig> sortedTables = Lists.newArrayList(schemaConfig.getTables().values());
            sortedTables.sort((a, b) -> Integer.compare(a.getId(), b.getId()));
            for (BaseTableConfig tableConfig : sortedTables) {
                String id = DbleTable.PREFIX_CONFIG + tableConfig.getId();
                int order = 0; // position within this table's node list
                for (String node : tableConfig.getShardingNodes()) {
                    String key = id + "-" + node;
                    if (emitted.contains(key)) {
                        continue; // a node listed twice for a table is emitted only once
                    }
                    LinkedHashMap<String, String> row = Maps.newLinkedHashMap();
                    row.put(COLUMN_ID, id);
                    row.put(COLUMN_SHARDING_NODE, node);
                    row.put(COLUMN_ORDER, String.valueOf(order++));
                    rowList.add(row);
                    emitted.add(key);
                }
            }
        }
        return rowList;
    }
}