未验证 提交 60aea13c 编写于 作者: 邱鹿 Lucas 提交者: GitHub

rename `datasource` to `dataSource` and `destination` to `target` (#7587)

* rename `datasource` to `dataSource` and rename `destination` to `target`

* rename `datasource` to `dataSource` and rename `destination` to `target`

* rename `datasource` to `dataSource` and rename `destination` to `target`

Co-authored-by: qiulu3 <Lucas209910>
上级 8b002772
......@@ -53,14 +53,14 @@ PostgreSQL 需要开启 [test_decoding](https://www.postgresql.org/docs/9.4/test
请求体:
| Parameter | Describe |
| 参数 | 描述 |
| ------------------------------------------------- | ------------------------------------------------------------ |
| ruleConfiguration.sourceDatasource | 源端sharding sphere数据源相关配置 |
| ruleConfiguration.sourceDataSource | 源端sharding sphere数据源相关配置 |
| ruleConfiguration.sourceRule | 源端sharding sphere表规则相关配置 |
| ruleConfiguration.destinationDataSources.name | 目标端sharding proxy名称 |
| ruleConfiguration.destinationDataSources.url | 目标端sharding proxy jdbc url |
| ruleConfiguration.destinationDataSources.username | 目标端sharding proxy用户名 |
| ruleConfiguration.destinationDataSources.password | 目标端sharding proxy密码 |
| ruleConfiguration.targetDataSources.name | 目标端sharding proxy名称 |
| ruleConfiguration.targetDataSources.url | 目标端sharding proxy jdbc url |
| ruleConfiguration.targetDataSources.username | 目标端sharding proxy用户名 |
| ruleConfiguration.targetDataSources.password | 目标端sharding proxy密码 |
| jobConfiguration.concurrency | 迁移并发度,举例:如果设置为3,则待迁移的表将会有三个线程同时对该表进行迁移,前提是该表有整数型主键 |
示例:
......@@ -71,20 +71,20 @@ curl -X POST \
-H 'content-type: application/json' \
-d '{
"ruleConfiguration": {
"sourceDatasource":"
"sourceDataSource":"
dataSources:
ds_0:
dataSourceClassName: com.zaxxer.hikari.HikariDataSource
props:
driverClassName: com.mysql.jdbc.Driver
jdbcUrl: jdbc:mysql://127.0.0.1:3306/scaling?useSSL=false
jdbcUrl: jdbc:mysql://127.0.0.1:3306/scaling_0?useSSL=false
username: scaling
password: scaling
ds_1:
dataSourceClassName: com.zaxxer.hikari.HikariDataSource
props:
driverClassName: com.mysql.jdbc.Driver
jdbcUrl: jdbc:mysql://127.0.0.1:3306/scaling?useSSL=false
jdbcUrl: jdbc:mysql://127.0.0.1:3306/scaling_1?useSSL=false
username: scaling
password: scaling
",
......@@ -113,14 +113,13 @@ curl -X POST \
props:
algorithm-expression: t_order_$->{user_id % 2}
",
"destinationDataSources":{
"targetDataSources":{
"username":"root",
"password":"root",
"url":"jdbc:mysql://127.0.0.1:3307/sharding_db?serverTimezone=UTC&useSSL=false"
}
},
"jobConfiguration":{
"jobName": "jobName",
"concurrency":"3"
}
}'
......@@ -232,7 +231,7 @@ curl -X GET \
请求体:
| Parameter | Describe |
| 参数 | 描述 |
| --------- | -------- |
| jobId | job id |
......
......@@ -12,7 +12,7 @@ JAVA,JDK 1.8+.
The migration scene we support:
| Source | Destination | Support |
| Source | Target | Support |
| -------------------------- | -------------------- | ------- |
| MySQL(5.1.15 ~ 5.7.x) | ShardingSphere-Proxy | Yes |
| PostgreSQL(9.4 ~ ) | ShardingSphere-Proxy | Yes |
......@@ -55,12 +55,12 @@ Body:
| Parameter | Describe |
|---------------------------------------------------|-------------------------------------------------|
| ruleConfiguration.sourceDatasource | source sharding sphere data source configuration |
| ruleConfiguration.sourceDataSource | source sharding sphere data source configuration |
| ruleConfiguration.sourceRule | source sharding sphere table rule configuration |
| ruleConfiguration.destinationDataSources.name | destination sharding proxy name |
| ruleConfiguration.destinationDataSources.url | destination sharding proxy jdbc url |
| ruleConfiguration.destinationDataSources.username | destination sharding proxy username |
| ruleConfiguration.destinationDataSources.password | destination sharding proxy password |
| ruleConfiguration.targetDataSources.name | target sharding proxy name |
| ruleConfiguration.targetDataSources.url | target sharding proxy jdbc url |
| ruleConfiguration.targetDataSources.username | target sharding proxy username |
| ruleConfiguration.targetDataSources.password | target sharding proxy password |
| jobConfiguration.concurrency | sync task proposed concurrency |
Example:
......@@ -71,20 +71,20 @@ curl -X POST \
-H 'content-type: application/json' \
-d '{
"ruleConfiguration": {
"sourceDatasource":"
"sourceDataSource":"
dataSources:
ds_0:
dataSourceClassName: com.zaxxer.hikari.HikariDataSource
props:
driverClassName: com.mysql.jdbc.Driver
jdbcUrl: jdbc:mysql://127.0.0.1:3306/scaling?useSSL=false
jdbcUrl: jdbc:mysql://127.0.0.1:3306/scaling_0?useSSL=false
username: scaling
password: scaling
ds_1:
dataSourceClassName: com.zaxxer.hikari.HikariDataSource
props:
driverClassName: com.mysql.jdbc.Driver
jdbcUrl: jdbc:mysql://127.0.0.1:3306/scaling?useSSL=false
jdbcUrl: jdbc:mysql://127.0.0.1:3306/scaling_1?useSSL=false
username: scaling
password: scaling
",
......@@ -113,14 +113,13 @@ curl -X POST \
props:
algorithm-expression: t_order_$->{user_id % 2}
",
"destinationDataSources":{
"targetDataSources":{
"username":"root",
"password":"root",
"url":"jdbc:mysql://127.0.0.1:3307/sharding_db?serverTimezone=UTC&useSSL=false"
}
},
"jobConfiguration":{
"jobName": "jobName",
"concurrency":"3"
}
}'
......
......@@ -17,9 +17,9 @@
{
"ruleConfiguration": {
"sourceDatasource":"dataSources:\n ds_0:\n dataSourceClassName: com.zaxxer.hikari.HikariDataSource\n props:\n jdbcUrl: jdbc:h2:mem:test_db_2;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=false;MODE=MySQL\n username: root\n password: '123456'\n connectionTimeout: 30000\n idleTimeout: 60000\n maxLifetime: 1800000\n maxPoolSize: 50\n minPoolSize: 1\n maintenanceIntervalMilliseconds: 30000\n readOnly: false\n",
"sourceDataSource":"dataSources:\n ds_0:\n dataSourceClassName: com.zaxxer.hikari.HikariDataSource\n props:\n jdbcUrl: jdbc:h2:mem:test_db_2;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=false;MODE=MySQL\n username: root\n password: '123456'\n connectionTimeout: 30000\n idleTimeout: 60000\n maxLifetime: 1800000\n maxPoolSize: 50\n minPoolSize: 1\n maintenanceIntervalMilliseconds: 30000\n readOnly: false\n",
"sourceRule": "rules:\n- !SHARDING\n defaultDatabaseStrategy:\n standard:\n shardingAlgorithmName: inline\n shardingColumn: user_id\n tables:\n t1:\n actualDataNodes: ds_0.t1\n keyGenerateStrategy:\n column: order_id\n logicTable: t1\n tableStrategy:\n standard:\n shardingAlgorithmName: inline\n shardingColumn: order_id\n t2:\n actualDataNodes: ds_0.t2\n keyGenerateStrategy:\n column: order_item_id\n logicTable: t2\n tableStrategy:\n standard:\n shardingAlgorithmName: inline\n shardingColumn: order_id",
"destinationDataSources": {
"targetDataSources": {
"name": "dt_0",
"url": "jdbc:h2:mem:test_db_2;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=false;MODE=MySQL",
"username": "root",
......
......@@ -62,10 +62,10 @@ public abstract class AbstractDataConsistencyChecker implements DataConsistencyC
private DataConsistencyCheckResult countCheck(final String table) {
try (DataSourceWrapper sourceDataSource = getSourceDataSource();
DataSourceWrapper destinationDataSource = getDestinationDataSource()) {
DataSourceWrapper targetDataSource = getTargetDataSource()) {
long sourceCount = count(sourceDataSource, table);
long destinationCount = count(destinationDataSource, table);
return new DataConsistencyCheckResult(sourceCount, destinationCount);
long targetCount = count(targetDataSource, table);
return new DataConsistencyCheckResult(sourceCount, targetCount);
} catch (IOException ex) {
throw new DataCheckFailException(String.format("table %s count check failed.", table), ex);
}
......@@ -85,7 +85,7 @@ public abstract class AbstractDataConsistencyChecker implements DataConsistencyC
protected DataSourceWrapper getSourceDataSource() {
try {
Map<String, DataSource> dataSourceMap = DataSourceConverter.getDataSourceMap(
ConfigurationYamlConverter.loadDataSourceConfigurations(shardingScalingJob.getScalingConfiguration().getRuleConfiguration().getSourceDatasource()));
ConfigurationYamlConverter.loadDataSourceConfigurations(shardingScalingJob.getScalingConfiguration().getRuleConfiguration().getSourceDataSource()));
ShardingRuleConfiguration ruleConfiguration = ConfigurationYamlConverter.loadShardingRuleConfiguration(shardingScalingJob.getScalingConfiguration().getRuleConfiguration().getSourceRule());
return new DataSourceWrapper(ShardingSphereDataSourceFactory.createDataSource(dataSourceMap, Lists.newArrayList(ruleConfiguration), null));
} catch (SQLException ex) {
......@@ -93,8 +93,8 @@ public abstract class AbstractDataConsistencyChecker implements DataConsistencyC
}
}
protected DataSourceWrapper getDestinationDataSource() {
RuleConfiguration.YamlDataSourceParameter parameter = shardingScalingJob.getScalingConfiguration().getRuleConfiguration().getDestinationDataSources();
protected DataSourceWrapper getTargetDataSource() {
RuleConfiguration.YamlDataSourceParameter parameter = shardingScalingJob.getScalingConfiguration().getRuleConfiguration().getTargetDataSources();
return new DataSourceWrapper(new DataSourceFactory().newInstance(new JDBCDataSourceConfiguration(parameter.getUrl(), parameter.getUsername(), parameter.getPassword())));
}
......
......@@ -29,15 +29,15 @@ public final class DataConsistencyCheckResult {
private final long sourceCount;
private final long destinationCount;
private final long targetCount;
private final boolean countValid;
private boolean dataValid;
public DataConsistencyCheckResult(final long sourceCount, final long destinationCount) {
public DataConsistencyCheckResult(final long sourceCount, final long targetCount) {
this.sourceCount = sourceCount;
this.destinationCount = destinationCount;
countValid = sourceCount == destinationCount;
this.targetCount = targetCount;
countValid = sourceCount == targetCount;
}
}
......@@ -33,9 +33,9 @@ public interface DataSourceConfiguration {
DatabaseType getDatabaseType();
/**
* Get datasource metadata.
* Get data source metadata.
*
* @return datasource metadata.
* @return data source metadata.
*/
DataSourceMetaData getDataSourceMetaData();
}
......@@ -27,11 +27,11 @@ import lombok.Setter;
@Getter
public final class RuleConfiguration {
private String sourceDatasource;
private String sourceDataSource;
private String sourceRule;
private YamlDataSourceParameter destinationDataSources;
private YamlDataSourceParameter targetDataSources;
@Setter
@Getter
......
......@@ -42,27 +42,27 @@ public final class DataSourceManager implements AutoCloseable {
private final Map<DataSourceConfiguration, DataSourceWrapper> cachedDataSources = new ConcurrentHashMap<>();
@Getter
private final Map<DataSourceConfiguration, DataSourceWrapper> sourceDatasources = new ConcurrentHashMap<>();
private final Map<DataSourceConfiguration, DataSourceWrapper> sourceDataSources = new ConcurrentHashMap<>();
public DataSourceManager(final List<SyncConfiguration> syncConfigs) {
createDatasources(syncConfigs);
createDataSources(syncConfigs);
}
private void createDatasources(final List<SyncConfiguration> syncConfigs) {
createSourceDatasources(syncConfigs);
createTargetDatasources(syncConfigs.iterator().next().getImporterConfiguration().getDataSourceConfiguration());
private void createDataSources(final List<SyncConfiguration> syncConfigs) {
createSourceDataSources(syncConfigs);
createTargetDataSources(syncConfigs.iterator().next().getImporterConfiguration().getDataSourceConfiguration());
}
private void createSourceDatasources(final List<SyncConfiguration> syncConfigs) {
private void createSourceDataSources(final List<SyncConfiguration> syncConfigs) {
for (SyncConfiguration syncConfiguration : syncConfigs) {
DataSourceConfiguration dataSourceConfig = syncConfiguration.getDumperConfiguration().getDataSourceConfiguration();
DataSourceWrapper dataSource = dataSourceFactory.newInstance(dataSourceConfig);
cachedDataSources.put(dataSourceConfig, dataSource);
sourceDatasources.put(dataSourceConfig, dataSource);
sourceDataSources.put(dataSourceConfig, dataSource);
}
}
private void createTargetDatasources(final DataSourceConfiguration dataSourceConfig) {
private void createTargetDataSources(final DataSourceConfiguration dataSourceConfig) {
cachedDataSources.put(dataSourceConfig, dataSourceFactory.newInstance(dataSourceConfig));
}
......@@ -99,6 +99,6 @@ public final class DataSourceManager implements AutoCloseable {
}
}
cachedDataSources.clear();
sourceDatasources.clear();
sourceDataSources.clear();
}
}
......@@ -66,7 +66,7 @@ public final class ShardingScalingJobPreparer {
public void prepare(final ShardingScalingJob shardingScalingJob) {
String databaseType = shardingScalingJob.getSyncConfigurations().get(0).getDumperConfiguration().getDataSourceConfiguration().getDatabaseType().getName();
try (DataSourceManager dataSourceManager = new DataSourceManager(shardingScalingJob.getSyncConfigurations())) {
checkDatasources(databaseType, dataSourceManager);
checkDataSources(databaseType, dataSourceManager);
ResumeBreakPointManager resumeBreakPointManager = getResumeBreakPointManager(databaseType, shardingScalingJob);
if (resumeBreakPointManager.isResumable()) {
syncPositionResumer.resumePosition(shardingScalingJob, dataSourceManager, resumeBreakPointManager);
......@@ -86,11 +86,11 @@ public final class ShardingScalingJobPreparer {
return ResumeBreakPointManagerFactory.newInstance(databaseType, String.format("/%s/position/%d", shardingScalingJob.getJobName(), shardingScalingJob.getShardingItem()));
}
private void checkDatasources(final String databaseType, final DataSourceManager dataSourceManager) {
private void checkDataSources(final String databaseType, final DataSourceManager dataSourceManager) {
DataSourceChecker dataSourceChecker = DataSourceCheckerCheckerFactory.newInstanceDataSourceChecker(databaseType);
dataSourceChecker.checkConnection(dataSourceManager.getCachedDataSources().values());
dataSourceChecker.checkPrivilege(dataSourceManager.getSourceDatasources().values());
dataSourceChecker.checkVariable(dataSourceManager.getSourceDatasources().values());
dataSourceChecker.checkPrivilege(dataSourceManager.getSourceDataSources().values());
dataSourceChecker.checkVariable(dataSourceManager.getSourceDataSources().values());
}
private void initInventoryDataTasks(final ShardingScalingJob shardingScalingJob, final DataSourceManager dataSourceManager) {
......
......@@ -35,7 +35,7 @@ public abstract class AbstractDataSourceChecker implements DataSourceChecker {
each.getConnection().close();
}
} catch (final SQLException ex) {
throw new PrepareFailedException("Datasources can't connected!", ex);
throw new PrepareFailedException("Data sources can't connected!", ex);
}
}
}
......@@ -26,23 +26,23 @@ import java.util.Collection;
public interface DataSourceChecker {
/**
* Check datasource connections.
* Check data source connections.
*
* @param dataSources datasource connections
* @param dataSources data sources
*/
void checkConnection(Collection<? extends DataSource> dataSources);
/**
* Check user privileges.
*
* @param dataSources datasource connections
* @param dataSources data sources
*/
void checkPrivilege(Collection<? extends DataSource> dataSources);
/**
* Check datasource variables.
* Check data source variables.
*
* @param dataSources datasource connections
* @param dataSources data sources
*/
void checkVariable(Collection<? extends DataSource> dataSources);
}
......@@ -60,12 +60,12 @@ public final class SyncConfigurationUtil {
*/
public static Collection<SyncConfiguration> toSyncConfigurations(final ScalingConfiguration scalingConfiguration) {
Collection<SyncConfiguration> result = new LinkedList<>();
Map<String, DataSourceConfiguration> sourceDatasource = ConfigurationYamlConverter.loadDataSourceConfigurations(scalingConfiguration.getRuleConfiguration().getSourceDatasource());
Map<String, DataSourceConfiguration> sourceDataSource = ConfigurationYamlConverter.loadDataSourceConfigurations(scalingConfiguration.getRuleConfiguration().getSourceDataSource());
ShardingRuleConfiguration sourceRule = ConfigurationYamlConverter.loadShardingRuleConfiguration(scalingConfiguration.getRuleConfiguration().getSourceRule());
Map<String, Map<String, String>> dataSourceTableNameMap = toDataSourceTableNameMap(sourceRule, sourceDatasource.keySet());
Map<String, Map<String, String>> dataSourceTableNameMap = toDataSourceTableNameMap(sourceRule, sourceDataSource.keySet());
filterByShardingDataSourceTables(dataSourceTableNameMap, scalingConfiguration.getJobConfiguration());
for (Entry<String, Map<String, String>> entry : dataSourceTableNameMap.entrySet()) {
DumperConfiguration dumperConfiguration = createDumperConfiguration(entry.getKey(), sourceDatasource.get(entry.getKey()), entry.getValue());
DumperConfiguration dumperConfiguration = createDumperConfiguration(entry.getKey(), sourceDataSource.get(entry.getKey()), entry.getValue());
ImporterConfiguration importerConfiguration = createImporterConfiguration(scalingConfiguration, sourceRule);
importerConfiguration.setRetryTimes(scalingConfiguration.getJobConfiguration().getRetryTimes());
result.add(new SyncConfiguration(scalingConfiguration.getJobConfiguration().getConcurrency(), dumperConfiguration, importerConfiguration));
......@@ -163,9 +163,9 @@ public final class SyncConfigurationUtil {
private static ImporterConfiguration createImporterConfiguration(final ScalingConfiguration scalingConfiguration, final ShardingRuleConfiguration shardingRuleConfig) {
ImporterConfiguration result = new ImporterConfiguration();
JDBCDataSourceConfiguration importerDataSourceConfiguration = new JDBCDataSourceConfiguration(
scalingConfiguration.getRuleConfiguration().getDestinationDataSources().getUrl(),
scalingConfiguration.getRuleConfiguration().getDestinationDataSources().getUsername(),
scalingConfiguration.getRuleConfiguration().getDestinationDataSources().getPassword());
scalingConfiguration.getRuleConfiguration().getTargetDataSources().getUrl(),
scalingConfiguration.getRuleConfiguration().getTargetDataSources().getUsername(),
scalingConfiguration.getRuleConfiguration().getTargetDataSources().getPassword());
result.setDataSourceConfiguration(importerDataSourceConfiguration);
result.setShardingColumnsMap(toShardingColumnsMap(shardingRuleConfig));
return result;
......
......@@ -59,7 +59,7 @@ public final class AbstractDataConsistencyCheckerTest {
initTableData(shardingScalingJob.getSyncConfigurations().get(0).getImporterConfiguration().getDataSourceConfiguration());
Map<String, DataConsistencyCheckResult> resultMap = dataConsistencyChecker.countCheck();
assertTrue(resultMap.get("t1").isCountValid());
assertThat(resultMap.get("t1").getSourceCount(), is(resultMap.get("t1").getDestinationCount()));
assertThat(resultMap.get("t1").getSourceCount(), is(resultMap.get("t1").getTargetCount()));
}
@SneakyThrows(SQLException.class)
......
......@@ -17,9 +17,9 @@
{
"ruleConfiguration": {
"sourceDatasource": "dataSources:\n ds_0:\n dataSourceClassName: com.zaxxer.hikari.HikariDataSource\n props:\n jdbcUrl: jdbc:h2:mem:test_db_1;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=false;MODE=MySQL\n username: root\n password: 'password'\n connectionTimeout: 30000\n idleTimeout: 60000\n maxLifetime: 1800000\n maxPoolSize: 50\n minPoolSize: 1\n maintenanceIntervalMilliseconds: 30000\n readOnly: false\n",
"sourceDataSource": "dataSources:\n ds_0:\n dataSourceClassName: com.zaxxer.hikari.HikariDataSource\n props:\n jdbcUrl: jdbc:h2:mem:test_db_1;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=false;MODE=MySQL\n username: root\n password: 'password'\n connectionTimeout: 30000\n idleTimeout: 60000\n maxLifetime: 1800000\n maxPoolSize: 50\n minPoolSize: 1\n maintenanceIntervalMilliseconds: 30000\n readOnly: false\n",
"sourceRule": "rules:\n- !SHARDING\n defaultDatabaseStrategy:\n standard:\n shardingAlgorithmName: inline\n shardingColumn: user_id\n tables:\n t1:\n actualDataNodes: ds_0.t1\n keyGenerateStrategy:\n column: order_id\n logicTable: t1\n tableStrategy:\n standard:\n shardingAlgorithmName: inline\n shardingColumn: order_id\n t2:\n actualDataNodes: ds_0.t2\n keyGenerateStrategy:\n column: order_item_id\n logicTable: t2\n tableStrategy:\n standard:\n shardingAlgorithmName: inline\n shardingColumn: order_id",
"destinationDataSources": {
"targetDataSources": {
"name": "dt_0",
"url": "jdbc:h2:mem:test_db_2;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=false;MODE=MySQL",
"username": "root",
......
......@@ -60,8 +60,8 @@ public final class MySQLDataConsistencyChecker extends AbstractDataConsistencyCh
private boolean dataValid(final String actualTableName, final String logicTableName) {
try (DataSourceWrapper sourceDataSource = getSourceDataSource();
DataSourceWrapper destinationDataSource = getDestinationDataSource()) {
return getColumns(actualTableName).stream().allMatch(each -> sumCrc32(sourceDataSource, logicTableName, each) == sumCrc32(destinationDataSource, logicTableName, each));
DataSourceWrapper targetDataSource = getTargetDataSource()) {
return getColumns(actualTableName).stream().allMatch(each -> sumCrc32(sourceDataSource, logicTableName, each) == sumCrc32(targetDataSource, logicTableName, each));
} catch (IOException ex) {
throw new DataCheckFailException(String.format("table %s data check failed.", logicTableName), ex);
}
......
......@@ -67,9 +67,9 @@ public final class MySQLDataSourceChecker extends AbstractDataSourceChecker {
}
}
} catch (final SQLException ex) {
throw new PrepareFailedException("Source datasource check privileges failed.", ex);
throw new PrepareFailedException("Source data source check privileges failed.", ex);
}
throw new PrepareFailedException("Source datasource is lack of REPLICATION SLAVE, REPLICATION CLIENT ON *.* privileges.");
throw new PrepareFailedException("Source data source is lack of REPLICATION SLAVE, REPLICATION CLIENT ON *.* privileges.");
}
private boolean matchPrivileges(final String privilege) {
......@@ -89,7 +89,7 @@ public final class MySQLDataSourceChecker extends AbstractDataSourceChecker {
checkVariable(connection, entry);
}
} catch (final SQLException ex) {
throw new PrepareFailedException("Source datasource check variables failed.", ex);
throw new PrepareFailedException("Source data source check variables failed.", ex);
}
}
......@@ -99,7 +99,7 @@ public final class MySQLDataSourceChecker extends AbstractDataSourceChecker {
resultSet.next();
String value = resultSet.getString(2);
if (!entry.getValue().equalsIgnoreCase(value)) {
throw new PrepareFailedException(String.format("Source datasource required %s = %s, now is %s", entry.getKey(), entry.getValue(), value));
throw new PrepareFailedException(String.format("Source data source required %s = %s, now is %s", entry.getKey(), entry.getValue(), value));
}
}
}
......
......@@ -33,8 +33,6 @@ import java.sql.SQLException;
import java.util.Collection;
import java.util.LinkedList;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
......@@ -42,20 +40,20 @@ import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
public final class MySQLDataSourceCheckerTest {
@Mock
private Connection connection;
@Mock
private PreparedStatement preparedStatement;
@Mock
private ResultSet resultSet;
private Collection<DataSource> dataSources;
private MySQLDataSourceChecker dataSourceChecker;
@Before
public void setUp() throws SQLException {
DataSource dataSource = mock(DataSource.class);
......@@ -83,24 +81,16 @@ public final class MySQLDataSourceCheckerTest {
verify(preparedStatement, Mockito.times(1)).executeQuery();
}
@Test
@Test(expected = PrepareFailedException.class)
public void assertCheckPrivilegeLackPrivileges() throws SQLException {
when(resultSet.next()).thenReturn(false);
try {
dataSourceChecker.checkPrivilege(dataSources);
} catch (final PrepareFailedException ex) {
assertThat(ex.getMessage(), is("Source datasource is lack of REPLICATION SLAVE, REPLICATION CLIENT ON *.* privileges."));
}
dataSourceChecker.checkPrivilege(dataSources);
}
@Test
@Test(expected = PrepareFailedException.class)
public void assertCheckPrivilegeFailure() throws SQLException {
when(resultSet.next()).thenThrow(new SQLException(""));
try {
dataSourceChecker.checkPrivilege(dataSources);
} catch (final PrepareFailedException ex) {
assertThat(ex.getMessage(), is("Source datasource check privileges failed."));
}
dataSourceChecker.checkPrivilege(dataSources);
}
@Test
......@@ -111,24 +101,16 @@ public final class MySQLDataSourceCheckerTest {
verify(preparedStatement, Mockito.times(2)).executeQuery();
}
@Test
@Test(expected = PrepareFailedException.class)
public void assertCheckVariableWithWrongVariable() throws SQLException {
when(resultSet.next()).thenReturn(true, true);
when(resultSet.getString(2)).thenReturn("OFF", "ROW");
try {
dataSourceChecker.checkVariable(dataSources);
} catch (final PrepareFailedException ex) {
assertThat(ex.getMessage(), is("Source datasource required LOG_BIN = ON, now is OFF"));
}
dataSourceChecker.checkVariable(dataSources);
}
@Test
@Test(expected = PrepareFailedException.class)
public void assertCheckVariableFailure() throws SQLException {
when(resultSet.next()).thenThrow(new SQLException(""));
try {
dataSourceChecker.checkVariable(dataSources);
} catch (final PrepareFailedException ex) {
assertThat(ex.getMessage(), is("Source datasource check variables failed."));
}
dataSourceChecker.checkVariable(dataSources);
}
}
......@@ -41,13 +41,13 @@ public final class PostgreSQLDataSourceChecker extends AbstractDataSourceChecker
if (tables.next()) {
tableName = tables.getString(3);
} else {
throw new PrepareFailedException("No tables find in the source datasource");
throw new PrepareFailedException("No tables find in the source data source");
}
connection.prepareStatement(String.format("SELECT * FROM %s LIMIT 1", tableName)).executeQuery();
}
}
} catch (final SQLException ex) {
throw new PrepareFailedException("Datasources check failed!", ex);
throw new PrepareFailedException("Data sources privilege check failed!", ex);
}
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册