现在在 MySQL 上可以正常分表,迁移到 openGauss 后一直报“表不存在”(table does not exist)错误,可能是什么原因导致的呢?
可以看下这篇博客: 如何利用shardingSphere-proxy搭建openGauss分布式环境 | openGauss社区
关注以下配置文件:
config-sharding.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################################################################################################
#
# Here you can configure the rules for the proxy.
# This example is configuration of sharding rule.
#
######################################################################################################
#
# Logical database exposed by ShardingSphere-Proxy; clients connect to this name.
databaseName: test_db

# Physical openGauss data sources. The proxy uses the PostgreSQL wire protocol /
# JDBC URL here (jdbc:postgresql://...) to reach openGauss, as in the referenced
# blog setup. NOTE(review): if the opengauss JDBC driver is on the proxy's
# classpath instead, the URL prefix would be jdbc:opengauss — confirm which
# driver jar is deployed under the proxy's lib directory.
dataSources:
  ds_0:
    # NOTE(review): host is a placeholder (x.x.x.x) — fill in the real node address.
    url: jdbc:postgresql://x.x.x.x:5432/test_db
    username: gaussdb
    password: test@123
    # Aligned with ds_1 (original had 300000/600000 — an apparent extra-zero
    # typo; a 5-minute connection timeout hides connectivity failures).
    connectionTimeoutMilliseconds: 30000
    idleTimeoutMilliseconds: 60000
    maxLifetimeMilliseconds: 1800000
    # Very large pool — presumably sized for a sysbench benchmark run;
    # verify the openGauss max_connections setting can accommodate it.
    maxPoolSize: 4200
    minPoolSize: 50
  ds_1:
    url: jdbc:postgresql://192.168.0.182:15400/test_db
    username: gaussdb
    password: test@123
    connectionTimeoutMilliseconds: 30000
    idleTimeoutMilliseconds: 60000
    maxLifetimeMilliseconds: 1800000
    maxPoolSize: 4200
    minPoolSize: 50
# Sharding rule: 64 logical sysbench tables, each mapped to exactly one
# physical table — sbtest1..sbtest32 on ds_0, sbtest33..sbtest64 on ds_1.
# No table-level sharding strategy is declared, so the proxy routes each
# logical table straight to its single actual data node.
#
# NOTE(review): a "table does not exist" error from the proxy usually means
# the physical table is missing (or in a different schema / different case)
# in the openGauss database named in actualDataNodes — verify the sbtestN
# tables actually exist in test_db on the listed data source, in the schema
# the connecting user resolves by default.
rules:
- !SHARDING
  tables:
    sbtest1:
      actualDataNodes: ds_0.sbtest1
    sbtest2:
      actualDataNodes: ds_0.sbtest2
    sbtest3:
      actualDataNodes: ds_0.sbtest3
    sbtest4:
      actualDataNodes: ds_0.sbtest4
    sbtest5:
      actualDataNodes: ds_0.sbtest5
    sbtest6:
      actualDataNodes: ds_0.sbtest6
    sbtest7:
      actualDataNodes: ds_0.sbtest7
    sbtest8:
      actualDataNodes: ds_0.sbtest8
    sbtest9:
      actualDataNodes: ds_0.sbtest9
    sbtest10:
      actualDataNodes: ds_0.sbtest10
    sbtest11:
      actualDataNodes: ds_0.sbtest11
    sbtest12:
      actualDataNodes: ds_0.sbtest12
    sbtest13:
      actualDataNodes: ds_0.sbtest13
    sbtest14:
      actualDataNodes: ds_0.sbtest14
    sbtest15:
      actualDataNodes: ds_0.sbtest15
    sbtest16:
      actualDataNodes: ds_0.sbtest16
    sbtest17:
      actualDataNodes: ds_0.sbtest17
    sbtest18:
      actualDataNodes: ds_0.sbtest18
    sbtest19:
      actualDataNodes: ds_0.sbtest19
    sbtest20:
      actualDataNodes: ds_0.sbtest20
    sbtest21:
      actualDataNodes: ds_0.sbtest21
    sbtest22:
      actualDataNodes: ds_0.sbtest22
    sbtest23:
      actualDataNodes: ds_0.sbtest23
    sbtest24:
      actualDataNodes: ds_0.sbtest24
    sbtest25:
      actualDataNodes: ds_0.sbtest25
    sbtest26:
      actualDataNodes: ds_0.sbtest26
    sbtest27:
      actualDataNodes: ds_0.sbtest27
    sbtest28:
      actualDataNodes: ds_0.sbtest28
    sbtest29:
      actualDataNodes: ds_0.sbtest29
    sbtest30:
      actualDataNodes: ds_0.sbtest30
    sbtest31:
      actualDataNodes: ds_0.sbtest31
    sbtest32:
      actualDataNodes: ds_0.sbtest32
    sbtest33:
      actualDataNodes: ds_1.sbtest33
    sbtest34:
      actualDataNodes: ds_1.sbtest34
    sbtest35:
      actualDataNodes: ds_1.sbtest35
    sbtest36:
      actualDataNodes: ds_1.sbtest36
    sbtest37:
      actualDataNodes: ds_1.sbtest37
    sbtest38:
      actualDataNodes: ds_1.sbtest38
    sbtest39:
      actualDataNodes: ds_1.sbtest39
    sbtest40:
      actualDataNodes: ds_1.sbtest40
    sbtest41:
      actualDataNodes: ds_1.sbtest41
    sbtest42:
      actualDataNodes: ds_1.sbtest42
    sbtest43:
      actualDataNodes: ds_1.sbtest43
    sbtest44:
      actualDataNodes: ds_1.sbtest44
    sbtest45:
      actualDataNodes: ds_1.sbtest45
    sbtest46:
      actualDataNodes: ds_1.sbtest46
    sbtest47:
      actualDataNodes: ds_1.sbtest47
    sbtest48:
      actualDataNodes: ds_1.sbtest48
    sbtest49:
      actualDataNodes: ds_1.sbtest49
    sbtest50:
      actualDataNodes: ds_1.sbtest50
    sbtest51:
      actualDataNodes: ds_1.sbtest51
    sbtest52:
      actualDataNodes: ds_1.sbtest52
    sbtest53:
      actualDataNodes: ds_1.sbtest53
    sbtest54:
      actualDataNodes: ds_1.sbtest54
    sbtest55:
      actualDataNodes: ds_1.sbtest55
    sbtest56:
      actualDataNodes: ds_1.sbtest56
    sbtest57:
      actualDataNodes: ds_1.sbtest57
    sbtest58:
      actualDataNodes: ds_1.sbtest58
    sbtest59:
      actualDataNodes: ds_1.sbtest59
    sbtest60:
      actualDataNodes: ds_1.sbtest60
    sbtest61:
      actualDataNodes: ds_1.sbtest61
    sbtest62:
      actualDataNodes: ds_1.sbtest62
    sbtest63:
      actualDataNodes: ds_1.sbtest63
    sbtest64:
      actualDataNodes: ds_1.sbtest64
# keyGenerators:
# snowflake:
# type: SNOWFLAKE
#
# scalingName: default_scaling
# scaling:
# default_scaling:
# input:
# workerThread: 40
# batchSize: 1000
# output:
# workerThread: 40
# batchSize: 1000
# streamChannel:
# type: MEMORY
# props:
# block-queue-size: 10000
# completionDetector:
# type: IDLE
# props:
# incremental-task-idle-seconds-threshold: 1800
# dataConsistencyChecker:
# type: DATA_MATCH
# props:
# chunk-size: 1000
######################################################################################################
#
# If you want to connect to MySQL, you should manually copy MySQL driver to lib directory.
#
######################################################################################################
#databaseName: sharding_db
#
#dataSources:
# ds_0:
# url: jdbc:mysql://127.0.0.1:3306/demo_ds_0?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# ds_1:
# url: jdbc:mysql://127.0.0.1:3306/demo_ds_1?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !SHARDING
# tables:
# t_order:
# actualDataNodes: ds_${0..1}.t_order_${0..1}
# tableStrategy:
# standard:
# shardingColumn: order_id
# shardingAlgorithmName: t_order_inline
# keyGenerateStrategy:
# column: order_id
# keyGeneratorName: snowflake
# t_order_item:
# actualDataNodes: ds_${0..1}.t_order_item_${0..1}
# tableStrategy:
# standard:
# shardingColumn: order_id
# shardingAlgorithmName: t_order_item_inline
# keyGenerateStrategy:
# column: order_item_id
# keyGeneratorName: snowflake
# bindingTables:
# - t_order,t_order_item
# defaultDatabaseStrategy:
# standard:
# shardingColumn: user_id
# shardingAlgorithmName: database_inline
# defaultTableStrategy:
# none:
#
# shardingAlgorithms:
# database_inline:
# type: INLINE
# props:
# algorithm-expression: ds_${user_id % 2}
# t_order_inline:
# type: INLINE
# props:
# algorithm-expression: t_order_${order_id % 2}
# t_order_item_inline:
# type: INLINE
# props:
# algorithm-expression: t_order_item_${order_id % 2}
#
# keyGenerators:
# snowflake:
# type: SNOWFLAKE
#
# scalingName: default_scaling
# scaling:
# default_scaling:
# input:
# workerThread: 40
# batchSize: 1000
# output:
# workerThread: 40
# batchSize: 1000
# streamChannel:
# type: MEMORY
# props:
# block-queue-size: 10000
# completionDetector:
# type: IDLE
# props:
# incremental-task-idle-seconds-threshold: 1800
# dataConsistencyChecker:
# type: DATA_MATCH
# props:
# chunk-size: 1000
config-sharding.yaml 文件中配置 ShardingSphere-Proxy 连接 openGauss 的连接信息和分库分表策略。上面配置的是双节点架构:一个逻辑数据库,每个节点 32 张表,且每张逻辑表只映射到单一数据节点,未声明分片算法。该配置策略有优化空间:例如可参考文件下方注释中的 MySQL 示例,用行表达式(INLINE)分片算法配合 actualDataNodes 的 `ds_${...}` 表达式,替代逐表手写的 64 条映射,既减少配置量也便于扩展。