Deployment preparation
[root@rocky8 ~]# cat /etc/redhat-release
Rocky Linux release 8.7 (Green Obsidian)
[root@centos7 ~]# free -h
              total        used        free      shared  buff/cache   available
Mem:           1.8G        301M        1.3G        16M        227M        1.3G
Swap:          2.0G          0B        2.0G
[root@centos7 ~]# lscpu
Architecture:          x86_64
CPU op-mode(s):        32-bit, 64-bit
Byte Order:            Little Endian
CPU(s):                2
On-line CPU(s) list:   0,1
Thread(s) per core:    1
Core(s) per socket:    2
Socket(s):             1
NUMA node(s):          1
Vendor ID:             GenuineIntel
CPU family:            6
Model:                 183
Model name:            13th Gen Intel(R) Core(TM) i9-13900HX
Stepping:              1
CPU MHz:               2419.202
BogoMIPS:              4838.40
Hypervisor vendor:     KVM
Virtualization type:   full
L1d cache:             48K
L1i cache:             32K
L2 cache:              2048K
L3 cache:              36864K
NUMA node0 CPU(s):     0,1
Flags:                 fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single rsb_ctxsw fsgsbase bmi1 avx2 bmi2 invpcid rdseed clflushopt md_clear flush_l1d arch_capabilities
Download the installation packages
wget https://mirrors.aliyun.com/oceanbase/community/stable/el/8/x86_64/oceanbase-ce-3.1.5-100020022023091114.el8.x86_64.rpm
wget https://mirrors.aliyun.com/oceanbase/community/stable/el/8/x86_64/oceanbase-ce-libs-3.1.5-100020022023091114.el8.x86_64.rpm
wget https://mirrors.aliyun.com/oceanbase/community/stable/el/8/x86_64/obproxy-ce-3.2.3.5-2.el8.x86_64.rpm
wget https://mirrors.aliyun.com/oceanbase/community/stable/el/8/x86_64/obclient-2.2.3-1.el8.x86_64.rpm
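Before installing, it is worth a quick sanity check that the downloads are intact RPM packages (nothing OceanBase-specific is assumed here):
# list the downloaded packages and query one of them without installing
ls -lh oceanbase-ce-*.rpm obproxy-ce-*.rpm obclient-*.rpm
rpm -qpi oceanbase-ce-3.1.5-100020022023091114.el8.x86_64.rpm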
Manual deployment procedure
Parameter changes
- Kernel parameters
vim /etc/sysctl.conf
net.core.somaxconn = 2048
net.core.netdev_max_backlog = 10000
net.core.rmem_default = 16777216
net.core.wmem_default = 16777216
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.ip_local_port_range = 3500 65535
net.ipv4.ip_forward = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.tcp_syncookies = 0
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_slow_start_after_idle=0
vm.swappiness = 0
vm.min_free_kbytes = 2097152
vm.max_map_count=655360
fs.aio-max-nr=1048576
# apply the settings
sysctl -p
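sysctl -p only reloads /etc/sysctl.conf; to confirm the values really took effect, a few of them can be queried back:
# spot-check some of the kernel parameters set above
sysctl net.core.somaxconn vm.max_map_count fs.aio-max-nr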
- Edit limits.conf
vi /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655360
* hard nproc 655360
* soft core unlimited
* hard core unlimited
* soft stack unlimited
* hard stack unlimited
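These limits only apply to new login sessions. After logging in again as the deployment user, a quick check:
ulimit -n   # open files, expect 655360
ulimit -u   # max user processes, expect 655360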
- Disable the firewall
systemctl disable firewalld
systemctl stop firewalld
systemctl status firewalld
- Disable SELinux
vi /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=disabled
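The change in /etc/selinux/config only takes effect after a reboot; to turn SELinux off for the current boot as well (a common companion step, not shown in the original notes):
setenforce 0   # switch to permissive mode immediately, no reboot needed
getenforce     # verify: Permissive now, Disabled after the next reboot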
- Configure the time synchronization service
yum -y install chrony
# check time synchronization activity
chronyc activity
# list the configured time servers
chronyc sources
# check synchronization status
chronyc sources -v
# show tracking status
chronyc tracking
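The chronyc checks above only return useful results when chronyd is enabled and running, a step these notes do not show explicitly:
# enable chronyd at boot and start it now
systemctl enable --now chronyd
systemctl status chronyd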
- Set the time zone
timedatectl set-timezone Asia/Shanghai
chronyc -a makestep
- Create the deployment user
# add a regular user, admin
useradd admin
# set the user's password interactively
passwd admin
# or set it non-interactively with the command below (replace the password with your own)
echo 'admin:ob_PWD123' | chpasswd
- Configure sudo
# install sudo if it is not already present
yum install -y sudo
# Option 1: add admin to the wheel group
[root@rocky8 ~]# usermod -aG wheel admin
[root@rocky8 ~]# id admin
uid=1000(admin) gid=1000(admin) groups=1000(admin),10(wheel)
# Option 2: add admin to the /etc/sudoers file
[root@rocky8 ~]# cat /etc/sudoers |grep wheel
## Allows people in group wheel to run all commands
%wheel ALL=(ALL) ALL
# %wheel ALL=(ALL) NOPASSWD: ALL
vim /etc/sudoers   # (or use visudo, which validates the syntax before saving)
## Allow root to run any commands anywhere
admin ALL=(ALL) ALL
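Either way, it is easy to confirm that admin really has sudo rights (run as root; -U lists another user's sudo privileges):
sudo -l -U admin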
- Passwordless SSH login
# su - admin
[admin@rocky8 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/admin/.ssh/id_rsa):
Created directory '/home/admin/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/admin/.ssh/id_rsa.
Your public key has been saved in /home/admin/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:ir5lXhFUd/Z51CDB8uiNx2fv5SWzuVABiTftczwAqjk admin@rocky8.localdomain
The key's randomart image is:
+---[RSA 3072]----+
| ...+*++.o|
| . +.=*.oo|
| .. = oooo|
| o.. . oo+|
| ES. + .o.|
| . ...o +.o |
| . + . ..oo.o|
| . + . . *+|
| o.. +oo|
+----[SHA256]-----+
[admin@rocky8 ~]$
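ssh-keygen only generates the key pair; for passwordless login the public key still has to land in ~/.ssh/authorized_keys on every node (here just this one host, since the whole cluster runs on a single machine). A minimal sketch, assuming the admin password set earlier:
# install the public key on the (local) target host, then verify
ssh-copy-id admin@192.168.56.210
ssh admin@192.168.56.210 hostname   # should not prompt for a password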
- Disk / file system preparation
# tree is only used below to display the directory layout
yum install -y tree
Install the observer packages
Switch to the admin user
[root@rocky8 ~]# mkdir /data
[root@rocky8 ~]# chown admin.admin /data
[root@rocky8 ~]# su - admin
[admin@rocky8 ~]$ sudo rpm -ivh oceanbase-ce-libs-3*.x86_64.rpm
[admin@rocky8 ~]$ sudo rpm -ivh oceanbase-ce-3*.el8.x86_64.rpm
[admin@rocky8 ~]$ sudo rpm -ivh obclient*.el8.x86_64.rpm
[admin@rocky8 ~]$ sudo chown -R admin.admin oceanbase
[admin@rocky8 ~]$ tree oceanbase
oceanbase
├── bin
│ ├── import_time_zone_info.py
│ └── observer
├── etc
│ ├── oceanbase_upgrade_dep.yml
│ ├── priv_checker.py
│ ├── timezone_V1.log
│ ├── upgrade_checker.py
│ ├── upgrade_cluster_health_checker.py
│ ├── upgrade_post_checker.py
│ ├── upgrade_post.py
│ ├── upgrade_pre.py
│ ├── upgrade_rolling_post.py
│ └── upgrade_rolling_pre.py
└── lib
├── libaio.so -> libaio.so.1.0.1
├── libaio.so.1 -> libaio.so.1.0.1
├── libaio.so.1.0.1
├── libmariadb.so -> libmariadb.so.3
└── libmariadb.so.3
3 directories, 17 files
Add the environment variable to .bash_profile
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:~/oceanbase/lib' >> ~/.bash_profile
. ~/.bash_profile
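A quick check in the current shell (after sourcing the file as above):
echo $LD_LIBRARY_PATH   # should now include the ~/oceanbase/lib directory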
Initialize the data directories, inspect the resulting layout, and start the three observer processes
[admin@rocky8 ~]$ mkdir -p /data/{observer01,observer02,observer03,obproxy}
[admin@rocky8 ~]$ mkdir -p /data/observer{01,02,03}/store/{sort_dir,sstable,clog,ilog,slog}
[admin@rocky8 ~]$ tree /data
/data
├── obproxy
├── observer01
│   └── store
│       ├── clog
│       ├── ilog
│       ├── slog
│       ├── sort_dir
│       └── sstable
├── observer02
│   └── store
│       ├── clog
│       ├── ilog
│       ├── slog
│       ├── sort_dir
│       └── sstable
└── observer03
    └── store
        ├── clog
        ├── ilog
        ├── slog
        ├── sort_dir
        └── sstable

22 directories, 0 files
[admin@rocky8 ~]$ cd /data/observer01/ && /home/admin/oceanbase/bin/observer -r "192.168.56.210:2882:2881;192.168.56.210:3882:3881;192.168.56.210:4882:4881" -o __min_full_resource_pool_memory=268435456,memory_limit=8G,system_memory=4G,stack_size=512K,cpu_count=16,cache_wash_threshold=1G,workers_per_cpu_quota=10,schema_history_expire_time=1d,net_thread_count=4,sys_bkgd_migration_retry_num=3,minor_freeze_times=10,enable_separate_sys_clog=0,enable_merge_by_turn=False,datafile_size=30G,enable_syslog_recycle=True,max_syslog_file_count=10 -z zone1 -n myob -p 2881 -P 2882 -c 1 -d /data/observer01/store -i eth1 -l ERROR
/home/admin/oceanbase/bin/observer -r 192.168.56.210:2882:2881;192.168.56.210:3882:3881;192.168.56.210:4882:4881 -o __min_full_resource_pool_memory=268435456,memory_limit=8G,system_memory=4G,stack_size=512K,cpu_count=16,cache_wash_threshold=1G,workers_per_cpu_quota=10,schema_history_expire_time=1d,net_thread_count=4,sys_bkgd_migration_retry_num=3,minor_freeze_times=10,enable_separate_sys_clog=0,enable_merge_by_turn=False,datafile_size=30G,enable_syslog_recycle=True,max_syslog_file_count=10 -z zone1 -n myob -p 2881 -P 2882 -c 1 -d /data/observer01/store -i eth1 -l ERROR
rs list: 192.168.56.210:2882:2881;192.168.56.210:3882:3881;192.168.56.210:4882:4881
optstr: __min_full_resource_pool_memory=268435456,memory_limit=8G,system_memory=4G,stack_size=512K,cpu_count=16,cache_wash_threshold=1G,workers_per_cpu_quota=10,schema_history_expire_time=1d,net_thread_count=4,sys_bkgd_migration_retry_num=3,minor_freeze_times=10,enable_separate_sys_clog=0,enable_merge_by_turn=False,datafile_size=30G,enable_syslog_recycle=True,max_syslog_file_count=10
zone: zone1
appname: myob
mysql port: 2881
rpc port: 2882
cluster id: 1
data_dir: /data/observer01/store
devname: eth1
log level: INFO
[admin@rocky8 observer01]$ cd /data/observer02/ && /home/admin/oceanbase/bin/observer -r "192.168.56.210:2882:2881;192.168.56.210:3882:3881;192.168.56.210:4882:4881" -o __min_full_resource_pool_memory=268435456,memory_limit=8G,system_memory=4G,stack_size=512K,cpu_count=16,cache_wash_threshold=1G,workers_per_cpu_quota=10,schema_history_expire_time=1d,net_thread_count=4,sys_bkgd_migration_retry_num=3,minor_freeze_times=10,enable_separate_sys_clog=0,enable_merge_by_turn=False,datafile_size=30G,enable_syslog_recycle=True,max_syslog_file_count=10 -z zone2 -n myob -p 3881 -P 3882 -c 1 -d /data/observer02/store -i eth1 -l ERROR
[admin@rocky8 observer02]$ cd /data/observer03/ && /home/admin/oceanbase/bin/observer -r "192.168.56.210:2882:2881;192.168.56.210:3882:3881;192.168.56.210:4882:4881" -o __min_full_resource_pool_memory=268435456,memory_limit=8G,system_memory=4G,stack_size=512K,cpu_count=16,cache_wash_threshold=1G,workers_per_cpu_quota=10,schema_history_expire_time=1d,net_thread_count=4,sys_bkgd_migration_retry_num=3,minor_freeze_times=10,enable_separate_sys_clog=0,enable_merge_by_turn=False,datafile_size=30G,enable_syslog_recycle=True,max_syslog_file_count=10 -z zone3 -n myob -p 4881 -P 4882 -c 1 -d /data/observer03/store -i eth1 -l ERROR
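The three start commands differ only in zone name, ports, and data directory, so a small wrapper script makes restarts less error-prone. This is only a sketch built from the parameters above; the binary path, IP address, NIC name eth1, and directory layout are taken from this walkthrough and may need adjusting:
#!/usr/bin/env bash
# start_observers.sh -- start the three observer processes of this single-host demo cluster
set -e
RS_LIST="192.168.56.210:2882:2881;192.168.56.210:3882:3881;192.168.56.210:4882:4881"
OPTSTR="__min_full_resource_pool_memory=268435456,memory_limit=8G,system_memory=4G,stack_size=512K,cpu_count=16,cache_wash_threshold=1G,workers_per_cpu_quota=10,schema_history_expire_time=1d,net_thread_count=4,sys_bkgd_migration_retry_num=3,minor_freeze_times=10,enable_separate_sys_clog=0,enable_merge_by_turn=False,datafile_size=30G,enable_syslog_recycle=True,max_syslog_file_count=10"

start_observer() {   # args: <dir suffix> <zone> <mysql port> <rpc port>
    local dir="/data/observer$1"
    (cd "$dir" && /home/admin/oceanbase/bin/observer \
        -r "$RS_LIST" -o "$OPTSTR" \
        -z "$2" -n myob -p "$3" -P "$4" -c 1 \
        -d "$dir/store" -i eth1 -l ERROR)
}

start_observer 01 zone1 2881 2882
start_observer 02 zone2 3881 3882
start_observer 03 zone3 4881 4882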
Confirm the observer processes started and the cluster ports are listening
[admin@rocky8 observer03]$ ps -ef |grep obs
admin 1473 1 99 15:49 ? 00:11:24 /home/admin/oceanbase/bin/observer -r 192.168.56.210:2882:2881;192.168.56.210:3882:3881;192.168.56.210:4882:4881 -o __min_full_resource_pool_memory=268435456,memory_limit=8G,system_memory=4G,stack_size=512K,cpu_count=16,cache_wash_threshold=1G,workers_per_cpu_quota=10,schema_history_expire_time=1d,net_thread_count=4,sys_bkgd_migration_retry_num=3,minor_freeze_times=10,enable_separate_sys_clog=0,enable_merge_by_turn=False,datafile_size=30G,enable_syslog_recycle=True,max_syslog_file_count=10 -z zone1 -p 2881 -P 2882 -c 1 -d /data/observer01/store -i eth1 -l ERROR
admin 1741 1 99 15:49 ? 00:05:22 /home/admin/oceanbase/bin/observer -r 192.168.56.210:2882:2881;192.168.56.210:3882:3881;192.168.56.210:4882:4881 -o __min_full_resource_pool_memory=268435456,memory_limit=8G,system_memory=4G,stack_size=512K,cpu_count=16,cache_wash_threshold=1G,workers_per_cpu_quota=10,schema_history_expire_time=1d,net_thread_count=4,sys_bkgd_migration_retry_num=3,minor_freeze_times=10,enable_separate_sys_clog=0,enable_merge_by_turn=False,datafile_size=30G,enable_syslog_recycle=True,max_syslog_file_count=10 -z zone2 -p 3881 -P 3882 -c 1 -d /data/observer02/store -i eth1 -l ERROR
admin 2056 1 99 15:50 ? 00:03:32 /home/admin/oceanbase/bin/observer -r 192.168.56.210:2882:2881;192.168.56.210:3882:3881;192.168.56.210:4882:4881 -o __min_full_resource_pool_memory=268435456,memory_limit=8G,system_memory=4G,stack_size=512K,cpu_count=16,cache_wash_threshold=1G,workers_per_cpu_quota=10,schema_history_expire_time=1d,net_thread_count=4,sys_bkgd_migration_retry_num=3,minor_freeze_times=10,enable_separate_sys_clog=0,enable_merge_by_turn=False,datafile_size=30G,enable_syslog_recycle=True,max_syslog_file_count=10 -z zone3 -p 4881 -P 4882 -c 1 -d /data/observer03/store -i eth1 -l INFO
admin 2996 1430 0 15:52 pts/0 00:00:00 grep --color=auto obs
[admin@rocky8 observer03]$ netstat -ntlp|grep ob
(Not all processes could be identified, non-owned process info
will not be shown, you would have to be root to see it all.)
tcp 0 0 0.0.0.0:2881 0.0.0.0:* LISTEN 1473/observer
tcp 0 0 0.0.0.0:2882 0.0.0.0:* LISTEN 1473/observer
tcp 0 0 0.0.0.0:3881 0.0.0.0:* LISTEN 1741/observer
tcp 0 0 0.0.0.0:3882 0.0.0.0:* LISTEN 1741/observer
tcp 0 0 0.0.0.0:4881 0.0.0.0:* LISTEN 2056/observer
tcp 0 0 0.0.0.0:4882 0.0.0.0:* LISTEN 2056/observer
Starting the observer process (a single-node variant with its own data directory and cluster name obdemo, for reference):
cd ~/oceanbase && bin/observer -i eth1 -p 2881 -P 2882 -z zone1 -d ~/oceanbase/store/obdemo -r '192.168.56.210:2882:2881' -c 20211207 -n obdemo -o "memory_limit=8G,cache_wash_threshold=1G,__min_full_resource_pool_memory=268435456,system_memory=3G,memory_chunk_cache_size=128M,cpu_count=16,net_thread_count=4,stack_size=1536K,config_additional_dir=/data/obdemo/etc3;/redo/obdemo/etc2"
Configure tenants
[admin@localhost ~]$ obclient -h 192.168.56.210 -u root -P 2881 -c -A
Command summary for logging in and bootstrapping the cluster (before bootstrap the sys tenant root user has no password, so -p is only needed once the password has been set below)
obclient -h192.168.56.210 -u root -P 2881 -c -A -pobAdmin123
set session ob_query_timeout=1000000000;
alter system bootstrap ZONE 'zone1' SERVER '192.168.56.210:2882' , ZONE 'zone2' SERVER '192.168.56.210:3882' , ZONE 'zone3' SERVER '192.168.56.210:4882';
alter user root identified by 'obAdmin123'; -- set the password of the sys tenant root user
create user proxyro identified by 'obAdmin123'; -- create the obproxy user
grant all privileges on *.* to proxyro@'%';
Check whether the nodes are serving requests
$ obclient -h 192.168.56.210 -u root@sys -P 2881 -pobAdmin123 -c -A oceanbase
SELECT status,start_service_time FROM OCEANBASE.__all_server;
Create a MySQL tenant
Log in to the database
$ obclient -h 192.168.56.210 -u root@sys -P 2881 -pobAdmin123 -c -A oceanbase
Create resources
-- reduce min_cpu of the sys tenant's resource unit to leave room for the user tenant
alter resource unit sys_unit_config min_cpu=2;
-- create resource unit S4C1G
create resource unit S4C1G max_cpu=4,min_cpu=1,max_memory='1G',min_memory='1G',max_iops=1000,min_iops=1000,max_session_num=1000000,max_disk_size='10G';
-- create resource pool poo1
create resource pool poo1 unit='S4C1G' ,unit_num=1;
-- create tenant mysqlTest
create tenant mysqlTest resource_pool_list=('poo1'),primary_zone='RANDOM',charset='utf8' set ob_tcp_invited_nodes='%',ob_compatibility_mode='mysql';
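To confirm the tenant was actually created, the internal __all_tenant table of the sys tenant can be queried; a one-liner sketch reusing the sys connection above (obclient accepts the familiar mysql-style -e option):
obclient -h 192.168.56.210 -u root@sys -P 2881 -pobAdmin123 -c -e "select tenant_id, tenant_name from oceanbase.__all_tenant;"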
Create a database
-- connect directly with the mysql client
mysql -h127.1 -uroot@sys -P2881 -A -pobAdmin123
-- or log in to the mysqlTest tenant with obclient
obclient -uroot@mysqlTest -h127.0.0.1 -P2881
-- create the database
create database testdb;
use testdb;
Create a business user
obclient [testdb]> create user user1@'%' identified by 'user1' ;
Query OK, 0 rows affected (0.093 sec)
obclient [testdb]> grant all privileges on *.* to user1@'%';
Query OK, 0 rows affected (0.067 sec)
Create a business table
# log in to the mysqlTest tenant as business user user1, via a direct connection
[root@f454e5952328 ~]# obclient -h127.0.0.1 -uuser1@mysqlTest -P2881 -puser1 -c -A testdb
Welcome to the OceanBase. Commands end with ; or \g.
Your OceanBase connection id is 3221488182
Server version: 5.7.25 OceanBase 3.1.4 (r10000092022071511-b4bfa011ceaef428782dcb65ae89190c40b78c2f) (Built Jul 15 2022 11:45:14)
Copyright (c) 2000, 2022, OceanBase and/or its affiliates. All rights reserved.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
obclient [testdb]> create table t1(id int) ;
Query OK, 0 rows affected (0.047 sec)
obclient [testdb]> insert into t1 values(1) ;
Query OK, 1 row affected (0.008 sec)
obclient [testdb]> select * from t1 ;
+------+
| id |
+------+
| 1 |
+------+
1 row in set (0.001 sec)
obclient [testdb]>
Reference: "Connect to an OceanBase Tenant with OBClient", OceanBase documentation center
Install obproxy
OBProxy connects to the OceanBase cluster as the user proxyro. This user does not exist by default, so it has to be created in the sys tenant, using either of the statements below (the GRANT form creates the user implicitly, the CREATE USER form creates it explicitly). Note that these notes use several different placeholder passwords for proxyro; whichever password you pick here must later be set as observer_sys_password on OBProxy.
grant select on oceanbase.* to proxyro identified by 'pobProxy123';
CREATE USER proxyro IDENTIFIED BY 'pobProxy123';
Check the cluster status
select * from __all_server;
Deploy obproxy:
rpm -ivh obproxy-ce-3.2.3.5-2.el8.x86_64.rpm
cd ~/obproxy-3.2.3.5 && bin/obproxy -r "192.168.56.210:2881;192.168.56.210:3881;192.168.56.210:4881" -p 2883 -o "enable_strict_kernel_release=false,enable_cluster_checkout=false,enable_metadb_used=false" -c myob
# Start obproxy; -c is the cluster name and must match the -n parameter used when starting the observers.
cd ~/obproxy-3.2.3.5 && bin/obproxy -r "192.168.56.210:2881" -p 2883 -o "enable_strict_kernel_release=false,enable_cluster_checkout=false,enable_metadb_used=false" -c myob
## By default, obproxy listens on two ports: 2883 and 2884.
[admin@rocky8 bin]$ netstat -ntlp | grep obproxy
(Not all processes could be identified, non-owned process info
will not be shown, you would have to be root to see it all.)
tcp 0 0 0.0.0.0:2883 0.0.0.0:* LISTEN 7134/bin/obproxy
tcp 0 0 0.0.0.0:2884 0.0.0.0:* LISTEN 7134/bin/obproxy
[admin@rocky8 obproxy-3.2.3.5]$ ps -ef|grep obproxy |grep -v grep
admin 7134 1 6 20:02 ? 00:00:05 bin/obproxy -r 192.168.56.210:2881;192.168.56.210:3881;192.168.56.210:4881 -p 2883 -o enable_strict_kernel_release=false,enable_cluster_checkout=false,enable_metadb_used=false -c myob
Create the dedicated OBProxy accounts
OBProxy uses two accounts:
root@proxysys: the OBProxy administrator account. Each OBProxy process has its own independent administrator account, and its initial password is empty; on first login, set a password as follows.
# log in to OBProxy as root@proxysys and change the root@proxysys password
mysql -h127.0.0.1 -P2883 -uroot@proxysys
mysql> alter proxyconfig set obproxy_sys_password = 'admin12345';
proxyro@sys: the account OBProxy uses to access the OB cluster. OBProxy needs to keep communicating with the OceanBase cluster, so first create the proxyro user in the OB cluster and set its password, then set the OBProxy configuration item observer_sys_password to the same password; this establishes connectivity between OBProxy and the OB cluster. The steps are:
(1) On any node, log in to the sys tenant of the OB cluster, create obproxy's internal proxyro user, and set its password
mysql -h127.1 -uroot@sys -P2881 -pobAdmin123 -A -Doceanbase
mysql> create user if not exists proxyro identified by 'admin123123';
mysql> grant select on *.* to proxyro;
(2) Log in to OBProxy as the administrator account root@proxysys and set proxyro@sys's password to the one created for proxyro in the previous step
mysql -h127.0.0.1 -P2883 -uroot@proxysys -p'admin12345'
mysql> alter proxyconfig set observer_sys_password = 'admin123123';
Log in to obproxy and change the passwords
## the initial root@proxysys password is empty; pass -p only if it has already been set
mysql -h 192.168.56.210 -u root@proxysys -P 2883 -pobProxy123
show proxyconfig like '%sys_password%';
alter proxyconfig set obproxy_sys_password='obAdmin123';
show proxyconfig like '%sys_password%';
## set the password obproxy uses for the proxyro user when connecting to the OceanBase cluster
show proxyconfig like '%sys_password%';
alter proxyconfig set observer_sys_password='obAdmin123';
show proxyconfig like '%sys_password%';
obclient -h 192.168.56.210 -uproxyro@sys -P2883 -pobAdmin123 -c -A oceanbase
Verify the deployment
Connect to the OceanBase cluster through OBProxy to confirm it is working
mysql -h192.168.56.210 -umyob:sys:root -P2883 -pobAdmin123 -c -A oceanbase
or
mysql -h192.168.56.210 -uroot@sys#myob -P2883 -pobAdmin123 -c -A oceanbase
select * from oceanbase.__all_server;
show full processlist;
Configure obproxy parameters
$ mysql -h192.168.56.210 -uroot@sys#myob -P2883 -pobProxy123 -c -A oceanbase
The following obproxy parameter settings mainly reduce the volume of runtime logs; adjust them to your environment.
show proxyconfig like '%compress%';
alter proxyconfig set enable_metadb_used=False;
alter proxyconfig set enable_proxy_scramble=True;
alter proxyconfig set proxy_mem_limited=2G;
alter proxyconfig set log_dir_size_threshold=10G;
alter proxyconfig set slow_proxy_process_time_threshold='1000ms';
alter proxyconfig set xflush_log_level=ERROR;
alter proxyconfig set syslog_level=WARN;
alter proxyconfig set enable_compression_protocol=false;
Connect to the OB cluster through OBProxy
There are two username formats for connecting to an OceanBase cluster through OBProxy: [user]@[tenant]#[cluster] or [cluster]:[tenant]:[user].
mysql -h192.168.56.210 -umyob:sys:root -P2883 -pobAdmin123 -c -A oceanbase
or
mysql -h192.168.56.210 -uroot@sys#myob -P2883 -pobAdmin123 -c -A oceanbase
References:
https://open.oceanbase.com/blog/8600144
https://open.oceanbase.com/blog/8600171
obproxy installation, configuration and usage - 墨天轮 (modb.pro)
What to do when connecting to OceanBase through obproxy fails with ERROR 2013 (HY000)? - OceanBase distributed database blog
https://www.modb.pro/db/28977




