暂无图片
暂无图片
2
暂无图片
暂无图片
暂无图片

MogDB v2.1.1 标准安装

1.环境准备(主备)

1.1.安装规划

IP port localport localheartbeatport localservice
192.168.11.5 51000 51001 51005 51004
192.168.11.6 51000 51001 51005 51004
192.168.11.7 51000 51001 51005 51004

1.2.配置hosts文件

# echo "191.168.11.5 mogdb1
191.168.11.6 mogdb2
191.168.11.7 mogdb3">>/etc/hosts

1.3.关闭防火墙及selinux


# systemctl status firewalld.service
# systemctl disable firewalld.service
# systemctl stop firewalld.service
# systemctl list-unit-files firewalld.service

查看selinux状态,如果启动,修改为disabled
# getenforce
# sed -i s/SELINUX=enforcing/SELINUX=disabled/g /etc/selinux/config
# setenforce 0

1.4.设置字符集参数

# echo "export LANG=en_US.UTF-8">>/etc/profile
# source /etc/profile
# echo $LANG

1.5.检查时区

# ll /etc/localtime
lrwxrwxrwx. 1 root root 35 Mar 10 14:49 /etc/localtime -> ../usr/share/zoneinfo/Asia/Shanghai

1.6.关闭RemoveIPC

# echo "RemoveIPC=no">>/etc/systemd/logind.conf
# systemctl daemon-reload
# systemctl restart systemd-logind
# loginctl show-session | grep RemoveIPC
# systemctl show systemd-logind | grep RemoveIPC

1.7.修改网卡rx/tx

# ethtool -g ens224|grep -A 4 "Pre-set maximums"
# ethtool -G ens224 rx 4096 
# ethtool -G ens224 tx 1024

1.8.关闭透明大页

# echo never > /sys/kernel/mm/transparent_hugepage/enabled
# echo never > /sys/kernel/mm/transparent_hugepage/defrag
# echo "echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag">>/etc/rc.local
# chmod +x /etc/rc.local

1.9.sudo权限配置

# echo "omm     ALL=(ALL)       NOPASSWD: /usr/sbin/ifconfig
omm     ALL=(ALL)       NOPASSWD: /usr/bin/systemctl">>/etc/sudoers

1.10.配置root登录

# echo "PermitRootLogin yes">>/etc/ssh/sshd_config
# systemctl restart sshd

1.11.安装python 3.6+

# tar -zxvf Python-3.6.9.tgz
# cd Python-3.6.9
# ./configure --prefix=/usr/local/python3 --enable-shared CFLAGS=-fPIC && make && make install
# ln -s /usr/local/python3/bin/python3 /usr/bin/python3
# ln -s /usr/local/python3/bin/pip3 /usr/bin/pip3
# cp /usr/local/python3/lib/libpython3.6m.so.1.0 /usr/lib64
# python3 -V

1.12.安装依赖包

# yum install -y bzip2 libaio-devel flex bison ncurses-devel glibc-devel libxml2-devel patch redhat-lsb-core unzip gcc gcc-c++ perl openssl-devel libffi-devel libtool zlib-devel 

1.13.配置ssh互信

每个节点执行:
# ssh-keygen -t rsa
# ssh-copy-id root@192.168.11.5
主节点执行:
# scp ~/.ssh/authorized_keys 192.168.11.6:/root/.ssh
# scp ~/.ssh/authorized_keys 192.168.11.7:/root/.ssh

1.14.创建用户

# groupadd dbgrp -g 2000
# useradd omm -g 2000 -u 2000
# echo omm:omm|chpasswd

1.15.创建安装目录

# mkdir -p /dbdata/{arch,tbl,dbback}
# chown -R omm: /dbdata
# chmod 755 /dbdata

1.16.重启操作系统

# reboot

2.安装mogdb1主2备(主)

2.1.创建介质目录

# mkdir -p /opt/software/mogdb
# chmod 755 -R /opt/software/mogdb

2.2.软件下载及上传(omm用户)

请通过 MogDB 下载 进行下载

# su - omm
$ cd /opt/software/mogdb
$ rz
$ tar -xvf MogDB-2.1.1-CentOS-x86_64.tar
$ tar -zxvf MogDB-2.1.1-CentOS-64bit-om.tar.gz
$ tar -jxvf MogDB-2.1.1-CentOS-64bit.tar.bz2

2.3.配置xml文件(omm用户)

$ cd /opt/software/mogdb/
$ vi clusterconfig.xml
<?xml version="1.0" encoding="utf-8"?>
<ROOT>
  <!-- MogDB整体信息 -->
  <CLUSTER>
    <!-- 数据库名称 -->
    <PARAM name="clusterName" value="testCluster" />
	<!-- 数据库节点名称(hostname) -->
    <PARAM name="nodeNames" value="mogdb1,mogdb2,mogdb3"/>
	<!-- 节点IP,与nodeNames一一对应 -->
    <PARAM name="backIp1s" value="192.168.11.5,192.168.11.6,192.168.11.7"/>
	<!-- 数据库安装目录-->
    <PARAM name="gaussdbAppPath" value="/dbdata/app/mogdb" />
	<!-- 日志目录-->
    <PARAM name="gaussdbLogPath" value="/dbdata/log" />
	<!-- 临时文件目录-->
    <PARAM name="tmpMppdbPath" value="/dbdata/tmp"/>
	<!--数据库工具目录-->
    <PARAM name="gaussdbToolPath" value="/dbdata/app/tools" />
	<!--数据库core文件目录-->
    <PARAM name="corePath" value="/dbdata/corefile"/>
  </CLUSTER>
  <!-- 每台服务器上的节点部署信息 --> 
  <DEVICELIST>
    <!-- node1上的节点部署信息 --> 
    <DEVICE sn="1000001">
	  <!-- node1的hostname -->
      <PARAM name="name" value="mogdb1"/>
	  <!-- node1所在的AZ及AZ优先级 -->
      <PARAM name="azName" value="AZ1"/>
      <PARAM name="azPriority" value="1"/>
	  <!-- node1的IP,如果服务器只有一个网卡可用,将backIP1和sshIP1配置成同一个IP -->
      <PARAM name="backIp1" value="192.168.11.5"/>
      <PARAM name="sshIp1" value="192.168.11.5"/>
      <!--dbnode-->
	  <!--当前主机上需要部署的数据库节点个数-->
      <PARAM name="dataNum" value="1"/>
	  <!--dn端口号-->
      <PARAM name="dataPortBase" value="51000"/>
	  <!--DBnode主节点上数据目录,及备机数据目录-->
      <PARAM name="dataNode1" value="/dbdata/data,mogdb2,/dbdata/data,mogdb3,/dbdata/data"/>
      <PARAM name="dataNodeXlogPath1" value="/dbdata/xlog,/dbdata/xlog,/dbdata/xlog "/>
</DEVICE>

	   <!-- node2上的节点部署信息 --> 
    <DEVICE sn="1000002">
	  <!-- node2的hostname -->
      <PARAM name="name" value="mogdb2"/>
	  <!-- node2所在的AZ及AZ优先级 -->
      <PARAM name="azName" value="AZ1"/>
      <PARAM name="azPriority" value="1"/>
	  <!-- node2的IP,如果服务器只有一个网卡可用,将backIP1和sshIP1配置成同一个IP -->
      <PARAM name="backIp1" value="192.168.11.6"/>
      <PARAM name="sshIp1" value="192.168.11.6"/>
      <!--dbnode-->
    </DEVICE>

	   <!-- node3上的节点部署信息 --> 
    <DEVICE sn="1000003">
	  <!-- node3的hostname -->
      <PARAM name="name" value="mogdb3"/>
	  <!-- node3所在的AZ及AZ优先级 -->
      <PARAM name="azName" value="AZ1"/>
      <PARAM name="azPriority" value="1"/>
	  <!-- node3的IP,如果服务器只有一个网卡可用,将backIP1和sshIP1配置成同一个IP -->
      <PARAM name="backIp1" value="192.168.11.7"/>
      <PARAM name="sshIp1" value="192.168.11.7"/>
      <!--dbnode-->
    </DEVICE>
  </DEVICELIST>
</ROOT>

2.4.预安装检查

# export LD_LIBRARY_PATH=/opt/software/mogdb/script/gspylib/clib:$LD_LIBRARY_PATH
# /opt/software/mogdb/script/gs_preinstall -U omm -G dbgrp -X /opt/software/mogdb/clusterconfig.xml
Parsing the configuration file.
Successfully parsed the configuration file.
Installing the tools on the local node.
Successfully installed the tools on the local node.
Are you sure you want to create trust for root (yes/no)? no
Setting pssh path
Successfully set core path.
Distributing package.
Begin to distribute package to tool path.
Successfully distribute package to tool path.
Begin to distribute package to package path.
Successfully distribute package to package path.
Successfully distributed package.
Are you sure you want to create the user[omm] and create trust for it (yes/no)? yes   
Preparing SSH service.
Successfully prepared SSH service.
Installing the tools in the cluster.
Successfully installed the tools in the cluster.
Checking hostname mapping.
Successfully checked hostname mapping.
Creating SSH trust for [omm] user.
Please enter password for current user[omm].
Password: 
Checking network information.
All nodes in the network are Normal.
Successfully checked network information.
Creating SSH trust.
Creating the local key file.
Successfully created the local key files.
Appending local ID to authorized_keys.
Successfully appended local ID to authorized_keys.
Updating the known_hosts file.
Successfully updated the known_hosts file.
Appending authorized_key on the remote node.
Successfully appended authorized_key on all remote node.
Checking common authentication file content.
Successfully checked common authentication content.
Distributing SSH trust file to all node.
Successfully distributed SSH trust file to all node.
Verifying SSH trust on all hosts.
Successfully verified SSH trust on all hosts.
Successfully created SSH trust.
Successfully created SSH trust for [omm] user.
Checking OS software.
Successfully check os software.
Checking OS version.
Successfully checked OS version.
Creating cluster's path.
Successfully created cluster's path.
Set and check OS parameter.
Setting OS parameters.
Successfully set OS parameters.
Warning: Installation environment contains some warning messages.
Please get more details by "/opt/mogdb/software/script/gs_checkos -i A -h mogdb1,mogdb2,mogdb3 --detail".
Set and check OS parameter completed.
Preparing CRON service.
Successfully prepared CRON service.
Setting user environmental variables.
Successfully set user environmental variables.
Setting the dynamic link library.
Successfully set the dynamic link library.
Setting Core file
Successfully set core path.
Setting pssh path
Successfully set pssh path.
Setting Cgroup.
Successfully set Cgroup.
Set ARM Optimization.
No need to set ARM Optimization.
Fixing server package owner.
Setting finish flag.
Successfully set finish flag.
Preinstallation succeeded.

2.5.安装数据库并启动(omm用户)

# su - omm
$ /opt/software/mogdb/script/gs_install \
> -X /opt/software/mogdb/clusterconfig.xml \
> -l /home/omm/gs_install.log \
> --gsinit-parameter="--encoding=UTF8" \
> --gsinit-parameter="--locale=en_US.UTF8" \
> --gsinit-parameter="--lc-collate=en_US.UTF8" \
> --gsinit-parameter="--lc-ctype=en_US.UTF8" \
> --gsinit-parameter="--lc-messages=en_US.UTF8" \
> --gsinit-parameter="--dbcompatibility=PG" \
> --gsinit-parameter="--pwpasswd=Testdb@1234"
Parsing the configuration file.
Check preinstall on every node.
Successfully checked preinstall on every node.
Creating the backup directory.
Successfully created the backup directory.
begin deploy..
Installing the cluster.
begin prepare Install Cluster..
Checking the installation environment on all nodes.
begin install Cluster..
Installing applications on all nodes.
Successfully installed APP.
begin init Instance..
encrypt cipher and rand files for database.
begin to create CA cert files
The sslcert will be generated in /dbdata/app/mogdb/share/sslcert/om
Cluster installation is completed.
Configuring.
Deleting instances from all nodes.
Successfully deleted instances from all nodes.
Checking node configuration on all nodes.
Initializing instances on all nodes.
Updating instance configuration on all nodes.
Check consistence of memCheck and coresCheck on database nodes.
Successful check consistence of memCheck and coresCheck on all nodes.
Configuring pg_hba on all nodes.
Configuration is completed.
Successfully started cluster.
Successfully installed application.
end deploy.

2.6.修改一个节点为同步备库

$ sed -i "/synchronous_standby_names/csynchronous_standby_names = 'dn_6002'" /dbdata/data/postgresql.conf
$ gs_om -t restart

3.mogdb常用操作

3.1.检查集群状态

$ gs_om -t status --detail
[   Cluster State   ]

cluster_state   : Normal
redistributing  : No
current_az      : AZ_ALL

[  Datanode State   ]

    node   node_ip         port      instance             state
-------------------------------------------------------------------------------
1  mogdb1 192.168.11.5    51000      6001 /dbdata/data   P Primary Normal
2  mogdb2 192.168.11.6    51000      6002 /dbdata/data   S Standby Normal
3  mogdb3 192.168.11.7    51000      6003 /dbdata/data   S Standby Normal

3.2.检查集群同步详细

$ gs_ctl query -D /dbdata/data
[2022-05-19 11:36:28.568][9266][][gs_ctl]: gs_ctl query ,datadir is /dbdata/data 
 HA state:           
        local_role                     : Primary
        static_connections             : 2
        db_state                       : Normal
        detail_information             : Normal

 Senders info:       
        sender_pid                     : 27549
        local_role                     : Primary
        peer_role                      : Standby
        peer_state                     : Normal
        state                          : Streaming
        sender_sent_location           : 0/50005C8
        sender_write_location          : 0/50005C8
        sender_flush_location          : 0/50005C8
        sender_replay_location         : 0/50005C8
        receiver_received_location     : 0/50005C8
        receiver_write_location        : 0/50005C8
        receiver_flush_location        : 0/50005C8
        receiver_replay_location       : 0/50005C8
        sync_percent                   : 100%
        sync_state                     : Quorum
        sync_priority                  : 1
        sync_most_available            : Off
        channel                        : 192.168.11.5:51001-->192.168.11.7:36720

        sender_pid                     : 28164
        local_role                     : Primary
        peer_role                      : Standby
        peer_state                     : Normal
        state                          : Streaming
        sender_sent_location           : 0/50005C8
        sender_write_location          : 0/50005C8
        sender_flush_location          : 0/50005C8
        sender_replay_location         : 0/50005C8
        receiver_received_location     : 0/50005C8
        receiver_write_location        : 0/50005C8
        receiver_flush_location        : 0/50005C8
        receiver_replay_location       : 0/50005C8
        sync_percent                   : 100%
        sync_state                     : Quorum
        sync_priority                  : 1
        sync_most_available            : Off
        channel                        : 192.168.11.5:51001-->192.168.11.6:58841

 Receiver info:      
No information 

$ gs_ctl query -D /dbdata/data
[2022-05-19 16:14:16.972][5954][][gs_ctl]: gs_ctl query ,datadir is /dbdata/data 
 HA state:           
        local_role                     : Primary
        static_connections             : 2
        db_state                       : Normal
        detail_information             : Normal

 Senders info:       
        sender_pid                     : 26582
        local_role                     : Primary
        peer_role                      : Standby
        peer_state                     : Normal
        state                          : Streaming
        sender_sent_location           : 0/50126A0
        sender_write_location          : 0/50126A0
        sender_flush_location          : 0/50126A0
        sender_replay_location         : 0/50126A0
        receiver_received_location     : 0/50126A0
        receiver_write_location        : 0/50126A0
        receiver_flush_location        : 0/50126A0
        receiver_replay_location       : 0/50126A0
        sync_percent                   : 100%
        sync_state                     : Sync
        sync_priority                  : 1
        sync_most_available            : Off
        channel                        : 192.168.11.5:51001-->192.168.11.6:43856

        sender_pid                     : 28196
        local_role                     : Primary
        peer_role                      : Standby
        peer_state                     : Normal
        state                          : Streaming
        sender_sent_location           : 0/50126A0
        sender_write_location          : 0/50126A0
        sender_flush_location          : 0/50126A0
        sender_replay_location         : 0/50126A0
        receiver_received_location     : 0/50126A0
        receiver_write_location        : 0/50126A0
        receiver_flush_location        : 0/50126A0
        receiver_replay_location       : 0/50126A0
        sync_percent                   : 100%
        sync_state                     : Async
        sync_priority                  : 0
        sync_most_available            : Off
        channel                        : 192.168.11.5:51001-->192.168.11.7:45770

 Receiver info:      
No information 

3.3.检查进程

$ ps -ef|grep mogdb
omm      20631     1  4 11:31 ?        00:00:13 /dbdata/app/mogdb/bin/mogdb -D /dbdata/data -M primary
omm      21120 29807  0 11:36 pts/2    00:00:00 grep --color=auto mogdb

$ ps -ef|grep mogdb
omm      14431     1 18 11:31 ?        00:00:53 /dbdata/app/mogdb/bin/mogdb -D /dbdata/data -M standby
omm      23659 30578  0 11:36 pts/0    00:00:00 grep --color=auto mogdb

$ ps -ef|grep mogdb
omm       7320     1 25 11:33 ?        00:01:14 /dbdata/app/mogdb/bin/mogdb -D /dbdata/data -M standby
omm      14748 31174  0 11:38 pts/0    00:00:00 grep --color=auto mogdb

3.4.停止集群

$ gs_om -t stop
Stopping cluster.
=========================================
Successfully stopped cluster.
=========================================

3.5.启动集群

$ gs_om -t start
Starting cluster.
=========================================
=========================================
Successfully started.

3.6.重启集群

$ gs_om -t restart
Stopping cluster.
=========================================
Successfully stopped cluster.
=========================================
End stop cluster.
Starting cluster.
=========================================
=========================================
Successfully started.

3.7.停止本节点

$ gs_ctl stop -D /dbdata/data
$ gs_om -t stop -h 192.168.11.5

3.8.启动本节点

$ gs_ctl start -D /dbdata/data
$ gs_om -t start -h 192.168.11.5

3.9.重启本节点

$ gs_ctl restart -D /dbdata/data
$ gs_om -t restart -h 192.168.11.5

4.参数调整脚本

#!/bin/bash
# MogDB parameter tuning script.
# Sizes memory-related GUCs from the host's total RAM, applies a standard
# parameter set to every instance on every node via gs_guc, then restarts
# the cluster so the restart-only parameters take effect.

source ~/.bashrc

#######################################
# Pick database sizing parameters based on total host memory.
# Arguments: $1 - total memory in KB (second field of `free` line 2)
# Globals written: max_process_memory, shared_buffers, max_connections,
#                  work_mem, maintenance_work_mem
# Outputs: a warning on stdout for very small hosts
#######################################
compute_memory_params() {
  local mem_kb=$1
  if (( mem_kb < 4*1024*1024 )); then
    # < 4 GB: minimal footprint
    max_process_memory=2GB
    shared_buffers=128MB
    max_connections=500
    work_mem=4MB
    maintenance_work_mem=256MB
    echo "If the database fails to start, lower the parameters max_process_memory and shared_buffers"
  elif (( mem_kb < 8*1024*1024 )); then
    # 4 GB <= mem < 8 GB. The original used `-gt 4G && -lt 8G`, so a host
    # with exactly 4 GB silently fell into the >= 8 GB branch; `<` fixes it.
    max_process_memory=5GB
    shared_buffers=1GB
    max_connections=1000
    work_mem=16MB
    maintenance_work_mem=1GB
  else
    # >= 8 GB: 60% of RAM for the instance, 30% for shared_buffers
    # (integer division truncates toward zero)
    max_process_memory=$((mem_kb*6/10/1024/1024))GB
    shared_buffers=$((mem_kb*3/10/1024/1024))GB
    max_connections=3000
    work_mem=64MB
    maintenance_work_mem=2GB
  fi
}

# Total memory in KB: line 2, column 2 of `free`.
memory=$(free | awk 'NR==2{print $2}')
compute_memory_params "$memory"

## Memory-related parameters
gs_guc set -I all -N all -c "max_process_memory=${max_process_memory}"
gs_guc set -I all -N all -c "shared_buffers=${shared_buffers}"
gs_guc set -I all -N all -c "work_mem=${work_mem}"
gs_guc set -I all -N all -c "maintenance_work_mem=${maintenance_work_mem}"
gs_guc set -I all -N all -c "cstore_buffers=16MB"
gs_guc set -I all -N all -c "wal_buffers=1GB"
gs_guc set -I all -N all -c "local_syscache_threshold=32MB"
gs_guc set -I all -N all -c "standby_shared_buffers_fraction=1"

## Connection / authentication parameters
gs_guc set -I all -N all -c "max_connections=${max_connections}"
gs_guc set -I all -N all -c "max_prepared_transactions=${max_connections}"
gs_guc set -I all -N all -c "listen_addresses = '*'"
gs_guc set -I all -N all -c "remote_read_mode=non_authentication"
gs_guc set -I all -N all -c "password_encryption_type=1"
gs_guc set -I all -N all -c "password_reuse_time=0"
gs_guc set -I all -N all -c "password_lock_time=0"
gs_guc set -I all -N all -c "password_effect_time=0"
gs_guc set -I all -N all -c "session_timeout=0"

## WAL parameters
gs_guc set -I all -N all -c "wal_level=logical"
gs_guc set -I all -N all -c "full_page_writes=off"
gs_guc set -I all -N all -c "wal_log_hints=off"
gs_guc set -I all -N all -c "xloginsert_locks=48"
gs_guc set -I all -N all -c "advance_xlog_file_num=10"

## Replication parameters
gs_guc set -I all -N all -c "synchronous_commit=on"
gs_guc set -I all -N all -c "wal_keep_segments=1024"
gs_guc set -I all -N all -c "max_wal_senders=16"
gs_guc set -I all -N all -c "recovery_max_workers=4"
gs_guc set -I all -N all -c "most_available_sync=on"
gs_guc set -I all -N all -c "max_size_for_xlog_prune=104857600"
gs_guc set -I all -N all -c "catchup2normal_wait_time=0"
gs_guc set -I all -N all -c "enable_slot_log=on"
gs_guc set -I all -N all -c "max_replication_slots=32"
gs_guc set -I all -N all -c "wal_receiver_timeout=60s"
gs_guc set -I all -N all -c "sync_config_strategy=none_node"

## Logging parameters
gs_guc set -I all -N all -c "logging_collector=on"
gs_guc set -I all -N all -c "log_duration=on"
gs_guc set -I all -N all -c "log_line_prefix='%m %u %d %r %p %S'"
gs_guc set -I all -N all -c "log_checkpoints=on"
gs_guc set -I all -N all -c "plog_merge_age=0"

## Vacuum / statistics parameters
gs_guc set -I all -N all -c "vacuum_cost_limit=1000"
gs_guc set -I all -N all -c "autovacuum_max_workers=10"
gs_guc set -I all -N all -c "autovacuum_naptime=20s"
gs_guc set -I all -N all -c "autovacuum_vacuum_cost_delay=10"
gs_guc set -I all -N all -c "autovacuum_vacuum_scale_factor=0.05"
gs_guc set -I all -N all -c "autovacuum_analyze_scale_factor=0.02"
gs_guc set -I all -N all -c "autovacuum_vacuum_threshold=200"
gs_guc set -I all -N all -c "autovacuum_analyze_threshold=200"
gs_guc set -I all -N all -c "autovacuum_io_limits=104857600"
# NOTE: the original script set instr_unique_sql_count twice (20000 here and
# 5000 further down); only the later value took effect, so the single
# effective setting of 5000 is kept below.
gs_guc set -I all -N all -c "enable_save_datachanged_timestamp=off"
gs_guc set -I all -N all -c "track_sql_count=off"
gs_guc set -I all -N all -c "enable_instr_rt_percentile=off"
gs_guc set -I all -N all -c "enable_instance_metric_persistent=off"
gs_guc set -I all -N all -c "enable_logical_io_statistics=off"
gs_guc set -I all -N all -c "enable_user_metric_persistent=off"
gs_guc set -I all -N all -c "enable_mergejoin=on"
gs_guc set -I all -N all -c "enable_nestloop=on"
gs_guc set -I all -N all -c "enable_pbe_optimization=off"
gs_guc set -I all -N all -c "enable_resource_track=on"
gs_guc set -I all -N all -c "enable_wdr_snapshot=on"
gs_guc set -I all -N all -c "instr_unique_sql_count=5000"

## Client access whitelist (pg_hba)
gs_guc set -I all -N all -h "host     all      all  0.0.0.0/0    md5"

## Other parameters
gs_guc set -I all -N all -c "checkpoint_segments=1024"
gs_guc set -I all -N all -c "checkpoint_completion_target=0.8"
gs_guc set -I all -N all -c "pagewriter_sleep=200"

gs_guc set -I all -N all -c "enable_alarm=off"
gs_guc set -I all -N all -c "enable_codegen=off"
gs_guc set -I all -N all -c "audit_enabled=off"
gs_guc set -I all -N all -c "enable_asp=off"

gs_guc set -I all -N all -c "lc_messages='en_US.UTF-8'"
gs_guc set -I all -N all -c "lc_monetary='en_US.UTF-8'"
gs_guc set -I all -N all -c "lc_numeric='en_US.UTF-8'"
gs_guc set -I all -N all -c "lc_time='en_US.UTF-8'"

gs_guc set -I all -N all -c "update_lockwait_timeout=1min"
gs_guc set -I all -N all -c "lockwait_timeout=1min"

gs_guc set -I all -N all -c "max_files_per_process=100000"
gs_guc set -I all -N all -c "behavior_compat_options='display_leading_zero'"
gs_guc set -I all -N all -c "enable_thread_pool=off"

# Restart the cluster so restart-only parameters take effect.
gs_om -t stop && gs_om -t start

5.Mogdb卸载

5.1.MogDB集群卸载

主节点:
$ gs_uninstall --delete-data
每个节点:
$ gs_uninstall --delete-data -L

5.2.MogDB环境清理

# gs_postuninstall -U omm -X /opt/software/mogdb/clusterconfig.xml --delete-user

请参考MogDB官方 安装参考文档 进行详细学习

最后修改时间:2022-05-25 21:25:16
「喜欢这篇文章,您的关注和赞赏是给作者最好的鼓励」
关注作者
【版权声明】本文为墨天轮用户原创内容,转载时必须标注文章的来源(墨天轮),文章链接,文章作者等基本信息,否则作者和墨天轮有权追究责任。如果您发现墨天轮中有涉嫌抄袭或者侵权的内容,欢迎发送邮件至:contact@modb.pro进行举报,并提供相关证据,一经查实,墨天轮将立刻删除相关内容。

评论