
在当前运行的 gp6 集群服务器安装部署 YMatrix。 YMatrix 可以与 gp6 共存,不影响当前 gp6 上面的业务运行,使用 YMatrix 的 UI 部署会自动判断端口是否已被占用。 有一套新的服务器环境安装部署 YMatrix。
如果是企业版客户,需要申请替换长期授权的 license。
#所有节点都要执行
#删除旧的license
rm /usr/local/matrixdb/bin/LICENSE_XXX
#拷贝新的license到该目录下,并赋权限
chmod 755 /usr/local/matrixdb/bin/LICENSE_XXX
验证license是否生效,启动mxgate就可以验证--创建test数据库CREATE DATABASE test;--切换到test数据库\c test--创建测试表CREATE TABLE dest(time timestamp,c1 int,c2 text)DISTRIBUTED BY(c1);
#生成mxgate配置文件mxgate config --db-database test \--db-master-host localhost \--db-master-port 5432 \--db-user mxadmin \--target public.dest \--time-format raw \--delimiter ',' \> mxgate.conf
#启动MatrixGatemxgate start --config mxgate.conf如果可以启动就说明license没问题,如果没办法启动就说明license有问题。验证完成后将MatrixGate关闭。#关闭MatrixGatemxgate stop


#强制xlog落盘,防止数据丢失psql -d postgres -c "checkpoint" #可以连续执行3-6次#关闭gp6集群gpstop -fa
#备份pg_hba.confcp $MASTER_DATA_DIRECTORY/pg_hba.conf $MASTER_DATA_DIRECTORY/pg_hba.conf.20221208#编辑pg_hba.conf,将业务端的访问权限都删掉vi $MASTER_DATA_DIRECTORY/pg_hba.conf
#启动gp6集群gpstart -a
注意:-d 指定的路径需要提前创建,要求有权限且没有数据
#gpcheckperf 网络、磁盘性能检测
su - mxadmin
/usr/local/matrixdb/bin/gpcheckperf -f /home/mxadmin/all_hosts -r ds -D -d /data
#备份gp6用户信息pg_dumpall -h 127.0.0.1 -U gpadmin -s -p 5432 --roles-only -f owner.sql#备份gp6准备迁移的数据库,例如:testdbpg_dump -h 127.0.0.1 -U gpadmin -p 5432 -d testdb -s -f testdb.ddl
#屏蔽掉资源队列
sed -i 's#ALTER RESOURCE#--ALTER RESOURCE#Ig' testdb.ddl
sed -i 's#CREATE RESOURCE#--CREATE RESOURCE#Ig' testdb.ddl
#修改压缩算法,提高压缩比、提高数据插入性能(将所有的ao、aoco表的压缩算法改成zstd,并且压缩级调整为1级)
sed -i "s#compresstype=quicklz#compresstype=zstd#Ig" testdb.ddl
sed -i "s#compresstype=zlib#compresstype=zstd#Ig" testdb.ddl
sed -i "s#compresslevel=5#compresslevel=1#Ig" testdb.ddl
sed -i "s#compresslevel='5'#compresslevel=1#Ig" testdb.ddl
#修改plpythonu为plpython3u
sed -i 's#plpythonu#plpython3u#Ig' testdb.ddl
#修改数据类型unknown为text
sed -i 's#unknown#text#Ig' testdb.ddl
#把索引信息挑出来,单独存放
cat testdb.ddl | egrep -i 'CREATE INDEX|CREATE UNIQUE INDEX' > testdb.idx
#把索引屏蔽掉,待数据迁移完成后再创建索引,提高效率
sed -i 's#CREATE INDEX#--CREATE INDEX#Ig' testdb.ddl
sed -i 's#CREATE UNIQUE INDEX#--CREATE UNIQUE INDEX#Ig' testdb.ddl
#检查有没有表空间的信息,手动挑出来修正为目标值
cat testdb.ddl | grep -i TABLESPACE
#创建用户
psql -h localhost -p 6433 -d testdb -U mxadmin -f ./owner.sql > owner.out 2>&1 &
#检查owner.out文件是否有报错,有报错及时解决
cat owner.out | grep -i error
#创建DDL
psql -h localhost -p 6433 -d testdb -U mxadmin -f ./testdb.ddl > ddl.out 2>&1 &
#检查ddl.out文件是否有报错,有报错及时解决
cat ddl.out | grep -i error
mxshift迁移是通过segment直连实现高速并发迁移,所以执行mxshift工具需要在gp6的所有实例添加白名单访问权限。
#快速地查找gp6实例的白名单文件
find /data/gpdata/ -name pg_hba.conf
#将YMatrix集群所有IP放到gp6所有实例的白名单文件里面,允许相互访问。例如:
host all all 172.16.236.0/24 md5
vi testdb.json
{
    "gphome": "/usr/local/greenplum-db-6.7.1",
    "mode": "normal",
    "verify": false,
    "bothway": true,
    "concurrency": 6,
    "log_level": "info",
    "src_host": "localhost",
    "src_db": "testdb",
    "src_user": "gpadmin",
    "src_password": "123123",
    "src_port": 5432,
    "target_host": "localhost",
    "target_db": "testdb",
    "target_user": "mxadmin",
    "target_password": "123123",
    "target_port": 6433
}
time mxshift --config_path testdb.json
更多解释详见
https://www.ymatrix.cn/doc/4.7/tools/mxshift
#执行创建索引psql -h localhost -p 6433 -d testdb -U mxadmin -f testdb.idx >idx.out 2>&1 &#检查idx.out文件是否有报错,有报错及时解决cat idx.out | grep -i error
export PGPORT=6433time analyzedb -d testdb -p 10 -a
vi config.ini
[db]
s_ip = localhost
s_port = 5432
s_user = gpadmin
s_pass = 123123
s_db = edp
d_ip = localhost
d_port = 6433
d_user = mxadmin
d_pass = 123123
d_db = edp
[thread_control]
# 控制大小超过多少GB使用mxgate,否则使用pgdump
vault = 1
gate_pool = 10
dump_pool = 10
dump_concurrency = 15
dump_compress = false
check_concurrency = 15
# 不需要迁移的schema,table
[exclude]
schemas = information_schema, gp_toolkit, matrixgate_internal, matrixts_internal
tables = tep_tes.tes2_source_data,tep_tes.tes2_source_data
vi run_count_check.sh
#!/bin/bash
date
python3 data_transfer.py plan
cat pgdump_transfer_plan.csv >> mxgate_transfer_plan.csv
python3 data_transfer.py -cgate
date
nohup sh run_count_check.sh > run_count_check.out 2>&1 &#检查run_count_check.out,找到数据校验不一致的,查找原因。cat run_count_check.out | grep "count diff"
--查询普通表+分区主表信息
--YMatrix
SELECT n.nspname as "Schema", count(c.relname) as ct
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('p','r','')
AND NOT c.relispartition
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND n.nspname <> 'matrixts_internal'
AND n.nspname <> 'gp_toolkit'
GROUP BY 1;
--gp6
SELECT n.nspname as "Schema", count(c.relname) as ct
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r')
AND c.relstorage IN ('h','a','c')
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND n.nspname <> 'gp_toolkit'
AND c.oid NOT IN (select inhrelid from pg_catalog.pg_inherits)
AND pg_catalog.pg_table_is_visible(c.oid)
GROUP BY 1;
--索引
SELECT n.nspname as "Schema", count(c.relname) as ct
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('i','I','')
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND pg_catalog.pg_table_is_visible(c.oid)
GROUP BY 1;
--视图
SELECT n.nspname as "Schema", count(c.relname) as ct
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('v','')
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND n.nspname <> 'matrixts_internal'
AND n.nspname <> 'gp_toolkit'
GROUP BY 1;
--函数
SELECT n.nspname as "Schema", count(p.proname) as ct
FROM pg_catalog.pg_proc p
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
WHERE 1=1
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND n.nspname <> 'matrixts_internal'
AND n.nspname <> 'gp_toolkit'
AND p.prokind <> 'a'
AND p.prokind <> 'w'
AND p.prokind <> 'p'
GROUP BY 1;
--序列
SELECT n.nspname as "Schema", count(c.relname) as ct
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('S','')
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND n.nspname <> 'matrixts_internal'
AND n.nspname <> 'gp_toolkit'
AND n.nspname <> 'mxshift_internal_catalog'
GROUP BY 1;
--extension
SELECT n.nspname AS "Schema", count(e.extname) AS ct
FROM pg_catalog.pg_extension e
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = e.extnamespace
LEFT JOIN pg_catalog.pg_description c ON c.objoid = e.oid
AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass
GROUP BY 1;
--type
SELECT n.nspname as "Schema", count(pg_catalog.format_type(t.oid, NULL)) AS ct
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid))
AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid)
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND n.nspname <> 'matrixts_internal'
AND n.nspname <> 'gp_toolkit'
GROUP BY 1;
--language
SELECT l.lanname AS "Name",
pg_catalog.pg_get_userbyid(l.lanowner) as "Owner",
l.lanpltrusted AS "Trusted",
d.description AS "Description"
FROM pg_catalog.pg_language l
LEFT JOIN pg_catalog.pg_description d
ON d.classoid = l.tableoid AND d.objoid = l.oid
AND d.objsubid = 0
WHERE l.lanplcallfoid != 0
ORDER BY 1;
--role
SELECT count(r.rolname) as ct_role
FROM pg_catalog.pg_roles r
WHERE r.rolname !~ '^pg_';
#utility模式进库
su - gpadmin
[gpadmin@mdw ~]$ PGOPTIONS='-c gp_session_role=utility' psql postgres
--修改master和standby端口
postgres=# BEGIN;
BEGIN
postgres=# set allow_system_table_mods=true;
SET
postgres=# update gp_segment_configuration set port=6432 where content=-1;
UPDATE 2
--验证修改已成功
postgres=# select * from gp_segment_configuration where content=-1;
postgres=# end;
COMMIT
#停止集群
gpstop -af
#修改postgresql.conf(如果有standby也要修改)
[gpadmin@mdw ~]$ vi $MASTER_DATA_DIRECTORY/postgresql.conf
修改前:port = 5432
修改后:port = 6432
#修改环境变量
vi /home/gpadmin/.bash_profile
修改前:export PGPORT=5432
修改后:export PGPORT=6432
#新的参数立马生效
source /home/gpadmin/.bash_profile
#启动集群,校验是否修改成功
gpstart -a
#检查端口
psql postgres
postgres=# show port;
sudo su - gpadmingpstop -fa
#utility模式进库
su - mxadmin
[mxadmin@mdw1 ~]$ PGOPTIONS='-c gp_role=utility' psql postgres
--修改master和standby端口
postgres=# BEGIN;
BEGIN
postgres=# set allow_system_table_mods=true;
SET
postgres=# update gp_segment_configuration set port=5432 where content=-1;
UPDATE 2
--验证修改已成功
postgres=# select * from gp_segment_configuration where content=-1;
postgres=# end;
COMMIT
#停止集群
gpstop -af
#修改postgresql.conf(如果有standby也要修改)
[mxadmin@mdw1 ~]$ vi $MASTER_DATA_DIRECTORY/postgresql.conf
修改前:port = 6433
修改后:port = 5432
#修改环境变量
vi /home/mxadmin/.matrixdb.env
修改前:export PGPORT=6433
修改后:export PGPORT=5432
#新的参数立马生效
source /home/mxadmin/.matrixdb.env
#修改以下(不限于)文件中的port值
cat /etc/matrixdb/supervisor.conf
cat /etc/matrixdb/cluster.conf
cat /etc/matrixdb/service/telegraf.conf
cat /etc/matrixdb/service/mxmgr_gate_ctrl.conf
cat /etc/matrixdb/service/mxmgr_telegraf_ctrl.conf
#启动集群
gpstart -a
#如果之前有部署监控,需要修改监控的端口号
psql -d matrixmgr
select * from local.matrix_manager_config;
update local.matrix_manager_config set value='5432' where value='6433';
SELECT mxmgr_remove_all('local');
SELECT mxmgr_deploy('local');
#重启supervisor
#master节点root用户登录
systemctl stop matrixdb.supervisor.service
systemctl start matrixdb.supervisor.service
#根据服务器硬件资源进行调整
gpconfig -c shared_buffers -v 6GB
gpconfig -c statement_mem -v 2GB
gpconfig -c work_mem -v 2GB
gpconfig -c max_statement_mem -v 4GB
gpconfig -c maintenance_work_mem -v 2GB
gpconfig -c effective_cache_size -v 64GB
gpconfig -c gp_workfile_limit_per_segment -v 128GB
gpconfig -c max_connections -v 3000 -m 800
gpconfig -c max_prepared_transactions -v 1500 -m 500
gpconfig -c superuser_reserved_connections -v 150 -m 30
#这部分的参数可以作为通用数仓场景调整
gpconfig -c runaway_detector_activation_percent -v 90
gpconfig -c statement_timeout -v 0
gpconfig -c gp_segment_connect_timeout -v 3min
gpconfig -c gp_fts_probe_retries -v 30
gpconfig -c resource_scheduler -v on
gpconfig -c mx_interconnect_compress -v off #网络性能较差的环境建议开启
gpconfig -c tcp_keepalives_idle -v 7200
gpconfig -c gp_enable_global_deadlock_detector -v on
gpconfig -c gp_global_deadlock_detector_period -v 2min
gpconfig -c enable_hashjoin -v on
gpconfig -c enable_mergejoin -v off
gpconfig -c enable_nestloop -v off
gpconfig -c enable_seqscan -v on
gpconfig -c enable_bitmapscan -v on
gpconfig -c gp_max_packet_size -v 32000
gpconfig -c gp_snapshotadd_timeout -v 60s --skipvalidation
gpconfig -c gp_autostats_mode -v none
gpconfig -c enable_parallel_mode -v on
gpconfig -c max_parallel_workers_per_gather -v 2
gpconfig -c parallel_tuple_cost -v 10000000
gpconfig -c random_page_cost -v 1.1
gpconfig -c parallel_setup_cost -v 1000000
gpconfig -c join_collapse_limit -v 10
gpconfig -c from_collapse_limit -v 10
gpconfig -c wal_keep_segments -v 128
gpconfig -c max_wal_size -v 4GB
gpconfig -c log_transaction_sample_rate -v 0 -m 0.001
gpconfig -c log_checkpoints -v on
gpconfig -c log_statement -v none -m ddl
gpconfig -c log_duration -v off
gpconfig -c log_min_duration_statement -v 5000ms
gpconfig -c log_rotation_age -v 1d
gpconfig -c log_timezone -v PRC
gpconfig -c log_min_messages -v warning
gpconfig -c mxstat_statements.harvest_interval -v '10min'
gpconfig -c mxstat_statements.harvest_usage -v off
gpconfig -c gp_max_slices -v 50
YMatrix使用UI部署后,另外添加的内核参数
(注意:net.ipv4.tcp_tw_recycle 在 Linux 4.12 及之后的内核中已被移除,且在 NAT 环境下可能导致连接异常,请先确认内核版本再决定是否配置)
vim /etc/sysctl.d/99-matrixdb.conf
net.core.somaxconn = 65535
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_max_syn_backlog = 65535
net.netfilter.nf_conntrack_max = 6553600
net.core.netdev_max_backlog = 65535
重新加载内核参数sysctl --load=/etc/sysctl.d/99-matrixdb.conf
调整网卡参数为了防止网络丢包#根据实际网卡名称替换ethtool -G ens9f0 rx 8192 tx 8192ethtool -G ens3f0 rx 8192 tx 8192ethtool -K ens9f0 gro offethtool -K ens3f0 gro offethtool -K ens9f0 lro offethtool -K ens3f0 lro off
--验证
grep cgroup /proc/mounts
ls -l /sys/fs/cgroup/cpu/${db_cluster_id}/gpdb
--开启资源组
gpconfig -c gp_resource_manager -v group
gpconfig -c resource_scheduler -v on
gpconfig -c gp_resource_group_memory_limit -v 0.9
--重启数据库
mxstop -raf
--修改资源组资源限制ALTER RESOURCE GROUP default_group SET CPU_RATE_LIMIT 60;ALTER RESOURCE GROUP default_group SET MEMORY_LIMIT 60;ALTER RESOURCE GROUP default_group SET CONCURRENCY 200;ALTER RESOURCE GROUP admin_group SET CONCURRENCY 50;--将执行出来的SQL,复制在数据库里面再执行。SELECT 'alter role '||a.rolname||' RESOURCE GROUP default_group;'FROM pg_roles ainner join pg_resgroup bon a.rolresgroup=b.oidand rsgname <> 'admin_group';
#将gp6集群原始的pg_hba.conf文件里面开通的业务访问白名单信息复制到YMatrix的pg_hba.conf文件里面,切记是以追加的方式进行添加,YMatrix原始的pg_hba.conf文件里面的内容记得保留。su - mxadminvi $MASTER_DATA_DIRECTORY/pg_hba.conf#重新加载配置文件gpstop -u
[mxadmin@IT0140015 crontab]$ vi vacuum_v4.sh
#!/bin/bash
if [ $# -ne 1 ]
then
echo "Usage $0 database[1]"
exit 1;
fi
source /home/mxadmin/.bash_profile
dataname=$1
vacuum_sql_dir=/home/mxadmin/vacuum_log
is_vacuum=true
mkdir -p ${vacuum_sql_dir}/${dataname}
starttime=`date +"%Y-%m-%d %H:%M:%S"`
startts=`date +%s`
echo ">>> ---------------------------------------------------------------------- <<<"
echo ">>> --------------------$starttime start vacuum------------------ <<<"
echo ">>> ---------------------------------------------------------------------- <<<"
fun_sql="CREATE or replace FUNCTION is_vacuum(schemaname text,tablename text) RETURNS bool AS \$\$
DECLARE
is_vacuum bool := false;
tablename text;
threshold float;
BEGIN
select current_setting('gp_appendonly_compaction_threshold') INTO threshold;
tablename:=\$1||'.'||\$2;
select (case when 100*sum(hidden_tupcount)/sum(total_tupcount)::numeric(32,10) > threshold then true else false end) as bloat into is_vacuum from gp_toolkit.__gp_aovisimap_compaction_info(tablename::regclass);
RETURN is_vacuum;
END;
\$\$ LANGUAGE plpgsql;"
#psql -Aqc "$fun_sql" -d$dataname
VACUUM_ANALYZE(){
echo ">>> `date +"%Y-%m-%d %H:%M:%S"` 开始Vacuum $1"
if [ "$is_vacuum" = "true" ];then
psql -c"\timing" -Aqc "vacuum (analyze,skip_locked) $1" -d$dataname
fi
echo ">>> `date +"%Y-%m-%d %H:%M:%S"` $1 Vacuum 完成"
}
VACUUM_ANALYZE_AO(){
echo ">>> `date +"%Y-%m-%d %H:%M:%S"` 开始Vacuum $1"
psql -c"\timing" -Aqc "vacuum (analyze,skip_locked) $1" -d$dataname
echo ">>> `date +"%Y-%m-%d %H:%M:%S"` $1 Vacuum 完成"
}
reorganize(){
echo ">>> `date +"%Y-%m-%d %H:%M:%S"` 开始Vacuum $1"
psql -c"\timing" -Aqc "alter table $1 set with(reorganize=true) " -d$dataname
echo ">>> `date +"%Y-%m-%d %H:%M:%S"` $1 Vacuum 完成"
}
#vacuum analyze catalog
catalog_start=`date +"%Y-%m-%d %H:%M:%S"`
catalog_sts=`date +%s`
echo ">>> $catalog_start 开始Vacuum Analyze Catalog"
SQL="select schemaname||'.'||relname from pg_stat_sys_tables where n_live_tup>0 and n_dead_tup/n_live_tup::float>0.1 and schemaname in ('pg_catalog','information_schema');"
for table in `psql -X -Atc "$SQL" -d$dataname`
do
echo "vacuum analyze $table;" >> $vacuum_sql_dir/$dataname/vacuum_catalog_`date '+%Y%m%d'`.sql
VACUUM_ANALYZE $table
done
catalog_end=`date +"%Y-%m-%d %H:%M:%S"`
catalog_ets=`date +%s`
echo ">>> $catalog_end Vacuum Analyze Catalog 结束,耗时:$((catalog_ets-catalog_sts))s"
echo ">>> ---------------------------------------------------------------------- <<<"
echo ">>> ---------------------------------------------------------------------- <<<"
echo ">>> ---------------------------------------------------------------------- <<<"
#vacuum analyze ao table
aotable_start=`date +"%Y-%m-%d %H:%M:%S"`
aotable_sts=`date +%s`
echo ">>> $aotable_start 开始Vacuum Analyze AO"
THRESHOLD=`psql -Aqt -c "select current_setting('gp_appendonly_compaction_threshold')" -d $dataname`
for TABLE_AO_LIST in `psql -Aqt -c "select (c.relnamespace::regnamespace)::text||'.'||c.relname from pg_class c inner join pg_am am on am.oid=c.relam where am.amname in ('ao_row','ao_column')" -d $dataname`
do
ANALYZE_TABLE=`psql -Aqt -c "SELECT table_name from (select '${TABLE_AO_LIST}' as table_name, (case when 100*sum(hidden_tupcount)/sum(total_tupcount)::numeric(32,10) > ${THRESHOLD} then true else false end) as bloat from gp_toolkit.__gp_aovisimap_compaction_info('${TABLE_AO_LIST}'::regclass)) f where bloat=true and f.bloat is not null" -d $dataname`
if [[ $ANALYZE_TABLE != '' ]]
then
echo "vacuum analyze $ANALYZE_TABLE;" >> $vacuum_sql_dir/$dataname/vacuum_ao_`date '+%Y%m%d'`.sql
echo "alter table $ANALYZE_TABLE set with(reorganize=true);" >> $vacuum_sql_dir/$dataname/reorganize_ao_`date '+%Y%m%d'`.sql
VACUUM_ANALYZE_AO $ANALYZE_TABLE
#reorganize $ANALYZE_TABLE
fi
done
aotable_end=`date +"%Y-%m-%d %H:%M:%S"`
aotable_ets=`date +%s`
echo ">>> $aotable_end Vacuum Analyze AO 结束,耗时:$((aotable_ets-aotable_sts))s"
echo ">>> ---------------------------------------------------------------------- <<<"
echo ">>> ---------------------------------------------------------------------- <<<"
echo ">>> ---------------------------------------------------------------------- <<<"
#vacuum analyze heap table
heaptable_start=`date +"%Y-%m-%d %H:%M:%S"`
heaptable_sts=`date +%s`
echo ">>> $heaptable_start 开始Vacuum Analyze Heap"
SQL="select a.schemaname||'.'||a.relname from pg_class c inner join pg_stat_all_tables a on a.relname=c.relname and a.schemaname=(c.relnamespace::regnamespace)::name inner join pg_am am on am.oid=c.relam where am.amname in('heap','mars2') and n_live_tup>0 and n_dead_tup/n_live_tup::float>0.1 and a.schemaname not in ('information_schema','pg_catalog');"
for table in `psql -X -Atc "$SQL" -d$dataname`
do
echo "vacuum analyze $table;" >> $vacuum_sql_dir/$dataname/vacuum_heap_`date '+%Y%m%d'`.sql
VACUUM_ANALYZE $table
done
heaptable_end=`date +"%Y-%m-%d %H:%M:%S"`
heaptable_ets=`date +%s`
echo ">>> $heaptable_end Vacuum Analyze Heap 结束,耗时:$((heaptable_ets-heaptable_sts))s"
endtime=`date +"%Y-%m-%d %H:%M:%S"`
endts=`date +%s`
echo ">>> ---------------------------------------------------------------------- <<<"
echo ">>> --------------$endtime end vacuum,耗时:$((endts-startts))s--------------- <<<"
echo ">>> ---------------------------------------------------------------------- <<<"
#创建日志目录
mkdir -p /home/mxadmin/vacuum_log/testdb
#定时任务
crontab -e
35 */1 * * * sh /home/mxadmin/crontab/vacuum_v4.sh testdb | grep -v "gp_appendonly_compaction_threshold " >> /home/mxadmin/vacuum_log/testdb/vacuum_`date '+\%Y\%m\%d'`.log 2>&1

3. 按照业务运行周期来确认项目升级成功的标志,例如:稳定运行 2 小时,说明本次升级 YMatrix 已经成功。

文章转载自yMatrix,如果涉嫌侵权,请发送邮件至:contact@modb.pro进行举报,并提供相关证据,一经查实,墨天轮将立刻删除相关内容。







