巨杉数据库SCDP认证参考福利发放
整理了常用操作
su - sdbadmin
sdb
var db=new Sdb("localhost",11810);
创建 company_domain 逻辑域;
db.createDomain("company_domain", [ "group1", "group2", "group3" ], { AutoSplit: true } );
创建 company 集合空间;
db.createCS("company", { Domain: "company_domain" } );
创建 employee 集合;
db.company.createCL("employee", { "ShardingKey": { "_id": 1 }, "ShardingType": "hash", "ReplSize": -1, "Compressed": true, "CompressionType": "lzw", "AutoSplit": true, "EnsureShardingIndex": false } );
查询集群当前节点是否开启事务以及隔离级别。
db.snapshot(SDB_SNAP_CONFIGS, {}, { NodeName: "", transisolation: "", transactionon: "" } );
transisolation 参数为事务隔离级别,默认是 0 。取值列表:
0: RU,读未提交。
1: RC, 读已提交。
2: RS,读稳定性。
修改事务隔离级别 1表示RC
db.updateConf( { transisolation: 1 }, { Global: true } );
开启事务
db.transBegin();
写入数据
db.company.employee.insert( { id: 10001, name: 'Jack', age: 48 } );
查询
db.company.employee.find();
提交事务
db.transCommit();
回滚事务
db.transRollback();
全量备份
db.backup( { Name: "cluster_backup", Path: "/opt/sequoiadb/tmp/%g", Overwrite: true, Description: "full backup" } );
统计 company.employee 数据量;
db.company.employee.count();
写入数据
db.company.employee.insert( { id: 10002, name: 'Jane', age: 32 } );
增量备份
db.backup( { Name: "cluster_backup", Path: "/opt/sequoiadb/tmp/%g", EnsureInc: true } );
查看备份信息;
db.listBackup( { Path: "/opt/sequoiadb/tmp/%g" } );
删除集群上的集合空间
db.dropCS("company");
退出 SequoiaDB Shell;
quit;
停止所有节点;
sdbstop -t all
执行全量恢复操作;
sdbrestore -p /opt/sequoiadb/tmp/SYSCatalogGroup/ -n cluster_backup -b 0 -i 0
sdbrestore -p /opt/sequoiadb/tmp/group1/ -n cluster_backup -b 0 -i 0
sdbrestore -p /opt/sequoiadb/tmp/group2/ -n cluster_backup -b 0 -i 0
sdbrestore -p /opt/sequoiadb/tmp/group3/ -n cluster_backup -b 0 -i 0
启动节点;
sdbstart -t all
-----------------------------------------------------------------------------------------------
增量恢复
1)停止所有节点;
sdbstop -t all
2)增量恢复操作;
sdbrestore -p /opt/sequoiadb/tmp/group1/ -n cluster_backup -b -1
sdbrestore -p /opt/sequoiadb/tmp/group2/ -n cluster_backup -b -1
sdbrestore -p /opt/sequoiadb/tmp/group3/ -n cluster_backup -b -1
3)启动节点;
sdbstart -t all
-----------------------------------------------------------------------------------------------
查看编目信息
db.snapshot( SDB_SNAP_CATALOG )
-----------------------------------------------------------------------------------------------集群扩容
1)创建一个新的数据组;
db.createRG("group4");
在 group4 上创建 1 个数据节点副本,节点端口号为 11850 ,数据文件路径为( /opt/sequoiadb/database/data/11850/ ),设置日志个数为5;
2)在 group4 上创建1个数据节点副本;
db.getRG("group4").createNode("sdbserver1", 11850, "/opt/sequoiadb/database/data/11850/",{logfilenum:5});
3)将集合 employee 的数据切分一部分到新加入的数据组 group4 中,使数据分布均匀
启动 group4 数据组;
db.getRG("group4").start();
数据域 company_domain 增加数据组;
db.getDomain("company_domain").addGroups( { Groups: [ 'group4' ] } );
将集合的数据迁移部分到新的数据组上;
db.company.employee.split("group1", "group4", 25);
db.company.employee.split("group2", "group4", 25);
db.company.employee.split("group3", "group4", 25);
安装 SequoiaSQL-MySQL 实例;
./sequoiasql-mysql-3.4-linux_x86_64-installer.run --mode text
查看存储引擎中的集合信息;
db.list(SDB_LIST_COLLECTIONS);
检查 SequoiaSQL-MySQL实例是否已启动。
/opt/sequoiasql/mysql/bin/sdb_sql_ctl status
检查 SequoiaSQL-PostgreSQL实例是否已启动。
/opt/sequoiasql/postgresql/bin/sdb_sql_ctl status
查看 Spark 的 master 和 worker 是否已启动;
jps
设置事务级别及配置为等锁模式;
db.updateConf( { transactionon: true, transisolation: 1, translockwait: true } );
-----------------------------------------------------------------------------------------------创建一个 MySQL 实例,实例名为 myinst,使用的端口为 3306
1)切换 sdbadmin 用户;
su - sdbadmin
2)进入 SequoiaSQL-MySQL 实例安装目录;
cd /opt/sequoiasql/mysql
3)创建 myinst 实例;
bin/sdb_sql_ctl addinst myinst -D database/3306/
4)查看实例;
bin/sdb_sql_ctl status
创建 metauser 用户并赋予全局权限,用于创建 SparkSQL 元数据库 metastore,存放元数据信息;
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
CREATE USER 'metauser'@'%' IDENTIFIED BY 'metauser';
GRANT ALL ON *.* TO 'metauser'@'%';
FLUSH PRIVILEGES;
在 MySQL 实例中创建数据库 company,数据表 employee ( empno INT, ename VARCHAR(128), age INT ),并写入如下数据,然后查询是否存在数据;
(empno:10001, ename:'Georgi', age:48)
CREATE DATABASE company;
USE company;
CREATE TABLE employee
(
empno INT AUTO_INCREMENT PRIMARY KEY,
ename VARCHAR(128),
age INT
);
INSERT INTO employee (empno,ename, age) VALUES (10001,"Georgi", 48);
创建一个 PostgreSQL 实例,实例名为 pginst,使用的端口为 5432,并与 SequoiaDB 巨杉数据库对接
----------------------------------------------------------------------------------------------- 创建 PostgreSQL 实例
1)切换 sdbadmin 用户;
su - sdbadmin
2)进入 SequoiaSQL-PostgreSQL 实例安装目录;
cd /opt/sequoiasql/postgresql
3)创建 PostgreSQL 实例;
bin/sdb_sql_ctl addinst pginst -D database/5432/
4)启动 PostgreSQL 实例;
bin/sdb_sql_ctl start pginst
5)检查创建的实例状态;
bin/sdb_sql_ctl status
bin/sdb_sql_ctl createdb company pginst
bin/psql -p 5432 company
CREATE EXTENSION sdb_fdw;
-----------------------------------------------------------------------------------------------PostgreSQL 实例中访问数据
1)创建 company 数据库;
/opt/sequoiasql/postgresql/bin/sdb_sql_ctl createdb company pginst
2)进入 PostgreSQL shell;
/opt/sequoiasql/postgresql/bin/psql -p 5432 company
3)加载SequoiaDB连接驱动;
CREATE EXTENSION sdb_fdw;
4)配置与SequoiaDB连接参数;
CREATE SERVER sdb_server FOREIGN DATA WRAPPER sdb_fdw OPTIONS
(
address '127.0.0.1',
service '11810',
preferedinstance 'A',
transaction 'off'
);
关联存储引擎中的集合
创建 company 数据库外表;
CREATE FOREIGN TABLE employee
(
empno INTEGER,
ename TEXT,
age INTEGER
) SERVER sdb_server
OPTIONS (collectionspace 'company', collection 'employee', decimal 'on');
更新表的统计信息;
ANALYZE employee;
-----------------------------------------------
安装spark
tar -zxvf spark-2.4.4-bin-hadoop2.7.tar.gz -C /home/sdbadmin/
ssh-keygen -t rsa
ssh-copy-id sdbadmin@sdbserver1
cd /home/sdbadmin/spark-2.4.4-bin-hadoop2.7/conf
cp spark-env.sh.template spark-env.sh
echo "SPARK_MASTER_HOST=sdbserver1" >> spark-env.sh
cat > /home/sdbadmin/spark-2.4.4-bin-hadoop2.7/conf/hive-site.xml << EOF
<configuration>
<property>
<name>hive.metastore.schema.verification</name>
<value>false</value>
</property>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://localhost:3306/metastore?useSSL=false</value>
<description>JDBC connect string for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
<description>Driver class name for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>metauser</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>metauser</value>
</property>
<property>
<name>datanucleus.autoCreateSchema</name>
<value>true</value>
<description>creates necessary schema on a startup if one doesn't exist. set this to false, after creating it once</description>
</property>
</configuration>
EOF
cd /home/sdbadmin/spark-2.4.4-bin-hadoop2.7/conf/
cp /opt/sequoiadb/spark/spark-sequoiadb_2.11-3.4.jar /home/sdbadmin/spark-2.4.4-bin-hadoop2.7/jars/
cp /opt/sequoiadb/java/sequoiadb-driver-3.4.jar /home/sdbadmin/spark-2.4.4-bin-hadoop2.7/jars/
cp /home/sdbadmin/soft/mysql-jdbc.jar /home/sdbadmin/spark-2.4.4-bin-hadoop2.7/jars/
cp log4j.properties.template log4j.properties
sed -i 's/log4j.rootCategory=INFO, console/log4j.rootCategory=ERROR, console/g' log4j.properties
cd /home/sdbadmin/spark-2.4.4-bin-hadoop2.7
sbin/start-all.sh
bin/spark-sql
CREATE DATABASE company;
USE company;
CREATE TABLE company.employee
(
empno INT,
ename STRING,
age INT
) USING com.sequoiadb.spark OPTIONS (host 'localhost:11810', collectionspace 'company', collection 'employee', username '', password '');
MySQL实例管理
1) 切换到 sdbadmin 用户;
su - sdbadmin
2)创建 MySQL 实例 myinst 并创建 company 数据库以及 employee 数据表( id INT, name VARCHAR(128), age INT );
cd /opt/sequoiasql/mysql
bin/sdb_sql_ctl addinst myinst -D database/3306/
bin/sdb_sql_ctl listinst
bin/sdb_sql_ctl status
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
CREATE DATABASE company;
USE company;
SHOW DATABASES;
CREATE TABLE employee
(
empno INT,
ename VARCHAR(128),
age INT,
PRIMARY KEY (empno)
) ENGINE = sequoiadb COMMENT = "雇员表, sequoiadb: { table_options: { ShardingKey: { 'empno': 1 }, ShardingType: 'hash', 'Compressed': true, 'CompressionType': 'lzw', 'AutoSplit': true, 'EnsureShardingIndex': false } }";
3)在 employee 数据表创建普通索引 name_Index,字段为 name ;
CREATE INDEX name_Index ON employee(ename);
4)查看 SequoiaDB 是否有 name_Index 这个索引;
show index from employee;
5)在 employee 插入信息(id: 10001, name: 'Georgi', age: 48);
INSERT INTO employee VALUES (10001, 'Georgi', 48);
6)开始 MySQL 事务,插入数据(id: 10002, name: 'Bezalel', age: 21),然后查询 employee 表信息,最后提交事务;
begin;
INSERT INTO employee (empno,ename, age) VALUES (10002,"Bezalel", 21);
select * from employee;
commit;
7)开始 MySQL 事务,插入数据(id: 10003, name: 'lazhu', age: 22),然后查询 employee 表信息,不提交然后回滚事务,在进行表信息查询;
begin;
INSERT INTO employee (empno,ename, age) VALUES (10003,"lazhu", 22);
select * from employee;
rollback;
commit;
8)修改实例下的配置文件(database/3306/auto.cnf),加入导入导出路径参数配置(secure_file_priv);
cat >> /opt/sequoiasql/mysql/database/3306/auto.cnf << EOF
secure_file_priv = "/opt/sequoiasql/tmp"
EOF
mkdir /opt/sequoiasql/tmp
/opt/sequoiasql/mysql/bin/sdb_sql_ctl restart myinst
9)导出 employee 表数据至 /opt/sequoiasql/tmp/employee_export.csv下;
SELECT * FROM employee
INTO OUTFILE '/opt/sequoiasql/tmp/employee_export.csv'
FIELDS TERMINATED BY ','
OPTIONALLY ENCLOSED BY '"'
ESCAPED BY '"'
LINES TERMINATED BY '\r\n';
10)新建 employee_import_test 表( id INT, name VARCHAR(128), age INT )并将刚刚导出的数据进行导入到 employee_import_test 表;
USE company;
CREATE TABLE employee_import_test
(
empno INT AUTO_INCREMENT PRIMARY KEY,
ename VARCHAR(128),
age INT
)
ENGINE=sequoiadb COMMENT="雇员表, sequoiadb: { table_options: { ShardingKey: { 'empno': 1 }, ShardingType: 'hash', 'Compressed': true,'CompressionType': 'lzw', 'AutoSplit': true, 'EnsureShardingIndex': false } }";
LOAD DATA INFILE '/opt/sequoiasql/tmp/employee_export.csv'
INTO TABLE employee_import_test
FIELDS TERMINATED BY ','
OPTIONALLY ENCLOSED BY '"'
ESCAPED BY '"'
LINES TERMINATED BY '\r\n';
SELECT * FROM employee_import_test;
11)对 employee 表进行备份,备份路径为 /opt/sequoiasql/tmp/employee.sql;
mkdir /opt/sequoiasql/tmp
/opt/sequoiasql/mysql/bin/mysqldump -h 127.0.0.1 -u root company employee > /opt/sequoiasql/tmp/employee.sql
12)清空 employee 表数据并执行备份恢复操作;
use company;
drop table employee;
source /opt/sequoiasql/tmp/employee.sql
-----------------------------------------------------------------------------------------------PostgreSQL实例管理
1)切换到 sdbadmin 用户;
su - sdbadmin;
2)创建 company_domain 数据域,包含 group1、group2、group3 三个复制组;
sdb
var db = new Sdb("localhost", 11810);
db.createDomain("company_domain", [ "group1", "group2", "group3" ], { AutoSplit: true } );
3)创建 company 集合空间,所属 company_domain 数据域;
db.createCS("company", { Domain: "company_domain" } );
4)创建 employee 集合;
db.company.createCL("employee", { "ShardingKey": { "_id": 1 }, "ShardingType": "hash", "ReplSize": -1, "Compressed": true, "CompressionType": "lzw", "AutoSplit": true, "EnsureShardingIndex": false } );
5)创建实例 pginst,端口使用 5432;
cd /opt/sequoiasql/postgresql
bin/sdb_sql_ctl delinst myinst
bin/sdb_sql_ctl addinst pginst -D database/5432/
bin/sdb_sql_ctl listinst
bin/sdb_sql_ctl start pginst
bin/sdb_sql_ctl status
6)配置连接 PostgreSQL 实例与 SequoiaDB 巨杉数据库存储引擎(transaction 设置为 on ,默认开启事务);
7)在 PostgreSQL 创建 company 数据库和 employee 外部表 ( id INT, name TEXT, age INT ) 映射到 SequoiaDB 数据库的 company.employee 集合;
/opt/sequoiasql/postgresql/bin/sdb_sql_ctl createdb company pginst
/opt/sequoiasql/postgresql/bin/psql -l
/opt/sequoiasql/postgresql/bin/psql -p 5432 company
CREATE EXTENSION sdb_fdw;
CREATE SERVER sdb_server FOREIGN DATA WRAPPER sdb_fdw
OPTIONS
(
address '127.0.0.1',
service '11810',
user '',
password '',
preferedinstance 'A',
transaction 'on'
);
CREATE FOREIGN TABLE employee
(
id INT,
name TEXT,
age INT
)
SERVER sdb_server
OPTIONS ( collectionspace 'company', collection 'employee', decimal 'on' );
ANALYZE employee;
CREATE FOREIGN TABLE employee_bak
(
id INT,
name TEXT,
age INT
)
SERVER sdb_server
OPTIONS ( collectionspace 'company', collection 'employee', decimal 'on' );
ANALYZE employee;
8)向 employee 表插入数据 ( id: 10001, name: 'Jerry', age: 18 );
INSERT INTO employee VALUES (10001, 'Jerry', 18);
id : 10001 , name : 'Jerry' , age : 18
9)开始事务,向 employee 表插入数据 ( id: 10002, name: 'Tom', age: 20 ),回滚事务,然后查询 employee 表;
BEGIN;
INSERT INTO employee VALUES (10002, 'Tom',20);
ROLLBACK;
10)将 employee 表数据导出到 “/opt/sequoiasql/postgresql/employee.csv”,使用 “\! more /opt/sequoiasql/postgresql/employee.csv” 命令查看数据内容;
/opt/sequoiasql/postgresql/bin/psql -p 5432 company
COPY (SELECT * FROM employee) TO '/opt/sequoiasql/postgresql/employee.csv' with delimiter ',' csv;
\! more /opt/sequoiasql/postgresql/employee.csv
11)在 company 集合空间创建集合 employee_bak, 并将上一步导出的数据文件导入到 employee_bak 中;
db.company.createCL("employee_bak", { "ShardingKey": { "_id": 1 }, "ShardingType": "hash", "ReplSize": -1, "Compressed": true, "CompressionType": "lzw", "AutoSplit": true, "EnsureShardingIndex": false } );
sdbimprt --hosts=localhost:11810 --type=csv --file=/opt/sequoiasql/postgresql/employee.csv --fields="id int, name string, age int" -c company -l employee_bak
-----------------------------------------------------------------------------------------------SparkSQL 实例管理
1)切换到 sdbadmin 用户;
su - sdbadmin
2)在 SequoiaDB 数据库上创建集合空间 company,集合 employee,写入如下数据:
( empno: 10001, ename: 'Georgi', age: 48 );
( empno: 10002, ename: 'Bezalel', age: 21 );
( empno: 10003, ename: 'Parto', age: 33 );
Note:
该考试环境为单分区三副本,仅创建了数据组 group1
var db = new Sdb("localhost", 11810);
db.createDomain("company_domain", [ "group1"], { AutoSplit: true } );
db.createCS("company", { Domain: "company_domain" } );
db.company.createCL("employee", { "ShardingKey": { "_id": 1 }, "ShardingType": "hash", "ReplSize": -1, "Compressed": true, "CompressionType": "lzw", "AutoSplit": true, "EnsureShardingIndex": false } );
db.company.employee.insert( { empno: 10001, ename: 'Georgi', age: 48 } );
db.company.employee.insert( { empno: 10002, ename: 'Bezalel', age: 21} );
db.company.employee.insert( { empno: 10003, ename: 'Parto', age: 33} );
3)在 MySQL 实例创建数据库 company 和 数据表 employee 与 SequoiaDB 巨杉数据库存储引擎的 employee 集合映射;
/opt/spark/sbin/start-all.sh
/opt/spark/sbin/start-thriftserver.sh
/opt/spark/bin/beeline -u 'jdbc:hive2://localhost:10000'
CREATE DATABASE company;
USE company;
CREATE TABLE employee
(
empno INT,
ename STRING,
age INT
)
USING com.sequoiadb.spark OPTIONS
(
host 'localhost:11810',
collectionspace 'company',
collection 'employee'
);
CREATE <[TEMPORARY] TABLE | TEMPORARY VIEW> <tableName> [(SCHEMA)]
USING com.sequoiadb.spark OPTIONS (<option>, <option>, ...);
4)查看当前 MySQL 实例连接 SequoiaDB 巨杉数据库引擎的协调节点端口是否为11810;
sdblist -l | grep -E "Name|coord"
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root -e "SHOW VARIABLES LIKE 'sequoiadb_conn%';"
5)使用 beeline 客户端连接 SparkSQL 的 thriftserver 服务,创建对应集合空间的数据库 company,对应集合的表 employee;建表要求:
设置连接 SequoiaDB 的协调节点为 21810;
设置 SparkSQL 读取数据时优先选择备节点;
/opt/spark/bin/beeline -u 'jdbc:hive2://localhost:10000'
CREATE DATABASE company;
USE company;
CREATE TABLE employee
(
empno INT,
ename STRING,
age INT
)
USING com.sequoiadb.spark OPTIONS
(
host 'localhost:21810',
collectionspace 'company',
collection 'employee',
preferredinstance 'S',
preferredinstancemode 'random',
preferredinstancestrict true
);
6)查找年龄最小的员工信息并写入 company.result 表中(使用 Spark 结果集创建表的方式把结果存储到巨杉数据库,包含 empno,ename, age 三个字段);
db.company.createCL("result", { "ShardingKey": { "_id": 1 }, "ShardingType": "hash", "ReplSize": -1, "Compressed": true, "CompressionType": "lzw", "AutoSplit": true, "EnsureShardingIndex": false } );
db.company.result.insert( { empno: 10002, ename: 'Bezalel', age: 21} );
7)使用 SELECT 方法查看数据;
------------------------------------------------------------------------分割线-------------------------------------------------------------------------
数据库应用开发
JSON 实例应用
1)创建域 company_domain ,在域下创建集合空间 company , 在集合空间下创建集合 employee ;
var db = new Sdb("localhost", 11810);
db.createDomain("company_domain", [ "group1"], { AutoSplit: true } );
db.createCS("company", { Domain: "company_domain" } );
db.company.createCL("employee", { "ShardingKey": { "_id": 1 }, "ShardingType": "hash", "ReplSize": -1, "Compressed": true, "CompressionType": "lzw", "AutoSplit": true, "EnsureShardingIndex": false } );
2)给创建的 employee 集合插入下面 6 条数据;
db.company.employee.insert( { "empno": 10001, "ename": "Georgi", "age": 48 } );
db.company.employee.insert( { "empno": 10002, "ename": "Bezalel", "age": 21 } );
db.company.employee.insert( { "empno": 10003, "ename": "Parto", "age": 33 } );
db.company.employee.insert( { "empno": 10004, "ename": "Chirstian", "age": 40 } );
db.company.employee.insert( { "empno": 10005, "ename": "Kyoichi", "age": 23 } );
db.company.employee.insert( { "empno": 10006, "ename": "Anneke", "age": 19 } );
3)连接查询集合 employee 中age 大于20,小于30的数据;
db.company.employee.find( { "age": { "$gt": 20, "$lt": 30 } } );
4)将集合 employee 中 empno 为 10001 的记录的 age 更改为34;
db.company.employee.update( { "$set": { "age": 34 } }, { "empno": 10001 } );
5)删除集合 employee 中 empno 为 10006 的记录删除;
db.company.employee.remove( { "empno": 10006 } );
SequoiaFS 实例应用
1) 创建 fs_domain 域使用,在此域下创建集合空间 fscs,在此集合空间下创建集合 fscl ;
var db = new Sdb("localhost", 11810);
db.createDomain("fs_domain", [ "group1", "group2", "group3" ], { AutoSplit: true } );
db.createCS("fscs", { Domain: "fs_domain" } );
db.fscs.createCL("fscl", { "ShardingKey": { "_id": 1}, "ShardingType": "hash", "ReplSize": -1, "Compressed": true, "CompressionType": "lzw", "AutoSplit": true, "EnsureShardingIndex": false } );
2)创建挂载点目录" /opt/sequoiadb/sequoiafs/mountpoint ";
mkdir -p /opt/sequoiadb/sequoiafs/mountpoint
3) 创建 SequoiaFS 的配置文件夹" /opt/sequoiadb/sequoiafs/conf/fscs_fscl/001/ "和日志目录" /opt/sequoiadb/sequoiafs/log/fscs_fscl/001/ " ;
mkdir -p /opt/sequoiadb/sequoiafs/conf/fscs_fscl/001/
mkdir -p /opt/sequoiadb/sequoiafs/log/fscs_fscl/001/
4) 生成一个空的配置文件,SequoiaFS 服务在启动时会将指定的值写入该文件中,其他参数使用缺省值" /opt/sequoiadb/sequoiafs/conf/fscs_fscl/001/sequoiafs.conf " ;
touch /opt/sequoiadb/sequoiafs/conf/fscs_fscl/001/sequoiafs.conf
5)启动 SequoiaFS 服务,挂载上面创建的目录(可参考学习章节 “启动 SequoiaFS 服务” 相关内容 );
sequoiafs /opt/sequoiadb/sequoiafs/mountpoint -i localhost:11810 -l fscs.fscl --autocreate -c /opt/sequoiadb/sequoiafs/conf/fscs_fscl/001/ --diagpath /opt/sequoiadb/sequoiafs/log/fscs_fscl/001/ -o big_writes -o max_write=131072 -o max_read=131072
6) 在 mount 目录下创建子目录 fsdir ,且在这个目录下创建文件 fsfile.txt 向文件 fsfile.txt 写入 " hello, this is a fsfile ! ";
cd /opt/sequoiadb/sequoiafs/mountpoint/
mkdir fsdir
ls -trl
cd /opt/sequoiadb/sequoiafs/mountpoint/fsdir
echo 'hello, this is a fsfile!' >> fsfile.txt
cat fsfile.txt
7) 在数据库检查是否将文件夹中的文件挂载上去 ;
var db = new Sdb("localhost", 11810);
db.list(SDB_LIST_COLLECTIONS);
db.sequoiafs.maphistory.find();
S3 对象存储部署
1)开启 RC 级别事务为读已提交,且配置为等锁模式;
sdb
var db = new Sdb("localhost", 11810);
db.updateConf( { transactionon: true, transisolation: 1, translockwait: true } );
2)创建元数据逻辑域;
db.createDomain("metaDomain", [ "group1", "group2", "group3" ], { AutoSplit: true} );
3)创建对象数据逻辑域;
db.createDomain("dataDomain", [ "group1", "group2", "group3" ], { AutoSplit: true} );
4)配置启动 SequoiaS3 实例;
cd /opt/sequoiadb/tools/sequoias3
echo 'server.port=8002' >> config/application.properties
echo 'sdbs3.sequoiadb.url=sequoiadb://localhost:11810' >> config/application.properties
echo 'sdbs3.sequoiadb.meta.domain=metaDomain' >> config/application.properties
echo 'sdbs3.sequoiadb.data.domain=dataDomain' >> config/application.properties
/opt/sequoiadb/tools/sequoias3/sequoias3.sh start
5)创建桶 exambucket;
curl -v -X PUT "http://localhost:8002/exambucket" -H "Host: localhost:8002" -H "Authorization: AWS ABCDEFGHIJKLMNOPQRST:abcdefghijklmnopqrstuvwxyz0123456789ABCD"
6)向 exambucket 中写入文件"/opt/sequoiadb/version.conf",命名为"version.conf";
ls /opt/sequoiadb/version.conf
ls /opt/sequoiadb/tools/sequoias3/sequoia-s3-3.4.jar
curl -X PUT -T "/opt/sequoiadb/version.conf" "http://localhost:8002/exambucket/version.conf" -H "Host: localhost:8002" -H "Authorization: AWS ABCDEFGHIJKLMNOPQRST:abcdefghijklmnopqrstuvwxyz0123456789ABCD" -H "Content-Type: text/plain"
var db = new Sdb("localhost", 11810);
db.S3_SYS_Meta.S3_ObjectMeta.find( { "Key": "version.conf" } );
7)从桶 exambucket 中下载文件对象 "version.conf" 保存为 /home/sdbadmin/version.conf ;
curl -o version.conf -X GET "http://localhost:8002/exambucket/version.conf" -H "Host: localhost:8002" -H "Authorization: AWS ABCDEFGHIJKLMNOPQRST:abcdefghijklmnopqrstuvwxyz0123456789ABCD" -H "Content-Type: text/plain"
删除桶
curl -X DELETE "http://localhost:8002/sdbbucket/version.conf" -H "Host: localhost:8002" -H "Authorization: AWS ABCDEFGHIJKLMNOPQRST:abcdefghijklmnopqrstuvwxyz0123456789ABCD"
curl -X DELETE "http://localhost:8002/sdbbucket/sdbs3.jar" -H "Host: localhost:8002" -H "Authorization: AWS ABCDEFGHIJKLMNOPQRST:abcdefghijklmnopqrstuvwxyz0123456789ABCD"
curl -v -X DELETE "http://localhost:8002/sdbbucket" -H "Host: localhost:8002" -H "Authorization: AWS ABCDEFGHIJKLMNOPQRST:abcdefghijklmnopqrstuvwxyz0123456789ABCD"
-----------------------------------------------------------------------------------------------1)新增数据节点组 group4 并在该数据组中创建 11850 数据节点;
sdb
var db=new Sdb("localhost",11810);
var dataRG = db.createRG("group4");
dataRG.createNode("sdbserver1", 11850, "/opt/sequoiadb/database/data/11850/", { logfilenum: 5, transactionon: true } );
dataRG.start();
2)创建域 company_domain,其中域包含有 group1,group2,group3;
db.createDomain("company_domain", [ "group1", "group2", "group3" ], { AutoSplit: true } );
3)创建集合空间 company,存放于 company_domain 域中;
db.createCS("company", { Domain: "company_domain" } );
4)在集合空间 company 创建 ReplSize 为 0 的强一致集合 employee;
db.company.createCL("employee", { "ShardingKey": { "_id": 1 }, "ShardingType": "hash", "ReplSize": 0, "Compressed": true, "CompressionType": "lzw", "AutoSplit": true, "EnsureShardingIndex": false } );
5)在 company.employee 表中插入数据为(empno:10001, ename:'Georgi', age:48);
db.company.employee.insert( { empno:10001, ename:'Georgi', age:48 } );
6)在 company 集合空间中创建以 tx_time 为分区键,字段类型为 date 的主集合 log;
db.company.createCL("log", { "IsMainCL": true, "ShardingKey": {"tx_time": 1 }, "ShardingType": "range" } );
7)分别创建 year2020.log 和 year2021.log 子集合,year2020 和year2021 为集合空间名。两个子集合的 hash 分区键字段为 serial_no(log表中的主键字段,为序列号)。并将两个子集合挂载到主集合 company.log,子集合 year2020.log 保存小于 2021年的数据,而子集合 year2021.log 保存大于等于 2021 年的数据;
db.createCS("year2020", { "Domain": "company_domain" } );
db.createCS("year2021", { "Domain": "company_domain" } );
db.year2020.createCL("log", { "ShardingKey": { "serial_no": 1 }, "ShardingType": "hash", "ReplSize": -1, "Compressed": true, "CompressionType": "lzw", "AutoSplit": true, "EnsureShardingIndex": false } );
db.year2021.createCL("log", { "ShardingKey": { "serial_no": 1 }, "ShardingType": "hash", "ReplSize": -1, "Compressed": true, "CompressionType": "lzw", "AutoSplit": true, "EnsureShardingIndex": false } );
db.company.log.attachCL("year2020.log", { "LowBound": { "tx_time": MinKey() }, UpBound: { tx_time: { "$date": "2021-01-01" } } } );
db.company.log.attachCL("year2021.log", { LowBound: { "tx_time": { "$date": "2021-01-01" } }, "UpBound": { "tx_time": MaxKey() } } );
db.company.log.insert( { "serial_no": 1, "order_id": "20200302001", "bus_pay_no": "7312a297-21b4-1328-7834-ss21a251708", "tx_time": { "$date": "2020-03-02" } } );
db.company.log.insert( { "serial_no": 2, "order_id": "20210101008", "bus_pay_no": "4321a297-15b4-4528-9034-cc21a256708", "tx_time": { "$date": "2021-01-01" } } );
db.year2020.log.count();
db.year2021.log.count();
8)用 snapshot 中的集合快照查看 company.employee 的集合并输出查询结果到 /home/sdbadmin/snap_collection.log 文件中;
sdb 'var db=new Sdb("localhost", 11810)'
sdb 'db.snapshot(SDB_SNAP_COLLECTIONS, { Name: "company.employee" } )' > /home/sdbadmin/snap_collection.log
9)用 snapshot 中的配置快照查看集群中所有协调节点、编目节点和数据节点的诊断日志路径并输出查询结果到 /home/sdbadmin/snap_diagpath.log 文件中;
sdb 'var db=new Sdb("localhost", 11810)'
sdb 'db.snapshot(SDB_SNAP_CONFIGS)' > /home/sdbadmin/snap_diagpath.log
-----------------------------------------------------------------------------------------------1)通过 SequoiaSQL-MySQL 实例创建 company 数据库以及数据表 employee,字段为(empno INT, ename VARCHAR(128), age INT),其中 empno 为主键自增;
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
CREATE DATABASE company;
USE company;
CREATE TABLE employee
(
empno INT AUTO_INCREMENT PRIMARY KEY,
ename VARCHAR(128),
age INT
);
2)写入表 company.employee 数据值为(empno:10001, ename:"Jacky", age:36);
INSERT INTO employee (empno,ename, age) VALUES (10001,"Jacky", 36);
3)在 SequoiaDB Shell 中设置事务隔离级别为 RC(读已提交);
db.updateConf( { transisolation: 1 }, { Global: true } );
4)登录 MySQL,开始事务,插入数据(ename:'lazhu', age:22),其中 empno 字段为自增字段,无需填写,执行插入后并提交事务。
BEGIN;
INSERT INTO employee (ename, age) VALUES ("lazhu", 22);
COMMIT;
5)开始 MySQL 事务,更新 empno:10002 的记录年龄为 20( age:20 )并提交事务;
BEGIN;
UPDATE company.employee SET age = 20 WHERE empno=10002 ;
COMMIT;
6)关闭 31820 数据节点;
sdbstop -p 31820
sdblist -t all -l -m local
7)开始 MySQL 事务,插入数据( empno:10003, ename:'Susan', age:32 )并提交事务;
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
BEGIN;
INSERT INTO employee ( empno,ename, age) VALUES (10003,"Susan", 32);
COMMIT;
8)启动 31820 数据节点;
sdbstart -p 31820
9)修改 11820 数据节点实例 id 为 1,修改 21820 数据节点实例 id 为 2,配置 11810 协调节点读取数据时的读取策略为优先使用数据节点实例 2,后使用数据节点实例 1 ,重启节点使参数生效
SHOW VARIABLES LIKE '%sequoiadb_conn_addr%';
sdb;
var db = new Sdb("localhost", 11810);
sdbstart -p 21820
db.updateConf( { instanceid: 1 }, { svcname: { "$in": [ "11820" ] } } );
db.updateConf( { instanceid: 2 }, { svcname: { "$in": [ "21820" ] } } );
db.updateConf( { preferedinstance: "2,1", preferedinstancemode: "ordered", preferedstrict: true }, { GroupName: "SYSCoord", svcname: "11810" } );
sdbstop -r data
sdbstart -r data




