准备软硬件安装环境
环境信息:
OpenGauss版本:2.1.0 (GaussDB Kernel V500R002C10 build 6ac75df9)
OS版本:Kylin Linux Advanced Server release V10 (Tercel)
192.168.0.11 node1
192.168.0.12 node2
192.168.0.13 node3
资源大小:8c、32g
1. 配置互信
-- 配置域名解析
[root@node1 ~]# vim /etc/hosts
......
192.168.0.11 node1
192.168.0.12 node2
192.168.0.13 node3
[root@node1 ~]# for ip in `cat /etc/hosts | grep -i node | awk '{print $1}'`
do
echo $ip
scp /etc/hosts $ip:/etc/
done
-- 在所有节点执行:
[root@node1 ~]# ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
[root@node2 ~]# ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
[root@node3 ~]# ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
-- 将所有节点的公钥文件内容拷贝到authorized_keys文件中
[root@node1 ~]# cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[root@node1 ~]# vim .ssh/authorized_keys
[root@node2 ~]# cat .ssh/id_rsa.pub
[root@node3 ~]# cat .ssh/id_rsa.pub
-- 传送至其他所有节点
[root@node1 ~]# for ip in `cat /etc/hosts | grep -i node | awk '{print $1}'`
do
echo $ip
scp /root/.ssh/authorized_keys $ip:/root/.ssh/
done
2. 开启允许root用户ssh远程登录
-- 所有节点修改SSH配置文件,开启root用户登录ssh功能(以node1节点为例)
[root@node1 ~]# vim /etc/ssh/sshd_config
......
PermitRootLogin yes
[root@node1 ~]# service sshd restart
3. 关闭所有节点的防火墙和Selinux
[root@node1 ~]# for ip in `cat /etc/hosts | grep -i node | awk '{print $1}'`
do
echo $ip
ssh $ip "systemctl stop firewalld.service"
ssh $ip "systemctl disable firewalld.service"
done
-- 临时关闭selinux
[root@node1 ~]# for ip in `cat /etc/hosts | grep -i node | awk '{print $1}'`
do
echo $ip
ssh $ip "setenforce 0"
ssh $ip "sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config"
done
4. 设置字符集参数
[root@node1 ~]# echo $LANG
zh_CN.UTF-8
[root@node2 ~]# echo $LANG
zh_CN.UTF-8
[root@node3 ~]# echo $LANG
zh_CN.UTF-8
[root@node1 ~]# echo "LANG=en_US.UTF-8" >> /etc/profile
[root@node1 ~]# source /etc/profile
[root@node1 ~]# echo $LANG
en_US.UTF-8
[root@node2 ~]# echo "LANG=en_US.UTF-8" >> /etc/profile
[root@node2 ~]# source /etc/profile
[root@node2 ~]# echo $LANG
en_US.UTF-8
[root@node3 ~]# echo "LANG=en_US.UTF-8" >> /etc/profile
[root@node3 ~]# source /etc/profile
[root@node3 ~]# echo $LANG
en_US.UTF-8
5. 检查时区和时间为:Asia/Shanghai
[root@node1 ~]# ll /etc/localtime
lrwxrwxrwx. 1 root root 35 Mar 18 17:53 /etc/localtime -> ../usr/share/zoneinfo/Asia/Shanghai
[root@node2 ~]# ll /etc/localtime
lrwxrwxrwx. 1 root root 35 Mar 18 17:53 /etc/localtime -> ../usr/share/zoneinfo/Asia/Shanghai
[root@node3 ~]# ll /etc/localtime
lrwxrwxrwx. 1 root root 35 Mar 18 17:53 /etc/localtime -> ../usr/share/zoneinfo/Asia/Shanghai
6. 关闭 swap 交换内存
[root@node1 ~]# swapoff -a
[root@node2 ~]# swapoff -a
[root@node3 ~]# swapoff -a
7. 设置网卡 MTU 值
-- 以node1节点为例
[root@node1 ~]# ifconfig eth0 mtu 8192
-- 配置开机自动配置
[root@node1 ~]# echo "ifconfig eth0 mtu 8192" >> /etc/rc.local
-- 给予执行权限
[root@node1 ~]# chmod +x /etc/rc.local
8. 配置本地yum源
-- 上传Kylin镜像文件到/tmp目录并挂载
[root@node1 ~]# mount -o loop /tmp/Kylin-Server-10-SP1-Release-Build04-20200711-arm64.iso /mnt
mount: /dev/loop0 is write-protected,mounting read-only
[root@node1 ~]# cat /etc/yum.repos.d/EulerOS.repo
[base]
name=EulerOS-2.0SP5 base
baseurl=file:///mnt/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EulerOS
9. 安装依赖包
[root@node1 ~]# for ip in `cat /etc/hosts | grep -i node | awk '{print $1}'`
do
echo $ip
ssh $ip "yum -y install bzip2 libaio-devel flex bison ncurses-devel glibc-devel patch expect"
done
10. 配置操作系统其他参数
[root@node1 ~]# cp /etc/sysctl.conf /etc/sysctl.conf_bak
[root@node2 ~]# cp /etc/sysctl.conf /etc/sysctl.conf_bak
[root@node3 ~]# cp /etc/sysctl.conf /etc/sysctl.conf_bak
[root@node1 ~]# vim /etc/sysctl.conf
net.ipv4.tcp_max_tw_buckets = 10000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_keepalive_time = 30
net.ipv4.tcp_keepalive_probes = 9
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_retries1 = 5
net.ipv4.tcp_syn_retries = 5
net.ipv4.tcp_synack_retries = 5
net.ipv4.tcp_retries2 = 12
vm.overcommit_memory = 0
net.ipv4.tcp_rmem = 8192 250000 16777216
net.ipv4.tcp_wmem = 8192 250000 16777216
net.core.wmem_max = 21299200
net.core.rmem_max = 21299200
net.core.wmem_default = 21299200
net.core.rmem_default = 21299200
net.ipv4.ip_local_port_range = 26000 65535
kernel.sem = 250 6400000 1000 25600
vm.min_free_kbytes = 1638181
net.core.somaxconn = 65535
net.ipv4.tcp_syncookies = 1
net.core.netdev_max_backlog = 65535
net.ipv4.tcp_max_syn_backlog = 65535
net.ipv4.tcp_fin_timeout = 60
kernel.shmall = 1152921504606846720
kernel.shmmax = 18446744073709551615
net.ipv4.tcp_sack = 1
net.ipv4.tcp_timestamps = 1
vm.extfrag_threshold = 500
vm.overcommit_ratio = 90
fs.suid_dumpable = 1
kernel.core_uses_pid = 1
kernel.core_pattern = /data1/core/core-%e-%u-%s-%t-%h
[root@node1 ~]# for ip in `cat /etc/hosts | grep -i node | awk '{print $1}'`
do
echo $ip
scp /etc/sysctl.conf $ip:/etc/
ssh $ip "sysctl -p"
done
11. core文件配置
[root@node1 ~]# vim core.sh
ulimit -c unlimited
mkdir -p /data1/core
chmod -R 777 /data1/core/
echo "* soft core unlimited" >> /etc/security/limits.conf
echo "* soft nofile 1000000" >> /etc/security/limits.conf
echo "* hard nofile 1000000" >> /etc/security/limits.conf
[root@node1 ~]# for ip in `cat /etc/hosts | grep -i node | awk '{print $1}'`
do
echo $ip
scp core.sh $ip:~/
ssh $ip "sh ~/core.sh"
done
[root@node1 ~]# cat /proc/sys/kernel/core_pattern
/data1/core/core-%e-%u-%s-%t-%h
[root@node2 ~]# cat /proc/sys/kernel/core_pattern
/data1/core/core-%e-%u-%s-%t-%h
[root@node3 ~]# cat /proc/sys/kernel/core_pattern
/data1/core/core-%e-%u-%s-%t-%h
12. 系统支持的最大进程数设置
[root@node1 ~]# vim /etc/security/limits.d/20-nproc.conf
* soft nproc 60000
[root@node2 ~]# vim /etc/security/limits.d/20-nproc.conf
* soft nproc 60000
[root@node3 ~]# vim /etc/security/limits.d/20-nproc.conf
* soft nproc 60000
13. 关闭透明大页
-- 在grub文件GRUB_CMDLINE_LINUX项中追加transparent_hugepage=never(以node1节点为例)
[root@node1 ~]# vim /etc/default/grub
......
GRUB_CMDLINE_LINUX="... transparent_hugepage=never"
[root@node1 ~]# grub2-mkconfig -o /boot/efi/EFI/kylin/grub.cfg
Generating grub configuration file ...
Found linux image: /boot/vmlinuz-4.19.90-17.5.ky10.aarch64
Found initrd image: /boot/initramfs-4.19.90-17.5.ky10.aarch64.img
Found linux image: /boot/vmlinuz-4.19.90-17.ky10.aarch64
Found initrd image: /boot/initramfs-4.19.90-17.ky10.aarch64.img
Found linux image: /boot/vmlinuz-0-rescue-84f6d348922d49a083d71be73f385d3f
Found initrd image: /boot/initramfs-0-rescue-84f6d348922d49a083d71be73f385d3f.img
Adding boot menu entry for EFI firmware configuration
done
-- 重启生效
[root@node1 ~]# reboot
[root@node1 ~]# cat /proc/cmdline
...... transparent_hugepage=never
14. 创建XML配置文件
[root@node1 ~]# vim clusterconfig.xml
<?xml version="1.0" encoding="utf-8"?>
<ROOT>
<!--集群级整体信息-->
<CLUSTER>
<PARAM name="clusterName" value="mppdbCluster" />
<PARAM name="nodeNames" value="node1,node2,node3" />
<PARAM name="clusterRings" value="node1,node2,node3" />
<PARAM name="gaussdbAppPath" value="/opt/huawei/gaussdb/gauss" />
<PARAM name="tmpMppdbPath" value="/opt/huawei/temp/tmp" />
<PARAM name="gaussdbLogPath" value="/var/log/gaussdb" />
<PARAM name="gaussdbToolPath" value="/opt/huawei/wisequery" />
<PARAM name="clusterType" value="single-primary-multi-standby"/>
</CLUSTER>
<!--每台服务器上的节点部署信息-->
<DEVICELIST>
<!--1000001服务器上的节点部署信息-->
<DEVICE sn="1000001">
<PARAM name="name" value="node1"/>
<PARAM name="azName" value="AZ1"/>
<PARAM name="azPriority" value="1"/>
<!-- 如果服务器只有一个网卡可用,将backIP1和sshIP1配置成同一个IP -->
<PARAM name="backIp1" value="192.168.0.11" />
<PARAM name="sshIp1" value="192.168.0.11"/>
<!--CM节点部署信息-->
<PARAM name="cmsNum" value="1"/>
<PARAM name="cmServerPortBase" value="25000"/>
<PARAM name="cmServerListenIp1" value="192.168.0.11,192.168.0.12,192.168.0.13"/>
<PARAM name="cmServerHaIp1" value="192.168.0.11,192.168.0.12,192.168.0.13"/>
<PARAM name="cmServerlevel" value="1"/>
<PARAM name="cmServerRelation"
value="node1,node2,node3"/>
<PARAM name="cmDir" value="/data1/cmserver"/>
<!--CN节点部署信息-->
<PARAM name="cooNum" value="1"/>
<PARAM name="cooPortBase" value="6000"/>
<PARAM name="cooListenIp1" value="192.168.0.11"/>
<PARAM name="cooDir1" value="/data1/coordinator"/>
<!-- etcd -->
<PARAM name="etcdNum" value="1"/>
<PARAM name="etcdListenPort" value="16000"/>
<PARAM name="etcdHaPort" value="17000"/>
<PARAM name="etcdListenIp1" value="192.168.0.11"/>
<PARAM name="etcdHaIp1" value="192.168.0.11"/>
<PARAM name="etcdDir1" value="/data1/etcd"/>
<!-- dn -->
<PARAM name="dataNum" value="1"/>
<PARAM name="dataPortBase" value="40000"/>
<PARAM name="dataNode1" value="/data1/master1,node2,/data1/slave11,node3,/data1/slave12"/>
<PARAM name="dataNodeXlogPath1" value="/data1/master1_xlog,/data1/xlog/slave11_xlog,/data1/xlog/slave12_xlog"/>
</DEVICE>
<!--1000002服务器上的节点部署信息-->
<DEVICE sn="1000002">
<PARAM name="name" value="node2"/>
<PARAM name="azName" value="AZ1"/>
<PARAM name="azPriority" value="1"/>
<!-- 如果服务器只有一个网卡可用,将backIP1和sshIP1配置成同一个IP -->
<PARAM name="backIp1" value="192.168.0.12"/>
<PARAM name="sshIp1" value="192.168.0.12"/>
<!-- cm -->
<PARAM name="cmServerPortStandby" value="25500"/>
<PARAM name="cmDir" value="/data1/cmserver"/>
<!--GTM主节点部署信息-->
<PARAM name="gtmNum" value="1" />
<PARAM name="gtmPortBase" value="15000" />
<PARAM name="gtmListenIp1" value="192.168.0.12,192.168.0.13,192.168.0.11" />
<PARAM name="gtmHaIp1" value="192.168.0.12,192.168.0.13,192.168.0.11" />
<PARAM name="gtmDir1" value="/data1/gtm,node3,/data1/gtm,node1,/data1/gtm" />
<PARAM name="gtmRelation" value="node2,node3,node1" />
<!-- cn -->
<PARAM name="cooNum" value="1"/>
<PARAM name="cooPortBase" value="6000"/>
<PARAM name="cooListenIp1" value="192.168.0.12"/>
<PARAM name="cooDir1" value="/data1/coordinator"/>
<!-- etcd -->
<PARAM name="etcdNum" value="1"/>
<PARAM name="etcdListenPort" value="16000"/>
<PARAM name="etcdHaPort" value="17000"/>
<param name="etcdListenIp1" value="192.168.0.12"/>
<PARAM name="etcdHaIp1" value="192.168.0.12"/>
<PARAM name="etcdDir1" value="/data1/etcd"/>
<!-- dn -->
<PARAM name="dataNum" value="1"/>
<PARAM name="dataPortBase" value="40200"/>
<PARAM name="dataNode1" value="/data1/master2,node3,/data1/slave21,node1,/data1/slave22"/>
<PARAM name="dataNodeXlogPath1" value="/data1/xlog/master2_xlog,/data1/xlog/slave21_xlog,/data1/xlog/slave22_xlog"/>
</DEVICE>
<!-- plat3上的节点部署信息 -->
<DEVICE sn="1000003">
<PARAM name="name" value="node3"/>
<PARAM name="azName" value="AZ1"/>
<PARAM name="azPriority" value="1"/>
<!-- 如果服务器只有一个网卡可用,将backIP1和sshIP1配置成同一个IP -->
<PARAM name="backIp1" value="192.168.0.13"/>
<PARAM name="sshIp1" value="192.168.0.13"/>
<!-- etcd -->
<PARAM name="etcdNum" value="1"/>
<PARAM name="etcdListenPort" value="16000"/>
<PARAM name="etcdHaPort" value="17000"/>
<param name="etcdListenIp1" value="192.168.0.13"/>
<PARAM name="etcdHaIp1" value="192.168.0.13"/>
<PARAM name="etcdDir1" value="/data1/etcd"/>
<!-- cm -->
<PARAM name="cmServerPortStandby" value="25500"/>
<PARAM name="cmDir" value="/data1/cmserver"/>
<!-- cn -->
<PARAM name="cooNum" value="0"/>
<PARAM name="cooPortBase" value="6000"/>
<PARAM name="cooListenIp1" value="192.168.0.13"/>
<PARAM name="cooDir1" value="/data1/coordinator"/>
<!-- dn -->
<PARAM name="dataNum" value="1"/>
<PARAM name="dataPortBase" value="40400"/>
<PARAM name="dataNode1" value="/data1/master3,node1,/data1/slave31,node2,/data1/slave32"/>
<PARAM name="dataNodeXlogPath1" value="/data1/xlog/master3_xlog,/data1/xlog/slave31_xlog,/data1/xlog/slave32_xlog"/>
</DEVICE>
</DEVICELIST>
</ROOT>
15. 配置openGauss安装包
-- 使用root用户创建目录/opt/software/GaussDB_Kernel
[root@node1 ~]# mkdir -p /opt/software/GaussDB_Kernel
[root@node1 ~]# chmod 700 -R /opt/software
-- 将安装包和XML配置文件上传到该目录下,并解压安装包
[root@node1 ~]# mv clusterconfig.xml /opt/software/GaussDB_Kernel/
[root@node1 ~]# cd /opt/software/GaussDB_Kernel/
[root@node1 GaussDB_Kernel]# tar -zxf GaussDB_Kernel_V500R002C10B027_ARM_Distributed_SERVER_PACKAGE.tar.gz
[root@node1 GaussDB_Kernel]# tar -xzf GaussDB-Kernel-V500R002C10-KYLIN-64bit.tar.gz
16. 使用root用户预安装
[root@node1 ~]# cd /opt/software/GaussDB_Kernel/script/
[root@node1 script]# ./gs_preinstall -U omm -G dbgrp -X /opt/software/GaussDB_Kernel/clusterconfig.xml --alarm-type=1
......
Are you sure you want to create trust for root (yes/no)?yes
Please enter password for root
Password: -- 输入root用户密码
......
Are you sure you want to create the user[omm] and create trust for it (yes/no)? yes
Please enter password for cluster user.
Password: -- 输入omm用户密码
Please enter password for cluster user again.
Password: -- 输入omm用户密码
......
Please enter password for current user[omm].
Password: -- 输入omm用户密码
......
-- 如果报如下进入目录无权限的问题,则赋予数据目录data1的权限给omm
Exception: [FAILURE] node1:
[GAUSS-50102] : The /data1/cmserver is not writable for omm.
-bash: line 0: cd: /data1/cmserver: Permission denied
[FAILURE] node2:
[GAUSS-50102] : The /data1/cmserver is not writable for omm.
-bash: line 0: cd: /data1/cmserver: Permission denied
[FAILURE] node3:
[GAUSS-50102] : The /data1/cmserver is not writable for omm.
-bash: line 0: cd: /data1/cmserver: Permission denied
[root@node1 ~]# for ip in `cat /etc/hosts | grep -i node | awk '{print $1}'`
do
echo $ip
ssh $ip "chown -R omm:dbgrp /data1"
done
-- 再次执行
[root@node1 script]# ./gs_preinstall -U omm -G dbgrp -X /opt/software/GaussDB_Kernel/clusterconfig.xml --alarm-type=1
-- 显示如下信息则为预安装成功
Successfully set finish flag.
Preinstallation succeeded.
17. 使用omm用户正式安装gaussdb
[root@node1 ~]# chmod -R 700 /opt/software
[root@node1 ~]# chown -R omm:dbgrp /opt/software
[root@node1 ~]# su - omm
[omm@node1 ~]$ gs_install -X /opt/software/GaussDB_Kernel/clusterconfig.xml
......
-- 显示如下信息则为集群安装完成
Successfully installed application.
18. 检查集群状态
[omm@node1 ~]$ cm_ctl query -Cv
[ CMServer State ]
node instance state
-------------------------
1 node1 1 Primary
2 node2 2 Standby
3 node3 3 Standby
[ ETCD State ]
node instance state
-------------------------------
1 node1 7001 StateFollower
2 node2 7002 StateLeader
3 node3 7003 StateFollower
[ Cluster State ]
cluster_state : Normal
redistributing : No
balanced : Yes
current_az : AZ_ALL
[ Coordinator State ]
node instance state
-------------------------
1 node1 5001 Normal
2 node2 5002 Normal
[ Central Coordinator State ]
node instance state
-------------------------
1 node1 5001 Normal
[ GTM State ]
node instance state sync_state
---------------------------------------------------------
1 node1 1003 S Standby Connection ok Sync
2 node2 1001 P Primary Connection ok Sync
3 node3 1002 S Standby Connection ok Sync
[ Datanode State ]
node instance state | node instance state | node instance state
---------------------------------------------------------------------------------------------------------------
1 node1 6001 P Primary Normal | 2 node2 6002 S Standby Normal | 3 node3 6003 S Standby Normal
2 node2 6004 P Primary Normal | 3 node3 6005 S Standby Normal | 1 node1 6006 S Standby Normal
3 node3 6007 P Primary Normal | 1 node1 6008 S Standby Normal | 2 node2 6009 S Standby Normal
19. 使用gsql命令连接gaussdb
[omm@node1 ~]$ gsql -d postgres -p 6000 -r
gsql ((GaussDB Kernel V500R002C10 build 6ac75df9) compiled at 2022-03-21 08:45:19 commit 3662 last mr 7734 release)
Non-SSL connection (SSL connection is recommended when requiring high-security)
Type "help" for help.
openGauss=# \l
ERROR: Please use "ALTER ROLE "omm" PASSWORD 'password';" to set the password of the user before other operations!
openGauss=# ALTER ROLE "omm" PASSWORD 'Huawei@123';
ALTER ROLE
openGauss=# \l
List of databases
Name | Owner | Encoding | Collate | Ctype | Access privileges
-----------+-------+-----------+---------+-------+-------------------
postgres | omm | SQL_ASCII | C | C |
template0 | omm | SQL_ASCII | C | C | =c/omm +
| | | | | omm=CTc/omm
template1 | omm | SQL_ASCII | C | C | =c/omm +
| | | | | omm=CTc/omm
(3 rows)
20. 创建数据库和用户
openGauss=# create database tpcc encoding 'utf-8';
CREATE DATABASE
openGauss=# \c tpcc;
tpcc=# create user testuser with password 'Huawei@123';
CREATE ROLE
tpcc=# \q
[omm@node1 ~]$ gsql -U testuser -d tpcc -p 6000 -r
Password for user testuser:
gsql ((GaussDB Kernel V500R002C10 build 6ac75df9) compiled at 2022-03-21 08:45:19 commit 3662 last mr 7734 release)
Non-SSL connection (SSL connection is recommended when requiring high-security)
Type "help" for help.
tpcc=> \dn
List of schemas
Name | Owner
----------------------+----------
......
testuser | testuser
21. 配置开通远程连接权限(白名单)
-- 使用omm用户登录其中一台节点并执行
[omm@node1 ~]$ gs_guc reload -Z coordinator -Z datanode -Nall -I all -h "host all replication 0.0.0.0/0 sha256"
......
-- 输出如下信息则为修改成功
ALL: Success to perform gs_guc!
[omm@node1 ~]$ gs_guc reload -Z coordinator -Z datanode -Nall -I all -h "host all all 0.0.0.0/0 sha256"
......
-- 输出如下信息则为修改成功
ALL: Success to perform gs_guc!
22. 关闭自动剔除故障CN
-- 测试高可用场景建议执行该操作关闭自动剔除故障CN
[omm@node1 ~]$ gs_guc set -Z cmserver -N all -I all -c"coordinator_heartbeat_timeout=0"
......
-- 输出如下信息则为修改成功
ALL: Success to perform gs_guc!
-- 重启集群生效
[omm@node1 ~]$ gs_om -t restart
......
-- 输出如下信息则为重启成功
Successfully started cluster.
23. CN隔离恢复
[omm@node1 ~]$ gs_guc set -Z cmserver -N all -I all -c"coordinator_heartbeat_timeout=25"
......
-- 输出如下信息则为修改成功
ALL: Success to perform gs_guc!
-- 重启集群生效
[omm@node1 ~]$ gs_om -t restart
......
-- 输出如下信息则为重启成功
Successfully started cluster.
-- 如果CN实例已经故障(deleted状态)则进行修复操作,需要在一个正常主机上执行。
[omm@node1 ~]$ gs_replace -t config -h node3
-- 执行如下命令,对需要修复实例的主机进行启动操作。
[omm@node1 ~]$ gs_replace -t start -h node3
-- node3是故障CN所在主机的主机名称
关注作者
【版权声明】本文为墨天轮用户原创内容,转载时必须标注文章的来源(墨天轮),文章链接,文章作者等基本信息,否则作者和墨天轮有权追究责任。如果您发现墨天轮中有涉嫌抄袭或者侵权的内容,欢迎发送邮件至:contact@modb.pro进行举报,并提供相关证据,一经查实,墨天轮将立刻删除相关内容。




