First, clean up any directories left over from a previous installation, then load the Greenplum environment:
source /usr/local/greenplum-db/greenplum_path.sh
================== Target architecture ==================
mdw      sdw1    sdw2    sdw3
master   seg0p   seg2p   seg4p
         seg1p   seg3p   seg5p
         seg5m   seg0m   seg2m
         seg4m   seg1m   seg3m
smdw (standby master)
1. Preparation -- perform on every node
1.1 Disable the firewall
#systemctl status firewalld     (check firewall status)
#systemctl stop firewalld       (stop the firewall)
#systemctl disable firewalld    (disable the firewall at boot)
systemctl stop firewalld && systemctl disable firewalld
systemctl status firewalld
If the host uses iptables instead:
service iptables stop       # stops the service now; it starts again after a reboot
chkconfig iptables off      # removes it from boot startup; takes effect after reboot
1.2 Disable SELinux
vim /etc/selinux/config
SELINUX=disabled
setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
sestatus
1.3 Set the hostname
hostnamectl set-hostname sdw1
hostnamectl status
/etc/sysconfig/network
1.4 Map host names in /etc/hosts
vi /etc/hosts
10.102.254.24 sdw1
10.102.254.25 sdw2
10.102.254.26 sdw3
10.102.254.27 mdw
1.5 Tune kernel parameters
vi /etc/sysctl.conf
kernel.shmmax = 500000000
kernel.shmmni = 4096
kernel.shmall = 4000000000
kernel.sem = 250 512000 100 2048
kernel.sysrq = 1
kernel.core_uses_pid = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.msgmni = 2048
net.ipv4.tcp_syncookies = 1
net.ipv4.ip_forward = 0
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_max_syn_backlog = 4096
net.ipv4.conf.all.arp_filter = 1
net.ipv4.ip_local_port_range = 1025 65535
net.core.netdev_max_backlog = 10000
net.core.rmem_max = 2097152
net.core.wmem_max = 2097152
vm.overcommit_memory = 2
sysctl -p
Alternatively, overwrite the whole file in one step:
cat > /etc/sysctl.conf << EOF
# sysctl settings are defined through files in
# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.
#
# Vendors settings live in /usr/lib/sysctl.d/.
# To override a whole file, create a new file with the same name in
# /etc/sysctl.d/ and put new settings there. To override
# only specific settings, add a file with a lexically later
# name in /etc/sysctl.d/ and put new settings there.
#
# For more information, see sysctl.conf(5) and sysctl.d(5).
kernel.shmmax = 500000000
kernel.shmmni = 4096
kernel.shmall = 4000000000
kernel.sem = 500 1024000 200 4096
kernel.sysrq = 1
kernel.core_uses_pid = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.msgmni = 2048
net.ipv4.tcp_syncookies = 1
net.ipv4.ip_forward = 0
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_max_syn_backlog = 4096
net.ipv4.conf.all.arp_filter = 1
net.ipv4.ip_local_port_range = 1025 65535
net.core.netdev_max_backlog = 10000
net.core.rmem_max = 2097152
net.core.wmem_max = 2097152
vm.overcommit_memory = 2
vm.swappiness = 1
kernel.pid_max = 655350
EOF
sysctl -p
1.6 Raise Linux resource limits
cat /etc/security/limits.conf
vi /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536
* soft nproc 131072
* hard nproc 131072
Or overwrite the file in one step:
cat > /etc/security/limits.conf << EOF
* soft nofile 65536
* hard nofile 65536
* soft nproc 131072
* hard nproc 131072
EOF
If you are on RHEL 6.x, also check /etc/security/limits.d/90-nproc.conf; see the official documentation for details.
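A quick way to verify, after logging in again (the limits apply only to new sessions):
ulimit -n    # open files; expect 65536
ulimit -u    # max user processes; expect 131072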
1.7 Devices and I/O: the file system
Create an XFS file system and mount it.
EXT4 (fourth extended filesystem) is a journaling file system for Linux and the successor to ext3.
EXT4 supports file systems of up to 1 EB and single files of up to 16 TB. For an ordinary desktop or server that hardly matters, but for users of large disk arrays it matters a great deal.
XFS is a 64-bit file system supporting single file systems of up to 8 EB minus one byte, although the practical limit depends on the host OS's block limits. On a 32-bit Linux system, files and file systems are capped at 16 TB.
Each has its strengths, and their performance is broadly similar. When Google evaluated upgrading from EXT2, for example, it also considered XFS and JFS; EXT4 and XFS performed comparably, but the upgrade path from EXT2 to EXT4 was easier, so Google settled on EXT4.
Example:
cat >> /etc/fstab << EOF
/dev/sdb1 /greenplum xfs rw,nodev,noatime,inode64,allocsize=16m 0 0
EOF
Recommended mount options: rw,nodev,noatime,nobarrier,inode64
cat /etc/fstab
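If the file system does not exist yet, a minimal sketch, assuming /dev/sdb1 is the data partition used in the fstab example above:
mkfs.xfs /dev/sdb1    # create the XFS file system (destroys any existing data on the partition)
mkdir -p /greenplum   # mount point used throughout this guide
mount /greenplum      # picks up the options from /etc/fstab
df -h /greenplum      # confirm the mount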
1.8 Disk I/O scheduler
The Linux disk I/O scheduler supports several policies. The default is CFQ; Greenplum recommends deadline.
Check the current policy; the default shows as [cfq]:
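For example, on this guide's example device sda (the bracketed name is the active scheduler):
cat /sys/block/sda/queue/scheduler
noop deadline [cfq]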
The deadline scheduler option is recommended. To specify a scheduler until the next system reboot,
run the following:
# echo schedulername > /sys/block/devname/queue/scheduler
echo deadline > /sys/block/sda/queue/scheduler
On RHEL/CentOS 7, make the setting persistent with grubby:
# grubby --update-kernel=ALL --args="elevator=deadline"
grubby --info=ALL
1.9 Increase the disk read-ahead
fdisk -l
Check:
/sbin/blockdev --getra /dev/sda
Set:
/sbin/blockdev --setra 16384 /dev/sda
To persist across reboots, add to /etc/rc.d/rc.local:
DELL: blockdev --setra 16384 /dev/sd*          (adjust the device identifier for your disks)
HP:   blockdev --setra 16384 /dev/cciss/c?d?*
1.10 Disable transparent huge pages (THP)
On systems that use grub2 such as RHEL 7.x or CentOS 7.x, use the system utility grubby. This
command adds the parameter when run as root.
# grubby --update-kernel=ALL --args="transparent_hugepage=never"
After adding the parameter, reboot the system.
This cat command checks the state of THP. The output indicates that THP is disabled.
$ cat /sys/kernel/mm/*transparent_hugepage/enabled
always [never]
Register the setting as a service
# create the init.d script
echo '#!/bin/sh
case $1 in
start)
if [ -d /sys/kernel/mm/transparent_hugepage ]; then
thp_path=/sys/kernel/mm/transparent_hugepage
elif [ -d /sys/kernel/mm/redhat_transparent_hugepage ]; then
thp_path=/sys/kernel/mm/redhat_transparent_hugepage
else
exit 0
fi
echo never > ${thp_path}/enabled
echo never > ${thp_path}/defrag
unset thp_path
;;
esac' > /etc/init.d/disable-transparent-hugepages
# register the systemd unit
echo '[Unit]
Description=Disable Transparent Hugepages
After=multi-user.target
[Service]
ExecStart=/etc/init.d/disable-transparent-hugepages start
Type=simple
[Install]
WantedBy=multi-user.target' > /etc/systemd/system/disable-thp.service
# disk read-ahead sectors
/sbin/blockdev --getra /dev/sdb1 # check the current value
/sbin/blockdev --setra 65535 /dev/sdb1 # set the value
# create the init.d script
echo '#!/bin/sh
device_name=/dev/sdb1
case $1 in
start)
if mount | grep -q "^${device_name}"; then
/sbin/blockdev --setra 65535 ${device_name}
else
exit 0
fi
unset device_name
;;
esac' > /etc/init.d/blockdev-setra-sdb
# register the systemd unit
echo '[Unit]
Description=Blockdev --setra N
After=multi-user.target
[Service]
ExecStart=/etc/init.d/blockdev-setra-sdb start
Type=simple
[Install]
WantedBy=multi-user.target' > /etc/systemd/system/blockdev-setra-sdb.service
# set permissions and enable at boot
chmod 755 /etc/init.d/disable-transparent-hugepages
chmod 755 /etc/init.d/blockdev-setra-sdb
chmod 755 /etc/systemd/system/disable-thp.service
chmod 755 /etc/systemd/system/blockdev-setra-sdb.service
systemctl enable disable-thp blockdev-setra-sdb
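If systemctl does not see the new unit files, reload systemd first and start the services once by hand:
systemctl daemon-reload
systemctl start disable-thp blockdev-setra-sdb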
1.11 Disable IPC object removal for RHEL 7 or CentOS 7
Set this parameter in /etc/systemd/logind.conf on the Greenplum
Database host systems.
RemoveIPC=no
The setting takes effect after restarting the systemd-login service or rebooting the system. To
restart the service, run this command as the root user.
service systemd-logind restart
cat /etc/systemd/logind.conf
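A one-liner to apply the setting, as a sketch; it assumes GNU sed and a stock logind.conf where the entry is commented out as #RemoveIPC=yes:
sed -i 's/^#\?RemoveIPC=.*/RemoveIPC=no/' /etc/systemd/logind.conf
systemctl restart systemd-logind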
1.12 Time synchronization
/etc/chrony.conf
systemctl status chronyd.service  -- check status
systemctl start chronyd.service   -- start
systemctl enable chronyd.service  -- enable at boot
systemctl status chronyd.service
In /etc/chrony.conf, point at the time source, e.g.:
server 10.1.3.1 prefer
Check the synchronization sources:
chronyc sources -v
chronyc sourcestats -v
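Putting it together, a minimal sketch (10.1.3.1 is the example source above; substitute your own NTP server):
echo "server 10.1.3.1 prefer iburst" >> /etc/chrony.conf   # assumes the server is not already listed
systemctl restart chronyd.service
chronyc sources -v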
1.13 Raise the SSH connection limit
Greenplum utilities open many SSH sessions in parallel, so raise MaxStartups in /etc/ssh/sshd_config:
MaxStartups 10:30:200
systemctl restart sshd.service
1.14 Install system dependency packages
yum -y install epel-release
yum -y install wget cmake3 git gcc gcc-c++ bison flex libedit-devel zlib zlib-devel perl-devel perl-ExtUtils-Embed python-devel libevent libevent-devel libxml2 libxml2-devel libcurl libcurl-devel bzip2 bzip2-devel net-tools libffi-devel openssl-devel
2. Installation
2.1 Create the user and group
# groupadd gpadmin
# useradd gpadmin -g gpadmin
# passwd gpadmin
New password: <changeme>
Retype new password: <changeme>
echo gpadmin | passwd gpadmin --stdin
2.2 Unpack and install as root
./greenplum-db-5.10.2-rhel6-x86_64.bin
I HAVE READ AND AGREE TO THE TERMS OF THE ABOVE PIVOTAL SOFTWARE
LICENSE AGREEMENT.
********************************************************************************
Do you accept the Pivotal Database license agreement? [yes|no]
********************************************************************************
yes
********************************************************************************
Provide the installation path for Greenplum Database or press ENTER to
accept the default installation path: /usr/local/greenplum-db-5.10.2
********************************************************************************
********************************************************************************
Install Greenplum Database into /usr/local/greenplum-db-5.10.2? [yes|no]
********************************************************************************
yes
********************************************************************************
/usr/local/greenplum-db-5.10.2 does not exist.
Create /usr/local/greenplum-db-5.10.2 ? [yes|no]
(Selecting no will exit the installer)
********************************************************************************
After installation, grant ownership (run these after the gpadmin user has been created):
# chown -R gpadmin /usr/local/greenplum*
# chgrp -R gpadmin /usr/local/greenplum*
2.3 Set environment variables (as gpadmin)
cat >> .bashrc << EOF
export MASTER_DATA_DIRECTORY=/greenplum/gpdata/master/gpseg-1
source /usr/local/greenplum-db/greenplum_path.sh
EOF
source .bashrc
cat >> /home/gpadmin/.bash_profile <<EOF
export MASTER_DATA_DIRECTORY=/greenplum/gpdata/master/gpseg-1
source /usr/local/greenplum-db/greenplum_path.sh
export PGPORT=5432
export PGDATABASE=archdata
EOF
source /home/gpadmin/.bash_profile
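A quick sanity check that the environment loaded correctly:
echo $GPHOME                  # should print /usr/local/greenplum-db
echo $MASTER_DATA_DIRECTORY   # should print /greenplum/gpdata/master/gpseg-1
which gpssh                   # should resolve under $GPHOME/bin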
2.4 Create the configuration files
Switch to root:
source /usr/local/greenplum-db/greenplum_path.sh
------ run on mdw and smdw only
mkdir /home/gpadmin/gpconfig
chown -R gpadmin:gpadmin /home/gpadmin/gpconfig
------ run on mdw and smdw only
cat >> /home/gpadmin/gpconfig/all_host <<EOF
mdw
sdw1
sdw2
sdw3
EOF
------ run on mdw and smdw only
cat >> /home/gpadmin/gpconfig/all_segment <<EOF
sdw1
sdw2
sdw3
EOF
chown -R gpadmin:gpadmin /home/gpadmin/gpconfig/all_host
chown -R gpadmin:gpadmin /home/gpadmin/gpconfig/all_segment
2.5 Set up passwordless SSH between hosts
source /usr/local/greenplum-db/greenplum_path.sh
/usr/local/greenplum-db/bin/gpssh-exkeys -f /home/gpadmin/gpconfig/all_host
2.6 Verify host connectivity
gpssh -f /home/gpadmin/gpconfig/all_host -e "ls -l"
2.7 Create the gpadmin user on the remaining nodes
Open an interactive gpssh session on the segment hosts and run the user-creation commands there:
gpssh -f /home/gpadmin/gpconfig/all_segment
groupadd gpadmin
useradd gpadmin -g gpadmin
passwd gpadmin
echo gpadmin | passwd gpadmin --stdin
Then exchange SSH keys as the gpadmin user:
source /usr/local/greenplum-db/greenplum_path.sh
/usr/local/greenplum-db/bin/gpssh-exkeys -f /home/gpadmin/gpconfig/all_host
gpssh -f /home/gpadmin/gpconfig/all_host -e "ls -l"
Check time synchronization:
gpssh -f /home/gpadmin/gpconfig/all_host -e "date"
2.8 Distribute the software to all segment hosts
Run as root:
source /usr/local/greenplum-db/greenplum_path.sh
gpseginstall -f /home/gpadmin/gpconfig/all_host -u gpadmin -p gpadmin
2.9 Verify the installation
o Log in as the gpadmin user and source the environment
• source /usr/local/greenplum-db/greenplum_path.sh
o Use the gpssh utility to see if you can log in to all hosts without a password prompt
• gpssh -f /home/gpadmin/gpconfig/all_host -e "ls -l"
2.10 Create data directories (as root)
mkdir -p /greenplum/gpdata/master
chown gpadmin:gpadmin /greenplum/gpdata/master
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'mkdir -p /greenplum/gpdata/primary1'
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'mkdir -p /greenplum/gpdata/primary2'
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'chown -R gpadmin:gpadmin /greenplum/gpdata'
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'mkdir -p /greenplum/gpdata/mirror1'
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'mkdir -p /greenplum/gpdata/mirror2'
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'chown -R gpadmin:gpadmin /greenplum/gpdata'
Or create them all in one batch (the mirror directories must be created too):
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'mkdir -p /greenplum/gpdata/primary{1..2} /greenplum/gpdata/mirror{1..2}'
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'chown -R gpadmin:gpadmin /greenplum/gpdata'
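Verify the directories and their ownership across all segment hosts:
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'ls -ld /greenplum/gpdata/primary* /greenplum/gpdata/mirror*'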
2.11 Validate the system
Check system settings and benchmark performance.
Check command: gpcheck -f host_file -m mdw -s smdw
Validating Hardware Performance
o gpcheckperf can be used to identify hardware and system-level issues on the machines in your Greenplum
Database array.
o Network Performance (gpnetbench*)
• gpcheckperf -f hostfile_gpchecknet_ic1 -r N -d /tmp > subnet1.out
o Disk I/O Performance (dd test) & Memory Bandwidth (stream test)
• gpcheckperf -f hostfile_gpcheckperf -r ds -D -d /data/primary -d /data/mirror
Validate the OS configuration:
source /usr/local/greenplum-db/greenplum_path.sh
gpcheck -f /home/gpadmin/gpconfig/all_host -m mdw
Validate hardware performance -- review these results (network and disk I/O):
gpcheckperf -f /home/gpadmin/gpconfig/all_host -r N -d /tmp > checknetwork.out
[root@mdw greenplum-db]# cat checknetwork.out
/usr/local/greenplum-db/./bin/gpcheckperf -f /home/gpadmin/gpconfig/all_host -r N -d /tmp
-------------------
-- NETPERF TEST
-------------------
====================
== RESULT
====================
Netperf bisection bandwidth test
mdw -> sdw1 = 112.340000
sdw2 -> sdw3 = 112.340000
sdw1 -> mdw = 112.330000
sdw3 -> sdw2 = 112.330000
Summary:
sum = 449.34 MB/sec
min = 112.33 MB/sec
max = 112.34 MB/sec
avg = 112.33 MB/sec
median = 112.34 MB/sec
gpcheckperf -f /home/gpadmin/gpconfig/all_host -r ds -D -d /greenplum/gpdata/primary1 -d /greenplum/gpdata/mirror1 > checkDISKIO.out
[root@mdw greenplum-db]# gpcheckperf -f /home/gpadmin/gpconfig/all_host -r ds -D -d /greenplum/gpdata/primary1 -d /greenplum/gpdata/mirror1
/usr/local/greenplum-db/./bin/gpcheckperf -f /home/gpadmin/gpconfig/all_host -r ds -D -d /greenplum/gpdata/primary1 -d /greenplum/gpdata/mirror1
--------------------
-- DISK WRITE TEST
--------------------
--------------------
-- DISK READ TEST
--------------------
--------------------
-- STREAM TEST
--------------------
====================
== RESULT
====================
disk write avg time (sec): 20.88
disk write tot bytes: 132920115200
disk write tot bandwidth (MB/s): 6074.65
disk write min bandwidth (MB/s): 1476.04 [ mdw]
disk write max bandwidth (MB/s): 1551.18 [sdw3]
-- per host bandwidth --
disk write bandwidth (MB/s): 1476.04 [ mdw]
disk write bandwidth (MB/s): 1537.63 [sdw1]
disk write bandwidth (MB/s): 1509.80 [sdw2]
disk write bandwidth (MB/s): 1551.18 [sdw3]
disk read avg time (sec): 59.80
disk read tot bytes: 132920115200
disk read tot bandwidth (MB/s): 2175.57
disk read min bandwidth (MB/s): 454.54 [sdw2]
disk read max bandwidth (MB/s): 700.04 [sdw1]
-- per host bandwidth --
disk read bandwidth (MB/s): 520.03 [ mdw]
disk read bandwidth (MB/s): 700.04 [sdw1]
disk read bandwidth (MB/s): 454.54 [sdw2]
disk read bandwidth (MB/s): 500.96 [sdw3]
stream tot bandwidth (MB/s): 49348.52
stream min bandwidth (MB/s): 12297.76 [ mdw]
stream max bandwidth (MB/s): 12388.57 [sdw2]
-- per host bandwidth --
stream bandwidth (MB/s): 12297.76 [ mdw]
stream bandwidth (MB/s): 12321.47 [sdw1]
stream bandwidth (MB/s): 12388.57 [sdw2]
stream bandwidth (MB/s): 12340.73 [sdw3]
2.12 Initialize the database
cp $GPHOME/docs/cli_help/gpconfigs/gpinitsystem_config /home/gpadmin/gpconfig/gpinitsystem_config
cat >> /home/gpadmin/gpconfig/hostfile_gpinitsystem <<EOF
sdw1
sdw2
sdw3
EOF
chown -R gpadmin:gpadmin /home/gpadmin/gpconfig/gpinitsystem_config
chown -R gpadmin:gpadmin /home/gpadmin/gpconfig/hostfile_gpinitsystem
Adjust these parameters as the gpadmin user:
su - gpadmin
ARRAY_NAME="EMC Greenplum DW"
PORT_BASE=40000
SEG_PREFIX=gpseg
declare -a DATA_DIRECTORY=(/greenplum/gpdata/primary1 /greenplum/gpdata/primary2)
MASTER_HOSTNAME=mdw
MASTER_DIRECTORY=/greenplum/gpdata/master
MASTER_PORT=5432
TRUSTED_SHELL=ssh
CHECK_POINT_SEGMENTS=8
ENCODING=UNICODE
MIRROR_PORT_BASE=50000
REPLICATION_PORT_BASE=41000
MIRROR_REPLICATION_PORT_BASE=51000
declare -a MIRROR_DATA_DIRECTORY=(/greenplum/gpdata/mirror1 /greenplum/gpdata/mirror2)
vim /home/gpadmin/gpconfig/gpinitsystem_config
After editing, the file reads as follows:
[gpadmin@mdw ~]$ cat /home/gpadmin/gpconfig/gpinitsystem_config
# FILE NAME: gpinitsystem_config
# Configuration file needed by the gpinitsystem
################################################
#### REQUIRED PARAMETERS
################################################
#### Name of this Greenplum system enclosed in quotes.
ARRAY_NAME="Greenplum Data Platform"
#### Naming convention for utility-generated data directories.
SEG_PREFIX=gpseg
#### Base number by which primary segment port numbers
#### are calculated.
PORT_BASE=40000
#### File system location(s) where primary segment data directories
#### will be created. The number of locations in the list dictate
#### the number of primary segments that will get created per
#### physical host (if multiple addresses for a host are listed in
#### the hostfile, the number of segments will be spread evenly across
#### the specified interface addresses).
declare -a DATA_DIRECTORY=(/greenplum/gpdata/primary1 /greenplum/gpdata/primary2)
#### OS-configured hostname or IP address of the master host.
MASTER_HOSTNAME=mdw
#### File system location where the master data directory
#### will be created.
MASTER_DIRECTORY=/greenplum/gpdata/master
#### Port number for the master instance.
MASTER_PORT=5432
#### Shell utility used to connect to remote hosts.
TRUSTED_SHELL=ssh
#### Maximum log file segments between automatic WAL checkpoints.
CHECK_POINT_SEGMENTS=8
#### Default server-side character set encoding.
ENCODING=UNICODE
################################################
#### OPTIONAL MIRROR PARAMETERS
################################################
#### Base number by which mirror segment port numbers
#### are calculated.
MIRROR_PORT_BASE=50000
#### Base number by which primary file replication port
#### numbers are calculated.
REPLICATION_PORT_BASE=41000
#### Base number by which mirror file replication port
#### numbers are calculated.
MIRROR_REPLICATION_PORT_BASE=51000
#### File system location(s) where mirror segment data directories
#### will be created. The number of mirror locations must equal the
#### number of primary locations as specified in the
#### DATA_DIRECTORY parameter.
declare -a MIRROR_DATA_DIRECTORY=(/greenplum/gpdata/mirror1 /greenplum/gpdata/mirror2)
################################################
#### OTHER OPTIONAL PARAMETERS
################################################
#### Create a database of this name after initialization.
#DATABASE_NAME=name_of_database
#### Specify the location of the host address file here instead of
#### with the -h option of gpinitsystem.
#MACHINE_LIST_FILE=/home/gpadmin/gpconfigs/hostfile_gpinitsystem
Initialize the database
As the gpadmin user:
gpinitsystem -c /home/gpadmin/gpconfig/gpinitsystem_config -h /home/gpadmin/gpconfig/hostfile_gpinitsystem
To initialize with a standby master and a spread mirror configuration, add the -s and -S options:
gpinitsystem -c gpconfigs/gpinitsystem_config -h gpconfigs/hostfile_gpinitsystem -s standby_master_hostname -S
(-s names the standby master host; -S requests a spread mirror layout)
2.13 Configure and check environment variables
MASTER_DATA_DIRECTORY=/greenplum/gpdata/master/gpseg-1
GPHOME=/usr/local/greenplum-db
PGDATABASE=gpadmin
[gpadmin@mdw ~]$ cat .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/.local/bin:$HOME/bin
export PATH
export MASTER_DATA_DIRECTORY=/greenplum/gpdata/master/gpseg-1
source /usr/local/greenplum-db/greenplum_path.sh
export PGPORT=5432
export PGDATABASE=archdata
2.14 Cluster checks and routine start/stop
Check processes: $ gpssh -f /home/gpadmin/gpconfig/all_host -e "ps -eaf|grep green"
Set a password so gpadmin can log in remotely:
psql postgres gpadmin
alter user gpadmin encrypted password 'gpadmin';
\q
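For remote clients to actually connect, pg_hba.conf on the master must also allow them. A sketch, assuming clients sit in the 10.102.254.0/24 network used in this guide (adjust the CIDR to your environment):
echo "host all gpadmin 10.102.254.0/24 md5" >> $MASTER_DATA_DIRECTORY/pg_hba.conf
gpstop -u    # reload pg_hba.conf without restarting the cluster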
Test queries:
[gpadmin@mdw ~]$ psql -hmdw -p 5432 -d postgres -U gpadmin -c 'select dfhostname, dfspace,dfdevice from gp_toolkit.gp_disk_free order by dfhostname;'
dfhostname | dfspace | dfdevice
------------+----------+----------------------------
sdw1 | 98708120 | /dev/mapper/VolGroup-root
sdw1 | 98708120 | /dev/mapper/VolGroup-root
sdw2 | 98705600 | /dev/mapper/VolGroup-root
sdw2 | 98705600 | /dev/mapper/VolGroup-root
sdw3 | 98705144 | /dev/mapper/VolGroup-root
sdw3 | 98705144 | /dev/mapper/VolGroup-root
(6 rows)
[gpadmin@mdw ~]$ psql -h mdw -p 5432 -d postgres -U gpadmin -c '\l+'
List of databases
Name | Owner | Encoding | Access privileges | Size | Tablespace | Description
-----------+---------+----------+---------------------+-------+------------+---------------------------
postgres | gpadmin | UTF8 | | 73 MB | pg_default |
template0 | gpadmin | UTF8 | =c/gpadmin | 72 MB | pg_default |
: gpadmin=CTc/gpadmin
template1 | gpadmin | UTF8 | =c/gpadmin | 73 MB | pg_default | default template database
: gpadmin=CTc/gpadmin
(3 rows)
[gpadmin@mdw ~]$
------- Start/stop the database (run on mdw) --------
As gpadmin on the master node, run gpstart -a to start the cluster; without -a you must confirm with yes.
As gpadmin on the master node, run gpstop -a to stop the cluster; without -a you must confirm with yes.
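Commonly used variants:
gpstart -a      # start without prompting
gpstop -a       # stop without prompting
gpstop -r -a    # restart the cluster
gpstop -u       # reload pg_hba.conf and runtime parameters without a restart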
gpstate
[gpadmin@mdw ~]$ gpstate
20200419:00:52:03:001506 gpstate:mdw:gpadmin-[INFO]:-Starting gpstate with args:
20200419:00:52:03:001506 gpstate:mdw:gpadmin-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 5.10.2 build commit:b3c02f3acd880e2d676dacea36be015e4a3826d4'
20200419:00:52:03:001506 gpstate:mdw:gpadmin-[INFO]:-master Greenplum Version: 'PostgreSQL 8.3.23 (Greenplum Database 5.10.2 build commit:b3c02f3acd880e2d676dacea36be015e4a3826d4) on x86_64-pc-linux-gnu, compiled by GCC gcc (GCC) 6.2.0, 64-bit compiled on Aug 10 2018 07:30:24'
20200419:00:52:03:001506 gpstate:mdw:gpadmin-[INFO]:-Obtaining Segment details from master...
20200419:00:52:03:001506 gpstate:mdw:gpadmin-[INFO]:-Gathering data from segments...
.
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:-Greenplum instance status summary
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:-----------------------------------------------------
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Master instance = Active
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Master standby = No master standby configured
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total segment instance count from metadata = 12
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:-----------------------------------------------------
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Primary Segment Status
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:-----------------------------------------------------
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total primary segments = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total primary segment valid (at master) = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total primary segment failures (at master) = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid files missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid files found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid PIDs missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid PIDs found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of /tmp lock files missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of /tmp lock files found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number postmaster processes missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number postmaster processes found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:-----------------------------------------------------
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Mirror Segment Status
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:-----------------------------------------------------
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total mirror segments = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total mirror segment valid (at master) = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total mirror segment failures (at master) = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid files missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid files found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid PIDs missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid PIDs found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of /tmp lock files missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of /tmp lock files found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number postmaster processes missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number postmaster processes found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number mirror segments acting as primary segments = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number mirror segments acting as mirror segments = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:-----------------------------------------------------
[gpadmin@mdw ~]$
