* iptables 확인 (both)
vi /etc/sysconfig/iptables
-A INPUT -m state --state NEW -m udp -p udp --dport 5404 -j ACCEPT
-A INPUT -m state --state NEW -m udp -p udp --dport 5405 -j ACCEPT
mkdir /var/log/cluster (both)
service iptables restart (both)
/etc/init.d/corosync start (node1)
* check if the service is ok (node 1)
grep -e "Corosync Cluster Engine" -e "configuration file" /var/log/messages
* check if corosync started on the right interface (node 1)
grep TOTEM /var/log/messages
* check if pacemaker is up (node 1)
grep pcmk_startup /var/log/messages
* check if the corosync process is up (node 1)
ps aux | grep corosync
* if everything is ok on node1, then we can bring corosync up on node2:
/etc/init.d/corosync start
* check the status of the cluster. Running on any node, the following command:
crm_mon -1
* 1 nodes configured 2 expected votes 에러 발생
-> /etc/corosync/corosync.conf 를 다른 것으로 수정
# Please read the corosync.conf.5 manual page
compatibility: whitetank
totem {
version: 2
secauth: off
threads: 0
interface {
ringnumber: 0
bindnetaddr: 192.168.21.0
mcastaddr: 226.94.1.1
mcastport: 4000
ttl: 1
}
}
logging {
fileline: off
to_stderr: no
to_logfile: yes
logfile: /var/log/cluster/corosync.log
to_syslog: yes
debug: off
timestamp: on
logger_subsys {
subsys: AMF
debug: off
}
}
amf {
mode: disabled
}
aisexec {
user: root
group: root
}
service {
# Load the Pacemaker Cluster Resource Manager
name: pacemaker
ver: 0
}
* Set Corosync to automatic initialization (both nodes)
chkconfig --level 35 corosync on
* Pacemaker 설정
* crmsh 설치 (both)
-> 아래 에러 발생
error: Failed dependencies:
pssh is needed by crmsh-2.1-1.6.x86_64
python-dateutil is needed by crmsh-2.1-1.6.x86_64
python-lxml is needed by crmsh-2.1-1.6.x86_64
redhat-rpm-config is needed by crmsh-2.1-1.6.x86_64
-> yum -y install pssh python-dateutil python-lxml redhat-rpm-config
하고 다시 설치 시도
* 클러스터 일반 설정(node1) (방법 1)
crm configure property stonith-enabled=false
crm configure property no-quorum-policy=ignore
crm configure rsc_defaults resource-stickiness=100
* 클러스터 리소스 설정 (node1)
crm configure primitive DBIP ocf:heartbeat:IPaddr2 params ip=192.168.21.144 cidr_netmask=24 op monitor interval=30s
crm_mon -1
로 등록 상태를 확인하면, 아래와 같이 등록된 리소스를 확인할 수 있다.
[root@cos1 yum.repos.d]# crm_mon -1
Last updated: Tue Apr 21 03:30:03 2015
Last change: Tue Apr 21 03:29:57 2015
Stack: classic openais (with plugin)
Current DC: cos2.local - partition with quorum
Version: 1.1.11-97629de
2 Nodes configured, 2 expected votes
1 Resources configured
Online: [ cos1.local cos2.local ]
DBIP (ocf::heartbeat:IPaddr2): Started cos1.local
* [DRBD on cluster] (node1)
이어서, DRBD 설정도 순서대로 진행한다.
crm configure primitive drbd_postgres ocf:linbit:drbd params drbd_resource="postgres" op monitor interval="15s"
crm configure ms ms_drbd_postgres drbd_postgres meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
crm configure primitive postgres_fs ocf:heartbeat:Filesystem params device="/dev/drbd0" directory="/var/lib/ppas" fstype="ext4"
* [PostgreSQL on cluster] (node1)
PostgreSQL도 리소스로 등록해준다.
crm configure primitive postgresql ocf:heartbeat:pgsql op monitor depth="0" timeout="30" interval="30"
* [Resource Grouping]
PostgreSQL과 관련하여 등록한 리소스들을 그룹으로 묶어주고, 순서를 부여한다.
crm configure group postgres postgres_fs DBIP postgresql
crm configure colocation postgres_on_drbd inf: postgres ms_drbd_postgres:Master
crm configure order postgres_after_drbd inf: ms_drbd_postgres:promote postgres:start
crm configure location master-prefer-node1 DBIP 50: cos1.local
* crm 에 resource 삭제시 아래 링크 참고
https://www.suse.com/documentation/sle_ha/book_sleha/data/sec_ha_config_crm.html
댓글 없음:
댓글 쓰기