## Host information. The heartbeat (HB) network and the fence device should preferably use the same switch and the same network segment.
[root@RHCS-NODE1 /]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.0.31 db01
192.168.0.32 db02
192.168.11.31 db01-HB01
192.168.11.32 db02-HB02
192.168.11.28 fence01
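A quick sanity check (not in the original notes) that name resolution and the heartbeat segment work, assuming the hosts entries above are deployed on both nodes:

[root@RHCS-NODE1 /]# ping -c 3 db02-HB02
[root@RHCS-NODE2 /]# ping -c 3 db01-HB01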

## VMware ESXi fencing - query the ESXi fence device for the UUIDs of the VM guests.
[root@RHCS-NODE1 /]# fence_vmware_soap -z -l root -p 'password' -a 192.168.11.28 -o list
CentOS65-mini,564d3676-690d-738d-9b96-c045770b7ec1
CentOS65-mini2,422b0f53-77d5-ef6a-1aec-8b0f3f9ec905
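Before actually fencing anything, the same agent can query a single guest's power state; a sketch using the UUID returned above:

[root@RHCS-NODE1 /]# fence_vmware_soap -z -l root -p 'password' -a 192.168.11.28 -o status -U 422b0f53-77d5-ef6a-1aec-8b0f3f9ec905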

The following reboots (fences) Node2:
[root@RHCS-NODE1 /]# fence_vmware_soap  -a 192.168.11.28 -l root -p 'password' -v -z -U 422b0f53-77d5-ef6a-1aec-8b0f3f9ec905 -o reboot

## The cluster.conf file.
[root@RHCS-NODE1 /]# cat /etc/cluster/cluster.conf
<?xml version="1.0"?>
<cluster config_version="22" name="MYCLUSTER">
    <clusternodes>
        <clusternode name="db01" nodeid="1">
            <fence>
                <method name="Method">
                    <device name="vmware_fence" port="CentOS65-mini" ssl="on" uuid="564d3676-690d-738d-9b96-c045770b7ec1"/>
                </method>
            </fence>
        </clusternode>
        <clusternode name="db02" nodeid="2">
            <fence>
                <method name="Method">
                    <device name="vmware_fence" port="CentOS65-mini2" ssl="on" uuid="422b0f53-77d5-ef6a-1aec-8b0f3f9ec905"/>
                </method>
            </fence>
        </clusternode>
    </clusternodes>
    <cman expected_votes="1" two_node="1"/>
    <fencedevices>
        <fencedevice agent="fence_vmware_soap" ipaddr="fence01" login="root" name="vmware_fence" passwd="password"/>
    </fencedevices>
    <rm>
        <failoverdomains>
            <failoverdomain name="myfailover" ordered="1">
                <failoverdomainnode name="db01" priority="1"/>
                <failoverdomainnode name="db02" priority="1"/>
            </failoverdomain>
        </failoverdomains>
        <resources>
            <ip address="192.168.0.30/24" sleeptime="10"/>
            <script file="/etc/init.d/mysqld" name="mysql"/>
            <fs device="/dev/vg01/lv01" force_unmount="1" fsid="58557" fstype="ext4" mountpoint="/opt/mysql/data" name="dbdata"/>
        </resources>
        <service domain="myfailover" name="svc01" recovery="relocate">
            <ip ref="192.168.0.30/24"/>
            <fs ref="dbdata"/>
            <script ref="mysql"/>
        </service>
    </rm>
</cluster>
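Not part of the original notes, but on RHEL 6 this configuration is usually validated, propagated, and exercised roughly like this (increase config_version in the file before pushing):

[root@RHCS-NODE1 /]# ccs_config_validate              ## schema check of /etc/cluster/cluster.conf
[root@RHCS-NODE1 /]# cman_tool version -r             ## push the updated config to all nodes
[root@RHCS-NODE1 /]# clustat                          ## node and service status
[root@RHCS-NODE1 /]# clusvcadm -r svc01 -m db02       ## manually relocate the service as a failover test
[root@RHCS-NODE1 /]# fence_node db02                  ## fence a node through the cluster stack to verify the fence device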

## On nas4free, assign one more iSCSI target portal on the other NIC.
Existing: 192.168.11.22:3260
Added   : 192.168.0.22:3260
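On nas4free this is configured in the web GUI (Services > iSCSI Target > Portals); since nas4free drives istgt underneath, the result is roughly equivalent to an extra portal group like the sketch below (the group number and comment are illustrative, not taken from the actual box):

[PortalGroup2]
  Comment "DB volume portal"
  Portal DA1 192.168.0.22:3260
  Portal DA2 192.168.11.22:3261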


## The iSCSI portal below is the target used for ESXi shared storage. Because it already occupies the 192.168.11.22 IP, a separate iSCSI target is configured on 192.168.11.22:3261 for the DB volume.
[root@RHCS-NODE1 /]# iscsiadm --mode discovery --type sendtargets --portal 192.168.11.22
192.168.11.22:3260,1 iqn.2007-09.jp.ne.peach.istgt:target0

** If the iSCSI client's initiator name (ID) is changed, iscsid must be restarted for the change to take effect.

(Edit the initiator name in --> # vi /etc/iscsi/initiatorname.iscsi)

# service iscsid restart   (on systemd systems: systemctl restart iscsid)
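For reference, /etc/iscsi/initiatorname.iscsi holds a single line of this form (the suffix here is only an example, not the value used on these nodes):

[root@RHCS-NODE1 /]# cat /etc/iscsi/initiatorname.iscsi
InitiatorName=iqn.1994-05.com.redhat:rhcs-node1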


## On the cluster node, log in twice to create two paths.
[root@RHCS-NODE2 /]# iscsiadm --mode discovery --type sendtargets --portal 192.168.0.22
192.168.0.22:3260,2 iqn.2007-09.jp.ne.peach.istgt:target1
192.168.11.22:3261,2 iqn.2007-09.jp.ne.peach.istgt:target1

[root@RHCS-NODE2 /]# iscsiadm --mode discovery --type sendtargets --portal 192.168.11.22:3261
192.168.0.22:3260,2 iqn.2007-09.jp.ne.peach.istgt:target1
192.168.11.22:3261,2 iqn.2007-09.jp.ne.peach.istgt:target1

[root@RHCS-NODE2 ~]# iscsiadm --mode node --targetname iqn.2007-09.jp.ne.peach.istgt:target1 --portal 192.168.0.22 --login
[root@RHCS-NODE2 ~]# iscsiadm --mode node --targetname iqn.2007-09.jp.ne.peach.istgt:target1 --portal 192.168.11.22:3261 --login
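To make these logins persist across reboots, the node records can be switched to automatic startup; a sketch (node.startup is usually already automatic on RHEL 6, so check the records first):

[root@RHCS-NODE2 ~]# iscsiadm --mode node --targetname iqn.2007-09.jp.ne.peach.istgt:target1 --portal 192.168.0.22 --op update -n node.startup -v automatic
[root@RHCS-NODE2 ~]# iscsiadm --mode node --targetname iqn.2007-09.jp.ne.peach.istgt:target1 --portal 192.168.11.22:3261 --op update -n node.startup -v automatic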

## Log out of the iSCSI sessions and delete the node records.
[root@RHCS-NODE2 ~]# iscsiadm --mode node --targetname iqn.2007-09.jp.ne.peach.istgt:target1 --portal 192.168.0.22 -u
[root@RHCS-NODE2 ~]# iscsiadm --mode node --targetname iqn.2007-09.jp.ne.peach.istgt:target1 --portal 192.168.11.22:3261 -u
[root@RHCS-NODE2 ~]# iscsiadm --mode node --targetname iqn.2007-09.jp.ne.peach.istgt:target1 --portal 192.168.0.22 -o delete
[root@RHCS-NODE2 ~]# iscsiadm --mode node --targetname iqn.2007-09.jp.ne.peach.istgt:target1 --portal 192.168.11.22:3261 -o delete

## View connected sessions.
[root@RHCS-NODE2 ~]# iscsiadm -m session
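Adding -P 3 prints the detailed view, including which sdX devices each session attached:

[root@RHCS-NODE2 ~]# iscsiadm -m session -P 3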

## Copy the sample multipath.conf file and edit the sections below.
[root@RHCS-NODE2 ~]# cp /usr/share/doc/device-mapper-multipath-0.4.9/multipath.conf /etc/multipath.conf

blacklist {
        #devnode "^sdb"
        wwid "3300000004d812570" --> 가리고 싶은 디스크
}
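If the WWID of a disk is not known, it can be read on RHEL 6 with scsi_id; the device name below is just an example:

[root@RHCS-NODE2 ~]# /lib/udev/scsi_id --whitelisted --device=/dev/sdb    ## prints the WWID to use in blacklist/multipaths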

## Use user friendly names, instead of using WWIDs as names.
defaults {
    user_friendly_names yes
}

I am not sure exactly what difference the commented-out multipaths section below makes, but with it, taking one path down (ifconfig eth1 down) caused a delay in volume access, so the devices section below was applied instead.
#multipaths {
#        multipath {
#                   wwid    3300000004d812570
#                   alias   nas-iscsi
#                   path_grouping_policy    group_by_serial
#                   path_checker            tur
#                   path_selector         "round-robin 0"
#                   no_path_retry          queue
#                  rr_min_io               100
#        }
#}

devices {
        device {
                vendor                  "FreeBSD"
                product                 "iSCSI"
                path_grouping_policy    group_by_serial
                path_checker            tur
                path_selector           "round-robin 0"
                hardware_handler        "0"
                failback                immediate
                rr_weight               uniform
                no_path_retry           queue
                rr_min_io               100
        }
}
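Before restarting the daemon, a dry run can preview the maps this configuration would build (the -d flag only prints, it does not create anything):

[root@RHCS-NODE2 /]# multipath -v2 -d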


## If the paths do not show up, flush the existing multipath maps.
[root@RHCS-NODE2 /]# multipath -F

## Restart the daemon.
[root@RHCS-NODE2 /]# /etc/init.d/multipathd restart

## View the multipath topology.
[root@RHCS-NODE2 ~]# multipath -ll
mpathb (33000000090c48fc8) dm-3 FreeBSD,iSCSI DISK
size=100G features='1 queue_if_no_path' hwhandler='0' wp=rw
`-+- policy='round-robin 0' prio=1 status=active
  |- 5:0:0:0 sdd 8:48 active ready running
  `- 6:0:0:0 sde 8:64 active ready running
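The path-failure behaviour mentioned earlier (ifconfig eth1 down) can be re-tested against this devices-based configuration; eth1 is assumed here to be the NIC carrying the 192.168.11.x path:

[root@RHCS-NODE2 ~]# ifconfig eth1 down   ## the matching path should drop to failed/faulty while I/O continues on the other
[root@RHCS-NODE2 ~]# multipath -ll
[root@RHCS-NODE2 ~]# ifconfig eth1 up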

[root@RHCS-NODE2 ~]# dmsetup info /dev/mapper/mpathb
Name:              mpathb
State:             ACTIVE
Read Ahead:        256
Tables present:    LIVE
Open count:        1
Event number:      0
Major, minor:      253, 3
Number of targets: 1
UUID: mpath-33000000090c48fc8

[root@RHCS-NODE2 /]# pvcreate /dev/mapper/mpathb
Physical volume "/dev/mapper/mpathb" successfully created

[root@RHCS-NODE2 /]# vgcreate vg01 /dev/mapper/mpathb
  Volume group "vg01" successfully created

[root@RHCS-NODE2 /]# lvcreate -l15359 -nlv01 vg01
  Logical volume "lv01" created
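The volume still needs the filesystem that cluster.conf expects (ext4 on /dev/vg01/lv01 mounted at /opt/mysql/data); a quick manual check before handing it to rgmanager might look like this:

[root@RHCS-NODE2 /]# mkfs.ext4 /dev/vg01/lv01
[root@RHCS-NODE2 /]# mkdir -p /opt/mysql/data
[root@RHCS-NODE2 /]# mount /dev/vg01/lv01 /opt/mysql/data   ## test mount only
[root@RHCS-NODE2 /]# umount /opt/mysql/data                 ## unmount and let the cluster service mount it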


