Menginstal iSCSI initiator

sudo yum install -y iscsi-initiator-utils
[root@labs ~]# yum install -y iscsi-initiator-utils
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
epel/x86_64/metalink                                                                                          | 5.1 kB  00:00:00
 * epel: d2lzkl7pfhq30w.cloudfront.net
base                                                                                                          | 3.6 kB  00:00:00
extras                                                                                                        | 2.9 kB  00:00:00
updates                                                                                                       | 2.9 kB  00:00:00
zimbra                                                                                                        | 3.0 kB  00:00:00
zimbra-8815-oss                                                                                               | 3.0 kB  00:00:00
Package iscsi-initiator-utils-6.2.0.874-22.el7_9.x86_64 already installed and latest version
Nothing to do
[root@labs ~]#

Melakukan discovery iSCSI target

[root@labs ~]# iscsiadm -m discovery -t st -p 192.168.10.101
192.168.10.101:3260,1 iqn.2000-01.com.synology:NAS-mynas.default-target.0d52914317f
[root@labs ~]#
[root@labs ~]# iscsiadm -m node -T iqn.2000-01.com.synology:NAS-mynas.default-target.0d52914317f
# BEGIN RECORD 6.2.0.874-22
node.name = iqn.2000-01.com.synology:NAS-mynas.default-target.0d52914317f
node.tpgt = 1
node.startup = automatic
node.leading_login = No
iface.hwaddress = <empty>
iface.ipaddress = <empty>
iface.iscsi_ifacename = default
iface.net_ifacename = <empty>
iface.gateway = <empty>
iface.subnet_mask = <empty>
iface.transport_name = tcp
iface.initiatorname = <empty>
iface.state = <empty>
iface.vlan_id = 0
iface.vlan_priority = 0
iface.vlan_state = <empty>
iface.iface_num = 0
iface.mtu = 0
iface.port = 0
iface.bootproto = <empty>
iface.dhcp_alt_client_id_state = <empty>
iface.dhcp_alt_client_id = <empty>
iface.dhcp_dns = <empty>
iface.dhcp_learn_iqn = <empty>
iface.dhcp_req_vendor_id_state = <empty>
iface.dhcp_vendor_id_state = <empty>
iface.dhcp_vendor_id = <empty>
iface.dhcp_slp_da = <empty>
iface.fragmentation = <empty>
iface.gratuitous_arp = <empty>
iface.incoming_forwarding = <empty>
iface.tos_state = <empty>
iface.tos = 0
iface.ttl = 0
iface.delayed_ack = <empty>
iface.tcp_nagle = <empty>
iface.tcp_wsf_state = <empty>
iface.tcp_wsf = 0
iface.tcp_timer_scale = 0
iface.tcp_timestamp = <empty>
iface.redirect = <empty>
iface.def_task_mgmt_timeout = 0
iface.header_digest = <empty>
iface.data_digest = <empty>
iface.immediate_data = <empty>
iface.initial_r2t = <empty>
iface.data_seq_inorder = <empty>
iface.data_pdu_inorder = <empty>
iface.erl = 0
iface.max_receive_data_len = 0
iface.first_burst_len = 0
iface.max_outstanding_r2t = 0
iface.max_burst_len = 0
iface.chap_auth = <empty>
iface.bidi_chap = <empty>
iface.strict_login_compliance = <empty>
iface.discovery_auth = <empty>
iface.discovery_logout = <empty>
node.discovery_address = 192.168.10.101
node.discovery_port = 3260
node.discovery_type = send_targets
node.session.initial_cmdsn = 0
node.session.initial_login_retry_max = 8
node.session.xmit_thread_priority = -20
node.session.cmds_max = 128
node.session.queue_depth = 32
node.session.nr_sessions = 1
node.session.auth.authmethod = None
node.session.auth.username = <empty>
node.session.auth.password = <empty>
node.session.auth.username_in = <empty>
node.session.auth.password_in = <empty>
node.session.auth.chap_algs = MD5
node.session.timeo.replacement_timeout = 120
node.session.err_timeo.abort_timeout = 15
node.session.err_timeo.lu_reset_timeout = 30
node.session.err_timeo.tgt_reset_timeout = 30
node.session.err_timeo.host_reset_timeout = 60
node.session.iscsi.FastAbort = Yes
node.session.iscsi.InitialR2T = No
node.session.iscsi.ImmediateData = Yes
node.session.iscsi.FirstBurstLength = 262144
node.session.iscsi.MaxBurstLength = 16776192
node.session.iscsi.DefaultTime2Retain = 0
node.session.iscsi.DefaultTime2Wait = 2
node.session.iscsi.MaxConnections = 1
node.session.iscsi.MaxOutstandingR2T = 1
node.session.iscsi.ERL = 0
node.session.scan = auto
node.conn[0].address = 192.168.10.101
node.conn[0].port = 3260
node.conn[0].startup = manual
node.conn[0].tcp.window_size = 524288
node.conn[0].tcp.type_of_service = 0
node.conn[0].timeo.logout_timeout = 15
node.conn[0].timeo.login_timeout = 15
node.conn[0].timeo.auth_timeout = 45
node.conn[0].timeo.noop_out_interval = 5
node.conn[0].timeo.noop_out_timeout = 5
node.conn[0].iscsi.MaxXmitDataSegmentLength = 0
node.conn[0].iscsi.MaxRecvDataSegmentLength = 262144
node.conn[0].iscsi.HeaderDigest = None
node.conn[0].iscsi.IFMarker = No
node.conn[0].iscsi.OFMarker = No
# END RECORD
[root@labs ~]#
[root@labs ~]# iscsiadm -m node --login
Logging in to [iface: default, target: iqn.2000-01.com.synology:NAS-mynas.default-target.0d52914317f, portal: 192.168.10.101,3260] (multiple)
Login to [iface: default, target: iqn.2000-01.com.synology:NAS-mynas.default-target.0d52914317f, portal: 192.168.10.101,3260] successful.
[root@labs ~]#

[root@labs ~]# lsblk
NAME                     MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINT
sda                        8:0    0   7.3T  0 disk
└─sda1                     8:1    0   7.3T  0 part
  └─md127                  9:127  0   7.3T  0 raid1 /opt
sdb                        8:16   0 931.5G  0 disk
├─sdb1                     8:17   0     1G  0 part  /boot
└─sdb2                     8:18   0 930.5G  0 part
  ├─cl_dartjkt--wdd-root 253:0    0    50G  0 lvm   /
  ├─cl_dartjkt--wdd-swap 253:1    0   7.8G  0 lvm   [SWAP]
  └─cl_dartjkt--wdd-home 253:2    0 872.7G  0 lvm   /home
sdc                        8:32   0   7.3T  0 disk
└─sdc1                     8:33   0   7.3T  0 part
  └─md127                  9:127  0   7.3T  0 raid1 /opt
sdd                        8:48   0   3.9T  0 disk
[root@labs ~]#

[root@labs ~]# gdisk /dev/sdd
GPT fdisk (gdisk) version 0.8.10

Partition table scan:
  MBR: not present
  BSD: not present
  APM: not present
  GPT: not present

Creating new GPT entries.

Command (? for help): p
Disk /dev/sdd: 8388608000 sectors, 3.9 TiB
Logical sector size: 512 bytes
Disk identifier (GUID): DB58673F-AAAD-4310-8A0C-CF6E40BB6814
Partition table holds up to 128 entries
First usable sector is 34, last usable sector is 8388607966
Partitions will be aligned on 2048-sector boundaries
Total free space is 8388607933 sectors (3.9 TiB)

Number  Start (sector)    End (sector)  Size       Code  Name

Command (? for help): n
Partition number (1-128, default 1):
First sector (34-8388607966, default = 2048) or {+-}size{KMGTP}:
Last sector (2048-8388607966, default = 8388607966) or {+-}size{KMGTP}:
Current type is 'Linux filesystem'
Hex code or GUID (L to show codes, Enter = 8300):
Changed type of partition to 'Linux filesystem'

Command (? for help): w

Final checks complete. About to write GPT data. THIS WILL OVERWRITE EXISTING
PARTITIONS!!

Do you want to proceed? (Y/N): Y
OK; writing new GUID partition table (GPT) to /dev/sdd.
The operation has completed successfully.
[root@labs ~]#

[root@labs ~]# mkdir /data-synology
[root@labs ~]#
[root@labs ~]# vgs
  VG             #PV #LV #SN Attr   VSize    VFree
  cl_dartjkt-wdd   1   3   0 wz--n- <930.51g 4.00m
[root@labs ~]#
[root@labs ~]# vgcreate synology /dev/sdd1
  Physical volume "/dev/sdd1" successfully created.
  Volume group "synology" successfully created
[root@labs ~]#
[root@labs ~]# lvcreate -l 100%FREE -n zimbralv synology
  Logical volume "zimbralv" created.
[root@labs ~]#
[root@labs ~]# mkfs.xfs /dev/mapper/synology-zimbralv
Discarding blocks...Done.
meta-data=/dev/mapper/synology-zimbralv isize=512    agcount=4, agsize=262143744 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=1048574976, imaxpct=5
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=511999, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@labs ~]#
[root@labs ~]# mount /dev/mapper/synology-zimbralv /data-synology/
[root@labs ~]# ls -l
total 12
-rw-------. 1 root root 1527 Jul 30 13:05 anaconda-ks.cfg
-rw-r--r--. 1 root mail 1412 Sep  2 14:57 dead.letter
-rw-r--r--. 1 root root 1561 Jul 30 13:07 initial-setup-ks.cfg
[root@labs ~]# 

[root@labs ~]# df -hT
Filesystem                       Type      Size  Used Avail Use% Mounted on
devtmpfs                         devtmpfs  7.8G     0  7.8G   0% /dev
tmpfs                            tmpfs     7.8G     0  7.8G   0% /dev/shm
tmpfs                            tmpfs     7.8G  138M  7.7G   2% /run
tmpfs                            tmpfs     7.8G     0  7.8G   0% /sys/fs/cgroup
/dev/mapper/cl_dartjkt--wdd-root xfs        50G  6.7G   44G  14% /
tmpfs                            tmpfs     7.8G  144K  7.8G   1% /tmp
/dev/sdb1                        xfs      1014M  212M  803M  21% /boot
/dev/mapper/cl_dartjkt--wdd-home xfs       873G  5.5G  867G   1% /home
tmpfs                            tmpfs     1.6G   24K  1.6G   1% /run/user/1000
tmpfs                            tmpfs     1.6G     0  1.6G   0% /run/user/0
/dev/md127                       xfs       7.3T  5.2T  2.1T  72% /opt
/dev/mapper/synology-zimbralv    xfs       4.0T  109G  3.8T   3% /data-synology
[root@labs ~]#

Leave a Reply

Your email address will not be published. Required fields are marked *