When a portal fails to create (e.g. IP not on system), its ID is absent from portal_id_map. Previously this caused every target referencing that portal to be skipped entirely, which then cascaded to target-extent failures. Now each target's groups are filtered individually: groups with unmapped portal or initiator references are dropped with a warning, and the target is still created with the remaining valid groups. This keeps the target and all its extent associations intact while clearly indicating which groups need manual attention. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
588 lines
25 KiB
Python
588 lines
25 KiB
Python
"""Migration routines for SMB and NFS shares."""
|
||
from __future__ import annotations
|
||
|
||
import json
|
||
from typing import Any
|
||
|
||
from .colors import log, _bold, _bold_cyan, _bold_green, _bold_red, _cyan, _yellow
|
||
from .client import TrueNASClient
|
||
from .summary import Summary
|
||
|
||
|
||
# ─────────────────────────────────────────────────────────────────────────────
|
||
# Payload builders
|
||
# ─────────────────────────────────────────────────────────────────────────────
|
||
|
||
# Read-only / server-generated fields that must NOT be sent on create/update
_SMB_SHARE_READONLY = frozenset({"id", "locked", "path_local"})

# CORE SMB share fields that do not exist in the SCALE API
_SMB_SHARE_CORE_EXTRAS = frozenset({
    "vuid",  # server-generated Time Machine UUID; SCALE sets this automatically
})

# CORE NFS share fields that do not exist in the SCALE API
_NFS_SHARE_CORE_EXTRAS = frozenset({
    "paths",    # CORE uses a list; SCALE uses a single "path" string (converted below)
    "alldirs",  # removed in SCALE
    "quiet",    # removed in SCALE
})
|
||
|
||
|
||
def _smb_share_payload(share: dict) -> dict:
    """Build a ``sharing.smb.create`` payload from a CORE share record.

    Strips server-generated/read-only fields and CORE-only fields that the
    SCALE API would reject; everything else is passed through unchanged.
    """
    banned = _SMB_SHARE_READONLY | _SMB_SHARE_CORE_EXTRAS
    payload = {}
    for field, value in share.items():
        if field not in banned:
            payload[field] = value
    return payload
|
||
|
||
|
||
def _nfs_share_payload(share: dict) -> dict:
    """Build a ``sharing.nfs.create`` payload from a CORE share record.

    Drops read-only fields and CORE-only fields, and converts CORE's
    ``paths`` list into the single ``path`` string SCALE expects.
    """
    skip = {"id", "locked"} | _NFS_SHARE_CORE_EXTRAS
    payload = {field: share[field] for field in share if field not in skip}
    # CORE stores export paths as a list under "paths"; SCALE expects a single
    # "path" string — take the first entry when no explicit "path" exists.
    core_paths = share.get("paths")
    if core_paths and "path" not in payload:
        payload["path"] = core_paths[0]
    return payload
|
||
|
||
|
||
# ─────────────────────────────────────────────────────────────────────────────
|
||
# Migration routines
|
||
# ─────────────────────────────────────────────────────────────────────────────
|
||
|
||
async def migrate_smb_shares(
    client: TrueNASClient,
    shares: list[dict],
    dry_run: bool,
    summary: Summary,
) -> None:
    """Create the archived SMB shares on the destination system.

    Shares whose name already exists on the destination (compared
    case-insensitively) are skipped.  In dry-run mode nothing is created and
    each candidate path is recorded in ``summary.paths_to_create``.  Per-share
    failures are logged and counted but do not abort the remaining shares.
    """
    summary.smb_found = len(shares)
    if not shares:
        log.info("No SMB shares found in archive.")
        return

    log.info("Querying existing SMB shares on destination …")
    try:
        existing = await client.call("sharing.smb.query") or []
    except RuntimeError as exc:
        # Without the existing-share list we cannot de-duplicate safely — bail out.
        msg = f"Could not query SMB shares: {exc}"
        log.error(msg)
        summary.errors.append(msg)
        return

    # SMB share names are case-insensitive, so compare lowercased.
    existing_names = {s.get("name", "").lower() for s in existing}
    log.info(" Destination has %d existing SMB share(s).", len(existing_names))

    for share in shares:
        name = share.get("name", "<unnamed>")
        log.info("%s SMB share %s", _bold("──"), _bold_cyan(repr(name)))

        if name.lower() in existing_names:
            log.info(" %s – already exists on destination.", _yellow("SKIP"))
            summary.smb_skipped += 1
            continue

        payload = _smb_share_payload(share)
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would create %s → %s",
                     _cyan("[DRY RUN]"), _bold_cyan(repr(name)), payload.get("path"))
            summary.smb_created += 1
            # Record the dataset path so the caller can verify it exists.
            if payload.get("path"):
                summary.paths_to_create.append(payload["path"])
            continue

        try:
            r = await client.call("sharing.smb.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            summary.smb_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.smb_failed += 1
            summary.errors.append(f"SMB share {name!r}: {exc}")
|
||
|
||
|
||
async def migrate_nfs_shares(
    client: TrueNASClient,
    shares: list[dict],
    dry_run: bool,
    summary: Summary,
) -> None:
    """Create the archived NFS exports on the destination system.

    A share is skipped when its (trailing-slash-normalized) primary path is
    already exported on the destination.  In dry-run mode every candidate path
    is recorded in ``summary.paths_to_create``.  Per-share failures are logged
    and counted but do not abort the remaining shares.
    """
    summary.nfs_found = len(shares)
    if not shares:
        log.info("No NFS shares found in archive.")
        return

    log.info("Querying existing NFS shares on destination …")
    try:
        existing = await client.call("sharing.nfs.query") or []
    except RuntimeError as exc:
        msg = f"Could not query NFS shares: {exc}"
        log.error(msg)
        summary.errors.append(msg)
        return

    # Normalize away trailing slashes so "/mnt/a/" and "/mnt/a" compare equal.
    existing_paths = {s.get("path", "").rstrip("/") for s in existing}
    log.info(" Destination has %d existing NFS share(s).", len(existing_paths))

    for share in shares:
        # CORE records may carry multiple paths under "paths"; SCALE records
        # carry a single "path".  The first path acts as the share's identity.
        core_paths = share.get("paths") or []
        path = (share.get("path") or (core_paths[0] if core_paths else "")).rstrip("/")
        all_paths = [p.rstrip("/") for p in (core_paths if core_paths else ([path] if path else []))]
        log.info("%s NFS export %s", _bold("──"), _bold_cyan(repr(path)))

        if path in existing_paths:
            log.info(" %s – path already exported on destination.", _yellow("SKIP"))
            summary.nfs_skipped += 1
            continue

        payload = _nfs_share_payload(share)
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would create NFS export for %s",
                     _cyan("[DRY RUN]"), _bold_cyan(repr(path)))
            summary.nfs_created += 1
            summary.paths_to_create.extend(all_paths)
            continue

        try:
            r = await client.call("sharing.nfs.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            summary.nfs_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.nfs_failed += 1
            summary.errors.append(f"NFS share {path!r}: {exc}")
|
||
|
||
|
||
# ─────────────────────────────────────────────────────────────────────────────
|
||
# iSCSI payload builders
|
||
# ─────────────────────────────────────────────────────────────────────────────
|
||
|
||
# Server-managed iSCSI fields that must not appear in create payloads.
_ISCSI_EXTENT_READONLY = frozenset({"id", "serial", "naa", "vendor", "locked"})
_ISCSI_INITIATOR_READONLY = frozenset({"id"})
_ISCSI_PORTAL_READONLY = frozenset({"id", "tag"})
_ISCSI_TARGET_READONLY = frozenset({"id", "rel_tgt_id", "iscsi_parameters"})
|
||
|
||
|
||
def _iscsi_extent_payload(extent: dict) -> dict:
    """Build an ``iscsi.extent.create`` payload from a CORE extent record.

    Read-only fields are removed, and whichever of ``path``/``filesize``
    (DISK extents) or ``disk`` (FILE extents) does not apply is dropped.
    """
    payload = {field: value for field, value in extent.items()
               if field not in _ISCSI_EXTENT_READONLY}
    if extent.get("type") == "DISK":
        # "path" is derived from the zvol and "filesize" is FILE-only.
        for surplus in ("path", "filesize"):
            payload.pop(surplus, None)
    else:
        payload.pop("disk", None)
    return payload
|
||
|
||
|
||
def _iscsi_initiator_payload(initiator: dict) -> dict:
    """Copy an initiator-group record, omitting server-assigned fields."""
    cleaned = dict(initiator)
    for field in _ISCSI_INITIATOR_READONLY:
        cleaned.pop(field, None)
    return cleaned
|
||
|
||
|
||
def _iscsi_portal_payload(portal: dict) -> dict:
    """Build an ``iscsi.portal.create`` payload from a CORE portal record.

    Read-only fields are removed, and each listen entry is reduced to its
    ``ip`` key — in the SCALE API the port is a global setting, not per-portal.
    """
    payload = {field: value for field, value in portal.items()
               if field not in _ISCSI_PORTAL_READONLY}
    listeners = payload.get("listen", [])
    payload["listen"] = [{"ip": entry["ip"]} for entry in listeners]
    return payload
|
||
|
||
|
||
def _iscsi_target_payload(
    target: dict,
    portal_id_map: dict[int, int],
    initiator_id_map: dict[int, int],
) -> dict:
    """Build an ``iscsi.target.create`` payload from a CORE target record.

    Read-only fields are removed and each group's portal/initiator reference
    is remapped from its source id to the destination id.  References missing
    from a map pass through unchanged (including a ``None`` initiator, which
    means "no initiator restriction").
    """
    payload = {field: value for field, value in target.items()
               if field not in _ISCSI_TARGET_READONLY}
    remapped_groups = []
    for group in target.get("groups", []):
        src_portal = group["portal"]
        src_initiator = group.get("initiator")
        remapped_groups.append({
            **group,
            "portal": portal_id_map.get(src_portal, src_portal),
            "initiator": initiator_id_map.get(src_initiator, src_initiator),
        })
    payload["groups"] = remapped_groups
    return payload
|
||
|
||
|
||
# ─────────────────────────────────────────────────────────────────────────────
|
||
# iSCSI migration sub-routines
|
||
# ─────────────────────────────────────────────────────────────────────────────
|
||
|
||
async def _migrate_iscsi_extents(
    client: TrueNASClient,
    extents: list[dict],
    dry_run: bool,
    summary: Summary,
    id_map: dict[int, int],
) -> None:
    """Create iSCSI extents on the destination.

    Existing extents are matched by name (case-insensitively).  ``id_map`` is
    filled with source-id → destination-id so the later target-extent pass can
    remap references.  In dry-run mode the source id maps to itself as a
    placeholder, and DISK-extent zvol names are recorded in
    ``summary.zvols_to_check``.
    """
    log.info("Querying existing iSCSI extents on destination …")
    try:
        existing = await client.call("iscsi.extent.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI extents: {exc}"
        log.error(msg); summary.errors.append(msg); return

    existing_by_name = {e["name"].lower(): e for e in existing}
    log.info(" Destination has %d existing extent(s).", len(existing_by_name))

    for ext in extents:
        name = ext.get("name", "<unnamed>")
        log.info("%s iSCSI extent %s", _bold("──"), _bold_cyan(repr(name)))

        if name.lower() in existing_by_name:
            log.info(" %s – already exists on destination.", _yellow("SKIP"))
            # Map to the existing object so downstream references still resolve.
            id_map[ext["id"]] = existing_by_name[name.lower()]["id"]
            summary.iscsi_extents_skipped += 1
            continue

        payload = _iscsi_extent_payload(ext)
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would create extent %s → %s",
                     _cyan("[DRY RUN]"), _bold_cyan(repr(name)),
                     ext.get("disk") or ext.get("path"))
            summary.iscsi_extents_created += 1
            id_map[ext["id"]] = ext["id"]  # placeholder — enables downstream dry-run remapping
            if ext.get("type") == "DISK" and ext.get("disk"):
                # "disk" looks like "zvol/pool/name"; record the bare zvol path.
                summary.zvols_to_check.append(ext["disk"].removeprefix("zvol/"))
            continue

        try:
            r = await client.call("iscsi.extent.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            id_map[ext["id"]] = r["id"]
            summary.iscsi_extents_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_extents_failed += 1
            summary.errors.append(f"iSCSI extent {name!r}: {exc}")
|
||
|
||
|
||
async def _migrate_iscsi_initiators(
    client: TrueNASClient,
    initiators: list[dict],
    dry_run: bool,
    summary: Summary,
    id_map: dict[int, int],
) -> None:
    """Create iSCSI initiator groups on the destination.

    Initiator groups have no unique name, so an existing group is matched by
    its non-empty comment (case-insensitively).  ``id_map`` is filled with
    source-id → destination-id for target-group remapping; in dry-run mode the
    source id maps to itself as a placeholder.
    """
    log.info("Querying existing iSCSI initiator groups on destination …")
    try:
        existing = await client.call("iscsi.initiator.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI initiators: {exc}"
        log.error(msg); summary.errors.append(msg); return

    # Only groups with a comment can be matched; uncommented ones never dedupe.
    existing_by_comment = {e["comment"].lower(): e for e in existing if e.get("comment")}
    log.info(" Destination has %d existing initiator group(s).", len(existing))

    for init in initiators:
        comment = init.get("comment", "")
        log.info("%s iSCSI initiator group %s", _bold("──"), _bold_cyan(repr(comment)))

        if comment and comment.lower() in existing_by_comment:
            log.info(" %s – comment already exists on destination.", _yellow("SKIP"))
            id_map[init["id"]] = existing_by_comment[comment.lower()]["id"]
            summary.iscsi_initiators_skipped += 1
            continue

        payload = _iscsi_initiator_payload(init)
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would create initiator group %s",
                     _cyan("[DRY RUN]"), _bold_cyan(repr(comment)))
            summary.iscsi_initiators_created += 1
            id_map[init["id"]] = init["id"]  # placeholder — enables downstream dry-run remapping
            continue

        try:
            r = await client.call("iscsi.initiator.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            id_map[init["id"]] = r["id"]
            summary.iscsi_initiators_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_initiators_failed += 1
            summary.errors.append(f"iSCSI initiator {comment!r}: {exc}")
|
||
|
||
|
||
async def _migrate_iscsi_portals(
    client: TrueNASClient,
    portals: list[dict],
    dry_run: bool,
    summary: Summary,
    id_map: dict[int, int],
) -> None:
    """Create iSCSI portals on the destination.

    Portals have no name, so an existing portal is matched by having exactly
    the same set of listen IPs.  ``id_map`` records source-id → destination-id
    for target-group remapping; in dry-run mode the source id maps to itself
    as a placeholder.
    """
    log.info("Querying existing iSCSI portals on destination …")
    try:
        existing = await client.call("iscsi.portal.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI portals: {exc}"
        log.error(msg); summary.errors.append(msg); return

    def _ip_set(p: dict) -> frozenset:
        # Order-insensitive identity of a portal: the set of IPs it listens on.
        return frozenset(l["ip"] for l in p.get("listen", []))

    existing_ip_sets = [(_ip_set(p), p["id"]) for p in existing]
    log.info(" Destination has %d existing portal(s).", len(existing))

    for portal in portals:
        comment = portal.get("comment", "")
        ips = ", ".join(l['ip'] for l in portal.get("listen", []))
        log.info("%s iSCSI portal %s [%s]", _bold("──"), _bold_cyan(repr(comment)), ips)

        my_ips = _ip_set(portal)
        match = next((eid for eips, eid in existing_ip_sets if eips == my_ips), None)
        if match is not None:
            log.info(" %s – IP set already exists on destination.", _yellow("SKIP"))
            id_map[portal["id"]] = match
            summary.iscsi_portals_skipped += 1
            continue

        payload = _iscsi_portal_payload(portal)
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would create portal %s → %s",
                     _cyan("[DRY RUN]"), _bold_cyan(repr(comment)), ips)
            summary.iscsi_portals_created += 1
            id_map[portal["id"]] = portal["id"]  # placeholder — enables downstream dry-run remapping
            continue

        try:
            r = await client.call("iscsi.portal.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            id_map[portal["id"]] = r["id"]
            summary.iscsi_portals_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_portals_failed += 1
            summary.errors.append(f"iSCSI portal {comment!r}: {exc}")
|
||
|
||
|
||
async def _migrate_iscsi_targets(
    client: TrueNASClient,
    targets: list[dict],
    dry_run: bool,
    summary: Summary,
    id_map: dict[int, int],
    portal_id_map: dict[int, int],
    initiator_id_map: dict[int, int],
) -> None:
    """Create iSCSI targets on the destination, remapping group references.

    Existing targets are matched by name (case-insensitively).  Groups whose
    portal or initiator reference cannot be remapped are dropped with a
    warning; the target is still created with the remaining groups.  ``id_map``
    is filled with source-id → destination-id for the target-extent pass (in
    dry-run mode the source id maps to itself as a placeholder).
    """
    log.info("Querying existing iSCSI targets on destination …")
    try:
        existing = await client.call("iscsi.target.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI targets: {exc}"
        log.error(msg); summary.errors.append(msg); return

    existing_by_name = {t["name"].lower(): t for t in existing}
    log.info(" Destination has %d existing target(s).", len(existing_by_name))

    for target in targets:
        name = target.get("name", "<unnamed>")
        log.info("%s iSCSI target %s", _bold("──"), _bold_cyan(repr(name)))

        if name.lower() in existing_by_name:
            log.info(" %s – already exists on destination.", _yellow("SKIP"))
            id_map[target["id"]] = existing_by_name[name.lower()]["id"]
            summary.iscsi_targets_skipped += 1
            continue

        # Filter out groups whose portal or initiator could not be mapped (e.g. portal
        # creation failed). Warn per dropped group but still create the target — a
        # target without every portal group is valid and preferable to no target at all.
        valid_groups = []
        for g in target.get("groups", []):
            unmapped = []
            if g.get("portal") not in portal_id_map:
                # .get in the message too: a malformed group may lack the key.
                unmapped.append(f"portal id={g.get('portal')}")
            # BUGFIX: a None/absent initiator means "no initiator restriction"
            # and needs no mapping — previously such valid groups were always
            # dropped because None is never a key in initiator_id_map (and the
            # message f-string raised KeyError when the key was absent).
            if g.get("initiator") is not None and g["initiator"] not in initiator_id_map:
                unmapped.append(f"initiator id={g['initiator']}")
            if unmapped:
                log.warning(" %s dropping group with unmapped %s",
                            _yellow("WARN"), ", ".join(unmapped))
            else:
                valid_groups.append(g)

        payload = _iscsi_target_payload({**target, "groups": valid_groups},
                                        portal_id_map, initiator_id_map)
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would create target %s",
                     _cyan("[DRY RUN]"), _bold_cyan(repr(name)))
            summary.iscsi_targets_created += 1
            id_map[target["id"]] = target["id"]  # placeholder — enables downstream dry-run remapping
            continue

        try:
            r = await client.call("iscsi.target.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            id_map[target["id"]] = r["id"]
            summary.iscsi_targets_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_targets_failed += 1
            summary.errors.append(f"iSCSI target {name!r}: {exc}")
|
||
|
||
|
||
async def _migrate_iscsi_targetextents(
    client: TrueNASClient,
    targetextents: list[dict],
    dry_run: bool,
    summary: Summary,
    target_id_map: dict[int, int],
    extent_id_map: dict[int, int],
) -> None:
    """Create target↔extent associations using the previously built id maps.

    Associations whose target or extent could not be remapped are recorded as
    failures and skipped.  An association is also skipped when the destination
    already has the same (target, lunid) pair.
    """
    log.info("Querying existing iSCSI target-extent associations on destination …")
    try:
        existing = await client.call("iscsi.targetextent.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI target-extents: {exc}"
        log.error(msg); summary.errors.append(msg); return

    # A destination target can hold one extent per LUN id — that pair is the key.
    existing_keys = {(te["target"], te["lunid"]) for te in existing}
    log.info(" Destination has %d existing association(s).", len(existing))

    for te in targetextents:
        src_tid = te["target"]
        src_eid = te["extent"]
        lunid = te["lunid"]
        dest_tid = target_id_map.get(src_tid)
        dest_eid = extent_id_map.get(src_eid)

        if dest_tid is None or dest_eid is None:
            # A referenced target/extent failed to migrate earlier; report which.
            missing = []
            if dest_tid is None: missing.append(f"target id={src_tid}")
            if dest_eid is None: missing.append(f"extent id={src_eid}")
            msg = f"iSCSI target-extent (lun {lunid}): cannot remap {', '.join(missing)}"
            log.error(" %s", msg)
            summary.iscsi_targetextents_failed += 1
            summary.errors.append(msg)
            continue

        log.info("%s iSCSI target↔extent target=%s lun=%s extent=%s",
                 _bold("──"), dest_tid, lunid, dest_eid)

        if (dest_tid, lunid) in existing_keys:
            log.info(" %s – target+LUN already assigned on destination.", _yellow("SKIP"))
            summary.iscsi_targetextents_skipped += 1
            continue

        payload = {"target": dest_tid, "lunid": lunid, "extent": dest_eid}
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would associate target=%s lun=%s extent=%s",
                     _cyan("[DRY RUN]"), dest_tid, lunid, dest_eid)
            summary.iscsi_targetextents_created += 1
            continue

        try:
            r = await client.call("iscsi.targetextent.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            summary.iscsi_targetextents_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_targetextents_failed += 1
            summary.errors.append(
                f"iSCSI target-extent (target={dest_tid}, lun={lunid}): {exc}")
|
||
|
||
|
||
# ─────────────────────────────────────────────────────────────────────────────
|
||
# iSCSI pre-migration utilities
|
||
# ─────────────────────────────────────────────────────────────────────────────
|
||
|
||
async def query_existing_iscsi(client: TrueNASClient) -> dict:
    """
    Query all iSCSI object lists from the destination.

    Returns a dict with keys: extents, initiators, portals, targets,
    targetextents.  Each value is a list of objects; a failed or empty query
    yields an empty list for that key.
    """
    methods = {
        "extents": "iscsi.extent.query",
        "initiators": "iscsi.initiator.query",
        "portals": "iscsi.portal.query",
        "targets": "iscsi.target.query",
        "targetextents": "iscsi.targetextent.query",
    }
    snapshot: dict = {}
    for key, method in methods.items():
        try:
            objects = await client.call(method)
        except RuntimeError:
            objects = None
        snapshot[key] = objects or []
    return snapshot
|
||
|
||
|
||
async def clear_iscsi_config(client: TrueNASClient) -> None:
    """
    Delete all iSCSI configuration from the destination in safe dependency order:
    target-extents → targets → portals → initiators → extents.

    Query and delete failures are logged as warnings and never abort the sweep.
    """
    steps = (
        ("iscsi.targetextent.query", "iscsi.targetextent.delete", "target-extent"),
        ("iscsi.target.query", "iscsi.target.delete", "target"),
        ("iscsi.portal.query", "iscsi.portal.delete", "portal"),
        ("iscsi.initiator.query", "iscsi.initiator.delete", "initiator"),
        ("iscsi.extent.query", "iscsi.extent.delete", "extent"),
    )
    for query_method, delete_method, label in steps:
        try:
            found = await client.call(query_method) or []
        except RuntimeError as exc:
            log.warning(" Could not query iSCSI %ss: %s", label, exc)
            continue
        for item in found:
            try:
                await client.call(delete_method, [item["id"]])
                log.info(" Deleted iSCSI %s id=%s", label, item["id"])
            except RuntimeError as exc:
                log.warning(" Failed to delete iSCSI %s id=%s: %s", label, item["id"], exc)
|
||
|
||
|
||
# ─────────────────────────────────────────────────────────────────────────────
|
||
# Public iSCSI entry point
|
||
# ─────────────────────────────────────────────────────────────────────────────
|
||
|
||
async def migrate_iscsi(
    client: TrueNASClient,
    iscsi: dict,
    dry_run: bool,
    summary: Summary,
) -> None:
    """Migrate the complete iSCSI configuration from the archive.

    Objects are created in dependency order — extents, initiators, portals,
    targets, then target-extent links — with per-kind id maps threaded between
    the passes so cross-references are remapped to destination ids.
    """
    if not iscsi:
        log.info("No iSCSI configuration found in archive.")
        return

    portals = iscsi.get("portals", [])
    initiators = iscsi.get("initiators", [])
    targets = iscsi.get("targets", [])
    extents = iscsi.get("extents", [])
    targetextents = iscsi.get("targetextents", [])

    summary.iscsi_extents_found = len(extents)
    summary.iscsi_initiators_found = len(initiators)
    summary.iscsi_portals_found = len(portals)
    summary.iscsi_targets_found = len(targets)
    summary.iscsi_targetextents_found = len(targetextents)

    # The basename (IQN prefix) is informational only; it is never pushed.
    gc = iscsi.get("global_config", {})
    if gc.get("basename"):
        log.info(" Source iSCSI basename: %s (destination keeps its own)", gc["basename"])

    if not any([portals, initiators, targets, extents, targetextents]):
        log.info("iSCSI configuration is empty – nothing to migrate.")
        return

    # Source-id → destination-id maps, filled in by the sub-routines below.
    extent_id_map: dict[int, int] = {}
    initiator_id_map: dict[int, int] = {}
    portal_id_map: dict[int, int] = {}
    target_id_map: dict[int, int] = {}

    # Dependency order: extents and initiators first (no deps), then portals,
    # then targets (need portal + initiator maps), then target-extent links.
    await _migrate_iscsi_extents(client, extents, dry_run, summary, extent_id_map)
    await _migrate_iscsi_initiators(client, initiators, dry_run, summary, initiator_id_map)
    await _migrate_iscsi_portals(client, portals, dry_run, summary, portal_id_map)
    await _migrate_iscsi_targets(
        client, targets, dry_run, summary, target_id_map, portal_id_map, initiator_id_map)
    await _migrate_iscsi_targetextents(
        client, targetextents, dry_run, summary, target_id_map, extent_id_map)
|