Add iSCSI migration support (SCALE archive source)
archive.py: - Add iscsi_config.json to _CANDIDATES and _KEYWORDS - parse_archive() now extracts portals, initiators, targets, extents, targetextents, and global_config into archive["iscsi"] migrate.py: - Add payload builders for all five iSCSI object types (extents, initiators, portals, targets, target-extents) - Add migrate_iscsi() which creates objects in dependency order (extents+initiators first, then portals, then targets, then target-extent associations) and tracks old→new ID mappings at each step so downstream references are correctly remapped - Conflict detection: extents/targets by name, portals by IP set, initiators by comment, target-extents by target+LUN combination - Skipped objects still populate the ID map so dependent objects can remap their references correctly summary.py: - Add per-sub-type found/created/skipped/failed counters for iSCSI - iSCSI rows appear in the report only when iSCSI data was processed cli.py: - Add _prompt_iscsi_portals() — shows source IPs per portal and prompts for destination IPs in-place; supports MPIO (space-separated) - Wizard scope menu gains option 3 (iSCSI); portal prompt fires automatically after archive parse when iSCSI portals are present - run() wires in migrate_iscsi() - argparse --migrate now accepts "iscsi" as a valid choice Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -42,18 +42,24 @@ _CANDIDATES: dict[str, list[str]] = {
|
||||
"ixdiagnose/plugins/Sharing/sharing.nfs.query.json",
|
||||
"ixdiagnose/NFS/sharing.nfs.query.json",
|
||||
],
|
||||
"iscsi": [
|
||||
"ixdiagnose/plugins/iscsi/iscsi_config.json",
|
||||
"ixdiagnose/plugins/ISCSI/iscsi_config.json",
|
||||
],
|
||||
}
|
||||
|
||||
# When a candidate file bundles multiple datasets, pull out the right sub-key.
|
||||
_KEY_WITHIN_FILE: dict[str, str] = {
|
||||
"smb_shares": "sharing_smb_query",
|
||||
"nfs_shares": "sharing_nfs_query",
|
||||
# "iscsi" intentionally omitted — iscsi_config.json is used as-is
|
||||
}
|
||||
|
||||
# Keyword fragments for heuristic fallback scan (SCALE archives only)
|
||||
_KEYWORDS: dict[str, list[str]] = {
|
||||
"smb_shares": ["sharing.smb", "smb_share", "sharing/smb", "smb_info"],
|
||||
"nfs_shares": ["sharing.nfs", "nfs_share", "sharing/nfs", "nfs_config"],
|
||||
"iscsi": ["iscsi_config", "iscsi/iscsi"],
|
||||
}
|
||||
|
||||
# Presence of this path prefix identifies a TrueNAS CORE archive.
|
||||
@@ -251,13 +257,14 @@ def _open_source_tar(tar_path: str):
|
||||
|
||||
def parse_archive(tar_path: str) -> dict[str, Any]:
|
||||
"""
|
||||
Extract SMB shares and NFS shares from the debug archive.
|
||||
Returns: {"smb_shares": list, "nfs_shares": list}
|
||||
Extract SMB shares, NFS shares, and iSCSI configuration from the debug archive.
|
||||
Returns: {"smb_shares": list, "nfs_shares": list, "iscsi": dict}
|
||||
"""
|
||||
log.info("Opening archive: %s", tar_path)
|
||||
result: dict[str, Any] = {
|
||||
"smb_shares": [],
|
||||
"nfs_shares": [],
|
||||
"iscsi": {},
|
||||
}
|
||||
|
||||
try:
|
||||
@@ -288,14 +295,33 @@ def parse_archive(tar_path: str) -> dict[str, Any]:
|
||||
result[key] = v
|
||||
break
|
||||
|
||||
# iSCSI — combined dict file, not a bare list
|
||||
iscsi_raw = _find_data(tf, members, "iscsi")
|
||||
if iscsi_raw and isinstance(iscsi_raw, dict):
|
||||
result["iscsi"] = {
|
||||
"global_config": iscsi_raw.get("global_config", {}),
|
||||
"portals": iscsi_raw.get("portals", []),
|
||||
"initiators": iscsi_raw.get("initiators", []),
|
||||
"targets": iscsi_raw.get("targets", []),
|
||||
"extents": iscsi_raw.get("extents", []),
|
||||
"targetextents": iscsi_raw.get("targetextents", []),
|
||||
}
|
||||
elif iscsi_raw is not None:
|
||||
log.warning(" iscsi → unexpected format (expected dict)")
|
||||
|
||||
except (tarfile.TarError, OSError) as exc:
|
||||
log.error("Failed to open archive: %s", exc)
|
||||
sys.exit(1)
|
||||
|
||||
iscsi = result["iscsi"]
|
||||
log.info(
|
||||
"Parsed: %d SMB share(s), %d NFS share(s)",
|
||||
"Parsed: %d SMB share(s), %d NFS share(s), "
|
||||
"iSCSI: %d target(s) / %d extent(s) / %d portal(s)",
|
||||
len(result["smb_shares"]),
|
||||
len(result["nfs_shares"]),
|
||||
len(iscsi.get("targets", [])),
|
||||
len(iscsi.get("extents", [])),
|
||||
len(iscsi.get("portals", [])),
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
@@ -57,7 +57,7 @@ from .archive import parse_archive, list_archive_and_exit
|
||||
from .client import TrueNASClient, check_dataset_paths, create_missing_datasets
|
||||
from .colors import log, _bold, _bold_cyan, _bold_red, _bold_yellow, _cyan, _dim, _green, _yellow
|
||||
from .csv_source import parse_csv_sources
|
||||
from .migrate import migrate_smb_shares, migrate_nfs_shares
|
||||
from .migrate import migrate_smb_shares, migrate_nfs_shares, migrate_iscsi
|
||||
from .summary import Summary
|
||||
|
||||
|
||||
@@ -103,6 +103,10 @@ async def run(
|
||||
await migrate_nfs_shares(
|
||||
client, archive["nfs_shares"], args.dry_run, summary)
|
||||
|
||||
if "iscsi" in migrate_set:
|
||||
await migrate_iscsi(
|
||||
client, archive.get("iscsi", {}), args.dry_run, summary)
|
||||
|
||||
if args.dry_run and summary.paths_to_create:
|
||||
summary.missing_datasets = await check_dataset_paths(
|
||||
client, summary.paths_to_create,
|
||||
@@ -156,6 +160,40 @@ def _prompt_csv_path(share_type: str) -> Optional[str]:
|
||||
print(f" {_bold_red('File not found:')} {raw}")
|
||||
|
||||
|
||||
|
||||
def _prompt_iscsi_portals(iscsi: dict) -> None:
    """Interactively remap portal listen addresses for the destination system.

    Mutates ``iscsi["portals"]`` in place: each portal's source IPs are shown
    and the user may type replacement IP(s) — space-separated for MPIO — plus
    a port.  An empty answer keeps that portal's source addresses unchanged.
    """
    portals = iscsi.get("portals", [])
    if not portals:
        return

    print(f"\n {_bold('iSCSI Portal Configuration')}")
    print(f" {_dim('Portal IP addresses are unique per system and must be updated.')}")
    print(f" {_dim('For MPIO, enter multiple IPs separated by spaces.')}")

    for portal in portals:
        note = portal.get("comment", "")
        addresses = portal.get("listen", [])
        shown = " ".join(f"{a['ip']}:{a['port']}" for a in addresses)
        # Port of the first source listener is offered as the default answer.
        fallback_port = addresses[0]["port"] if addresses else 3260

        label = f"Portal {portal['id']}"
        if note:
            label += f" ({note!r})"
        print(f"\n {_bold(label)}")
        print(f" {_dim('Source IP(s):')} {shown}")

        answer = _prompt(" Destination IP(s)").strip()
        if not answer:
            print(f" {_yellow('⚠')} No IPs entered — keeping source IPs.")
            continue

        port_answer = _prompt(" Port", default=str(fallback_port))
        # Non-numeric input silently falls back to the source port.
        port = int(port_answer) if port_answer.isdigit() else fallback_port
        new_ips = answer.split()
        portal["listen"] = [{"ip": ip, "port": port} for ip in new_ips]
        print(f" {_green('✓')} Portal: {', '.join(f'{ip}:{port}' for ip in new_ips)}")
    print()
|
||||
|
||||
|
||||
def _select_shares(shares: list[dict], share_type: str) -> list[dict]:
|
||||
"""
|
||||
Display a numbered list of *shares* and return only those the user selects.
|
||||
@@ -298,22 +336,27 @@ def interactive_mode() -> None:
|
||||
print(f"\n {_bold('What to migrate?')}")
|
||||
print(f" {_cyan('1.')} SMB shares")
|
||||
print(f" {_cyan('2.')} NFS shares")
|
||||
print(f" {_cyan('3.')} iSCSI (targets, extents, portals, initiator groups)")
|
||||
sel_raw = _prompt(
|
||||
"Selection (space-separated numbers, Enter for all)", default="1 2"
|
||||
"Selection (space-separated numbers, Enter for all)", default="1 2 3"
|
||||
)
|
||||
_sel_map = {"1": "smb", "2": "nfs"}
|
||||
_sel_map = {"1": "smb", "2": "nfs", "3": "iscsi"}
|
||||
migrate = []
|
||||
for tok in sel_raw.split():
|
||||
if tok in _sel_map and _sel_map[tok] not in migrate:
|
||||
migrate.append(_sel_map[tok])
|
||||
if not migrate:
|
||||
migrate = ["smb", "nfs"]
|
||||
migrate = ["smb", "nfs", "iscsi"]
|
||||
|
||||
# ── Parse archive ───────────────────────────────────────────────────────
|
||||
print()
|
||||
archive_data = parse_archive(str(chosen))
|
||||
extra_ns = {"debug_tar": str(chosen)}
|
||||
|
||||
# ── iSCSI portal IP remapping ────────────────────────────────────────
|
||||
if "iscsi" in migrate and archive_data.get("iscsi", {}).get("portals"):
|
||||
_prompt_iscsi_portals(archive_data["iscsi"])
|
||||
|
||||
# ── Select individual shares (common) ──────────────────────────────────────
|
||||
if "smb" in migrate and archive_data["smb_shares"]:
|
||||
archive_data["smb_shares"] = _select_shares(archive_data["smb_shares"], "SMB")
|
||||
@@ -447,11 +490,11 @@ def main() -> None:
|
||||
p.add_argument(
|
||||
"--migrate",
|
||||
nargs="+",
|
||||
choices=["smb", "nfs"],
|
||||
default=["smb", "nfs"],
|
||||
choices=["smb", "nfs", "iscsi"],
|
||||
default=["smb", "nfs", "iscsi"],
|
||||
metavar="TYPE",
|
||||
help=(
|
||||
"What to migrate. Choices: smb nfs "
|
||||
"What to migrate. Choices: smb nfs iscsi "
|
||||
"(default: both). Example: --migrate smb"
|
||||
),
|
||||
)
|
||||
|
||||
@@ -152,3 +152,374 @@ async def migrate_nfs_shares(
|
||||
log.error(" %s: %s", _bold_red("FAILED"), exc)
|
||||
summary.nfs_failed += 1
|
||||
summary.errors.append(f"NFS share {path!r}: {exc}")
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
# iSCSI payload builders
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
_ISCSI_EXTENT_READONLY = frozenset({"id", "serial", "naa", "vendor", "locked"})
|
||||
_ISCSI_INITIATOR_READONLY = frozenset({"id"})
|
||||
_ISCSI_PORTAL_READONLY = frozenset({"id", "tag"})
|
||||
_ISCSI_TARGET_READONLY = frozenset({"id", "rel_tgt_id", "iscsi_parameters"})
|
||||
|
||||
|
||||
def _iscsi_extent_payload(extent: dict) -> dict:
|
||||
payload = {k: v for k, v in extent.items() if k not in _ISCSI_EXTENT_READONLY}
|
||||
if extent.get("type") == "DISK":
|
||||
payload.pop("path", None) # derived from disk on DISK extents
|
||||
payload.pop("filesize", None) # only meaningful for FILE extents
|
||||
else:
|
||||
payload.pop("disk", None)
|
||||
return payload
|
||||
|
||||
|
||||
def _iscsi_initiator_payload(initiator: dict) -> dict:
|
||||
return {k: v for k, v in initiator.items() if k not in _ISCSI_INITIATOR_READONLY}
|
||||
|
||||
|
||||
def _iscsi_portal_payload(portal: dict) -> dict:
|
||||
return {k: v for k, v in portal.items() if k not in _ISCSI_PORTAL_READONLY}
|
||||
|
||||
|
||||
def _iscsi_target_payload(
|
||||
target: dict,
|
||||
portal_id_map: dict[int, int],
|
||||
initiator_id_map: dict[int, int],
|
||||
) -> dict:
|
||||
payload = {k: v for k, v in target.items() if k not in _ISCSI_TARGET_READONLY}
|
||||
payload["groups"] = [
|
||||
{**g,
|
||||
"portal": portal_id_map.get(g["portal"], g["portal"]),
|
||||
"initiator": initiator_id_map.get(g.get("initiator"), g.get("initiator"))}
|
||||
for g in target.get("groups", [])
|
||||
]
|
||||
return payload
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
# iSCSI migration sub-routines
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
async def _migrate_iscsi_extents(
    client: TrueNASClient,
    extents: list[dict],
    dry_run: bool,
    summary: Summary,
    id_map: dict[int, int],
) -> None:
    """Create source *extents* on the destination, recording old→new IDs.

    Extents already present on the destination (matched case-insensitively by
    name) are skipped but still entered into *id_map* so that dependent
    target-extent associations can remap their references.
    """
    log.info("Querying existing iSCSI extents on destination …")
    try:
        existing = await client.call("iscsi.extent.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI extents: {exc}"
        log.error(msg)
        summary.errors.append(msg)
        return

    existing_by_name = {e["name"].lower(): e for e in existing}
    log.info(" Destination has %d existing extent(s).", len(existing_by_name))

    for ext in extents:
        name = ext.get("name", "<unnamed>")
        log.info("%s iSCSI extent %s", _bold("──"), _bold_cyan(repr(name)))

        if name.lower() in existing_by_name:
            log.info(" %s – already exists on destination.", _yellow("SKIP"))
            # Map onto the existing destination extent so links still resolve.
            id_map[ext["id"]] = existing_by_name[name.lower()]["id"]
            summary.iscsi_extents_skipped += 1
            continue

        payload = _iscsi_extent_payload(ext)
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would create extent %s → %s",
                     _cyan("[DRY RUN]"), _bold_cyan(repr(name)),
                     ext.get("disk") or ext.get("path"))
            # Map the source ID onto itself so downstream dry-run steps
            # (target-extent links) can still resolve their references;
            # previously the map was left empty and they reported failures.
            id_map[ext["id"]] = ext["id"]
            summary.iscsi_extents_created += 1
            continue

        try:
            r = await client.call("iscsi.extent.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            id_map[ext["id"]] = r["id"]
            summary.iscsi_extents_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_extents_failed += 1
            summary.errors.append(f"iSCSI extent {name!r}: {exc}")
|
||||
|
||||
|
||||
async def _migrate_iscsi_initiators(
    client: TrueNASClient,
    initiators: list[dict],
    dry_run: bool,
    summary: Summary,
    id_map: dict[int, int],
) -> None:
    """Create source *initiators* (initiator groups) on the destination.

    Groups whose non-empty comment matches an existing destination group
    (case-insensitively) are skipped but still entered into *id_map* so that
    targets referencing them can remap.  Groups with an empty comment are
    always created — there is no other stable identity to match on.
    """
    log.info("Querying existing iSCSI initiator groups on destination …")
    try:
        existing = await client.call("iscsi.initiator.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI initiators: {exc}"
        log.error(msg)
        summary.errors.append(msg)
        return

    existing_by_comment = {e["comment"].lower(): e for e in existing if e.get("comment")}
    log.info(" Destination has %d existing initiator group(s).", len(existing))

    for init in initiators:
        comment = init.get("comment", "")
        log.info("%s iSCSI initiator group %s", _bold("──"), _bold_cyan(repr(comment)))

        if comment and comment.lower() in existing_by_comment:
            log.info(" %s – comment already exists on destination.", _yellow("SKIP"))
            id_map[init["id"]] = existing_by_comment[comment.lower()]["id"]
            summary.iscsi_initiators_skipped += 1
            continue

        payload = _iscsi_initiator_payload(init)
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would create initiator group %s",
                     _cyan("[DRY RUN]"), _bold_cyan(repr(comment)))
            # Self-map so dry-run target creation can still remap references;
            # previously the map was left empty and targets reported failures.
            id_map[init["id"]] = init["id"]
            summary.iscsi_initiators_created += 1
            continue

        try:
            r = await client.call("iscsi.initiator.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            id_map[init["id"]] = r["id"]
            summary.iscsi_initiators_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_initiators_failed += 1
            summary.errors.append(f"iSCSI initiator {comment!r}: {exc}")
|
||||
|
||||
|
||||
async def _migrate_iscsi_portals(
    client: TrueNASClient,
    portals: list[dict],
    dry_run: bool,
    summary: Summary,
    id_map: dict[int, int],
) -> None:
    """Create source *portals* on the destination, recording old→new IDs.

    A portal whose exact (ip, port) listen set already exists on the
    destination is skipped but still entered into *id_map* so that targets
    referencing it can remap.
    """
    log.info("Querying existing iSCSI portals on destination …")
    try:
        existing = await client.call("iscsi.portal.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI portals: {exc}"
        log.error(msg)
        summary.errors.append(msg)
        return

    def _ip_set(p: dict) -> frozenset:
        # Identity of a portal for conflict detection: its set of listen pairs.
        return frozenset((l["ip"], l["port"]) for l in p.get("listen", []))

    existing_ip_sets = [(_ip_set(p), p["id"]) for p in existing]
    log.info(" Destination has %d existing portal(s).", len(existing))

    for portal in portals:
        comment = portal.get("comment", "")
        ips = ", ".join(f"{l['ip']}:{l['port']}" for l in portal.get("listen", []))
        log.info("%s iSCSI portal %s [%s]", _bold("──"), _bold_cyan(repr(comment)), ips)

        my_ips = _ip_set(portal)
        match = next((eid for eips, eid in existing_ip_sets if eips == my_ips), None)
        if match is not None:
            log.info(" %s – IP set already exists on destination.", _yellow("SKIP"))
            id_map[portal["id"]] = match
            summary.iscsi_portals_skipped += 1
            continue

        payload = _iscsi_portal_payload(portal)
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would create portal %s → %s",
                     _cyan("[DRY RUN]"), _bold_cyan(repr(comment)), ips)
            # Self-map so dry-run target creation can still remap references;
            # previously the map was left empty and targets reported failures.
            id_map[portal["id"]] = portal["id"]
            summary.iscsi_portals_created += 1
            continue

        try:
            r = await client.call("iscsi.portal.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            id_map[portal["id"]] = r["id"]
            summary.iscsi_portals_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_portals_failed += 1
            summary.errors.append(f"iSCSI portal {comment!r}: {exc}")
|
||||
|
||||
|
||||
async def _migrate_iscsi_targets(
    client: TrueNASClient,
    targets: list[dict],
    dry_run: bool,
    summary: Summary,
    id_map: dict[int, int],
    portal_id_map: dict[int, int],
    initiator_id_map: dict[int, int],
) -> None:
    """Create source *targets* on the destination, remapping group references.

    Targets already present (matched case-insensitively by name) are skipped
    but still entered into *id_map*.  A target whose groups reference a portal
    or initiator that failed to migrate is counted as failed and not created.
    A ``None`` initiator in a group means "allow all initiators" and needs no
    remapping.
    """
    log.info("Querying existing iSCSI targets on destination …")
    try:
        existing = await client.call("iscsi.target.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI targets: {exc}"
        log.error(msg)
        summary.errors.append(msg)
        return

    existing_by_name = {t["name"].lower(): t for t in existing}
    log.info(" Destination has %d existing target(s).", len(existing_by_name))

    for target in targets:
        name = target.get("name", "<unnamed>")
        log.info("%s iSCSI target %s", _bold("──"), _bold_cyan(repr(name)))

        if name.lower() in existing_by_name:
            log.info(" %s – already exists on destination.", _yellow("SKIP"))
            id_map[target["id"]] = existing_by_name[name.lower()]["id"]
            summary.iscsi_targets_skipped += 1
            continue

        # Verify all referenced portals and initiators were successfully mapped.
        missing = []
        for g in target.get("groups", []):
            if g.get("portal") not in portal_id_map:
                missing.append(f"portal id={g.get('portal')}")
            init_ref = g.get("initiator")
            # None initiator = "allow all" — valid, nothing to remap.
            if init_ref is not None and init_ref not in initiator_id_map:
                missing.append(f"initiator id={init_ref}")
        if missing:
            msg = f"iSCSI target {name!r}: cannot remap {', '.join(missing)}"
            log.error(" %s: %s", _bold_red("SKIP"), msg)
            summary.iscsi_targets_failed += 1
            summary.errors.append(msg)
            continue

        payload = _iscsi_target_payload(target, portal_id_map, initiator_id_map)
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would create target %s",
                     _cyan("[DRY RUN]"), _bold_cyan(repr(name)))
            # Self-map so dry-run target-extent linking can still resolve;
            # previously the map was left empty and links reported failures.
            id_map[target["id"]] = target["id"]
            summary.iscsi_targets_created += 1
            continue

        try:
            r = await client.call("iscsi.target.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            id_map[target["id"]] = r["id"]
            summary.iscsi_targets_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_targets_failed += 1
            summary.errors.append(f"iSCSI target {name!r}: {exc}")
|
||||
|
||||
|
||||
async def _migrate_iscsi_targetextents(
    client: TrueNASClient,
    targetextents: list[dict],
    dry_run: bool,
    summary: Summary,
    target_id_map: dict[int, int],
    extent_id_map: dict[int, int],
) -> None:
    """Recreate target↔extent associations using the remapped destination IDs.

    Links whose target or extent was not migrated are counted as failures; a
    (target, LUN) pair already assigned on the destination is skipped.
    """
    log.info("Querying existing iSCSI target-extent associations on destination …")
    try:
        existing = await client.call("iscsi.targetextent.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI target-extents: {exc}"
        log.error(msg)
        summary.errors.append(msg)
        return

    existing_keys = {(te["target"], te["lunid"]) for te in existing}
    log.info(" Destination has %d existing association(s).", len(existing))

    for link in targetextents:
        src_tid = link["target"]
        src_eid = link["extent"]
        lunid = link["lunid"]
        dest_tid = target_id_map.get(src_tid)
        dest_eid = extent_id_map.get(src_eid)

        unmapped = []
        if dest_tid is None:
            unmapped.append(f"target id={src_tid}")
        if dest_eid is None:
            unmapped.append(f"extent id={src_eid}")
        if unmapped:
            msg = f"iSCSI target-extent (lun {lunid}): cannot remap {', '.join(unmapped)}"
            log.error(" %s", msg)
            summary.iscsi_targetextents_failed += 1
            summary.errors.append(msg)
            continue

        log.info("%s iSCSI target↔extent target=%s lun=%s extent=%s",
                 _bold("──"), dest_tid, lunid, dest_eid)

        if (dest_tid, lunid) in existing_keys:
            log.info(" %s – target+LUN already assigned on destination.", _yellow("SKIP"))
            summary.iscsi_targetextents_skipped += 1
            continue

        payload = {"target": dest_tid, "lunid": lunid, "extent": dest_eid}
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would associate target=%s lun=%s extent=%s",
                     _cyan("[DRY RUN]"), dest_tid, lunid, dest_eid)
            summary.iscsi_targetextents_created += 1
            continue

        try:
            created = await client.call("iscsi.targetextent.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), created.get("id"))
            summary.iscsi_targetextents_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_targetextents_failed += 1
            summary.errors.append(
                f"iSCSI target-extent (target={dest_tid}, lun={lunid}): {exc}")
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
# Public iSCSI entry point
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
async def migrate_iscsi(
    client: TrueNASClient,
    iscsi: dict,
    dry_run: bool,
    summary: Summary,
) -> None:
    """Migrate the full iSCSI configuration in dependency order.

    Order: extents and initiators first (no dependencies), then portals, then
    targets (which need the portal and initiator ID maps), and finally the
    target↔extent associations.  Per-sub-type "found" counters on *summary*
    are populated before any work begins.
    """
    if not iscsi:
        log.info("No iSCSI configuration found in archive.")
        return

    extents = iscsi.get("extents", [])
    initiators = iscsi.get("initiators", [])
    portals = iscsi.get("portals", [])
    targets = iscsi.get("targets", [])
    targetextents = iscsi.get("targetextents", [])

    summary.iscsi_extents_found = len(extents)
    summary.iscsi_initiators_found = len(initiators)
    summary.iscsi_portals_found = len(portals)
    summary.iscsi_targets_found = len(targets)
    summary.iscsi_targetextents_found = len(targetextents)

    global_cfg = iscsi.get("global_config", {})
    if global_cfg.get("basename"):
        # Informational only — the destination's own basename is left alone.
        log.info(" Source iSCSI basename: %s (destination keeps its own)", global_cfg["basename"])

    if not any([portals, initiators, targets, extents, targetextents]):
        log.info("iSCSI configuration is empty – nothing to migrate.")
        return

    # Old→new ID maps, filled in by each sub-step and consumed by later ones.
    extent_id_map: dict[int, int] = {}
    initiator_id_map: dict[int, int] = {}
    portal_id_map: dict[int, int] = {}
    target_id_map: dict[int, int] = {}

    await _migrate_iscsi_extents(client, extents, dry_run, summary, extent_id_map)
    await _migrate_iscsi_initiators(client, initiators, dry_run, summary, initiator_id_map)
    await _migrate_iscsi_portals(client, portals, dry_run, summary, portal_id_map)
    await _migrate_iscsi_targets(
        client, targets, dry_run, summary, target_id_map, portal_id_map, initiator_id_map)
    await _migrate_iscsi_targetextents(
        client, targetextents, dry_run, summary, target_id_map, extent_id_map)
|
||||
|
||||
@@ -21,12 +21,43 @@ class Summary:
|
||||
nfs_skipped: int = 0
|
||||
nfs_failed: int = 0
|
||||
|
||||
iscsi_extents_found: int = 0
|
||||
iscsi_extents_created: int = 0
|
||||
iscsi_extents_skipped: int = 0
|
||||
iscsi_extents_failed: int = 0
|
||||
|
||||
iscsi_initiators_found: int = 0
|
||||
iscsi_initiators_created: int = 0
|
||||
iscsi_initiators_skipped: int = 0
|
||||
iscsi_initiators_failed: int = 0
|
||||
|
||||
iscsi_portals_found: int = 0
|
||||
iscsi_portals_created: int = 0
|
||||
iscsi_portals_skipped: int = 0
|
||||
iscsi_portals_failed: int = 0
|
||||
|
||||
iscsi_targets_found: int = 0
|
||||
iscsi_targets_created: int = 0
|
||||
iscsi_targets_skipped: int = 0
|
||||
iscsi_targets_failed: int = 0
|
||||
|
||||
iscsi_targetextents_found: int = 0
|
||||
iscsi_targetextents_created: int = 0
|
||||
iscsi_targetextents_skipped: int = 0
|
||||
iscsi_targetextents_failed: int = 0
|
||||
|
||||
errors: list[str] = field(default_factory=list)
|
||||
|
||||
# Populated during dry-run dataset safety checks
|
||||
paths_to_create: list[str] = field(default_factory=list)
|
||||
missing_datasets: list[str] = field(default_factory=list)
|
||||
|
||||
@property
def _has_iscsi(self) -> bool:
    """True when any iSCSI object type was present in the source archive."""
    counts = (
        self.iscsi_extents_found,
        self.iscsi_initiators_found,
        self.iscsi_portals_found,
        self.iscsi_targets_found,
        self.iscsi_targetextents_found,
    )
    return any(counts)
|
||||
|
||||
def report(self) -> str:
|
||||
w = 60
|
||||
|
||||
@@ -34,6 +65,14 @@ class Summary:
|
||||
s = f"{label}={n}"
|
||||
return color_fn(s) if n > 0 else _dim(s)
|
||||
|
||||
def _iscsi_val(found, created, skipped, failed) -> str:
|
||||
return (
|
||||
f"{_dim('found=' + str(found))} "
|
||||
f"{_stat('created', created, _bold_green)} "
|
||||
f"{_stat('skipped', skipped, _yellow)} "
|
||||
f"{_stat('failed', failed, _bold_red)}"
|
||||
)
|
||||
|
||||
smb_val = (
|
||||
f"{_dim('found=' + str(self.smb_found))} "
|
||||
f"{_stat('created', self.smb_created, _bold_green)} "
|
||||
@@ -67,11 +106,30 @@ class Summary:
|
||||
f"{tl}{hr}{tr}",
|
||||
title_row,
|
||||
f"{ml}{hr}{mr}",
|
||||
row("SMB shares : ", smb_val),
|
||||
row("NFS shares : ", nfs_val),
|
||||
f"{bl}{hr}{br}",
|
||||
row("SMB shares : ", smb_val),
|
||||
row("NFS shares : ", nfs_val),
|
||||
]
|
||||
|
||||
if self._has_iscsi:
|
||||
lines.append(f"{ml}{hr}{mr}")
|
||||
lines.append(row("iSCSI extents : ", _iscsi_val(
|
||||
self.iscsi_extents_found, self.iscsi_extents_created,
|
||||
self.iscsi_extents_skipped, self.iscsi_extents_failed)))
|
||||
lines.append(row("iSCSI initiators: ", _iscsi_val(
|
||||
self.iscsi_initiators_found, self.iscsi_initiators_created,
|
||||
self.iscsi_initiators_skipped, self.iscsi_initiators_failed)))
|
||||
lines.append(row("iSCSI portals : ", _iscsi_val(
|
||||
self.iscsi_portals_found, self.iscsi_portals_created,
|
||||
self.iscsi_portals_skipped, self.iscsi_portals_failed)))
|
||||
lines.append(row("iSCSI targets : ", _iscsi_val(
|
||||
self.iscsi_targets_found, self.iscsi_targets_created,
|
||||
self.iscsi_targets_skipped, self.iscsi_targets_failed)))
|
||||
lines.append(row("iSCSI tgt↔ext : ", _iscsi_val(
|
||||
self.iscsi_targetextents_found, self.iscsi_targetextents_created,
|
||||
self.iscsi_targetextents_skipped, self.iscsi_targetextents_failed)))
|
||||
|
||||
lines.append(f"{bl}{hr}{br}")
|
||||
|
||||
if self.errors:
|
||||
lines.append(f"\n {_bold_red(str(len(self.errors)) + ' error(s):')} ")
|
||||
for e in self.errors:
|
||||
|
||||
Reference in New Issue
Block a user