Merge devel: iSCSI support, audit wizard, CSV improvements, bug fixes
This commit is contained in:
@@ -42,18 +42,24 @@ _CANDIDATES: dict[str, list[str]] = {
|
||||
"ixdiagnose/plugins/Sharing/sharing.nfs.query.json",
|
||||
"ixdiagnose/NFS/sharing.nfs.query.json",
|
||||
],
|
||||
"iscsi": [
|
||||
"ixdiagnose/plugins/iscsi/iscsi_config.json",
|
||||
"ixdiagnose/plugins/ISCSI/iscsi_config.json",
|
||||
],
|
||||
}
|
||||
|
||||
# When a candidate file bundles multiple datasets, pull out the right sub-key.
|
||||
_KEY_WITHIN_FILE: dict[str, str] = {
|
||||
"smb_shares": "sharing_smb_query",
|
||||
"nfs_shares": "sharing_nfs_query",
|
||||
# "iscsi" intentionally omitted — iscsi_config.json is used as-is
|
||||
}
|
||||
|
||||
# Keyword fragments for heuristic fallback scan (SCALE archives only)
|
||||
_KEYWORDS: dict[str, list[str]] = {
|
||||
"smb_shares": ["sharing.smb", "smb_share", "sharing/smb", "smb_info"],
|
||||
"nfs_shares": ["sharing.nfs", "nfs_share", "sharing/nfs", "nfs_config"],
|
||||
"iscsi": ["iscsi_config", "iscsi/iscsi"],
|
||||
}
|
||||
|
||||
# Presence of this path prefix identifies a TrueNAS CORE archive.
|
||||
@@ -251,13 +257,14 @@ def _open_source_tar(tar_path: str):
|
||||
|
||||
def parse_archive(tar_path: str) -> dict[str, Any]:
|
||||
"""
|
||||
Extract SMB shares and NFS shares from the debug archive.
|
||||
Returns: {"smb_shares": list, "nfs_shares": list}
|
||||
Extract SMB shares, NFS shares, and iSCSI configuration from the debug archive.
|
||||
Returns: {"smb_shares": list, "nfs_shares": list, "iscsi": dict}
|
||||
"""
|
||||
log.info("Opening archive: %s", tar_path)
|
||||
result: dict[str, Any] = {
|
||||
"smb_shares": [],
|
||||
"nfs_shares": [],
|
||||
"iscsi": {},
|
||||
}
|
||||
|
||||
try:
|
||||
@@ -288,14 +295,33 @@ def parse_archive(tar_path: str) -> dict[str, Any]:
|
||||
result[key] = v
|
||||
break
|
||||
|
||||
# iSCSI — combined dict file, not a bare list
|
||||
iscsi_raw = _find_data(tf, members, "iscsi")
|
||||
if iscsi_raw and isinstance(iscsi_raw, dict):
|
||||
result["iscsi"] = {
|
||||
"global_config": iscsi_raw.get("global_config", {}),
|
||||
"portals": iscsi_raw.get("portals", []),
|
||||
"initiators": iscsi_raw.get("initiators", []),
|
||||
"targets": iscsi_raw.get("targets", []),
|
||||
"extents": iscsi_raw.get("extents", []),
|
||||
"targetextents": iscsi_raw.get("targetextents", []),
|
||||
}
|
||||
elif iscsi_raw is not None:
|
||||
log.warning(" iscsi → unexpected format (expected dict)")
|
||||
|
||||
except (tarfile.TarError, OSError) as exc:
|
||||
log.error("Failed to open archive: %s", exc)
|
||||
sys.exit(1)
|
||||
|
||||
iscsi = result["iscsi"]
|
||||
log.info(
|
||||
"Parsed: %d SMB share(s), %d NFS share(s)",
|
||||
"Parsed: %d SMB share(s), %d NFS share(s), "
|
||||
"iSCSI: %d target(s) / %d extent(s) / %d portal(s)",
|
||||
len(result["smb_shares"]),
|
||||
len(result["nfs_shares"]),
|
||||
len(iscsi.get("targets", [])),
|
||||
len(iscsi.get("extents", [])),
|
||||
len(iscsi.get("portals", [])),
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
@@ -54,10 +54,16 @@ from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from .archive import parse_archive, list_archive_and_exit
|
||||
from .client import TrueNASClient, check_dataset_paths, create_missing_datasets
|
||||
from .colors import log, _bold, _bold_cyan, _bold_red, _bold_yellow, _cyan, _dim, _green, _yellow
|
||||
from .client import (
|
||||
TrueNASClient,
|
||||
check_dataset_paths, create_missing_datasets,
|
||||
check_iscsi_zvols, create_missing_zvols,
|
||||
query_destination_inventory,
|
||||
delete_smb_shares, delete_nfs_exports, delete_zvols, delete_datasets,
|
||||
)
|
||||
from .colors import log, _bold, _bold_cyan, _bold_green, _bold_red, _bold_yellow, _cyan, _dim, _green, _yellow
|
||||
from .csv_source import parse_csv_sources
|
||||
from .migrate import migrate_smb_shares, migrate_nfs_shares
|
||||
from .migrate import migrate_smb_shares, migrate_nfs_shares, migrate_iscsi, query_existing_iscsi, clear_iscsi_config
|
||||
from .summary import Summary
|
||||
|
||||
|
||||
@@ -103,11 +109,20 @@ async def run(
|
||||
await migrate_nfs_shares(
|
||||
client, archive["nfs_shares"], args.dry_run, summary)
|
||||
|
||||
if "iscsi" in migrate_set:
|
||||
await migrate_iscsi(
|
||||
client, archive.get("iscsi", {}), args.dry_run, summary)
|
||||
|
||||
if args.dry_run and summary.paths_to_create:
|
||||
summary.missing_datasets = await check_dataset_paths(
|
||||
client, summary.paths_to_create,
|
||||
)
|
||||
|
||||
if args.dry_run and summary.zvols_to_check:
|
||||
summary.missing_zvols = await check_iscsi_zvols(
|
||||
client, summary.zvols_to_check,
|
||||
)
|
||||
|
||||
return summary
|
||||
|
||||
|
||||
@@ -115,6 +130,32 @@ async def run(
|
||||
# Interactive wizard helpers
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
def _parse_size(s: str) -> int:
|
||||
"""Parse a human-friendly size string to bytes. E.g. '100G', '500GiB', '1T'."""
|
||||
s = s.strip().upper()
|
||||
for suffix, mult in [
|
||||
("PIB", 1 << 50), ("PB", 1 << 50), ("P", 1 << 50),
|
||||
("TIB", 1 << 40), ("TB", 1 << 40), ("T", 1 << 40),
|
||||
("GIB", 1 << 30), ("GB", 1 << 30), ("G", 1 << 30),
|
||||
("MIB", 1 << 20), ("MB", 1 << 20), ("M", 1 << 20),
|
||||
("KIB", 1 << 10), ("KB", 1 << 10), ("K", 1 << 10),
|
||||
]:
|
||||
if s.endswith(suffix):
|
||||
try:
|
||||
return int(float(s[:-len(suffix)]) * mult)
|
||||
except ValueError:
|
||||
pass
|
||||
return int(s) # plain bytes
|
||||
|
||||
|
||||
def _fmt_bytes(n: int) -> str:
|
||||
"""Format a byte count as a human-readable string."""
|
||||
for suffix, div in [("TiB", 1 << 40), ("GiB", 1 << 30), ("MiB", 1 << 20), ("KiB", 1 << 10)]:
|
||||
if n >= div:
|
||||
return f"{n / div:.1f} {suffix}"
|
||||
return f"{n} B"
|
||||
|
||||
|
||||
def _find_debug_archives(directory: str = ".") -> list[Path]:
|
||||
"""Return sorted list of TrueNAS debug archives found in *directory*."""
|
||||
patterns = ("*.tgz", "*.tar.gz", "*.tar", "*.txz", "*.tar.xz")
|
||||
@@ -156,6 +197,84 @@ def _prompt_csv_path(share_type: str) -> Optional[str]:
|
||||
print(f" {_bold_red('File not found:')} {raw}")
|
||||
|
||||
|
||||
|
||||
def _prompt_iscsi_portals(iscsi: dict) -> None:
    """Walk each portal in *iscsi* and prompt for destination IPs, rewriting them in-place."""
    portals = iscsi.get("portals", [])
    if not portals:
        return

    print(f"\n {_bold('iSCSI Portal Configuration')}")
    print(f" {_dim('Portal IP addresses are unique per system and must be updated.')}")
    print(f" {_dim('For MPIO, enter multiple IPs separated by spaces.')}")

    for entry in portals:
        note = entry.get("comment", "")
        current = " ".join(str(addr["ip"]) for addr in entry.get("listen", []))

        title = f"Portal {entry['id']}" + (f" ({note!r})" if note else "")
        print(f"\n {_bold(title)}")
        print(f" {_dim('Source IP(s):')} {current}")

        answer = _prompt(" Destination IP(s)").strip()
        if not answer:
            # Leave the portal untouched when the operator gives no input.
            print(f" {_yellow('⚠')} No IPs entered — keeping source IPs.")
            continue

        new_ips = answer.split()
        # API listen entries carry only the IP; port is configured globally.
        entry["listen"] = [{"ip": addr} for addr in new_ips]
        print(f" {_green('✓')} Portal: {', '.join(new_ips)}")
    print()
|
||||
|
||||
|
||||
def _prompt_clear_existing_iscsi(host: str, port: int, api_key: str) -> None:
    """
    Check whether the destination already has iSCSI configuration.
    If so, summarise what exists and offer to remove it before migration.

    Opens short-lived connections (one to query, optionally one to clear)
    rather than reusing a caller-held client, since this runs from the
    synchronous wizard flow via asyncio.run().
    """
    async def _check():
        async with TrueNASClient(host=host, port=port, api_key=api_key, verify_ssl=False) as client:
            return await query_existing_iscsi(client)

    existing = asyncio.run(_check())
    # Per-category object counts; total decides whether we prompt at all.
    counts = {k: len(v) for k, v in existing.items()}
    total = sum(counts.values())
    if total == 0:
        return

    print(f"\n {_bold_yellow('WARNING:')} Destination already has iSCSI configuration:")
    labels = [
        ("extents", "extent(s)"),
        ("initiators", "initiator group(s)"),
        ("portals", "portal(s)"),
        ("targets", "target(s)"),
        ("targetextents", "target-extent association(s)"),
    ]
    for key, label in labels:
        # NOTE(review): assumes query_existing_iscsi always returns all five
        # keys — confirm, otherwise this raises KeyError.
        n = counts[key]
        if n:
            print(f" • {n} {label}")
    print()
    print(f" {_dim('Keep existing: new objects will be skipped if conflicts are detected.')}")
    print(f" {_dim('Remove existing: ALL iSCSI config will be deleted before migration.')}")
    print()

    # Default is the safe choice: keep everything.
    raw = _prompt(" [K]eep existing / [R]emove all existing iSCSI config", default="K")
    if raw.strip().lower().startswith("r"):
        # Second, explicit confirmation before destructive removal.
        if _confirm(f" Remove ALL {total} iSCSI object(s) from {host}?"):
            async def _clear():
                async with TrueNASClient(host=host, port=port, api_key=api_key, verify_ssl=False) as client:
                    await clear_iscsi_config(client)
            print()
            asyncio.run(_clear())
            print(f" {_bold_cyan('✓')} iSCSI configuration cleared.\n")
        else:
            print(f" {_yellow('–')} Removal cancelled — keeping existing config.\n")
    else:
        print(f" {_dim('Keeping existing iSCSI configuration.')}\n")
|
||||
|
||||
|
||||
def _select_shares(shares: list[dict], share_type: str) -> list[dict]:
|
||||
"""
|
||||
Display a numbered list of *shares* and return only those the user selects.
|
||||
@@ -207,6 +326,272 @@ def _select_shares(shares: list[dict], share_type: str) -> list[dict]:
|
||||
return selected
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
# Destination audit wizard
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
def _print_inventory_report(host: str, inv: dict) -> None:
    """Print a structured inventory of all configuration on the destination.

    *inv* is the dict produced by query_destination_inventory(); every
    section degrades to a dimmed "none" line when its list is empty.
    Purely presentational — never mutates *inv*.
    """
    smb = inv.get("smb_shares", [])
    nfs = inv.get("nfs_exports", [])
    ds = inv.get("datasets", [])
    zvols = inv.get("zvols", [])
    ext = inv.get("iscsi_extents", [])
    init = inv.get("iscsi_initiators", [])
    portals = inv.get("iscsi_portals", [])
    tgt = inv.get("iscsi_targets", [])
    te = inv.get("iscsi_targetextents", [])

    # Boxed header sized to the host name.
    header = f"DESTINATION INVENTORY: {host}"
    rule = _bold_cyan("─" * (len(header) + 4))
    print(f"\n {rule}")
    print(f" {_bold_cyan('│')} {_bold(header)} {_bold_cyan('│')}")
    print(f" {rule}")

    # SMB
    if smb:
        print(f"\n {_bold(f'SMB Shares ({len(smb)})')}")
        for s in smb:
            name = s.get("name", "<unnamed>")
            path = s.get("path", "")
            enabled = "" if s.get("enabled", True) else _dim(" [disabled]")
            print(f" {_cyan('•')} {name:<24} {_dim(path)}{enabled}")
    else:
        print(f"\n {_dim('SMB Shares: none')}")

    # NFS
    if nfs:
        print(f"\n {_bold(f'NFS Exports ({len(nfs)})')}")
        for n in nfs:
            path = n.get("path", "<no path>")
            enabled = "" if n.get("enabled", True) else _dim(" [disabled]")
            print(f" {_cyan('•')} {path}{enabled}")
    else:
        print(f"\n {_dim('NFS Exports: none')}")

    # iSCSI
    has_iscsi = any([ext, init, portals, tgt, te])
    if has_iscsi:
        iscsi_total = len(ext) + len(init) + len(portals) + len(tgt) + len(te)
        print(f"\n {_bold(f'iSCSI Configuration ({iscsi_total} objects)')}")
        if ext:
            print(f" {_bold('Extents')} ({len(ext)}):")
            for e in ext:
                kind = e.get("type", "")
                # DISK extents carry 'disk', FILE extents carry 'path'.
                backing = e.get("disk") or e.get("path") or ""
                print(f" {_cyan('•')} {e.get('name', '<unnamed>'):<22} {_dim(kind + ' ' + backing)}")
        if init:
            print(f" {_bold('Initiator Groups')} ({len(init)}):")
            for i in init:
                print(f" {_cyan('•')} {i.get('comment') or '<no comment>'}")
        if portals:
            print(f" {_bold('Portals')} ({len(portals)}):")
            for p in portals:
                ips = ", ".join(l["ip"] for l in p.get("listen", []))
                comment = p.get("comment", "")
                label = f"{comment} " if comment else ""
                print(f" {_cyan('•')} {label}{_dim(ips)}")
        if tgt:
            print(f" {_bold('Targets')} ({len(tgt)}):")
            for t in tgt:
                print(f" {_cyan('•')} {t.get('name', '<unnamed>')}")
        if te:
            # Associations are only counted; individual rows add no detail.
            print(f" {_bold('Target-Extent Associations')} ({len(te)})")
    else:
        print(f"\n {_dim('iSCSI: none')}")

    # Datasets
    if ds:
        print(f"\n {_bold(f'Datasets ({len(ds)})')}")
        # Cap the listing at 20 datasets to keep the report readable.
        for d in ds[:20]:
            name = d.get("id", "")
            is_root = "/" not in name
            # 'used' appears to be a middleware dict with a 'parsed' byte
            # count — non-dict values fall back to 0 (TODO confirm schema).
            used_raw = d.get("used", {})
            used_bytes = used_raw.get("parsed", 0) if isinstance(used_raw, dict) else 0
            used_str = f" {_fmt_bytes(used_bytes)} used" if used_bytes else ""
            root_tag = _dim(" (pool root)") if is_root else ""
            print(f" {_cyan('•')} {name}{root_tag}{_dim(used_str)}")
        if len(ds) > 20:
            print(f" {_dim(f'… and {len(ds) - 20} more')}")
    else:
        print(f"\n {_dim('Datasets: none')}")

    # Zvols
    if zvols:
        print(f"\n {_bold(f'Zvols ({len(zvols)})')}")
        for z in zvols:
            name = z.get("id", "")
            # Same dict-with-'parsed' convention as 'used' above.
            vs_raw = z.get("volsize", {})
            vs = vs_raw.get("parsed", 0) if isinstance(vs_raw, dict) else 0
            vs_str = f" {_fmt_bytes(vs)}" if vs else ""
            print(f" {_cyan('•')} {name}{_dim(vs_str)}")
    else:
        print(f"\n {_dim('Zvols: none')}")

    print()
|
||||
|
||||
|
||||
def _run_audit_wizard(host: str, port: int, api_key: str) -> None:
    """Query destination inventory and offer to selectively delete configuration.

    Flow: query → report → per-category deletion prompts (iSCSI, SMB, NFS,
    zvols, datasets) → single final confirmation → execute in dependency
    order. Zvol and dataset deletion each require typing the literal word
    DELETE, since they destroy data irreversibly.
    """
    print(f"\n Querying {_bold(host)} …\n")

    async def _query() -> dict:
        async with TrueNASClient(host=host, port=port, api_key=api_key, verify_ssl=False) as client:
            return await query_destination_inventory(client)

    try:
        inv = asyncio.run(_query())
    # NOTE(review): assumes connection failures surface as OSError /
    # PermissionError from the client — confirm; other exception types
    # would propagate to the caller.
    except (OSError, PermissionError) as exc:
        print(f" {_bold_red('Connection failed:')} {exc}\n")
        return

    _print_inventory_report(host, inv)

    total = sum(len(v) for v in inv.values())
    if total == 0:
        print(f" {_dim('The destination appears to have no configuration.')}\n")
        return

    # ── Deletion options ───────────────────────────────────────────────────────
    print(f" {_bold_yellow('─' * 60)}")
    print(f" {_bold_yellow('DELETION OPTIONS')}")
    print(f" {_dim('You may choose to delete some or all of the configuration above.')}")
    print(f" {_bold_red('WARNING: Deleted datasets and zvols cannot be recovered — all data will be permanently lost.')}")
    print()

    has_iscsi = any(inv[k] for k in ("iscsi_extents", "iscsi_initiators",
                                     "iscsi_portals", "iscsi_targets",
                                     "iscsi_targetextents"))
    iscsi_count = sum(len(inv[k]) for k in ("iscsi_extents", "iscsi_initiators",
                                            "iscsi_portals", "iscsi_targets",
                                            "iscsi_targetextents"))
    # Pool roots (no '/' in the id) are never offered for deletion.
    deletable_ds = [d for d in inv["datasets"] if "/" in d["id"]]

    del_iscsi = False
    del_smb = False
    del_nfs = False
    del_zvols = False
    del_datasets = False

    # iSCSI (must go first — uses zvols as backing)
    if has_iscsi:
        del_iscsi = _confirm(
            f" Delete ALL iSCSI configuration ({iscsi_count} objects)?"
        )

    # SMB
    if inv["smb_shares"]:
        del_smb = _confirm(
            f" Delete all {len(inv['smb_shares'])} SMB share(s)?"
        )

    # NFS
    if inv["nfs_exports"]:
        del_nfs = _confirm(
            f" Delete all {len(inv['nfs_exports'])} NFS export(s)?"
        )

    # Zvols — require explicit confirmation phrase
    if inv["zvols"]:
        print()
        print(f" {_bold_red('⚠ DATA DESTRUCTION WARNING ⚠')}")
        print(f" Deleting zvols PERMANENTLY DESTROYS all data stored in them.")
        print(f" This action cannot be undone. Affected zvols:")
        for z in inv["zvols"]:
            print(f" {_yellow('•')} {z['id']}")
        print()
        raw = _prompt(
            f" Type DELETE to confirm deletion of {len(inv['zvols'])} zvol(s),"
            " or Enter to skip"
        ).strip()
        # Only the exact (case-sensitive) word DELETE enables the action.
        del_zvols = (raw == "DELETE")
        if raw and raw != "DELETE":
            print(f" {_dim('Confirmation not matched — zvols will not be deleted.')}")
        print()

    # Datasets — strongest warning
    if deletable_ds:
        print(f" {_bold_red('⚠⚠ CRITICAL DATA DESTRUCTION WARNING ⚠⚠')}")
        print(f" Deleting datasets PERMANENTLY DESTROYS ALL DATA including all files,")
        print(f" snapshots, and child datasets. Pool root datasets (e.g. 'tank') will")
        print(f" be skipped, but all child datasets WILL be deleted.")
        print(f" This action cannot be undone. {len(deletable_ds)} dataset(s) would be deleted.")
        print()
        raw = _prompt(
            f" Type DELETE to confirm deletion of {len(deletable_ds)} dataset(s),"
            " or Enter to skip"
        ).strip()
        del_datasets = (raw == "DELETE")
        if raw and raw != "DELETE":
            print(f" {_dim('Confirmation not matched — datasets will not be deleted.')}")
        print()

    # ── Nothing selected ───────────────────────────────────────────────────────
    if not any([del_iscsi, del_smb, del_nfs, del_zvols, del_datasets]):
        print(f" {_dim('Nothing selected for deletion. No changes made.')}\n")
        return

    # ── Final confirmation ─────────────────────────────────────────────────────
    # Recap every selected deletion before the single yes/no gate.
    print(f" {_bold_yellow('─' * 60)}")
    print(f" {_bold_yellow('PENDING DELETIONS on ' + host + ':')}")
    if del_iscsi:
        print(f" {_yellow('•')} ALL iSCSI configuration ({iscsi_count} objects)")
    if del_smb:
        print(f" {_yellow('•')} {len(inv['smb_shares'])} SMB share(s)")
    if del_nfs:
        print(f" {_yellow('•')} {len(inv['nfs_exports'])} NFS export(s)")
    if del_zvols:
        print(f" {_bold_red('•')} {len(inv['zvols'])} zvol(s) "
              f"{_bold_red('⚠ ALL DATA WILL BE PERMANENTLY DESTROYED')}")
    if del_datasets:
        print(f" {_bold_red('•')} {len(deletable_ds)} dataset(s) "
              f"{_bold_red('⚠ ALL DATA WILL BE PERMANENTLY DESTROYED')}")
    print()
    print(f" {_bold_red('THIS ACTION CANNOT BE UNDONE.')}")
    print()

    if not _confirm(f" Proceed with all selected deletions on {host}?"):
        print(f" {_dim('Aborted – no changes made.')}\n")
        return

    # ── Execute ────────────────────────────────────────────────────────────────
    print()

    async def _execute() -> None:
        # One connection for all deletions; order matters — iSCSI first
        # (references zvols), datasets last (parents of everything).
        async with TrueNASClient(host=host, port=port, api_key=api_key, verify_ssl=False) as client:
            if del_iscsi:
                print(f" Removing iSCSI configuration …")
                await clear_iscsi_config(client)
                print(f" {_bold_green('✓')} iSCSI configuration removed.")

            if del_smb:
                print(f" Removing SMB shares …")
                ok, fail = await delete_smb_shares(client, inv["smb_shares"])
                suffix = f" {_bold_red(str(fail) + ' failed')}" if fail else ""
                print(f" {_bold_green('✓')} {ok} deleted{suffix}")

            if del_nfs:
                print(f" Removing NFS exports …")
                ok, fail = await delete_nfs_exports(client, inv["nfs_exports"])
                suffix = f" {_bold_red(str(fail) + ' failed')}" if fail else ""
                print(f" {_bold_green('✓')} {ok} deleted{suffix}")

            if del_zvols:
                print(f" Removing zvols …")
                ok, fail = await delete_zvols(client, inv["zvols"])
                suffix = f" {_bold_red(str(fail) + ' failed')}" if fail else ""
                print(f" {_bold_green('✓')} {ok} deleted{suffix}")

            if del_datasets:
                print(f" Removing datasets …")
                ok, fail = await delete_datasets(client, deletable_ds)
                suffix = f" {_bold_red(str(fail) + ' failed')}" if fail else ""
                print(f" {_bold_green('✓')} {ok} deleted{suffix}")

    asyncio.run(_execute())
    print(f"\n {_bold_cyan('Done.')}\n")
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
# Interactive wizard
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
@@ -218,6 +603,33 @@ def interactive_mode() -> None:
|
||||
f" {_dim('Migrate SMB/NFS shares to a live TrueNAS system.')}\n"
|
||||
)
|
||||
|
||||
# 0 ── Top-level action ─────────────────────────────────────────────────────
|
||||
print(f" {_bold('What would you like to do?')}")
|
||||
print(f" {_cyan('1.')} Migrate configuration to a destination system")
|
||||
print(f" {_cyan('2.')} Audit destination system (view and manage existing config)")
|
||||
action_raw = _prompt(" Select [1/2]", default="1")
|
||||
print()
|
||||
|
||||
if action_raw.strip() == "2":
|
||||
audit_host = ""
|
||||
while not audit_host:
|
||||
audit_host = _prompt("Destination TrueNAS host or IP")
|
||||
if not audit_host:
|
||||
print(" Host is required.")
|
||||
audit_port_raw = _prompt("WebSocket port", default="443")
|
||||
audit_port = int(audit_port_raw) if audit_port_raw.isdigit() else 443
|
||||
audit_key = ""
|
||||
while not audit_key:
|
||||
try:
|
||||
audit_key = getpass.getpass("API key (input hidden): ").strip()
|
||||
except (EOFError, KeyboardInterrupt):
|
||||
print()
|
||||
sys.exit(0)
|
||||
if not audit_key:
|
||||
print(" API key is required.")
|
||||
_run_audit_wizard(audit_host, audit_port, audit_key)
|
||||
return
|
||||
|
||||
# 1 ── Source type ──────────────────────────────────────────────────────────
|
||||
print(f" {_bold('Source type:')}")
|
||||
print(f" {_cyan('1.')} TrueNAS debug archive (.tgz / .tar)")
|
||||
@@ -298,22 +710,31 @@ def interactive_mode() -> None:
|
||||
print(f"\n {_bold('What to migrate?')}")
|
||||
print(f" {_cyan('1.')} SMB shares")
|
||||
print(f" {_cyan('2.')} NFS shares")
|
||||
print(f" {_cyan('3.')} iSCSI (targets, extents, portals, initiator groups)")
|
||||
sel_raw = _prompt(
|
||||
"Selection (space-separated numbers, Enter for all)", default="1 2"
|
||||
"Selection (space-separated numbers, Enter for all)", default="1 2 3"
|
||||
)
|
||||
_sel_map = {"1": "smb", "2": "nfs"}
|
||||
_sel_map = {"1": "smb", "2": "nfs", "3": "iscsi"}
|
||||
migrate = []
|
||||
for tok in sel_raw.split():
|
||||
if tok in _sel_map and _sel_map[tok] not in migrate:
|
||||
migrate.append(_sel_map[tok])
|
||||
if not migrate:
|
||||
migrate = ["smb", "nfs"]
|
||||
migrate = ["smb", "nfs", "iscsi"]
|
||||
|
||||
# ── Parse archive ───────────────────────────────────────────────────────
|
||||
print()
|
||||
archive_data = parse_archive(str(chosen))
|
||||
extra_ns = {"debug_tar": str(chosen)}
|
||||
|
||||
# ── iSCSI portal IP remapping ────────────────────────────────────────
|
||||
if "iscsi" in migrate and archive_data.get("iscsi", {}).get("portals"):
|
||||
_prompt_iscsi_portals(archive_data["iscsi"])
|
||||
|
||||
# ── iSCSI pre-migration check ────────────────────────────────────────
|
||||
if "iscsi" in migrate:
|
||||
_prompt_clear_existing_iscsi(host, port, api_key)
|
||||
|
||||
# ── Select individual shares (common) ──────────────────────────────────────
|
||||
if "smb" in migrate and archive_data["smb_shares"]:
|
||||
archive_data["smb_shares"] = _select_shares(archive_data["smb_shares"], "SMB")
|
||||
@@ -362,6 +783,35 @@ def interactive_mode() -> None:
|
||||
))
|
||||
print()
|
||||
|
||||
if dry_summary.missing_zvols:
|
||||
print(f"\n {len(dry_summary.missing_zvols)} zvol(s) need to be created for iSCSI extents:")
|
||||
for z in dry_summary.missing_zvols:
|
||||
print(f" • {z}")
|
||||
print()
|
||||
if _confirm(f"Create these {len(dry_summary.missing_zvols)} zvol(s) on {host} now?"):
|
||||
zvol_sizes: dict[str, int] = {}
|
||||
for zvol in dry_summary.missing_zvols:
|
||||
while True:
|
||||
raw = _prompt(f" Size for {zvol} (e.g. 100G, 500GiB, 1T)").strip()
|
||||
if not raw:
|
||||
print(" Size is required.")
|
||||
continue
|
||||
try:
|
||||
zvol_sizes[zvol] = _parse_size(raw)
|
||||
break
|
||||
except ValueError:
|
||||
print(f" Cannot parse {raw!r} — try a format like 100G or 500GiB.")
|
||||
asyncio.run(create_missing_zvols(
|
||||
host=host, port=port, api_key=api_key, zvols=zvol_sizes,
|
||||
))
|
||||
print()
|
||||
print(f" Re-running dry run to verify zvol creation …")
|
||||
print()
|
||||
dry_summary = asyncio.run(
|
||||
run(argparse.Namespace(**base_ns, dry_run=True), archive_data)
|
||||
)
|
||||
print(dry_summary.report())
|
||||
|
||||
if not _confirm(f"Apply these changes to {host}?"):
|
||||
print("Aborted – no changes made.")
|
||||
sys.exit(0)
|
||||
@@ -447,11 +897,11 @@ def main() -> None:
|
||||
p.add_argument(
|
||||
"--migrate",
|
||||
nargs="+",
|
||||
choices=["smb", "nfs"],
|
||||
default=["smb", "nfs"],
|
||||
choices=["smb", "nfs", "iscsi"],
|
||||
default=["smb", "nfs", "iscsi"],
|
||||
metavar="TYPE",
|
||||
help=(
|
||||
"What to migrate. Choices: smb nfs "
|
||||
"What to migrate. Choices: smb nfs iscsi "
|
||||
"(default: both). Example: --migrate smb"
|
||||
),
|
||||
)
|
||||
|
||||
@@ -306,3 +306,183 @@ async def create_missing_datasets(
|
||||
) as client:
|
||||
for path in paths:
|
||||
await create_dataset(client, path)
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
# iSCSI zvol utilities
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
async def check_iscsi_zvols(
    client: TrueNASClient,
    zvol_names: list[str],
) -> list[str]:
    """
    Return the subset of *zvol_names* that do not exist on the destination.

    Names are the dataset path without the leading 'zvol/' prefix
    (e.g. 'tank/VMWARE001'). Returns [] when the query itself fails.
    """
    if not zvol_names:
        return []

    wanted = sorted(set(zvol_names))
    log.info("Checking %d zvol(s) against destination datasets …", len(wanted))
    try:
        volumes = await client.call(
            "pool.dataset.query", [[["type", "=", "VOLUME"]]]
        ) or []
    except RuntimeError as exc:
        # Best-effort check: a failed query must not abort the caller.
        log.warning("Could not query zvols (skipping check): %s", exc)
        return []

    present = {vol["name"] for vol in volumes}
    absent = [name for name in wanted if name not in present]
    if not absent:
        log.info(" All iSCSI zvols exist on destination.")
        return absent
    for name in absent:
        log.warning(" MISSING zvol: %s", name)
    return absent
|
||||
|
||||
|
||||
async def create_zvol(
    client: TrueNASClient,
    name: str,
    volsize: int,
) -> bool:
    """
    Create a ZFS volume (zvol) on the destination.

    *name* is the dataset path (e.g. 'tank/VMWARE001'); *volsize* is the
    size in bytes. Returns True on success, False on failure.
    """
    log.info("Creating zvol %r (%d bytes) …", name, volsize)
    spec = {
        "name": name,
        "type": "VOLUME",
        "volsize": volsize,
    }
    try:
        await client.call("pool.dataset.create", [spec])
    except RuntimeError as exc:
        log.error(" Failed to create zvol %r: %s", name, exc)
        return False
    log.info(" Created: %s", name)
    return True
|
||||
|
||||
|
||||
async def create_missing_zvols(
    host: str,
    port: int,
    api_key: str,
    zvols: dict[str, int],
    verify_ssl: bool = False,
) -> None:
    """Open a fresh connection and create zvols from {name: volsize_bytes}."""
    connection_args = dict(host=host, port=port, api_key=api_key, verify_ssl=verify_ssl)
    async with TrueNASClient(**connection_args) as client:
        # Sequential creation; each failure is logged inside create_zvol.
        for zvol_name, size_bytes in zvols.items():
            await create_zvol(client, zvol_name, size_bytes)
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
# Destination inventory
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
async def query_destination_inventory(client: TrueNASClient) -> dict[str, list]:
    """
    Query all current configuration from the destination system.

    Returns a dict with keys: smb_shares, nfs_exports, datasets, zvols,
    iscsi_extents, iscsi_initiators, iscsi_portals, iscsi_targets,
    iscsi_targetextents. Each value is a list (empty when the query fails
    or returns nothing).
    """
    queries = (
        ("smb_shares", "sharing.smb.query", None),
        ("nfs_exports", "sharing.nfs.query", None),
        ("datasets", "pool.dataset.query", [[["type", "=", "FILESYSTEM"]]]),
        ("zvols", "pool.dataset.query", [[["type", "=", "VOLUME"]]]),
        ("iscsi_extents", "iscsi.extent.query", None),
        ("iscsi_initiators", "iscsi.initiator.query", None),
        ("iscsi_portals", "iscsi.portal.query", None),
        ("iscsi_targets", "iscsi.target.query", None),
        ("iscsi_targetextents", "iscsi.targetextent.query", None),
    )
    inventory: dict[str, list] = {}
    for key, method, params in queries:
        try:
            rows = await client.call(method, params)
        except RuntimeError as exc:
            # A single failed query degrades to an empty category rather
            # than aborting the whole inventory.
            log.warning("Could not query %s: %s", key, exc)
            rows = None
        inventory[key] = rows or []
    return inventory
|
||||
|
||||
|
||||
async def delete_smb_shares(
    client: TrueNASClient, shares: list[dict]
) -> tuple[int, int]:
    """Delete SMB shares by ID. Returns (deleted, failed)."""
    ok_count = err_count = 0
    for share in shares:
        try:
            await client.call("sharing.smb.delete", [share["id"]])
        except RuntimeError as exc:
            log.error(" Failed to delete SMB share %r: %s", share.get("name"), exc)
            err_count += 1
        else:
            log.info(" Deleted SMB share %r", share.get("name"))
            ok_count += 1
    return ok_count, err_count
|
||||
|
||||
|
||||
async def delete_nfs_exports(
    client: TrueNASClient, exports: list[dict]
) -> tuple[int, int]:
    """Delete NFS exports by ID. Returns (deleted, failed)."""
    ok_count = err_count = 0
    for export in exports:
        try:
            await client.call("sharing.nfs.delete", [export["id"]])
        except RuntimeError as exc:
            log.error(" Failed to delete NFS export %r: %s", export.get("path"), exc)
            err_count += 1
        else:
            log.info(" Deleted NFS export %r", export.get("path"))
            ok_count += 1
    return ok_count, err_count
|
||||
|
||||
|
||||
async def delete_zvols(
    client: TrueNASClient, zvols: list[dict]
) -> tuple[int, int]:
    """Delete zvols (recursive dataset delete). Returns (deleted, failed)."""
    ok_count = err_count = 0
    for zvol in zvols:
        ident = zvol["id"]
        try:
            await client.call("pool.dataset.delete", [ident, {"recursive": True}])
        except RuntimeError as exc:
            log.error(" Failed to delete zvol %r: %s", ident, exc)
            err_count += 1
        else:
            log.info(" Deleted zvol %r", ident)
            ok_count += 1
    return ok_count, err_count
|
||||
|
||||
|
||||
async def delete_datasets(
    client: TrueNASClient, datasets: list[dict]
) -> tuple[int, int]:
    """
    Delete datasets deepest-first to avoid parent-before-child errors.
    Skips pool root datasets (no '/' in the dataset name).
    Returns (deleted, failed).
    """
    # Filter out pool roots, then order by depth (most '/' segments first).
    children = [d for d in datasets if "/" in d["id"]]
    children.sort(key=lambda d: d["id"].count("/"), reverse=True)

    removed = errors = 0
    for entry in children:
        try:
            await client.call("pool.dataset.delete", [entry["id"], {"recursive": True}])
        except RuntimeError as exc:
            log.error(" Failed to delete dataset %r: %s", entry["id"], exc)
            errors += 1
        else:
            log.info(" Deleted dataset %r", entry["id"])
            removed += 1
    return removed, errors
|
||||
|
||||
@@ -14,7 +14,7 @@ from .summary import Summary
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
# Read-only / server-generated fields that must NOT be sent on create/update.
# (The first, narrower binding of this constant was dead code — it was
# immediately overwritten — so only the effective definition is kept.)
_SMB_SHARE_READONLY = frozenset({"id", "locked", "path_local"})
|
||||
|
||||
# CORE SMB share fields that do not exist in the SCALE API
|
||||
_SMB_SHARE_CORE_EXTRAS = frozenset({
|
||||
@@ -152,3 +152,433 @@ async def migrate_nfs_shares(
|
||||
log.error(" %s: %s", _bold_red("FAILED"), exc)
|
||||
summary.nfs_failed += 1
|
||||
summary.errors.append(f"NFS share {path!r}: {exc}")
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
# iSCSI payload builders
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
# Fields stripped from source objects before re-creating them on the
# destination: these are assigned by the server and rejected on create.
_ISCSI_EXTENT_READONLY = frozenset({"id", "serial", "naa", "vendor", "locked"})
_ISCSI_INITIATOR_READONLY = frozenset({"id"})
_ISCSI_PORTAL_READONLY = frozenset({"id", "tag"})
_ISCSI_TARGET_READONLY = frozenset({"id", "rel_tgt_id", "iscsi_parameters"})
|
||||
|
||||
|
||||
def _iscsi_extent_payload(extent: dict) -> dict:
    """Build an iscsi.extent.create payload from a source extent record."""
    payload = {key: val for key, val in extent.items()
               if key not in _ISCSI_EXTENT_READONLY}
    if extent.get("type") == "DISK":
        # "path" is derived from the disk on DISK extents, and "filesize"
        # is only meaningful for FILE extents — drop both.
        for irrelevant in ("path", "filesize"):
            payload.pop(irrelevant, None)
    else:
        payload.pop("disk", None)
    return payload
|
||||
|
||||
|
||||
def _iscsi_initiator_payload(initiator: dict) -> dict:
    """Return the initiator record minus server-generated fields."""
    payload = dict(initiator)
    for readonly in _ISCSI_INITIATOR_READONLY:
        payload.pop(readonly, None)
    return payload
|
||||
|
||||
|
||||
def _iscsi_portal_payload(portal: dict) -> dict:
    """Build an iscsi.portal.create payload from a source portal record."""
    payload = {key: val for key, val in portal.items()
               if key not in _ISCSI_PORTAL_READONLY}
    # The API only accepts {"ip": "..."} in listen entries — port is a global setting
    listen_entries = payload.get("listen", [])
    payload["listen"] = [{"ip": entry["ip"]} for entry in listen_entries]
    return payload
|
||||
|
||||
|
||||
def _iscsi_target_payload(
    target: dict,
    portal_id_map: dict[int, int],
    initiator_id_map: dict[int, int],
) -> dict:
    """
    Build an iscsi.target.create payload, remapping each group's portal and
    initiator IDs from source values to destination values via the maps.
    Unknown IDs pass through unchanged (the caller validates them first).
    """
    payload = {key: val for key, val in target.items()
               if key not in _ISCSI_TARGET_READONLY}
    remapped_groups = []
    for group in target.get("groups", []):
        entry = dict(group)
        entry["portal"] = portal_id_map.get(group["portal"], group["portal"])
        src_initiator = group.get("initiator")
        entry["initiator"] = initiator_id_map.get(src_initiator, src_initiator)
        remapped_groups.append(entry)
    payload["groups"] = remapped_groups
    return payload
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
# iSCSI migration sub-routines
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
async def _migrate_iscsi_extents(
    client: TrueNASClient,
    extents: list[dict],
    dry_run: bool,
    summary: Summary,
    id_map: dict[int, int],
) -> None:
    """
    Create the source extents on the destination, skipping duplicates by name.

    Fills *id_map* with source-extent-id → destination-extent-id entries for
    every extent that was created (or already existed); updates the extent
    counters and error list on *summary*. In dry-run mode nothing is created,
    but id_map still gets placeholder entries so later passes can proceed.
    """
    log.info("Querying existing iSCSI extents on destination …")
    try:
        existing = await client.call("iscsi.extent.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI extents: {exc}"
        log.error(msg); summary.errors.append(msg); return

    # Extent names are compared case-insensitively for duplicate detection.
    existing_by_name = {e["name"].lower(): e for e in existing}
    log.info(" Destination has %d existing extent(s).", len(existing_by_name))

    for ext in extents:
        name = ext.get("name", "<unnamed>")
        log.info("%s iSCSI extent %s", _bold("──"), _bold_cyan(repr(name)))

        if name.lower() in existing_by_name:
            log.info(" %s – already exists on destination.", _yellow("SKIP"))
            id_map[ext["id"]] = existing_by_name[name.lower()]["id"]
            summary.iscsi_extents_skipped += 1
            continue

        payload = _iscsi_extent_payload(ext)
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would create extent %s → %s",
                     _cyan("[DRY RUN]"), _bold_cyan(repr(name)),
                     ext.get("disk") or ext.get("path"))
            summary.iscsi_extents_created += 1
            id_map[ext["id"]] = ext["id"]  # placeholder — enables downstream dry-run remapping
            # Record the backing zvol so the dry-run report can warn when it
            # does not exist on the destination.
            if ext.get("type") == "DISK" and ext.get("disk"):
                summary.zvols_to_check.append(ext["disk"].removeprefix("zvol/"))
            continue

        try:
            r = await client.call("iscsi.extent.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            id_map[ext["id"]] = r["id"]
            summary.iscsi_extents_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_extents_failed += 1
            summary.errors.append(f"iSCSI extent {name!r}: {exc}")
|
||||
|
||||
|
||||
async def _migrate_iscsi_initiators(
    client: TrueNASClient,
    initiators: list[dict],
    dry_run: bool,
    summary: Summary,
    id_map: dict[int, int],
) -> None:
    """
    Create the source initiator groups on the destination.

    Duplicate detection keys on the (case-folded) comment, so only groups
    with a non-empty comment can be matched — commentless groups are always
    (re)created. Fills *id_map* with source-id → destination-id entries and
    updates the initiator counters and error list on *summary*.
    """
    log.info("Querying existing iSCSI initiator groups on destination …")
    try:
        existing = await client.call("iscsi.initiator.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI initiators: {exc}"
        log.error(msg); summary.errors.append(msg); return

    existing_by_comment = {e["comment"].lower(): e for e in existing if e.get("comment")}
    log.info(" Destination has %d existing initiator group(s).", len(existing))

    for init in initiators:
        comment = init.get("comment", "")
        log.info("%s iSCSI initiator group %s", _bold("──"), _bold_cyan(repr(comment)))

        if comment and comment.lower() in existing_by_comment:
            log.info(" %s – comment already exists on destination.", _yellow("SKIP"))
            id_map[init["id"]] = existing_by_comment[comment.lower()]["id"]
            summary.iscsi_initiators_skipped += 1
            continue

        payload = _iscsi_initiator_payload(init)
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would create initiator group %s",
                     _cyan("[DRY RUN]"), _bold_cyan(repr(comment)))
            summary.iscsi_initiators_created += 1
            id_map[init["id"]] = init["id"]  # placeholder — enables downstream dry-run remapping
            continue

        try:
            r = await client.call("iscsi.initiator.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            id_map[init["id"]] = r["id"]
            summary.iscsi_initiators_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_initiators_failed += 1
            summary.errors.append(f"iSCSI initiator {comment!r}: {exc}")
|
||||
|
||||
|
||||
async def _migrate_iscsi_portals(
    client: TrueNASClient,
    portals: list[dict],
    dry_run: bool,
    summary: Summary,
    id_map: dict[int, int],
) -> None:
    """
    Create the source portals on the destination, skipping duplicates.

    Two portals are considered the same when they listen on exactly the same
    set of IP addresses (comments are ignored for matching). Fills *id_map*
    with source-portal-id → destination-portal-id entries and updates the
    portal counters and error list on *summary*.
    """
    log.info("Querying existing iSCSI portals on destination …")
    try:
        existing = await client.call("iscsi.portal.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI portals: {exc}"
        log.error(msg); summary.errors.append(msg); return

    def _ip_set(p: dict) -> frozenset:
        # The set of listen IPs identifies a portal for duplicate detection.
        return frozenset(l["ip"] for l in p.get("listen", []))

    existing_ip_sets = [(_ip_set(p), p["id"]) for p in existing]
    log.info(" Destination has %d existing portal(s).", len(existing))

    for portal in portals:
        comment = portal.get("comment", "")
        ips = ", ".join(l['ip'] for l in portal.get("listen", []))
        log.info("%s iSCSI portal %s [%s]", _bold("──"), _bold_cyan(repr(comment)), ips)

        my_ips = _ip_set(portal)
        match = next((eid for eips, eid in existing_ip_sets if eips == my_ips), None)
        if match is not None:
            log.info(" %s – IP set already exists on destination.", _yellow("SKIP"))
            id_map[portal["id"]] = match
            summary.iscsi_portals_skipped += 1
            continue

        payload = _iscsi_portal_payload(portal)
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would create portal %s → %s",
                     _cyan("[DRY RUN]"), _bold_cyan(repr(comment)), ips)
            summary.iscsi_portals_created += 1
            id_map[portal["id"]] = portal["id"]  # placeholder — enables downstream dry-run remapping
            continue

        try:
            r = await client.call("iscsi.portal.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            id_map[portal["id"]] = r["id"]
            summary.iscsi_portals_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_portals_failed += 1
            summary.errors.append(f"iSCSI portal {comment!r}: {exc}")
|
||||
|
||||
|
||||
async def _migrate_iscsi_targets(
    client: TrueNASClient,
    targets: list[dict],
    dry_run: bool,
    summary: Summary,
    id_map: dict[int, int],
    portal_id_map: dict[int, int],
    initiator_id_map: dict[int, int],
) -> None:
    """
    Create the source targets on the destination, skipping duplicates by name.

    Group entries are remapped through *portal_id_map* / *initiator_id_map*
    (filled by the earlier portal/initiator passes). Fills *id_map* with
    source-target-id → destination-target-id entries and updates the target
    counters and error list on *summary*.
    """
    log.info("Querying existing iSCSI targets on destination …")
    try:
        existing = await client.call("iscsi.target.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI targets: {exc}"
        log.error(msg); summary.errors.append(msg); return

    # Target names are compared case-insensitively for duplicate detection.
    existing_by_name = {t["name"].lower(): t for t in existing}
    log.info(" Destination has %d existing target(s).", len(existing_by_name))

    for target in targets:
        name = target.get("name", "<unnamed>")
        log.info("%s iSCSI target %s", _bold("──"), _bold_cyan(repr(name)))

        if name.lower() in existing_by_name:
            log.info(" %s – already exists on destination.", _yellow("SKIP"))
            id_map[target["id"]] = existing_by_name[name.lower()]["id"]
            summary.iscsi_targets_skipped += 1
            continue

        # Verify all referenced portals and initiators were successfully mapped.
        # BUGFIX: a group's "initiator" may legitimately be None (no initiator
        # group restriction) — _iscsi_target_payload passes None through — so
        # only a real, unmapped initiator ID counts as a remap failure.
        missing = []
        for g in target.get("groups", []):
            if g.get("portal") not in portal_id_map:
                missing.append(f"portal id={g['portal']}")
            initiator = g.get("initiator")
            if initiator is not None and initiator not in initiator_id_map:
                missing.append(f"initiator id={initiator}")
        if missing:
            msg = f"iSCSI target {name!r}: cannot remap {', '.join(missing)}"
            log.error(" %s: %s", _bold_red("SKIP"), msg)
            summary.iscsi_targets_failed += 1
            summary.errors.append(msg)
            continue

        payload = _iscsi_target_payload(target, portal_id_map, initiator_id_map)
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would create target %s",
                     _cyan("[DRY RUN]"), _bold_cyan(repr(name)))
            summary.iscsi_targets_created += 1
            id_map[target["id"]] = target["id"]  # placeholder — enables downstream dry-run remapping
            continue

        try:
            r = await client.call("iscsi.target.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            id_map[target["id"]] = r["id"]
            summary.iscsi_targets_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_targets_failed += 1
            summary.errors.append(f"iSCSI target {name!r}: {exc}")
|
||||
|
||||
|
||||
async def _migrate_iscsi_targetextents(
    client: TrueNASClient,
    targetextents: list[dict],
    dry_run: bool,
    summary: Summary,
    target_id_map: dict[int, int],
    extent_id_map: dict[int, int],
) -> None:
    """
    Recreate target↔extent (LUN) associations on the destination.

    Source target/extent IDs are translated through *target_id_map* /
    *extent_id_map* (filled by the earlier passes); associations whose
    endpoints could not be mapped are recorded as failures. A destination
    (target, LUN) pair that is already assigned is skipped.
    """
    log.info("Querying existing iSCSI target-extent associations on destination …")
    try:
        existing = await client.call("iscsi.targetextent.query") or []
    except RuntimeError as exc:
        msg = f"Could not query iSCSI target-extents: {exc}"
        log.error(msg); summary.errors.append(msg); return

    # A LUN number can only be used once per target, so (target, lunid) is
    # the uniqueness key on the destination.
    existing_keys = {(te["target"], te["lunid"]) for te in existing}
    log.info(" Destination has %d existing association(s).", len(existing))

    for te in targetextents:
        src_tid = te["target"]
        src_eid = te["extent"]
        lunid = te["lunid"]
        dest_tid = target_id_map.get(src_tid)
        dest_eid = extent_id_map.get(src_eid)

        if dest_tid is None or dest_eid is None:
            missing = []
            if dest_tid is None: missing.append(f"target id={src_tid}")
            if dest_eid is None: missing.append(f"extent id={src_eid}")
            msg = f"iSCSI target-extent (lun {lunid}): cannot remap {', '.join(missing)}"
            log.error(" %s", msg)
            summary.iscsi_targetextents_failed += 1
            summary.errors.append(msg)
            continue

        log.info("%s iSCSI target↔extent target=%s lun=%s extent=%s",
                 _bold("──"), dest_tid, lunid, dest_eid)

        if (dest_tid, lunid) in existing_keys:
            log.info(" %s – target+LUN already assigned on destination.", _yellow("SKIP"))
            summary.iscsi_targetextents_skipped += 1
            continue

        payload = {"target": dest_tid, "lunid": lunid, "extent": dest_eid}
        log.debug(" payload: %s", json.dumps(payload))

        if dry_run:
            log.info(" %s would associate target=%s lun=%s extent=%s",
                     _cyan("[DRY RUN]"), dest_tid, lunid, dest_eid)
            summary.iscsi_targetextents_created += 1
            continue

        try:
            r = await client.call("iscsi.targetextent.create", [payload])
            log.info(" %s id=%s", _bold_green("CREATED"), r.get("id"))
            summary.iscsi_targetextents_created += 1
        except RuntimeError as exc:
            log.error(" %s: %s", _bold_red("FAILED"), exc)
            summary.iscsi_targetextents_failed += 1
            summary.errors.append(
                f"iSCSI target-extent (target={dest_tid}, lun={lunid}): {exc}")
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
# iSCSI pre-migration utilities
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
async def query_existing_iscsi(client: TrueNASClient) -> dict:
    """
    Query all iSCSI object counts from the destination.
    Returns a dict with keys: extents, initiators, portals, targets, targetextents
    Each value is a list of objects (may be empty); a failed query yields [].
    """
    methods = {
        "extents": "iscsi.extent.query",
        "initiators": "iscsi.initiator.query",
        "portals": "iscsi.portal.query",
        "targets": "iscsi.target.query",
        "targetextents": "iscsi.targetextent.query",
    }
    result: dict = {}
    for key, method in methods.items():
        try:
            objects = await client.call(method)
        except RuntimeError:
            objects = None
        result[key] = objects or []
    return result
|
||||
|
||||
|
||||
async def clear_iscsi_config(client: TrueNASClient) -> None:
    """
    Delete all iSCSI configuration from the destination in safe dependency order:
    target-extents → targets → portals → initiators → extents.
    """
    plan = (
        ("iscsi.targetextent.query", "iscsi.targetextent.delete", "target-extent"),
        ("iscsi.target.query", "iscsi.target.delete", "target"),
        ("iscsi.portal.query", "iscsi.portal.delete", "portal"),
        ("iscsi.initiator.query", "iscsi.initiator.delete", "initiator"),
        ("iscsi.extent.query", "iscsi.extent.delete", "extent"),
    )
    for method_query, method_delete, label in plan:
        try:
            objects = await client.call(method_query) or []
        except RuntimeError as exc:
            log.warning(" Could not query iSCSI %ss: %s", label, exc)
            continue
        for obj in objects:
            obj_id = obj["id"]
            try:
                await client.call(method_delete, [obj_id])
            except RuntimeError as exc:
                log.warning(" Failed to delete iSCSI %s id=%s: %s", label, obj_id, exc)
            else:
                log.info(" Deleted iSCSI %s id=%s", label, obj_id)
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
# Public iSCSI entry point
|
||||
# ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
async def migrate_iscsi(
    client: TrueNASClient,
    iscsi: dict,
    dry_run: bool,
    summary: Summary,
) -> None:
    """
    Public entry point: migrate the parsed iSCSI configuration to the
    destination, in dependency order, recording results on *summary*.
    """
    if not iscsi:
        log.info("No iSCSI configuration found in archive.")
        return

    extents = iscsi.get("extents", [])
    initiators = iscsi.get("initiators", [])
    portals = iscsi.get("portals", [])
    targets = iscsi.get("targets", [])
    targetextents = iscsi.get("targetextents", [])

    summary.iscsi_extents_found = len(extents)
    summary.iscsi_initiators_found = len(initiators)
    summary.iscsi_portals_found = len(portals)
    summary.iscsi_targets_found = len(targets)
    summary.iscsi_targetextents_found = len(targetextents)

    gc = iscsi.get("global_config", {})
    basename = gc.get("basename")
    if basename:
        log.info(" Source iSCSI basename: %s (destination keeps its own)", basename)

    if not (portals or initiators or targets or extents or targetextents):
        log.info("iSCSI configuration is empty – nothing to migrate.")
        return

    extent_id_map: dict[int, int] = {}
    initiator_id_map: dict[int, int] = {}
    portal_id_map: dict[int, int] = {}
    target_id_map: dict[int, int] = {}

    # Dependency order: extents and initiators first (no deps), then portals,
    # then targets (need portal + initiator maps), then target-extent links.
    await _migrate_iscsi_extents(client, extents, dry_run, summary, extent_id_map)
    await _migrate_iscsi_initiators(client, initiators, dry_run, summary, initiator_id_map)
    await _migrate_iscsi_portals(client, portals, dry_run, summary, portal_id_map)
    await _migrate_iscsi_targets(
        client, targets, dry_run, summary, target_id_map, portal_id_map, initiator_id_map)
    await _migrate_iscsi_targetextents(
        client, targetextents, dry_run, summary, target_id_map, extent_id_map)
|
||||
|
||||
@@ -21,12 +21,47 @@ class Summary:
|
||||
    nfs_skipped: int = 0
    nfs_failed: int = 0

    # Per-object-type iSCSI migration counters: objects found in the source
    # archive, created on the destination, skipped (already present), failed.
    iscsi_extents_found: int = 0
    iscsi_extents_created: int = 0
    iscsi_extents_skipped: int = 0
    iscsi_extents_failed: int = 0

    iscsi_initiators_found: int = 0
    iscsi_initiators_created: int = 0
    iscsi_initiators_skipped: int = 0
    iscsi_initiators_failed: int = 0

    iscsi_portals_found: int = 0
    iscsi_portals_created: int = 0
    iscsi_portals_skipped: int = 0
    iscsi_portals_failed: int = 0

    iscsi_targets_found: int = 0
    iscsi_targets_created: int = 0
    iscsi_targets_skipped: int = 0
    iscsi_targets_failed: int = 0

    iscsi_targetextents_found: int = 0
    iscsi_targetextents_created: int = 0
    iscsi_targetextents_skipped: int = 0
    iscsi_targetextents_failed: int = 0

    # Human-readable error messages accumulated across all migration phases.
    errors: list[str] = field(default_factory=list)

    # Populated during dry-run dataset safety checks
    paths_to_create: list[str] = field(default_factory=list)
    missing_datasets: list[str] = field(default_factory=list)

    # Populated during iSCSI dry-run zvol safety checks
    zvols_to_check: list[str] = field(default_factory=list)
    missing_zvols: list[str] = field(default_factory=list)
|
||||
|
||||
@property
|
||||
def _has_iscsi(self) -> bool:
|
||||
return (self.iscsi_extents_found + self.iscsi_initiators_found +
|
||||
self.iscsi_portals_found + self.iscsi_targets_found +
|
||||
self.iscsi_targetextents_found) > 0
|
||||
|
||||
def report(self) -> str:
|
||||
w = 60
|
||||
|
||||
@@ -34,6 +69,14 @@ class Summary:
|
||||
s = f"{label}={n}"
|
||||
return color_fn(s) if n > 0 else _dim(s)
|
||||
|
||||
def _iscsi_val(found, created, skipped, failed) -> str:
|
||||
return (
|
||||
f"{_dim('found=' + str(found))} "
|
||||
f"{_stat('created', created, _bold_green)} "
|
||||
f"{_stat('skipped', skipped, _yellow)} "
|
||||
f"{_stat('failed', failed, _bold_red)}"
|
||||
)
|
||||
|
||||
smb_val = (
|
||||
f"{_dim('found=' + str(self.smb_found))} "
|
||||
f"{_stat('created', self.smb_created, _bold_green)} "
|
||||
@@ -67,11 +110,30 @@ class Summary:
|
||||
f"{tl}{hr}{tr}",
|
||||
title_row,
|
||||
f"{ml}{hr}{mr}",
|
||||
row("SMB shares : ", smb_val),
|
||||
row("NFS shares : ", nfs_val),
|
||||
f"{bl}{hr}{br}",
|
||||
row("SMB shares : ", smb_val),
|
||||
row("NFS shares : ", nfs_val),
|
||||
]
|
||||
|
||||
if self._has_iscsi:
|
||||
lines.append(f"{ml}{hr}{mr}")
|
||||
lines.append(row("iSCSI extents : ", _iscsi_val(
|
||||
self.iscsi_extents_found, self.iscsi_extents_created,
|
||||
self.iscsi_extents_skipped, self.iscsi_extents_failed)))
|
||||
lines.append(row("iSCSI initiators: ", _iscsi_val(
|
||||
self.iscsi_initiators_found, self.iscsi_initiators_created,
|
||||
self.iscsi_initiators_skipped, self.iscsi_initiators_failed)))
|
||||
lines.append(row("iSCSI portals : ", _iscsi_val(
|
||||
self.iscsi_portals_found, self.iscsi_portals_created,
|
||||
self.iscsi_portals_skipped, self.iscsi_portals_failed)))
|
||||
lines.append(row("iSCSI targets : ", _iscsi_val(
|
||||
self.iscsi_targets_found, self.iscsi_targets_created,
|
||||
self.iscsi_targets_skipped, self.iscsi_targets_failed)))
|
||||
lines.append(row("iSCSI tgt↔ext : ", _iscsi_val(
|
||||
self.iscsi_targetextents_found, self.iscsi_targetextents_created,
|
||||
self.iscsi_targetextents_skipped, self.iscsi_targetextents_failed)))
|
||||
|
||||
lines.append(f"{bl}{hr}{br}")
|
||||
|
||||
if self.errors:
|
||||
lines.append(f"\n {_bold_red(str(len(self.errors)) + ' error(s):')} ")
|
||||
for e in self.errors:
|
||||
@@ -89,5 +151,16 @@ class Summary:
|
||||
" These paths must exist before shares can be created.\n"
|
||||
" Use interactive mode or answer 'y' at the dataset prompt to create them."
|
||||
)
|
||||
if self.missing_zvols:
|
||||
lines.append(
|
||||
f"\n {_bold_yellow('WARNING:')} "
|
||||
f"{len(self.missing_zvols)} zvol(s) do not exist on the destination:"
|
||||
)
|
||||
for z in self.missing_zvols:
|
||||
lines.append(f" {_yellow('•')} {z}")
|
||||
lines.append(
|
||||
" These zvols must exist before iSCSI extents can be created.\n"
|
||||
" Use interactive mode to be prompted for size and auto-create them."
|
||||
)
|
||||
lines.append("")
|
||||
return "\n".join(lines)
|
||||
|
||||
Reference in New Issue
Block a user