Skip to content

Commit d566b6a

Browse files
committed
[Compute] migrate vm disk attach/detach to aaz
fix: resolve review issues; fix: align with CLI style checks
1 parent 857476a commit d566b6a

2 files changed

Lines changed: 230 additions & 126 deletions

File tree

src/azure-cli/azure/cli/command_modules/vm/_vm_utils.py

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -757,3 +757,31 @@ def _open(filename, mode):
757757
f.write(public_bytes)
758758

759759
return public_bytes.decode()
760+
761+
762+
def safe_get(d: dict, path: str, default=None):
    """Safely resolve a dotted *path* against nested dicts and lists.

    List elements are addressed by integer segments, e.g.
    'storageProfile.dataDisks.0.managedDisk.id'. Whenever a segment
    cannot be resolved (missing key, non-integer or out-of-range list
    index, wrong container type), *default* is returned instead of
    raising.
    """
    _missing = object()  # sentinel so that stored None values are still returned
    node = d
    for segment in path.split('.'):
        if isinstance(node, dict):
            node = node.get(segment, _missing)
            if node is _missing:
                return default
        elif isinstance(node, list):
            # Lists only accept integer segments; anything else falls back
            # to the default, as does an out-of-range index.
            try:
                node = node[int(segment)]
            except (ValueError, IndexError):
                return default
        else:
            # Scalar / None encountered before the path was exhausted.
            return default
    return node

src/azure-cli/azure/cli/command_modules/vm/custom.py

Lines changed: 202 additions & 126 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@
3838
from azure.cli.core.profiles import ResourceType
3939
from azure.cli.core.util import sdk_no_wait
4040

41-
from ._vm_utils import read_content_if_is_file, import_aaz_by_profile
41+
from ._vm_utils import read_content_if_is_file, import_aaz_by_profile, safe_get
4242
from ._vm_diagnostics_templates import get_default_diag_config
4343

4444
from ._actions import (load_images_from_aliases_doc, load_extension_images_thru_services,
@@ -2233,120 +2233,165 @@ def show_default_diagnostics_configuration(is_windows_os=False):
22332233

22342234

22352235
# region VirtualMachines Disks (Managed)
2236-
def attach_managed_data_disk(cmd, resource_group_name, vm_name, disk=None, ids=None, disks=None, new=False, sku=None,
2237-
size_gb=None, lun=None, caching=None, enable_write_accelerator=False, disk_ids=None,
2238-
source_snapshots_or_disks=None, source_disk_restore_point=None,
2239-
new_names_of_source_snapshots_or_disks=None, new_names_of_source_disk_restore_point=None):
2240-
# attach multiple managed disks using disk attach API
2241-
vm = get_vm_to_update(cmd, resource_group_name, vm_name)
2236+
def attach_managed_data_disk(cmd, resource_group_name, vm_name,
2237+
disk=None, ids=None, disks=None, new=False, sku=None,
2238+
size_gb=None, lun=None, caching=None,
2239+
enable_write_accelerator=False, disk_ids=None,
2240+
source_snapshots_or_disks=None,
2241+
source_disk_restore_point=None,
2242+
new_names_of_source_snapshots_or_disks=None,
2243+
new_names_of_source_disk_restore_point=None,
2244+
no_wait=False):
2245+
if caching is None:
2246+
caching = 'None'
2247+
2248+
# attach existing managed disks
22422249
if not new and not sku and not size_gb and disk_ids is not None:
2243-
if lun:
2244-
disk_lun = lun
2245-
else:
2246-
disk_lun = _get_disk_lun(vm.storage_profile.data_disks)
2247-
2248-
data_disks = []
2249-
for disk_item in disk_ids:
2250-
disk = {
2251-
'diskId': disk_item,
2252-
'caching': caching,
2250+
from .operations.vm import VMShow
2251+
vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={
2252+
'resource_group': resource_group_name,
2253+
'vm_name': vm_name
2254+
})
2255+
vm_dict = vm if isinstance(vm, dict) else getattr(vm, 'result', vm)
2256+
data_disks = vm_dict.get('storageProfile', {}).get('dataDisks', []) or []
2257+
used_luns = {d.get('lun') for d in data_disks if isinstance(d, dict) and d.get('lun') is not None}
2258+
2259+
def _next_lun(start=0):
2260+
i = start
2261+
while i in used_luns:
2262+
i += 1
2263+
used_luns.add(i)
2264+
return i
2265+
attach_payload = []
2266+
current_lun = lun
2267+
for disk_id in disk_ids:
2268+
if current_lun is not None:
2269+
disk_lun = _next_lun(start=current_lun)
2270+
current_lun = disk_lun + 1
2271+
else:
2272+
disk_lun = _next_lun()
2273+
payload = {
2274+
'diskId': disk_id,
22532275
'lun': disk_lun,
2254-
'writeAcceleratorEnabled': enable_write_accelerator
2276+
'caching': caching,
22552277
}
2256-
data_disks.append(disk)
2257-
disk_lun += 1
2258-
result = AttachDetachDataDisk(cli_ctx=cmd.cli_ctx)(command_args={
2278+
if enable_write_accelerator:
2279+
payload['writeAcceleratorEnabled'] = enable_write_accelerator
2280+
2281+
attach_payload.append(payload)
2282+
return AttachDetachDataDisk(cli_ctx=cmd.cli_ctx)(command_args={
22592283
'vm_name': vm_name,
22602284
'resource_group': resource_group_name,
2261-
'data_disks_to_attach': data_disks
2285+
'data_disks_to_attach': attach_payload,
2286+
'no_wait': no_wait
22622287
})
2263-
return result
2264-
else:
2265-
# attach multiple managed disks using vm PUT API
2266-
from azure.mgmt.core.tools import parse_resource_id
2267-
DataDisk, ManagedDiskParameters, DiskCreateOption = cmd.get_models(
2268-
'DataDisk', 'ManagedDiskParameters', 'DiskCreateOptionTypes')
2269-
if size_gb is None:
2270-
default_size_gb = 1023
22712288

2272-
if disk_ids is not None:
2273-
disks = disk_ids
2274-
2275-
for disk_item in disks:
2276-
if lun:
2277-
disk_lun = lun
2278-
else:
2279-
disk_lun = _get_disk_lun(vm.storage_profile.data_disks)
2280-
2281-
if new:
2282-
data_disk = DataDisk(lun=disk_lun, create_option=DiskCreateOption.empty,
2283-
name=parse_resource_id(disk_item)['name'],
2284-
disk_size_gb=size_gb if size_gb else default_size_gb, caching=caching,
2285-
managed_disk=ManagedDiskParameters(storage_account_type=sku))
2286-
else:
2287-
params = ManagedDiskParameters(id=disk_item, storage_account_type=sku)
2288-
data_disk = DataDisk(lun=disk_lun, create_option=DiskCreateOption.attach, managed_disk=params,
2289-
caching=caching)
2289+
# new / copy / restore
2290+
from azure.mgmt.core.tools import parse_resource_id
2291+
from .operations.vm import VMUpdate as _VMUpdate
22902292

2291-
if enable_write_accelerator:
2292-
data_disk.write_accelerator_enabled = enable_write_accelerator
2293-
2294-
vm.storage_profile.data_disks.append(data_disk)
2295-
disk_lun = _get_disk_lun(vm.storage_profile.data_disks)
2296-
if source_snapshots_or_disks is not None:
2297-
if new_names_of_source_snapshots_or_disks is None:
2298-
new_names_of_source_snapshots_or_disks = [None] * len(source_snapshots_or_disks)
2299-
for disk_id, disk_name in zip(source_snapshots_or_disks, new_names_of_source_snapshots_or_disks):
2300-
disk = {
2301-
'name': disk_name,
2302-
'create_option': 'Copy',
2303-
'caching': caching,
2304-
'lun': disk_lun,
2305-
'writeAcceleratorEnabled': enable_write_accelerator,
2306-
"sourceResource": {
2307-
"id": disk_id
2308-
}
2309-
}
2310-
if size_gb is not None:
2311-
disk.update({
2312-
'diskSizeGb': size_gb
2313-
})
2314-
if sku is not None:
2315-
disk.update({
2316-
"managedDisk": {
2317-
"storageAccountType": sku
2293+
class VMUpdate(_VMUpdate):
2294+
def pre_instance_update(self, instance):
2295+
storage_profile = instance.properties.storage_profile
2296+
data_disks = storage_profile.data_disks
2297+
data_disks_list = data_disks.to_serialized_data() if hasattr(data_disks, 'to_serialized_data') \
2298+
else data_disks
2299+
used_luns = set()
2300+
if isinstance(data_disks_list, list):
2301+
for d in data_disks_list:
2302+
if isinstance(d, dict) and 'lun' in d and d['lun'] is not None:
2303+
used_luns.add(d['lun'])
2304+
2305+
def _next_lun(start=0):
2306+
i = start
2307+
while i in used_luns:
2308+
i += 1
2309+
used_luns.add(i)
2310+
return i
2311+
default_size_gb = 1023
2312+
disks_to_process = disk_ids if disk_ids is not None else disks
2313+
2314+
# attach existing / new disks
2315+
if disks_to_process:
2316+
# if a user-specified LUN is provided, allocate it (or the next available
2317+
# starting from that value) for the first disk, then auto-increment for
2318+
# subsequent disks to avoid LUN conflicts.
2319+
next_lun = _next_lun(start=lun) if lun is not None else None
2320+
for disk_item in disks_to_process:
2321+
if lun is not None:
2322+
disk_lun = next_lun
2323+
next_lun = _next_lun()
2324+
else:
2325+
disk_lun = _next_lun()
2326+
2327+
if new:
2328+
disk_name = parse_resource_id(disk_item)['name']
2329+
disk_obj = {
2330+
'name': disk_name,
2331+
'lun': disk_lun,
2332+
'createOption': 'Empty',
2333+
'diskSizeGb': size_gb if size_gb else default_size_gb,
2334+
'caching': caching
23182335
}
2319-
})
2320-
disk_lun += 1
2321-
vm.storage_profile.data_disks.append(disk)
2322-
if source_disk_restore_point is not None:
2323-
if new_names_of_source_disk_restore_point is None:
2324-
new_names_of_source_disk_restore_point = [None] * len(source_disk_restore_point)
2325-
for disk_id, disk_name in zip(source_disk_restore_point, new_names_of_source_disk_restore_point):
2326-
disk = {
2327-
'name': disk_name,
2328-
'create_option': 'Restore',
2329-
'caching': caching,
2330-
'lun': disk_lun,
2331-
'writeAcceleratorEnabled': enable_write_accelerator,
2332-
"sourceResource": {
2333-
"id": disk_id
2334-
}
2335-
}
2336-
if size_gb is not None:
2337-
disk.update({
2338-
'diskSizeGb': size_gb
2339-
})
2340-
if sku is not None:
2341-
disk.update({
2342-
"managedDisk": {
2343-
"storageAccountType": sku
2336+
if sku:
2337+
disk_obj['managedDisk'] = {'storageAccountType': sku}
2338+
else:
2339+
disk_obj = {
2340+
'lun': disk_lun,
2341+
'createOption': 'Attach',
2342+
'caching': caching,
2343+
'managedDisk': {'id': disk_item}
23442344
}
2345-
})
2346-
disk_lun += 1
2347-
vm.storage_profile.data_disks.append(disk)
2345+
if sku:
2346+
disk_obj['managedDisk']['storageAccountType'] = sku
2347+
if enable_write_accelerator:
2348+
disk_obj['writeAcceleratorEnabled'] = True
2349+
data_disks.append(disk_obj)
2350+
2351+
# snapshot / copy
2352+
if source_snapshots_or_disks:
2353+
_new_names = new_names_of_source_snapshots_or_disks or [None] * len(source_snapshots_or_disks)
2354+
for src_id, name in zip(source_snapshots_or_disks, _new_names):
2355+
disk_lun = _next_lun()
2356+
disk_obj = {
2357+
'name': name,
2358+
'lun': disk_lun,
2359+
'createOption': 'Copy',
2360+
'sourceResource': {'id': src_id},
2361+
'caching': caching,
2362+
'writeAcceleratorEnabled': enable_write_accelerator
2363+
}
2364+
if size_gb is not None:
2365+
disk_obj['diskSizeGb'] = size_gb
2366+
if sku is not None:
2367+
disk_obj['managedDisk'] = {'storageAccountType': sku}
2368+
data_disks.append(disk_obj)
2369+
2370+
# restore point
2371+
if source_disk_restore_point:
2372+
_new_names_rp = new_names_of_source_disk_restore_point or [None] * len(source_disk_restore_point)
2373+
for src_id, name in zip(source_disk_restore_point, _new_names_rp):
2374+
disk_lun = _next_lun()
2375+
disk_obj = {
2376+
'name': name,
2377+
'lun': disk_lun,
2378+
'createOption': 'Restore',
2379+
'sourceResource': {'id': src_id},
2380+
'caching': caching,
2381+
'writeAcceleratorEnabled': enable_write_accelerator
2382+
}
2383+
if size_gb is not None:
2384+
disk_obj['diskSizeGb'] = size_gb
2385+
if sku is not None:
2386+
disk_obj['managedDisk'] = {'storageAccountType': sku}
2387+
data_disks.append(disk_obj)
23482388

2349-
set_vm(cmd, vm)
2389+
args = {
2390+
'resource_group': resource_group_name,
2391+
'vm_name': vm_name,
2392+
'no_wait': no_wait
2393+
}
2394+
return VMUpdate(cli_ctx=cmd.cli_ctx)(command_args=args)
23502395

23512396

23522397
def detach_unmanaged_data_disk(cmd, resource_group_name, vm_name, disk_name):
@@ -2361,7 +2406,9 @@ def detach_unmanaged_data_disk(cmd, resource_group_name, vm_name, disk_name):
23612406
# endregion
23622407

23632408

2364-
def detach_managed_data_disk(cmd, resource_group_name, vm_name, disk_name=None, force_detach=None, disk_ids=None):
2409+
def detach_managed_data_disk(cmd, resource_group_name, vm_name, disk_name=None,
2410+
force_detach=None, disk_ids=None,
2411+
no_wait=False):
23652412
if disk_ids is not None:
23662413
data_disks = []
23672414
for disk_item in disk_ids:
@@ -2375,27 +2422,56 @@ def detach_managed_data_disk(cmd, resource_group_name, vm_name, disk_name=None,
23752422
return result
23762423
else:
23772424
# here we handle managed disk
2378-
vm = get_vm_to_update(cmd, resource_group_name, vm_name)
2379-
if not force_detach:
2380-
# pylint: disable=no-member
2381-
leftovers = [d for d in vm.storage_profile.data_disks if d.name.lower() != disk_name.lower()]
2382-
if len(vm.storage_profile.data_disks) == len(leftovers):
2383-
raise ResourceNotFoundError("No disk with the name '{}' was found".format(disk_name))
2384-
else:
2385-
DiskDetachOptionTypes = cmd.get_models('DiskDetachOptionTypes', resource_type=ResourceType.MGMT_COMPUTE,
2386-
operation_group='virtual_machines')
2387-
leftovers = vm.storage_profile.data_disks
2388-
is_contains = False
2389-
for d in leftovers:
2390-
if d.name.lower() == disk_name.lower():
2391-
d.to_be_detached = True
2392-
d.detach_option = DiskDetachOptionTypes.FORCE_DETACH
2393-
is_contains = True
2394-
break
2395-
if not is_contains:
2396-
raise ResourceNotFoundError("No disk with the name '{}' was found".format(disk_name))
2397-
vm.storage_profile.data_disks = leftovers
2398-
set_vm(cmd, vm)
2425+
from .operations.vm import VMShow
2426+
2427+
vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={
2428+
'resource_group': resource_group_name,
2429+
'vm_name': vm_name
2430+
})
2431+
2432+
# work on a local copy of the VM dict to avoid mutating the original object.
2433+
vm_result = vm if isinstance(vm, dict) else getattr(vm, 'result', vm)
2434+
vm_dict = json.loads(json.dumps(vm_result))
2435+
2436+
# to avoid unnecessary permission check of image
2437+
storage_profile = vm_dict.get('storageProfile', {})
2438+
storage_profile["imageReference"] = None
2439+
2440+
target_disk = None
2441+
data_disks = safe_get(vm_dict, 'storageProfile.dataDisks', default=[]) or []
2442+
for d in data_disks:
2443+
# Use dict-style access; AAZ returns dicts.
2444+
name = (d.get('name') or '').lower()
2445+
if name == (disk_name or '').lower():
2446+
target_disk = d
2447+
break
2448+
2449+
if not target_disk:
2450+
attached_names = [d.get('name') for d in (safe_get(vm_dict, 'storageProfile.dataDisks', []) or [])]
2451+
raise ResourceNotFoundError(
2452+
"No disk with the name '{}' was found. Attached: {}".format(disk_name, attached_names)
2453+
)
2454+
2455+
disk_id = safe_get(target_disk, 'managedDisk.id')
2456+
if not disk_id:
2457+
raise CLIError(
2458+
"Disk '{}' is not a managed disk (no managedDisk.id). Only managed disks are supported for this "
2459+
"operation."
2460+
.format(disk_name)
2461+
)
2462+
2463+
args = {
2464+
'vm_name': vm_name,
2465+
'resource_group': resource_group_name,
2466+
'data_disks_to_detach': [{
2467+
'diskId': disk_id,
2468+
'detachOption': 'ForceDetach' if force_detach else None
2469+
}],
2470+
'no_wait': no_wait
2471+
}
2472+
2473+
result = AttachDetachDataDisk(cli_ctx=cmd.cli_ctx)(command_args=args)
2474+
return result
23992475
# endregion
24002476

24012477

0 commit comments

Comments
 (0)