Mirror of https://github.com/retspen/webvirtcloud (synced 2025-07-31 12:41:08 +00:00)
Commit 217e106c8b: format python code with black
Parent: ea409ca863
55 changed files with 2510 additions and 1454 deletions
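The diff below is mechanical: Black normalizes string literals to double quotes, adds trailing commas, and re-wraps statements that exceed its default 88-character line length. As a rough illustration of that rewrite (not part of the commit), here is a minimal sketch assuming the `black` package and its `format_str`/`Mode` API; the `source` snippet is a hypothetical stand-in mirroring one of the lines changed below. The project-wide equivalent is simply running `black` against the repository root.

# Minimal sketch, assuming the `black` package is installed and exposes format_str/Mode.
import black

# A line in the pre-commit style: single quotes, one long literal list.
source = "fields = ['id', 'compute', 'name', 'uuid', 'is_template', 'created', 'drbd']\n"

# Black rewrites the quotes and would split the statement if it exceeded 88 columns.
formatted = black.format_str(source, mode=black.Mode())
print(formatted)
# fields = ["id", "compute", "name", "uuid", "is_template", "created", "drbd"]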
@@ -4,90 +4,88 @@ from instances.models import Flavor, Instance, MigrateInstance, CreateInstance


 class InstanceSerializer(serializers.ModelSerializer):
     class Meta:
         model = Instance
-        fields = ['id', 'compute', 'name', 'uuid', 'is_template', 'created', 'drbd']
+        fields = ["id", "compute", "name", "uuid", "is_template", "created", "drbd"]


 class InstanceDetailsSerializer(serializers.ModelSerializer):
     class Meta:
         model = Instance
         fields = [
-            'id',
-            'compute',
-            'status',
-            'uuid',
-            'name',
-            'title',
-            'description',
-            'is_template',
-            'created',
-            'drbd',
-            'arch',
-            'machine',
-            'vcpu',
-            'memory',
-            'firmware',
-            'nvram',
-            'bootmenu',
-            'boot_order',
-            'disks',
-            'media',
-            'media_iso',
-            'snapshots',
-            'networks',
-            'console_type',
-            'console_port',
-            'console_keymap',
-            'console_listener_address',
-            'video_model',
-            'guest_agent_ready',
-            'autostart']
+            "id",
+            "compute",
+            "status",
+            "uuid",
+            "name",
+            "title",
+            "description",
+            "is_template",
+            "created",
+            "drbd",
+            "arch",
+            "machine",
+            "vcpu",
+            "memory",
+            "firmware",
+            "nvram",
+            "bootmenu",
+            "boot_order",
+            "disks",
+            "media",
+            "media_iso",
+            "snapshots",
+            "networks",
+            "console_type",
+            "console_port",
+            "console_keymap",
+            "console_listener_address",
+            "video_model",
+            "guest_agent_ready",
+            "autostart",
+        ]


 class FlavorSerializer(serializers.ModelSerializer):
     class Meta:
         model = Flavor
-        fields = ['label', 'memory', 'vcpu', 'disk']
+        fields = ["label", "memory", "vcpu", "disk"]


 class CreateInstanceSerializer(serializers.ModelSerializer):
     firmware_choices = (
-        ('', 'BIOS'),
-        #('UEFI', 'UEFI'),
+        ("", "BIOS"),
+        # ('UEFI', 'UEFI'),
     )
-    firmware = serializers.ChoiceField(choices = firmware_choices)
-    graphics = serializers.CharField(initial='vnc')
-    video = serializers.CharField(initial='vga')
-    storage = serializers.CharField(initial='default')
-    cache_mode = serializers.CharField(initial='none')
+    firmware = serializers.ChoiceField(choices=firmware_choices)
+    graphics = serializers.CharField(initial="vnc")
+    video = serializers.CharField(initial="vga")
+    storage = serializers.CharField(initial="default")
+    cache_mode = serializers.CharField(initial="none")
     virtio = serializers.BooleanField(initial=True)
     qemu_ga = serializers.BooleanField(initial=True)

     class Meta:
         model = CreateInstance
         fields = [
-            'name',
-            'firmware',
-            'vcpu',
-            'vcpu_mode',
-            'memory',
-            'networks',
-            'mac',
-            'nwfilter',
-            'storage',
-            'hdd_size',
-            'cache_mode',
-            'meta_prealloc',
-            'virtio',
-            'qemu_ga',
-            'console_pass',
-            'graphics',
-            'video',
-            'listener_addr'
+            "name",
+            "firmware",
+            "vcpu",
+            "vcpu_mode",
+            "memory",
+            "networks",
+            "mac",
+            "nwfilter",
+            "storage",
+            "hdd_size",
+            "cache_mode",
+            "meta_prealloc",
+            "virtio",
+            "qemu_ga",
+            "console_pass",
+            "graphics",
+            "video",
+            "listener_addr",
         ]

@@ -95,6 +93,17 @@ class MigrateSerializer(serializers.ModelSerializer):
     instance = Instance.objects.all().prefetch_related("userinstance_set")
     live = serializers.BooleanField(initial=True)
     xml_del = serializers.BooleanField(initial=True)

     class Meta:
         model = MigrateInstance
-        fields = ['instance', 'target_compute', 'live', 'xml_del', 'offline', 'autoconverge', 'compress', 'postcopy', 'unsafe']
+        fields = [
+            "instance",
+            "target_compute",
+            "live",
+            "xml_del",
+            "offline",
+            "autoconverge",
+            "compress",
+            "postcopy",
+            "unsafe",
+        ]
@@ -5,7 +5,15 @@ from computes import utils
 from instances.models import Flavor, Instance
 from instances.views import get_instance
 from instances.utils import migrate_instance
-from instances.views import poweron, powercycle, poweroff, force_off, suspend, resume, destroy as instance_destroy
+from instances.views import (
+    poweron,
+    powercycle,
+    poweroff,
+    force_off,
+    suspend,
+    resume,
+    destroy as instance_destroy,
+)

 from rest_framework import status, viewsets, permissions
 from rest_framework.decorators import action

@@ -14,26 +22,39 @@ from rest_framework.response import Response
 from vrtManager import util

 from vrtManager.create import wvmCreate
-from .serializers import FlavorSerializer, InstanceSerializer, InstanceDetailsSerializer, MigrateSerializer, CreateInstanceSerializer
+from .serializers import (
+    FlavorSerializer,
+    InstanceSerializer,
+    InstanceDetailsSerializer,
+    MigrateSerializer,
+    CreateInstanceSerializer,
+)


 class InstancesViewSet(viewsets.ViewSet):
     """
     A simple ViewSet for listing or retrieving ALL/Compute Instances.
     """

     permission_classes = [permissions.IsAuthenticated]

     def list(self, request):

         if request.user.is_superuser or request.user.has_perm("instances.view_instances"):
             queryset = Instance.objects.all().prefetch_related("userinstance_set")
         else:
             queryset = Instance.objects.filter(userinstance__user=request.user).prefetch_related("userinstance_set")
-        serializer = InstanceSerializer(queryset, many=True, context={'request': request})
+        serializer = InstanceSerializer(
+            queryset,
+            many=True,
+            context={"request": request}
+        )

         return Response(serializer.data)

     def retrieve(self, request, pk=None, compute_pk=None):
         queryset = get_instance(request.user, pk)
-        serializer = InstanceSerializer(queryset, context={'request': request})
+        serializer = InstanceSerializer(queryset, context={"request": request})

         return Response(serializer.data)

@@ -42,94 +63,110 @@ class InstanceViewSet(viewsets.ViewSet):
     """
     A simple ViewSet for listing or retrieving Compute Instances.
     """
-    #serializer_class = CreateInstanceSerializer
+
+    # serializer_class = CreateInstanceSerializer
     permission_classes = [permissions.IsAuthenticated]

     def list(self, request, compute_pk=None):
         compute = get_object_or_404(Compute, pk=compute_pk)

         utils.refresh_instance_database(compute)

-        queryset = Instance.objects.filter(compute=compute).prefetch_related("userinstance_set")
-        serializer = InstanceSerializer(queryset, many=True, context={'request': request})
-
-        return Response(serializer.data)
-
+        queryset = Instance.objects.filter(compute=compute).prefetch_related("userinstance_set")
+        serializer = InstanceSerializer(
+            queryset,
+            many=True,
+            context={"request": request}
+        )

         return Response(serializer.data)

     def retrieve(self, request, pk=None, compute_pk=None):
         queryset = get_instance(request.user, pk)
-        serializer = InstanceDetailsSerializer(queryset, context={'request': request})
+        serializer = InstanceDetailsSerializer(queryset, context={"request": request})

         return Response(serializer.data)

     def destroy(self, request, pk=None, compute_pk=None):
         instance_destroy(request, pk)
-        return Response({'status': 'Instance is destroyed'})
+        return Response({"status": "Instance is destroyed"})

-    @action(detail=True, methods=['post'])
+    @action(detail=True, methods=["post"])
     def poweron(self, request, pk=None):
         poweron(request, pk)
-        return Response({'status': 'poweron command send'})
+        return Response({"status": "poweron command send"})

-    @action(detail=True, methods=['post'])
+    @action(detail=True, methods=["post"])
     def poweroff(self, request, pk=None):
         poweroff(request, pk)
-        return Response({'status': 'poweroff command send'})
+        return Response({"status": "poweroff command send"})

-    @action(detail=True, methods=['post'])
+    @action(detail=True, methods=["post"])
     def powercycle(self, request, pk=None):
         powercycle(request, pk)
-        return Response({'status': 'powercycle command send'})
+        return Response({"status": "powercycle command send"})

-    @action(detail=True, methods=['post'])
+    @action(detail=True, methods=["post"])
     def forceoff(self, request, pk=None):
         force_off(request, pk)
-        return Response({'status': 'force off command send'})
+        return Response({"status": "force off command send"})

-    @action(detail=True, methods=['post'])
+    @action(detail=True, methods=["post"])
     def suspend(self, request, pk=None):
         suspend(request, pk)
-        return Response({'status': 'suspend command send'})
+        return Response({"status": "suspend command send"})

-    @action(detail=True, methods=['post'])
+    @action(detail=True, methods=["post"])
     def resume(self, request, pk=None):
         resume(request, pk)
-        return Response({'status': 'resume command send'})
-
+        return Response({"status": "resume command send"})


 class MigrateViewSet(viewsets.ViewSet):
     """
     A viewset for migrating instances.
     """

     serializer_class = MigrateSerializer
     queryset = ""

     def create(self, request):
         serializer = MigrateSerializer(data=request.data)
         if serializer.is_valid():
-            instance = serializer.validated_data['instance']
-            target_host = serializer.validated_data['target_compute']
-            live = serializer.validated_data['live']
-            unsafe = serializer.validated_data['unsafe']
-            xml_del = serializer.validated_data['xml_del']
-            offline = serializer.validated_data['offline']
-            autoconverge = serializer.validated_data['autoconverge']
-            postcopy = serializer.validated_data['postcopy']
-            compress = serializer.validated_data['compress']
+            instance = serializer.validated_data["instance"]
+            target_host = serializer.validated_data["target_compute"]
+            live = serializer.validated_data["live"]
+            unsafe = serializer.validated_data["unsafe"]
+            xml_del = serializer.validated_data["xml_del"]
+            offline = serializer.validated_data["offline"]
+            autoconverge = serializer.validated_data["autoconverge"]
+            postcopy = serializer.validated_data["postcopy"]
+            compress = serializer.validated_data["compress"]

-            migrate_instance(target_host, instance, request.user, live, unsafe, xml_del, offline, autoconverge, compress, postcopy)
+            migrate_instance(
+                target_host,
+                instance,
+                request.user,
+                live,
+                unsafe,
+                xml_del,
+                offline,
+                autoconverge,
+                compress,
+                postcopy,
+            )

-            return Response({'status': 'instance migrate is started'})
+            return Response({"status": "instance migrate is started"})
         else:
-            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
+            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)


 class FlavorViewSet(viewsets.ModelViewSet):
     """
     API endpoint that allows flavor to be viewed.
     """
-    queryset = Flavor.objects.all().order_by('id')
+
+    queryset = Flavor.objects.all().order_by("id")
     serializer_class = FlavorSerializer
     permission_classes = [permissions.IsAuthenticated]

@@ -138,15 +175,15 @@ class CreateInstanceViewSet(viewsets.ViewSet):
     """
     A viewset for creating instances.
     """

     serializer_class = CreateInstanceSerializer
     queryset = ""

     def create(self, request, compute_pk=None, arch=None, machine=None):
-        serializer = CreateInstanceSerializer(data=request.data,
-                                              context = {'compute_pk': compute_pk,
-                                              'arch': arch,
-                                              'machine': machine
-                                              })
+        serializer = CreateInstanceSerializer(
+            data=request.data,
+            context={"compute_pk": compute_pk, "arch": arch, "machine": machine},
+        )
         if serializer.is_valid():
             volume_list = []
             default_bus = app_settings.INSTANCE_VOLUME_DEFAULT_BUS

@@ -166,14 +203,14 @@ class CreateInstanceViewSet(viewsets.ViewSet):
             )

             path = conn.create_volume(
-                serializer.validated_data['storage'],
-                serializer.validated_data['name'],
-                serializer.validated_data['hdd_size'],
-                default_disk_format,
-                serializer.validated_data['meta_prealloc'],
-                default_disk_owner_uid,
-                default_disk_owner_gid,
-            )
+                serializer.validated_data["storage"],
+                serializer.validated_data["name"],
+                serializer.validated_data["hdd_size"],
+                default_disk_format,
+                serializer.validated_data["meta_prealloc"],
+                default_disk_owner_uid,
+                default_disk_owner_gid,
+            )
             volume = {}
             firmware = {}
             volume["device"] = "disk"

@@ -189,8 +226,8 @@ class CreateInstanceViewSet(viewsets.ViewSet):

             volume_list.append(volume)

-            if "UEFI" in serializer.validated_data['firmware']:
-                firmware["loader"] = serializer.validated_data['firmware'].split(":")[1].strip()
+            if "UEFI" in serializer.validated_data["firmware"]:
+                firmware["loader"] = (serializer.validated_data["firmware"].split(":")[1].strip())
                 firmware["secure"] = "no"
                 firmware["readonly"] = "yes"
                 firmware["type"] = "pflash"

@@ -199,26 +236,26 @@ class CreateInstanceViewSet(viewsets.ViewSet):
                     firmware["secure"] = "yes"

             ret = conn.create_instance(
-                name=serializer.validated_data['name'],
-                memory=serializer.validated_data['memory'],
-                vcpu=serializer.validated_data['vcpu'],
-                vcpu_mode=serializer.validated_data['vcpu_mode'],
+                name=serializer.validated_data["name"],
+                memory=serializer.validated_data["memory"],
+                vcpu=serializer.validated_data["vcpu"],
+                vcpu_mode=serializer.validated_data["vcpu_mode"],
                 uuid=util.randomUUID(),
                 arch=arch,
                 machine=machine,
                 firmware=firmware,
                 volumes=volume_list,
-                networks=serializer.validated_data['networks'],
-                nwfilter=serializer.validated_data['nwfilter'],
-                graphics=serializer.validated_data['graphics'],
-                virtio=serializer.validated_data['virtio'],
-                listener_addr=serializer.validated_data['listener_addr'],
-                video=serializer.validated_data['video'],
-                console_pass=serializer.validated_data['console_pass'],
-                mac=serializer.validated_data['mac'],
-                qemu_ga=serializer.validated_data['qemu_ga'],
+                networks=serializer.validated_data["networks"],
+                nwfilter=serializer.validated_data["nwfilter"],
+                graphics=serializer.validated_data["graphics"],
+                virtio=serializer.validated_data["virtio"],
+                listener_addr=serializer.validated_data["listener_addr"],
+                video=serializer.validated_data["video"],
+                console_pass=serializer.validated_data["console_pass"],
+                mac=serializer.validated_data["mac"],
+                qemu_ga=serializer.validated_data["qemu_ga"],
             )
             msg = f"Instance {serializer.validated_data['name']} is created"
-            return Response({'status': msg })
+            return Response({"status": msg})
         else:
-            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
+            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@@ -3,17 +3,19 @@ from django.db.models.signals import post_migrate


 def migrate_can_clone_instances(sender, **kwargs):
-    '''
+    """
     Migrate can clone instances user attribute to permission
-    '''
+    """
     from django.contrib.auth.models import Permission, User

-    plan = kwargs.get('plan', [])
+    plan = kwargs.get("plan", [])
     for migration, rolled_back in plan:
-        if migration.app_label == 'instances' and migration.name == '0002_permissionset' and not rolled_back:
+        if (migration.app_label == "instances" and migration.name == "0002_permissionset" and not rolled_back):
             users = User.objects.all()
-            permission = Permission.objects.get(codename='clone_instances')
-            print('\033[1m* \033[92mMigrating can_clone_instaces user attribute to permission\033[0m')
+            permission = Permission.objects.get(codename="clone_instances")
+            print(
+                "\033[1m* \033[92mMigrating can_clone_instaces user attribute to permission\033[0m"
+            )
             for user in users:
                 if user.userattributes:
                     if user.userattributes.can_clone_instances:

@@ -22,25 +24,26 @@ def migrate_can_clone_instances(sender, **kwargs):


 def apply_passwordless_console(sender, **kwargs):
-    '''
+    """
     Apply new passwordless_console permission for all users
-    '''
+    """
     from django.contrib.auth import get_user_model
     from django.contrib.auth.models import Permission

     User = get_user_model()
-    plan = kwargs.get('plan', [])
+    plan = kwargs.get("plan", [])
     for migration, rolled_back in plan:
-        if migration.app_label == 'instances' and migration.name == '0009_auto_20200717_0524' and not rolled_back:
-            print('\033[1m* \033[92mApplying permission passwordless_console for all users\033[0m')
+        if (migration.app_label == "instances" and migration.name == "0009_auto_20200717_0524" and not rolled_back):
+            print("\033[1m* \033[92mApplying permission passwordless_console for all users\033[0m")
             users = User.objects.all()
-            permission = Permission.objects.get(codename='passwordless_console')
+            permission = Permission.objects.get(codename="passwordless_console")
             for user in users:
                 user.user_permissions.add(permission)


 class InstancesConfig(AppConfig):
-    name = 'instances'
-    verbose_name = 'Instances'
+    name = "instances"
+    verbose_name = "Instances"

     def ready(self):
         post_migrate.connect(migrate_can_clone_instances, sender=self)
@@ -12,7 +12,7 @@ from .models import CreateInstance, Flavor
 class FlavorForm(forms.ModelForm):
     class Meta:
         model = Flavor
-        fields = '__all__'
+        fields = "__all__"


 class ConsoleForm(forms.Form):

@@ -20,17 +20,25 @@ class ConsoleForm(forms.Form):
     listen_on = forms.ChoiceField()
     generate_password = forms.BooleanField(required=False)
     clear_password = forms.BooleanField(required=False)
-    password = forms.CharField(widget=forms.PasswordInput(render_value=True), required=False)
+    password = forms.CharField(
+        widget=forms.PasswordInput(render_value=True),
+        required=False
+    )
     clear_keymap = forms.BooleanField(required=False)
     keymap = forms.ChoiceField(required=False)

     def __init__(self, *args, **kwargs):
         super(ConsoleForm, self).__init__(*args, **kwargs)
-        type_choices = ((c, c) for c in AppSettings.objects.get(key="QEMU_CONSOLE_DEFAULT_TYPE").choices_as_list())
-        keymap_choices = [('auto', 'Auto')] + list((c, c) for c in QEMU_KEYMAPS)
-        self.fields['type'] = forms.ChoiceField(choices=type_choices)
-        self.fields['listen_on'] = forms.ChoiceField(choices=QEMU_CONSOLE_LISTENER_ADDRESSES)
-        self.fields['keymap'] = forms.ChoiceField(choices=keymap_choices)
+        type_choices = (
+            (c, c)
+            for c in AppSettings.objects.get(key="QEMU_CONSOLE_DEFAULT_TYPE").choices_as_list()
+        )
+        keymap_choices = [("auto", "Auto")] + list((c, c) for c in QEMU_KEYMAPS)
+        self.fields["type"] = forms.ChoiceField(choices=type_choices)
+        self.fields["listen_on"] = forms.ChoiceField(
+            choices=QEMU_CONSOLE_LISTENER_ADDRESSES
+        )
+        self.fields["keymap"] = forms.ChoiceField(choices=keymap_choices)


 class NewVMForm(forms.ModelForm):

@@ -57,12 +65,16 @@ class NewVMForm(forms.ModelForm):
     # listener_addr = forms.ChoiceField(required=True, widget=forms.RadioSelect, choices=QEMU_CONSOLE_LISTENER_ADDRESSES)
     class Meta:
         model = CreateInstance
-        fields = '__all__'
-        exclude = ['compute']
+        fields = "__all__"
+        exclude = ["compute"]

     def clean_name(self):
-        name = self.cleaned_data['name']
-        have_symbol = re.match('^[a-zA-Z0-9._-]+$', name)
+        name = self.cleaned_data["name"]
+        have_symbol = re.match("^[a-zA-Z0-9._-]+$", name)
         if not have_symbol:
-            raise forms.ValidationError(_('The name of the virtual machine must not contain any special characters'))
+            raise forms.ValidationError(
+                _(
+                    "The name of the virtual machine must not contain any special characters"
+                )
+            )
         return name
@@ -10,10 +10,10 @@ from vrtManager.instance import wvmInstance


 class Flavor(models.Model):
-    label = models.CharField(_('label'), max_length=12, unique=True)
-    memory = models.IntegerField(_('memory'))
-    vcpu = models.IntegerField(_('vcpu'))
-    disk = models.IntegerField(_('disk'))
+    label = models.CharField(_("label"), max_length=12, unique=True)
+    memory = models.IntegerField(_("memory"))
+    vcpu = models.IntegerField(_("vcpu"))
+    disk = models.IntegerField(_("disk"))

     def __str__(self):
         return self.label

@@ -21,21 +21,21 @@ class Flavor(models.Model):

 class InstanceManager(models.Manager):
     def get_queryset(self):
-        return super().get_queryset().select_related('compute')
+        return super().get_queryset().select_related("compute")


 class Instance(models.Model):
     compute = models.ForeignKey(Compute, on_delete=models.CASCADE)
-    name = models.CharField(_('name'), max_length=120, db_index=True)
-    uuid = models.CharField(_('uuid'), max_length=36, db_index=True)
-    is_template = models.BooleanField(_('is template'), default=False)
-    created = models.DateTimeField(_('created'), auto_now_add=True)
-    drbd = models.CharField(_('drbd'), max_length=24, default="None")
+    name = models.CharField(_("name"), max_length=120, db_index=True)
+    uuid = models.CharField(_("uuid"), max_length=36, db_index=True)
+    is_template = models.BooleanField(_("is template"), default=False)
+    created = models.DateTimeField(_("created"), auto_now_add=True)
+    drbd = models.CharField(_("drbd"), max_length=24, default="None")

     objects = InstanceManager()

     def __str__(self):
-        return f'{self.compute}/{self.name}'
+        return f"{self.compute}/{self.name}"

     @cached_property
     def proxy(self):

@@ -173,7 +173,7 @@ class Instance(models.Model):

     @cached_property
     def snapshots(self):
-        return sorted(self.proxy.get_snapshot(), reverse=True, key=lambda k: k['date'])
+        return sorted(self.proxy.get_snapshot(), reverse=True, key=lambda k: k["date"])

     @cached_property
     def inst_xml(self):

@@ -209,35 +209,59 @@ class Instance(models.Model):


 class MigrateInstance(models.Model):
-    instance = models.ForeignKey(Instance, related_name='source_host', on_delete=models.DO_NOTHING)
-    target_compute = models.ForeignKey(Compute, related_name='target_host', on_delete=models.DO_NOTHING)
+    instance = models.ForeignKey(
+        Instance,
+        related_name="source_host",
+        on_delete=models.DO_NOTHING
+    )
+    target_compute = models.ForeignKey(
+        Compute,
+        related_name="target_host",
+        on_delete=models.DO_NOTHING
+    )

-    live = models.BooleanField(_('Live'))
-    xml_del = models.BooleanField(_('Undefine XML'), default=True)
-    offline = models.BooleanField(_('Offline'))
-    autoconverge = models.BooleanField(_('Auto Converge'), default=True)
-    compress = models.BooleanField(_('Compress'), default=False)
-    postcopy = models.BooleanField(_('Post Copy'), default=False)
-    unsafe = models.BooleanField(_('Unsafe'), default=False)
+    live = models.BooleanField(_("Live"))
+    xml_del = models.BooleanField(_("Undefine XML"), default=True)
+    offline = models.BooleanField(_("Offline"))
+    autoconverge = models.BooleanField(_("Auto Converge"), default=True)
+    compress = models.BooleanField(_("Compress"), default=False)
+    postcopy = models.BooleanField(_("Post Copy"), default=False)
+    unsafe = models.BooleanField(_("Unsafe"), default=False)

     class Meta:
         managed = False


 class CreateInstance(models.Model):
-    compute = models.ForeignKey(Compute, related_name='host', on_delete=models.DO_NOTHING)
-    name = models.CharField(max_length=64, error_messages={'required': _('No Virtual Machine name has been entered')})
+    compute = models.ForeignKey(
+        Compute,
+        related_name="host",
+        on_delete=models.DO_NOTHING
+    )
+    name = models.CharField(
+        max_length=64,
+        error_messages={"required": _("No Virtual Machine name has been entered")},
+    )
     firmware = models.CharField(max_length=64)
-    vcpu = models.IntegerField(error_messages={'required': _('No VCPU has been entered')})
+    vcpu = models.IntegerField(
+        error_messages={"required": _("No VCPU has been entered")}
+    )
     vcpu_mode = models.CharField(max_length=20, blank=True)
     disk = models.IntegerField(blank=True)
-    memory = models.IntegerField(error_messages={'required': _('No RAM size has been entered')})
-    networks = models.CharField(max_length=256, error_messages={'required': _('No Network pool has been choosen')})
+    memory = models.IntegerField(
+        error_messages={"required": _("No RAM size has been entered")}
+    )
+    networks = models.CharField(
+        max_length=256,
+        error_messages={"required": _("No Network pool has been choosen")},
+    )
     nwfilter = models.CharField(max_length=256, blank=True)
     storage = models.CharField(max_length=256, blank=True)
     template = models.CharField(max_length=256, blank=True)
     images = models.CharField(max_length=256, blank=True)
-    cache_mode = models.CharField(max_length=16, error_messages={'required': _('Please select HDD cache mode')})
+    cache_mode = models.CharField(
+        max_length=16, error_messages={"required": _("Please select HDD cache mode")}
+    )
     hdd_size = models.IntegerField(blank=True)
     meta_prealloc = models.BooleanField(default=False, blank=True)
     virtio = models.BooleanField(default=True)

@@ -246,9 +270,15 @@ class CreateInstance(models.Model):
     console_pass = models.CharField(max_length=64, blank=True)
     add_cdrom = models.CharField(max_length=16)
     add_input = models.CharField(max_length=16)
-    graphics = models.CharField(max_length=16, error_messages={'required': _('Please select a graphics type')})
-    video = models.CharField(max_length=16, error_messages={'required': _('Please select a video driver')})
-    listener_addr = models.CharField(max_length=20, choices=QEMU_CONSOLE_LISTENER_ADDRESSES)
+    graphics = models.CharField(
+        max_length=16, error_messages={"required": _("Please select a graphics type")}
+    )
+    video = models.CharField(
+        max_length=16, error_messages={"required": _("Please select a video driver")}
+    )
+    listener_addr = models.CharField(
+        max_length=20, choices=QEMU_CONSOLE_LISTENER_ADDRESSES
+    )

     class Meta:
         managed = False

@@ -258,13 +288,14 @@ class PermissionSet(models.Model):
     """
     Dummy model for holding set of permissions we need to be automatically added by Django
     """

     class Meta:
         default_permissions = ()
         permissions = [
-            ('clone_instances', 'Can clone instances'),
-            ('passwordless_console', _('Can access console without password')),
-            ('view_instances', 'Can view instances'),
-            ('snapshot_instances', 'Can snapshot instances'),
+            ("clone_instances", "Can clone instances"),
+            ("passwordless_console", _("Can access console without password")),
+            ("view_instances", "Can view instances"),
+            ("snapshot_instances", "Can snapshot instances"),
         ]

         managed = False
(File diff suppressed because it is too large)
@@ -2,69 +2,83 @@ from django.urls import path

 from . import views

-app_name = 'instances'
+app_name = "instances"

 urlpatterns = [
-    path('', views.index, name='index'),
-    path('flavor/create/', views.flavor_create, name='flavor_create'),
-    path('flavor/<int:pk>/update/', views.flavor_update, name='flavor_update'),
-    path('flavor/<int:pk>/delete/', views.flavor_delete, name='flavor_delete'),
-    path('<int:pk>/', views.instance, name='instance'),
-    path('<int:pk>/poweron/', views.poweron, name='poweron'),
-    path('<int:pk>/powercycle/', views.powercycle, name='powercycle'),
-    path('<int:pk>/poweroff/', views.poweroff, name='poweroff'),
-    path('<int:pk>/suspend/', views.suspend, name='suspend'),
-    path('<int:pk>/resume/', views.resume, name='resume'),
-    path('<int:pk>/force_off/', views.force_off, name='force_off'),
-    path('<int:pk>/destroy/', views.destroy, name='destroy'),
-    path('<int:pk>/migrate/', views.migrate, name='migrate'),
-    path('<int:pk>/status/', views.status, name='status'),
-    path('<int:pk>/stats/', views.stats, name='stats'),
-    path('<int:pk>/osinfo/', views.osinfo, name='osinfo'),
-    path('<int:pk>/rootpasswd/', views.set_root_pass, name='rootpasswd'),
-    path('<int:pk>/add_public_key/', views.add_public_key, name='add_public_key'),
-    path('<int:pk>/resizevm_cpu/', views.resizevm_cpu, name='resizevm_cpu'),
-    path('<int:pk>/resize_memory/', views.resize_memory, name='resize_memory'),
-    path('<int:pk>/resize_disk/', views.resize_disk, name='resize_disk'),
-    path('<int:pk>/add_new_vol/', views.add_new_vol, name='add_new_vol'),
-    path('<int:pk>/delete_vol/', views.delete_vol, name='delete_vol'),
-    path('<int:pk>/add_owner/', views.add_owner, name='add_owner'),
-    path('<int:pk>/add_existing_vol/', views.add_existing_vol, name='add_existing_vol'),
-    path('<int:pk>/edit_volume/', views.edit_volume, name='edit_volume'),
-    path('<int:pk>/detach_vol/', views.detach_vol, name='detach_vol'),
-    path('<int:pk>/add_cdrom/', views.add_cdrom, name='add_cdrom'),
-    path('<int:pk>/detach_cdrom/<str:dev>/', views.detach_cdrom, name='detach_cdrom'),
-    path('<int:pk>/unmount_iso/', views.unmount_iso, name='unmount_iso'),
-    path('<int:pk>/mount_iso/', views.mount_iso, name='mount_iso'),
-    path('<int:pk>/snapshot/', views.snapshot, name='snapshot'),
-    path('<int:pk>/delete_snapshot/', views.delete_snapshot, name='delete_snapshot'),
-    path('<int:pk>/revert_snapshot/', views.revert_snapshot, name='revert_snapshot'),
-    path('<int:pk>/set_vcpu/', views.set_vcpu, name='set_vcpu'),
-    path('<int:pk>/set_vcpu_hotplug/', views.set_vcpu_hotplug, name='set_vcpu_hotplug'),
-    path('<int:pk>/set_autostart/', views.set_autostart, name='set_autostart'),
-    path('<int:pk>/unset_autostart/', views.unset_autostart, name='unset_autostart'),
-    path('<int:pk>/set_bootmenu/', views.set_bootmenu, name='set_bootmenu'),
-    path('<int:pk>/unset_bootmenu/', views.unset_bootmenu, name='unset_bootmenu'),
-    path('<int:pk>/set_bootorder/', views.set_bootorder, name='set_bootorder'),
-    path('<int:pk>/change_xml/', views.change_xml, name='change_xml'),
-    path('<int:pk>/set_guest_agent/', views.set_guest_agent, name='set_guest_agent'),
-    path('<int:pk>/set_video_model/', views.set_video_model, name='set_video_model'),
-    path('<int:pk>/change_network/', views.change_network, name='change_network'),
-    path('<int:pk>/add_network/', views.add_network, name='add_network'),
-    path('<int:pk>/delete_network/', views.delete_network, name='delete_network'),
-    path('<int:pk>/set_link_state/', views.set_link_state, name='set_link_state'),
-    path('<int:pk>/set_qos/', views.set_qos, name='set_qos'),
-    path('<int:pk>/unset_qos/', views.unset_qos, name='unset_qos'),
-    path('<int:pk>/del_owner/', views.del_owner, name='del_owner'), # no links to this one???
-    path('<int:pk>/clone/', views.clone, name='clone'),
-    path('<int:pk>/update_console/', views.update_console, name='update_console'),
-    path('<int:pk>/change_options/', views.change_options, name='change_options'),
-    path('<int:pk>/getvvfile/', views.getvvfile, name='getvvfile'), # no links to this one???
-    path('create/<int:compute_id>/', views.create_instance_select_type, name='create_instance_select_type'),
-    path('create/<int:compute_id>/<str:arch>/<str:machine>/', views.create_instance, name='create_instance'),
-    path('guess_mac_address/<vname>/', views.guess_mac_address, name='guess_mac_address'),
-    path('guess_clone_name/', views.guess_clone_name, name='guess_clone_name'),
-    path('random_mac_address/', views.random_mac_address, name='random_mac_address'),
-    path('check_instance/<vname>/', views.check_instance, name='check_instance'),
-    path('<int:pk>/sshkeys/', views.sshkeys, name='sshkeys'),
+    path("", views.index, name="index"),
+    path("flavor/create/", views.flavor_create, name="flavor_create"),
+    path("flavor/<int:pk>/update/", views.flavor_update, name="flavor_update"),
+    path("flavor/<int:pk>/delete/", views.flavor_delete, name="flavor_delete"),
+    path("<int:pk>/", views.instance, name="instance"),
+    path("<int:pk>/poweron/", views.poweron, name="poweron"),
+    path("<int:pk>/powercycle/", views.powercycle, name="powercycle"),
+    path("<int:pk>/poweroff/", views.poweroff, name="poweroff"),
+    path("<int:pk>/suspend/", views.suspend, name="suspend"),
+    path("<int:pk>/resume/", views.resume, name="resume"),
+    path("<int:pk>/force_off/", views.force_off, name="force_off"),
+    path("<int:pk>/destroy/", views.destroy, name="destroy"),
+    path("<int:pk>/migrate/", views.migrate, name="migrate"),
+    path("<int:pk>/status/", views.status, name="status"),
+    path("<int:pk>/stats/", views.stats, name="stats"),
+    path("<int:pk>/osinfo/", views.osinfo, name="osinfo"),
+    path("<int:pk>/rootpasswd/", views.set_root_pass, name="rootpasswd"),
+    path("<int:pk>/add_public_key/", views.add_public_key, name="add_public_key"),
+    path("<int:pk>/resizevm_cpu/", views.resizevm_cpu, name="resizevm_cpu"),
+    path("<int:pk>/resize_memory/", views.resize_memory, name="resize_memory"),
+    path("<int:pk>/resize_disk/", views.resize_disk, name="resize_disk"),
+    path("<int:pk>/add_new_vol/", views.add_new_vol, name="add_new_vol"),
+    path("<int:pk>/delete_vol/", views.delete_vol, name="delete_vol"),
+    path("<int:pk>/add_owner/", views.add_owner, name="add_owner"),
+    path("<int:pk>/add_existing_vol/", views.add_existing_vol, name="add_existing_vol"),
+    path("<int:pk>/edit_volume/", views.edit_volume, name="edit_volume"),
+    path("<int:pk>/detach_vol/", views.detach_vol, name="detach_vol"),
+    path("<int:pk>/add_cdrom/", views.add_cdrom, name="add_cdrom"),
+    path("<int:pk>/detach_cdrom/<str:dev>/", views.detach_cdrom, name="detach_cdrom"),
+    path("<int:pk>/unmount_iso/", views.unmount_iso, name="unmount_iso"),
+    path("<int:pk>/mount_iso/", views.mount_iso, name="mount_iso"),
+    path("<int:pk>/snapshot/", views.snapshot, name="snapshot"),
+    path("<int:pk>/delete_snapshot/", views.delete_snapshot, name="delete_snapshot"),
+    path("<int:pk>/revert_snapshot/", views.revert_snapshot, name="revert_snapshot"),
+    path("<int:pk>/set_vcpu/", views.set_vcpu, name="set_vcpu"),
+    path("<int:pk>/set_vcpu_hotplug/", views.set_vcpu_hotplug, name="set_vcpu_hotplug"),
+    path("<int:pk>/set_autostart/", views.set_autostart, name="set_autostart"),
+    path("<int:pk>/unset_autostart/", views.unset_autostart, name="unset_autostart"),
+    path("<int:pk>/set_bootmenu/", views.set_bootmenu, name="set_bootmenu"),
+    path("<int:pk>/unset_bootmenu/", views.unset_bootmenu, name="unset_bootmenu"),
+    path("<int:pk>/set_bootorder/", views.set_bootorder, name="set_bootorder"),
+    path("<int:pk>/change_xml/", views.change_xml, name="change_xml"),
+    path("<int:pk>/set_guest_agent/", views.set_guest_agent, name="set_guest_agent"),
+    path("<int:pk>/set_video_model/", views.set_video_model, name="set_video_model"),
+    path("<int:pk>/change_network/", views.change_network, name="change_network"),
+    path("<int:pk>/add_network/", views.add_network, name="add_network"),
+    path("<int:pk>/delete_network/", views.delete_network, name="delete_network"),
+    path("<int:pk>/set_link_state/", views.set_link_state, name="set_link_state"),
+    path("<int:pk>/set_qos/", views.set_qos, name="set_qos"),
+    path("<int:pk>/unset_qos/", views.unset_qos, name="unset_qos"),
+    path(
+        "<int:pk>/del_owner/", views.del_owner, name="del_owner"
+    ),  # no links to this one???
+    path("<int:pk>/clone/", views.clone, name="clone"),
+    path("<int:pk>/update_console/", views.update_console, name="update_console"),
+    path("<int:pk>/change_options/", views.change_options, name="change_options"),
+    path(
+        "<int:pk>/getvvfile/", views.getvvfile, name="getvvfile"
+    ),  # no links to this one???
+    path(
+        "create/<int:compute_id>/",
+        views.create_instance_select_type,
+        name="create_instance_select_type",
+    ),
+    path(
+        "create/<int:compute_id>/<str:arch>/<str:machine>/",
+        views.create_instance,
+        name="create_instance",
+    ),
+    path(
+        "guess_mac_address/<vname>/", views.guess_mac_address, name="guess_mac_address"
+    ),
+    path("guess_clone_name/", views.guess_clone_name, name="guess_clone_name"),
+    path("random_mac_address/", views.random_mac_address, name="random_mac_address"),
+    path("check_instance/<vname>/", views.check_instance, name="check_instance"),
+    path("<int:pk>/sshkeys/", views.sshkeys, name="sshkeys"),
 ]
@@ -38,8 +38,8 @@ def check_user_quota(user, instance, cpu, memory, disk_size):
     instance += user_instances.count()
     for usr_inst in user_instances:
         if connection_manager.host_is_up(
-                usr_inst.instance.compute.type,
-                usr_inst.instance.compute.hostname,
+            usr_inst.instance.compute.type,
+            usr_inst.instance.compute.hostname,
         ):
             conn = wvmInstance(
                 usr_inst.instance.compute.hostname,

@@ -51,8 +51,8 @@ def check_user_quota(user, instance, cpu, memory, disk_size):
             cpu += int(conn.get_vcpu())
             memory += int(conn.get_memory())
             for disk in conn.get_disk_devices():
-                if disk['size']:
-                    disk_size += int(disk['size']) >> 30
+                if disk["size"]:
+                    disk_size += int(disk["size"]) >> 30

     if ua.max_instances > 0 and instance > ua.max_instances:
         msg = "instance"

@@ -86,17 +86,17 @@ def get_new_disk_dev(media, disks, bus):
         dev_base = "sd"

     if disks:
-        existing_disk_devs = [disk['dev'] for disk in disks]
+        existing_disk_devs = [disk["dev"] for disk in disks]

     # cd-rom bus could be virtio/sata, because of that we should check it also
     if media:
-        existing_media_devs = [m['dev'] for m in media]
+        existing_media_devs = [m["dev"] for m in media]

     for al in string.ascii_lowercase:
         dev = dev_base + al
         if dev not in existing_disk_devs and dev not in existing_media_devs:
             return dev
-    raise Exception(_('None available device name'))
+    raise Exception(_("None available device name"))


 def get_network_tuple(network_source_str):

@@ -104,7 +104,7 @@ def get_network_tuple(network_source_str):
     if len(network_source_pack) > 1:
         return network_source_pack[1], network_source_pack[0]
     else:
-        return network_source_pack[0], 'net'
+        return network_source_pack[0], "net"


 def migrate_instance(

@@ -174,44 +174,44 @@ def refr(compute):
     Instance.objects.filter(compute=compute).exclude(name__in=domain_names).delete()
     Instance.objects.filter(compute=compute).exclude(uuid__in=domain_uuids).delete()
     # Create instances that're not in DB
-    names = Instance.objects.filter(compute=compute).values_list('name', flat=True)
+    names = Instance.objects.filter(compute=compute).values_list("name", flat=True)
     for domain in domains:
         if domain.name() not in names:
             Instance(compute=compute, name=domain.name(), uuid=domain.UUIDString()).save()


 def get_dhcp_mac_address(vname):
-    dhcp_file = str(settings.BASE_DIR) + '/dhcpd.conf'
-    mac = ''
+    dhcp_file = str(settings.BASE_DIR) + "/dhcpd.conf"
+    mac = ""
     if os.path.isfile(dhcp_file):
-        with open(dhcp_file, 'r') as f:
+        with open(dhcp_file, "r") as f:
             name_found = False
             for line in f:
                 if "host %s." % vname in line:
                     name_found = True
                 if name_found and "hardware ethernet" in line:
-                    mac = line.split(' ')[-1].strip().strip(';')
+                    mac = line.split(" ")[-1].strip().strip(";")
                     break
     return mac


 def get_random_mac_address():
-    mac = '52:54:00:%02x:%02x:%02x' % (
-        random.randint(0x00, 0xff),
-        random.randint(0x00, 0xff),
-        random.randint(0x00, 0xff),
+    mac = "52:54:00:%02x:%02x:%02x" % (
+        random.randint(0x00, 0xFF),
+        random.randint(0x00, 0xFF),
+        random.randint(0x00, 0xFF),
     )
     return mac


-def get_clone_disk_name(disk, prefix, clone_name=''):
-    if not disk['image']:
+def get_clone_disk_name(disk, prefix, clone_name=""):
+    if not disk["image"]:
         return None
-    if disk['image'].startswith(prefix) and clone_name:
-        suffix = disk['image'][len(prefix):]
+    if disk["image"].startswith(prefix) and clone_name:
+        suffix = disk["image"][len(prefix) :]
         image = f"{clone_name}{suffix}"
-    elif "." in disk['image'] and len(disk['image'].rsplit(".", 1)[1]) <= 7:
-        name, suffix = disk['image'].rsplit(".", 1)
+    elif "." in disk["image"] and len(disk["image"].rsplit(".", 1)[1]) <= 7:
+        name, suffix = disk["image"].rsplit(".", 1)
         image = f"{name}-clone.{suffix}"
     else:
         image = f"{disk['image']}-clone"
@ -21,7 +21,11 @@ from django.shortcuts import get_object_or_404, redirect, render
|
|||
from django.urls import reverse
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from instances.models import Instance
|
||||
from libvirt import VIR_DOMAIN_UNDEFINE_KEEP_NVRAM, VIR_DOMAIN_UNDEFINE_NVRAM, libvirtError
|
||||
from libvirt import (
|
||||
VIR_DOMAIN_UNDEFINE_KEEP_NVRAM,
|
||||
VIR_DOMAIN_UNDEFINE_NVRAM,
|
||||
libvirtError,
|
||||
)
|
||||
from logs.views import addlogmsg
|
||||
from vrtManager import util
|
||||
from vrtManager.create import wvmCreate
|
||||
|
@ -50,9 +54,13 @@ def index(request):
|
|||
if request.user.is_superuser or request.user.has_perm("instances.view_instances"):
|
||||
instances = Instance.objects.all().prefetch_related("userinstance_set")
|
||||
else:
|
||||
instances = Instance.objects.filter(userinstance__user=request.user).prefetch_related("userinstance_set")
|
||||
instances = Instance.objects.filter(
|
||||
userinstance__user=request.user
|
||||
).prefetch_related("userinstance_set")
|
||||
|
||||
return render(request, "allinstances.html", {"computes": computes, "instances": instances})
|
||||
return render(
|
||||
request, "allinstances.html", {"computes": computes, "instances": instances}
|
||||
)
|
||||
|
||||
|
||||
def instance(request, pk):
|
||||
|
@ -63,7 +71,9 @@ def instance(request, pk):
|
|||
users = User.objects.all().order_by("username")
|
||||
publickeys = UserSSHKey.objects.filter(user_id=request.user.id)
|
||||
keymaps = settings.QEMU_KEYMAPS
|
||||
console_types = AppSettings.objects.get(key="QEMU_CONSOLE_DEFAULT_TYPE").choices_as_list()
|
||||
console_types = AppSettings.objects.get(
|
||||
key="QEMU_CONSOLE_DEFAULT_TYPE"
|
||||
).choices_as_list()
|
||||
console_form = ConsoleForm(
|
||||
initial={
|
||||
"type": instance.console_type,
|
||||
|
@ -74,10 +84,14 @@ def instance(request, pk):
|
|||
)
|
||||
console_listener_addresses = settings.QEMU_CONSOLE_LISTENER_ADDRESSES
|
||||
bottom_bar = app_settings.VIEW_INSTANCE_DETAIL_BOTTOM_BAR
|
||||
allow_admin_or_not_template = request.user.is_superuser or request.user.is_staff or not instance.is_template
|
||||
allow_admin_or_not_template = (
|
||||
request.user.is_superuser or request.user.is_staff or not instance.is_template
|
||||
)
|
||||
try:
|
||||
userinstance = UserInstance.objects.get(
|
||||
instance__compute_id=compute.id, instance__name=instance.name, user__id=request.user.id
|
||||
instance__compute_id=compute.id,
|
||||
instance__name=instance.name,
|
||||
user__id=request.user.id,
|
||||
)
|
||||
except UserInstance.DoesNotExist:
|
||||
userinstance = None
|
||||
|
@ -116,7 +130,9 @@ def instance(request, pk):
|
|||
|
||||
# userinstances = UserInstance.objects.filter(instance=instance).order_by('user__username')
|
||||
userinstances = instance.userinstance_set.order_by("user__username")
|
||||
allow_admin_or_not_template = request.user.is_superuser or request.user.is_staff or not instance.is_template
|
||||
allow_admin_or_not_template = (
|
||||
request.user.is_superuser or request.user.is_staff or not instance.is_template
|
||||
)
|
||||
|
||||
# Host resources
|
||||
vcpu_host = len(instance.vcpu_range)
|
||||
|
@ -128,7 +144,7 @@ def instance(request, pk):
|
|||
storages_host = sorted(instance.proxy.get_storages(True))
|
||||
net_models_host = instance.proxy.get_network_models()
|
||||
|
||||
if app_settings.VM_DRBD_STATUS == 'True':
|
||||
if app_settings.VM_DRBD_STATUS == "True":
|
||||
instance.drbd = drbd_status(request, pk)
|
||||
instance.save()
|
||||
|
||||
|
@ -139,17 +155,25 @@ def status(request, pk):
|
|||
instance = get_instance(request.user, pk)
|
||||
return JsonResponse({"status": instance.proxy.get_status()})
|
||||
|
||||
|
||||
def drbd_status(request, pk):
|
||||
instance = get_instance(request.user, pk)
|
||||
result = "None DRBD"
|
||||
|
||||
if instance.compute.type == 2:
|
||||
conn = instance.compute.login + "@" + instance.compute.hostname
|
||||
remoteDrbdStatus = subprocess.run(["ssh", conn, "sudo", "drbdadm", "status", "&&", "exit"], stdout=subprocess.PIPE, text=True)
|
||||
remoteDrbdStatus = subprocess.run(
|
||||
["ssh", conn, "sudo", "drbdadm", "status", "&&", "exit"],
|
||||
stdout=subprocess.PIPE,
|
||||
text=True,
|
||||
)
|
||||
|
||||
if remoteDrbdStatus.stdout:
|
||||
try:
|
||||
instanceFindDrbd = re.compile(instance.name + '[_]*[A-Z]* role:(.+?)\n disk:(.+?)\n', re.IGNORECASE)
|
||||
instanceFindDrbd = re.compile(
|
||||
instance.name + "[_]*[A-Z]* role:(.+?)\n disk:(.+?)\n",
|
||||
re.IGNORECASE,
|
||||
)
|
||||
instanceDrbd = instanceFindDrbd.findall(remoteDrbdStatus.stdout)
|
||||
|
||||
primaryCount = 0
|
||||
|
@ -179,6 +203,7 @@ def drbd_status(request, pk):
|
|||
|
||||
return result
|
||||
|
||||
|
||||
def stats(request, pk):
|
||||
instance = get_instance(request.user, pk)
|
||||
json_blk = []
|
||||
|
@ -192,10 +217,20 @@ def stats(request, pk):
|
|||
|
||||
current_time = time.strftime("%H:%M:%S")
|
||||
for blk in blk_usage:
|
||||
json_blk.append({"dev": blk["dev"], "data": [int(blk["rd"]) / 1048576, int(blk["wr"]) / 1048576]})
|
||||
json_blk.append(
|
||||
{
|
||||
"dev": blk["dev"],
|
||||
"data": [int(blk["rd"]) / 1048576, int(blk["wr"]) / 1048576],
|
||||
}
|
||||
)
|
||||
|
||||
for net in net_usage:
|
||||
json_net.append({"dev": net["dev"], "data": [int(net["rx"]) / 1048576, int(net["tx"]) / 1048576]})
|
||||
json_net.append(
|
||||
{
|
||||
"dev": net["dev"],
|
||||
"data": [int(net["rx"]) / 1048576, int(net["tx"]) / 1048576],
|
||||
}
|
||||
)
|
||||
|
||||
return JsonResponse(
|
||||
{
|
||||
|
@ -207,12 +242,14 @@ def stats(request, pk):
|
|||
}
|
||||
)
|
||||
|
||||
|
||||
def osinfo(request, pk):
|
||||
instance = get_instance(request.user, pk)
|
||||
results = instance.proxy.osinfo()
|
||||
|
||||
|
||||
return JsonResponse(results)
|
||||
|
||||
|
||||
def guess_mac_address(request, vname):
|
||||
data = {"vname": vname}
|
||||
mac = utils.get_dhcp_mac_address(vname)
|
||||
|
@ -232,7 +269,9 @@ def guess_clone_name(request):
|
|||
dhcp_file = "/srv/webvirtcloud/dhcpd.conf"
|
||||
prefix = app_settings.CLONE_INSTANCE_DEFAULT_PREFIX
|
||||
if os.path.isfile(dhcp_file):
|
||||
instance_names = [i.name for i in Instance.objects.filter(name__startswith=prefix)]
|
||||
instance_names = [
|
||||
i.name for i in Instance.objects.filter(name__startswith=prefix)
|
||||
]
|
||||
with open(dhcp_file, "r") as f:
|
||||
for line in f:
|
||||
line = line.strip()
|
||||
|
@ -281,7 +320,11 @@ def get_instance(user, pk):
|
|||
instance = get_object_or_404(Instance, pk=pk)
|
||||
user_instances = user.userinstance_set.all().values_list("instance", flat=True)
|
||||
|
||||
if user.is_superuser or user.has_perm("instances.view_instances") or instance.id in user_instances:
|
||||
if (
|
||||
user.is_superuser
|
||||
or user.has_perm("instances.view_instances")
|
||||
or instance.id in user_instances
|
||||
):
|
||||
return instance
|
||||
else:
|
||||
raise Http404()
|
||||
|
@ -293,7 +336,9 @@ def poweron(request, pk):
|
|||
messages.warning(request, _("Templates cannot be started."))
|
||||
else:
|
||||
instance.proxy.start()
|
||||
addlogmsg(request.user.username, instance.compute.name, instance.name, _("Power On"))
|
||||
addlogmsg(
|
||||
request.user.username, instance.compute.name, instance.name, _("Power On")
|
||||
)
|
||||
|
||||
return redirect(request.META.get("HTTP_REFERER"))
|
||||
|
||||
|
@ -302,14 +347,18 @@ def powercycle(request, pk):
|
|||
instance = get_instance(request.user, pk)
|
||||
instance.proxy.force_shutdown()
|
||||
instance.proxy.start()
|
||||
addlogmsg(request.user.username, instance.compute.name, instance.name, _("Power Cycle"))
|
||||
addlogmsg(
|
||||
request.user.username, instance.compute.name, instance.name, _("Power Cycle")
|
||||
)
|
||||
return redirect(request.META.get("HTTP_REFERER"))
|
||||
|
||||
|
||||
def poweroff(request, pk):
|
||||
instance = get_instance(request.user, pk)
|
||||
instance.proxy.shutdown()
|
||||
addlogmsg(request.user.username, instance.compute.name, instance.name, _("Power Off"))
|
||||
addlogmsg(
|
||||
request.user.username, instance.compute.name, instance.name, _("Power Off")
|
||||
)
|
||||
|
||||
return redirect(request.META.get("HTTP_REFERER"))
|
||||
|
||||
|
@ -333,7 +382,9 @@ def resume(request, pk):
|
|||
def force_off(request, pk):
|
||||
instance = get_instance(request.user, pk)
|
||||
instance.proxy.force_shutdown()
|
||||
addlogmsg(request.user.username, instance.compute.name, instance.name, _("Force Off"))
|
||||
addlogmsg(
|
||||
request.user.username, instance.compute.name, instance.name, _("Force Off")
|
||||
)
|
||||
return redirect(request.META.get("HTTP_REFERER"))
|
||||
|
||||
|
||||
|
@ -349,7 +400,9 @@ def destroy(request, pk):
|
|||
instance.proxy.force_shutdown()
|
||||
|
||||
if request.POST.get("delete_disk", ""):
|
||||
snapshots = sorted(instance.proxy.get_snapshot(), reverse=True, key=lambda k: k["date"])
|
||||
snapshots = sorted(
|
||||
instance.proxy.get_snapshot(), reverse=True, key=lambda k: k["date"]
|
||||
)
|
||||
for snapshot in snapshots:
|
||||
instance.proxy.snapshot_delete(snapshot["name"])
|
||||
instance.proxy.delete_all_disks()
|
||||
|
@ -360,7 +413,9 @@ def destroy(request, pk):
|
|||
instance.proxy.delete(VIR_DOMAIN_UNDEFINE_KEEP_NVRAM)
|
||||
|
||||
instance.delete()
|
||||
addlogmsg(request.user.username, instance.compute.name, instance.name, _("Destroy"))
|
||||
addlogmsg(
|
||||
request.user.username, instance.compute.name, instance.name, _("Destroy")
|
||||
)
|
||||
return redirect(reverse("instances:index"))
|
||||
|
||||
return render(
|
||||
|
@ -390,12 +445,26 @@ def migrate(request, pk):
|
|||
target_host = Compute.objects.get(id=compute_id)
|
||||
|
||||
try:
|
||||
utils.migrate_instance(target_host, instance, request.user, live, unsafe, xml_del, offline, autoconverge, compress, postcopy)
|
||||
utils.migrate_instance(
|
||||
target_host,
|
||||
instance,
|
||||
request.user,
|
||||
live,
|
||||
unsafe,
|
||||
xml_del,
|
||||
offline,
|
||||
autoconverge,
|
||||
compress,
|
||||
postcopy,
|
||||
)
|
||||
except libvirtError as err:
|
||||
messages.error(request, err)
|
||||
|
||||
migration_method = "live" if live is True else "offline"
|
||||
msg = _("Instance is migrated(%(method)s) to %(hostname)s") % {"hostname": target_host.hostname, "method": migration_method}
|
||||
msg = _("Instance is migrated(%(method)s) to %(hostname)s") % {
|
||||
"hostname": target_host.hostname,
|
||||
"method": migration_method,
|
||||
}
|
||||
addlogmsg(request.user.username, current_host, instance.name, msg)
|
||||
|
||||
return redirect(request.META.get("HTTP_REFERER"))
|
||||
|
@ -419,7 +488,9 @@ def set_root_pass(request, pk):
|
|||
s.close()
|
||||
if result["return"] == "success":
|
||||
msg = _("Reset root password")
|
||||
addlogmsg(request.user.username, instance.compute.name, instance.name, msg)
|
||||
addlogmsg(
|
||||
request.user.username, instance.compute.name, instance.name, msg
|
||||
)
|
||||
messages.success(request, msg)
|
||||
else:
|
||||
messages.error(request, result["message"])
|
||||
|
@ -434,7 +505,11 @@ def add_public_key(request, pk):
|
|||
if request.method == "POST":
|
||||
sshkeyid = request.POST.get("sshkeyid", "")
|
||||
publickey = UserSSHKey.objects.get(id=sshkeyid)
|
||||
data = {"action": "publickey", "key": publickey.keypublic, "vname": instance.name}
|
||||
data = {
|
||||
"action": "publickey",
|
||||
"key": publickey.keypublic,
|
||||
"vname": instance.name,
|
||||
}
|
||||
|
||||
if instance.proxy.get_status() == 5:
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
|
@ -445,7 +520,9 @@ def add_public_key(request, pk):
|
|||
if result["return"] == "error":
|
||||
msg = result["message"]
|
||||
else:
|
||||
msg = _("Installed new SSH public key %(keyname)s") % {"keyname": publickey.keyname}
|
||||
msg = _("Installed new SSH public key %(keyname)s") % {
|
||||
"keyname": publickey.keyname
|
||||
}
|
||||
addlogmsg(request.user.username, instance.compute.name, instance.name, msg)
|
||||
|
||||
if result["return"] == "success":
|
||||
|
@ -470,9 +547,13 @@ def resizevm_cpu(request, pk):
|
|||
new_vcpu = request.POST.get("vcpu", "")
|
||||
new_cur_vcpu = request.POST.get("cur_vcpu", "")
|
||||
|
||||
quota_msg = utils.check_user_quota(request.user, 0, int(new_vcpu) - vcpu, 0, 0)
|
||||
quota_msg = utils.check_user_quota(
|
||||
request.user, 0, int(new_vcpu) - vcpu, 0, 0
|
||||
)
|
||||
if not request.user.is_superuser and quota_msg:
|
||||
msg = _("User %(quota_msg)s quota reached, cannot resize CPU of '%(instance_name)s'!") % {
|
||||
msg = _(
|
||||
"User %(quota_msg)s quota reached, cannot resize CPU of '%(instance_name)s'!"
|
||||
) % {
|
||||
"quota_msg": quota_msg,
|
||||
"instance_name": instance.name,
|
||||
}
|
||||
|
@@ -481,8 +562,13 @@ def resizevm_cpu(request, pk):
cur_vcpu = new_cur_vcpu
vcpu = new_vcpu
instance.proxy.resize_cpu(cur_vcpu, vcpu)
msg = _("CPU is resized: %(old)s to %(new)s") % {"old": cur_vcpu, "new": vcpu}
addlogmsg(request.user.username, instance.compute.name, instance.name, msg)
msg = _("CPU is resized: %(old)s to %(new)s") % {
"old": cur_vcpu,
"new": vcpu,
}
addlogmsg(
request.user.username, instance.compute.name, instance.name, msg
)
messages.success(request, msg)
return redirect(reverse("instances:instance", args=[instance.id]) + "#resize")
@@ -507,22 +593,30 @@ def resize_memory(request, pk):
new_cur_memory_custom = request.POST.get("cur_memory_custom", "")
if new_cur_memory_custom:
new_cur_memory = new_cur_memory_custom
quota_msg = utils.check_user_quota(request.user, 0, 0, int(new_memory) - memory, 0)
quota_msg = utils.check_user_quota(
request.user, 0, 0, int(new_memory) - memory, 0
)
if not request.user.is_superuser and quota_msg:
msg = _("User %(quota_msg)s quota reached, cannot resize memory of '%(instance_name)s'!") % {
msg = _(
"User %(quota_msg)s quota reached, cannot resize memory of '%(instance_name)s'!"
) % {
"quota_msg": quota_msg,
"instance_name": instance.name,
}
messages.error(request, msg)
else:
instance.proxy.resize_mem(new_cur_memory, new_memory)
msg = _("Memory is resized: current/max: %(old_cur)s/%(old_max)s to %(new_cur)s/%(new_max)s") % {
msg = _(
"Memory is resized: current/max: %(old_cur)s/%(old_max)s to %(new_cur)s/%(new_max)s"
) % {
"old_cur": cur_memory,
"old_max": memory,
"new_cur": new_cur_memory,
"new_max": new_memory,
}
addlogmsg(request.user.username, instance.compute.name, instance.name, msg)
addlogmsg(
request.user.username, instance.compute.name, instance.name, msg
)
messages.success(request, msg)

return redirect(reverse("instances:instance", args=[instance.id]) + "#resize")
@@ -542,15 +636,21 @@ def resize_disk(request, pk):
if request.user.is_superuser or request.user.is_staff or userinstance.is_change:
disks_new = list()
for disk in disks:
input_disk_size = int(request.POST.get("disk_size_" + disk["dev"], "0")) * 1073741824
input_disk_size = (
int(request.POST.get("disk_size_" + disk["dev"], "0")) * 1073741824
)
if input_disk_size > disk["size"] + (64 << 20):
disk["size_new"] = input_disk_size
disks_new.append(disk)
disk_sum = sum([disk["size"] >> 30 for disk in disks_new])
disk_new_sum = sum([disk["size_new"] >> 30 for disk in disks_new])
quota_msg = utils.check_user_quota(request.user, 0, 0, 0, disk_new_sum - disk_sum)
quota_msg = utils.check_user_quota(
request.user, 0, 0, 0, disk_new_sum - disk_sum
)
if not request.user.is_superuser and quota_msg:
msg = _("User %(quota_msg)s quota reached, cannot resize disks of '%(instance_name)s'!") % {
msg = _(
"User %(quota_msg)s quota reached, cannot resize disks of '%(instance_name)s'!"
) % {
"quota_msg": quota_msg,
"instance_name": instance.name,
}
@@ -558,7 +658,9 @@ def resize_disk(request, pk):
else:
instance.proxy.resize_disk(disks_new)
msg = _("Disk is resized: %(dev)s") % {"dev": disk["dev"]}
addlogmsg(request.user.username, instance.compute.name, instance.name, msg)
addlogmsg(
request.user.username, instance.compute.name, instance.name, msg
)
messages.success(request, msg)

return redirect(reverse("instances:instance", args=[instance.id]) + "#resize")
@@ -566,7 +668,9 @@ def resize_disk(request, pk):

def add_new_vol(request, pk):
instance = get_instance(request.user, pk)
allow_admin_or_not_template = request.user.is_superuser or request.user.is_staff or not instance.is_template
allow_admin_or_not_template = (
request.user.is_superuser or request.user.is_staff or not instance.is_template
)

if allow_admin_or_not_template:
media = instance.proxy.get_media_devices()
@@ -607,20 +711,34 @@ def add_new_vol(request, pk):
pool_type = conn_pool.get_type()
disk_type = conn_pool.get_volume_type(os.path.basename(source))

if pool_type == 'rbd':
if pool_type == "rbd":
source_info = conn_pool.get_rbd_source()
else: # add more disk types to handle different pool and disk types
else:  # add more disk types to handle different pool and disk types
source_info = None

instance.proxy.attach_disk(target_dev, source, source_info=source_info, pool_type=pool_type, disk_type=disk_type, target_bus=bus, format_type=format, cache_mode=cache)
msg = _("Attach new disk: %(name)s (%(format)s)") % {"name": name, "format": format}
instance.proxy.attach_disk(
target_dev,
source,
source_info=source_info,
pool_type=pool_type,
disk_type=disk_type,
target_bus=bus,
format_type=format,
cache_mode=cache,
)
msg = _("Attach new disk: %(name)s (%(format)s)") % {
"name": name,
"format": format,
}
addlogmsg(request.user.username, instance.compute.name, instance.name, msg)
return redirect(request.META.get("HTTP_REFERER") + "#disks")


def add_existing_vol(request, pk):
instance = get_instance(request.user, pk)
allow_admin_or_not_template = request.user.is_superuser or request.user.is_staff or not instance.is_template
allow_admin_or_not_template = (
request.user.is_superuser or request.user.is_staff or not instance.is_template
)
if allow_admin_or_not_template:
storage = request.POST.get("selected_storage", "")
name = request.POST.get("vols", "")
@@ -641,7 +759,7 @@ def add_existing_vol(request, pk):
format_type = conn_create.get_volume_format_type(name)
disk_type = conn_create.get_volume_type(name)
pool_type = conn_create.get_type()
if pool_type == 'rbd':
if pool_type == "rbd":
source_info = conn_create.get_rbd_source()
path = conn_create.get_source_name()
else:
@@ -651,7 +769,16 @@ def add_existing_vol(request, pk):
target_dev = utils.get_new_disk_dev(media, disks, bus)
source = f"{path}/{name}"

instance.proxy.attach_disk(target_dev, source, source_info=source_info, pool_type=pool_type, disk_type=disk_type, target_bus=bus, format_type=format_type, cache_mode=cache)
instance.proxy.attach_disk(
target_dev,
source,
source_info=source_info,
pool_type=pool_type,
disk_type=disk_type,
target_bus=bus,
format_type=format_type,
cache_mode=cache,
)
msg = _("Attach Existing disk: %(target_dev)s") % {"target_dev": target_dev}
addlogmsg(request.user.username, instance.compute.name, instance.name, msg)
return redirect(request.META.get("HTTP_REFERER") + "#disks")
@@ -659,7 +786,9 @@ def add_existing_vol(request, pk):

def edit_volume(request, pk):
instance = get_instance(request.user, pk)
allow_admin_or_not_template = request.user.is_superuser or request.user.is_staff or not instance.is_template
allow_admin_or_not_template = (
request.user.is_superuser or request.user.is_staff or not instance.is_template
)
if "edit_volume" in request.POST and allow_admin_or_not_template:
target_dev = request.POST.get("dev", "")
@@ -671,10 +800,16 @@ def edit_volume(request, pk):
new_bus = request.POST.get("vol_bus", bus)
serial = request.POST.get("vol_serial", "")
format = request.POST.get("vol_format", "")
cache = request.POST.get("vol_cache", app_settings.INSTANCE_VOLUME_DEFAULT_CACHE)
cache = request.POST.get(
"vol_cache", app_settings.INSTANCE_VOLUME_DEFAULT_CACHE
)
io = request.POST.get("vol_io_mode", app_settings.INSTANCE_VOLUME_DEFAULT_IO)
discard = request.POST.get("vol_discard_mode", app_settings.INSTANCE_VOLUME_DEFAULT_DISCARD)
zeroes = request.POST.get("vol_detect_zeroes", app_settings.INSTANCE_VOLUME_DEFAULT_DETECT_ZEROES)
discard = request.POST.get(
"vol_discard_mode", app_settings.INSTANCE_VOLUME_DEFAULT_DISCARD
)
zeroes = request.POST.get(
"vol_detect_zeroes", app_settings.INSTANCE_VOLUME_DEFAULT_DETECT_ZEROES
)
new_target_dev = utils.get_new_disk_dev(instance.media, instance.disks, new_bus)

if new_bus != bus:
@@ -710,7 +845,10 @@ def edit_volume(request, pk):
if not instance.proxy.get_status() == 5:
messages.success(
request,
_("Volume changes are applied. " + "But it will be activated after shutdown"),
_(
"Volume changes are applied. "
+ "But it will be activated after shutdown"
),
)
else:
messages.success(request, _("Volume is changed successfully."))
@@ -722,7 +860,9 @@ def edit_volume(request, pk):

def delete_vol(request, pk):
instance = get_instance(request.user, pk)
allow_admin_or_not_template = request.user.is_superuser or request.user.is_staff or not instance.is_template
allow_admin_or_not_template = (
request.user.is_superuser or request.user.is_staff or not instance.is_template
)
if allow_admin_or_not_template:
storage = request.POST.get("storage", "")
conn_delete = wvmStorage(
@@ -746,7 +886,9 @@ def delete_vol(request, pk):

def detach_vol(request, pk):
instance = get_instance(request.user, pk)
allow_admin_or_not_template = request.user.is_superuser or request.user.is_staff or not instance.is_template
allow_admin_or_not_template = (
request.user.is_superuser or request.user.is_staff or not instance.is_template
)

if allow_admin_or_not_template:
dev = request.POST.get("dev", "")
@@ -760,11 +902,20 @@ def detach_vol(request, pk):

def add_cdrom(request, pk):
instance = get_instance(request.user, pk)
allow_admin_or_not_template = request.user.is_superuser or request.user.is_staff or not instance.is_template
allow_admin_or_not_template = (
request.user.is_superuser or request.user.is_staff or not instance.is_template
)
if allow_admin_or_not_template:
bus = request.POST.get("bus", "ide" if instance.machine == "pc" else "sata")
target = utils.get_new_disk_dev(instance.media, instance.disks, bus)
instance.proxy.attach_disk(target, "", disk_device="cdrom", cache_mode="none", target_bus=bus, readonly=True)
instance.proxy.attach_disk(
target,
"",
disk_device="cdrom",
cache_mode="none",
target_bus=bus,
readonly=True,
)
msg = _("Add CD-ROM: %(target)s") % {"target": target}
addlogmsg(request.user.username, instance.compute.name, instance.name, msg)
@@ -773,7 +924,9 @@ def add_cdrom(request, pk):

def detach_cdrom(request, pk, dev):
instance = get_instance(request.user, pk)
allow_admin_or_not_template = request.user.is_superuser or request.user.is_staff or not instance.is_template
allow_admin_or_not_template = (
request.user.is_superuser or request.user.is_staff or not instance.is_template
)

if allow_admin_or_not_template:
# dev = request.POST.get('detach_cdrom', '')
@@ -786,7 +939,9 @@ def detach_cdrom(request, pk, dev):

def unmount_iso(request, pk):
instance = get_instance(request.user, pk)
allow_admin_or_not_template = request.user.is_superuser or request.user.is_staff or not instance.is_template
allow_admin_or_not_template = (
request.user.is_superuser or request.user.is_staff or not instance.is_template
)
if allow_admin_or_not_template:
image = request.POST.get("path", "")
dev = request.POST.get("umount_iso", "")
@@ -799,7 +954,9 @@ def unmount_iso(request, pk):

def mount_iso(request, pk):
instance = get_instance(request.user, pk)
allow_admin_or_not_template = request.user.is_superuser or request.user.is_staff or not instance.is_template
allow_admin_or_not_template = (
request.user.is_superuser or request.user.is_staff or not instance.is_template
)
if allow_admin_or_not_template:
image = request.POST.get("media", "")
dev = request.POST.get("mount_iso", "")
@@ -812,9 +969,13 @@ def mount_iso(request, pk):

def snapshot(request, pk):
instance = get_instance(request.user, pk)
allow_admin_or_not_template = request.user.is_superuser or request.user.is_staff or not instance.is_template
allow_admin_or_not_template = (
request.user.is_superuser or request.user.is_staff or not instance.is_template
)

if allow_admin_or_not_template and request.user.has_perm("instances.snapshot_instances"):
if allow_admin_or_not_template and request.user.has_perm(
"instances.snapshot_instances"
):
name = request.POST.get("name", "")
desc = request.POST.get("description", "")
instance.proxy.create_snapshot(name, desc)
@@ -825,8 +986,12 @@ def snapshot(request, pk):

def delete_snapshot(request, pk):
instance = get_instance(request.user, pk)
allow_admin_or_not_template = request.user.is_superuser or request.user.is_staff or not instance.is_template
if allow_admin_or_not_template and request.user.has_perm("instances.snapshot_instances"):
allow_admin_or_not_template = (
request.user.is_superuser or request.user.is_staff or not instance.is_template
)
if allow_admin_or_not_template and request.user.has_perm(
"instances.snapshot_instances"
):
snap_name = request.POST.get("name", "")
instance.proxy.snapshot_delete(snap_name)
msg = _("Delete snapshot: %(snap)s") % {"snap": snap_name}
@@ -836,8 +1001,12 @@ def delete_snapshot(request, pk):

def revert_snapshot(request, pk):
instance = get_instance(request.user, pk)
allow_admin_or_not_template = request.user.is_superuser or request.user.is_staff or not instance.is_template
if allow_admin_or_not_template and request.user.has_perm("instances.snapshot_instances"):
allow_admin_or_not_template = (
request.user.is_superuser or request.user.is_staff or not instance.is_template
)
if allow_admin_or_not_template and request.user.has_perm(
"instances.snapshot_instances"
):
snap_name = request.POST.get("name", "")
instance.proxy.snapshot_revert(snap_name)
msg = _("Successful revert snapshot: ")
@@ -865,7 +1034,7 @@ def set_vcpu(request, pk):
@superuser_only
def set_vcpu_hotplug(request, pk):
instance = get_instance(request.user, pk)
status = True if request.POST.get("vcpu_hotplug", "False") == 'True' else False
status = True if request.POST.get("vcpu_hotplug", "False") == "True" else False
msg = _("VCPU Hot-plug is enabled=%(status)s") % {"status": status}
instance.proxy.set_vcpu_hotplug(status)
addlogmsg(request.user.username, instance.compute.name, instance.name, msg)
@@ -923,7 +1092,10 @@ def set_bootorder(request, pk):
if not instance.proxy.get_status() == 5:
messages.success(
request,
_("Boot menu changes applied. " + "But it will be activated after shutdown"),
_(
"Boot menu changes applied. "
+ "But it will be activated after shutdown"
),
)
else:
messages.success(request, _("Boot order changed successfully."))
@@ -979,7 +1151,7 @@ def change_network(request, pk):
network_data[post] = source
network_data[post + "-type"] = source_type

if source_type == 'iface':
if source_type == "iface":
iface = wvmInterface(
instance.compute.hostname,
instance.compute.login,
@@ -1007,14 +1179,14 @@ def add_network(request, pk):
nwfilter = request.POST.get("add-net-nwfilter")
(source, source_type) = utils.get_network_tuple(request.POST.get("add-net-network"))

if source_type == 'iface':
if source_type == "iface":
iface = wvmInterface(
instance.compute.hostname,
instance.compute.login,
instance.compute.password,
instance.compute.type,
source,
)
instance.compute.hostname,
instance.compute.login,
instance.compute.password,
instance.compute.type,
source,
)
source_type = iface.get_type()

instance.proxy.add_network(mac, source, source_type, nwfilter=nwfilter)
@@ -1062,7 +1234,9 @@ def set_qos(request, pk):

instance.proxy.set_qos(mac, qos_dir, average, peak, burst)
if instance.proxy.get_status() == 5:
messages.success(request, _("%(qos_dir)s QoS is set") % {"qos_dir": qos_dir.capitalize()})
messages.success(
request, _("%(qos_dir)s QoS is set") % {"qos_dir": qos_dir.capitalize()}
)
else:
messages.success(
request,
@@ -1084,7 +1258,9 @@ def unset_qos(request, pk):
instance.proxy.unset_qos(mac, qos_dir)

if instance.proxy.get_status() == 5:
messages.success(request, _("%(qos_dir)s QoS is deleted") % {"qos_dir": qos_dir.capitalize()})
messages.success(
request, _("%(qos_dir)s QoS is deleted") % {"qos_dir": qos_dir.capitalize()}
)
else:
messages.success(
request,
@@ -1108,7 +1284,9 @@ def add_owner(request, pk):
check_inst = UserInstance.objects.filter(instance=instance).count()

if check_inst > 0:
messages.error(request, _("Only one owner is allowed and the one already added"))
messages.error(
request, _("Only one owner is allowed and the one already added")
)
else:
add_user_inst = UserInstance(instance=instance, user_id=user_id)
add_user_inst.save()
@@ -1137,7 +1315,9 @@ def clone(request, pk):
clone_data["name"] = request.POST.get("name", "")

disk_sum = sum([disk["size"] >> 30 for disk in instance.disks])
quota_msg = utils.check_user_quota(request.user, 1, instance.vcpu, instance.memory, disk_sum)
quota_msg = utils.check_user_quota(
request.user, 1, instance.vcpu, instance.memory, disk_sum
)
check_instance = Instance.objects.filter(name=clone_data["name"])

clone_data["disk_owner_uid"] = int(app_settings.INSTANCE_VOLUME_DEFAULT_OWNER_UID)
@@ -1156,19 +1336,31 @@ def clone(request, pk):
clone_data[disk_dev] = disk_name

if not request.user.is_superuser and quota_msg:
msg = _("User '%(quota_msg)s' quota reached, cannot create '%(clone_name)s'!") % {
msg = _(
"User '%(quota_msg)s' quota reached, cannot create '%(clone_name)s'!"
) % {
"quota_msg": quota_msg,
"clone_name": clone_data["name"],
}
messages.error(request, msg)
elif check_instance:
msg = _("Instance '%(clone_name)s' already exists!") % {"clone_name": clone_data["name"]}
msg = _("Instance '%(clone_name)s' already exists!") % {
"clone_name": clone_data["name"]
}
messages.error(request, msg)
elif not re.match(r"^[a-zA-Z0-9-]+$", clone_data["name"]):
msg = _("Instance name '%(clone_name)s' contains invalid characters!") % {"clone_name": clone_data["name"]}
msg = _("Instance name '%(clone_name)s' contains invalid characters!") % {
"clone_name": clone_data["name"]
}
messages.error(request, msg)
elif not re.match(r"^([0-9A-F]{2})(:?[0-9A-F]{2}){5}$", clone_data["clone-net-mac-0"], re.IGNORECASE):
msg = _("Instance MAC '%(clone_mac)s' invalid format!") % {"clone_mac": clone_data["clone-net-mac-0"]}
elif not re.match(
r"^([0-9A-F]{2})(:?[0-9A-F]{2}){5}$",
clone_data["clone-net-mac-0"],
re.IGNORECASE,
):
msg = _("Instance MAC '%(clone_mac)s' invalid format!") % {
"clone_mac": clone_data["clone-net-mac-0"]
}
messages.error(request, msg)
else:
new_instance = Instance(compute=instance.compute, name=clone_data["name"])
@@ -1176,15 +1368,23 @@ def clone(request, pk):
new_uuid = instance.proxy.clone_instance(clone_data)
new_instance.uuid = new_uuid
new_instance.save()
user_instance = UserInstance(instance_id=new_instance.id, user_id=request.user.id, is_delete=True)
user_instance = UserInstance(
instance_id=new_instance.id, user_id=request.user.id, is_delete=True
)
user_instance.save()
msg = _("Create a clone of '%(instance_name)s'") % {"instance_name": instance.name}
msg = _("Create a clone of '%(instance_name)s'") % {
"instance_name": instance.name
}
messages.success(request, msg)
addlogmsg(request.user.username, instance.compute.name, new_instance.name, msg)
addlogmsg(
request.user.username, instance.compute.name, new_instance.name, msg
)

if app_settings.CLONE_INSTANCE_AUTO_MIGRATE == "True":
new_compute = Compute.objects.order_by("?").first()
utils.migrate_instance(new_compute, new_instance, request.user, xml_del=True, offline=True)
utils.migrate_instance(
new_compute, new_instance, request.user, xml_del=True, offline=True
)

return redirect(reverse("instances:instance", args=[new_instance.id]))
except Exception as e:
@@ -1223,7 +1423,9 @@ def update_console(request, pk):
messages.error(request, msg)
else:
msg = _("Set VNC password")
addlogmsg(request.user.username, instance.compute.name, instance.name, msg)
addlogmsg(
request.user.username, instance.compute.name, instance.name, msg
)

if "keymap" in form.changed_data or "clear_keymap" in form.changed_data:
if form.cleaned_data["clear_keymap"]:
@@ -1232,17 +1434,23 @@ def update_console(request, pk):
instance.proxy.set_console_keymap(form.cleaned_data["keymap"])

msg = _("Set VNC keymap")
addlogmsg(request.user.username, instance.compute.name, instance.name, msg)
addlogmsg(
request.user.username, instance.compute.name, instance.name, msg
)

if "type" in form.changed_data:
instance.proxy.set_console_type(form.cleaned_data["type"])
msg = _("Set VNC type")
addlogmsg(request.user.username, instance.compute.name, instance.name, msg)
addlogmsg(
request.user.username, instance.compute.name, instance.name, msg
)

if "listen_on" in form.changed_data:
instance.proxy.set_console_listener_addr(form.cleaned_data["listen_on"])
msg = _("Set VNC listen address")
addlogmsg(request.user.username, instance.compute.name, instance.name, msg)
addlogmsg(
request.user.username, instance.compute.name, instance.name, msg
)

return redirect(request.META.get("HTTP_REFERER") + "#vncsettings")
@@ -1326,7 +1534,15 @@ def create_instance_select_type(request, compute_id):
all_hypervisors = conn.get_hypervisors_machines()

# Supported hypervisors by webvirtcloud: i686, x86_64(for now)
supported_arch = ["x86_64", "i686", "aarch64", "armv7l", "ppc64", "ppc64le", "s390x"]
supported_arch = [
"x86_64",
"i686",
"aarch64",
"armv7l",
"ppc64",
"ppc64le",
"s390x",
]
hypervisors = [hpv for hpv in all_hypervisors.keys() if hpv in supported_arch]
default_machine = app_settings.INSTANCE_MACHINE_DEFAULT_TYPE
default_arch = app_settings.INSTANCE_ARCH_DEFAULT_TYPE
@@ -1371,7 +1587,12 @@ def create_instance(request, compute_id, arch, machine):
appsettings = AppSettings.objects.all()

try:
conn = wvmCreate(compute.hostname, compute.login, compute.password, compute.type)
conn = wvmCreate(
compute.hostname,
compute.login,
compute.password,
compute.type
)

default_firmware = app_settings.INSTANCE_FIRMWARE_DEFAULT_TYPE
default_cpu_mode = app_settings.INSTANCE_CPU_DEFAULT_MODE
@@ -1397,7 +1618,7 @@ def create_instance(request, compute_id, arch, machine):
storages = sorted(conn.get_storages(only_actives=True))
default_graphics = app_settings.QEMU_CONSOLE_DEFAULT_TYPE
default_cdrom = app_settings.INSTANCE_CDROM_ADD
input_device_buses = ['default', 'virtio', 'usb']
input_device_buses = ["default", "virtio", "usb"]
default_input_device_bus = app_settings.INSTANCE_INPUT_DEFAULT_DEVICE

dom_caps = conn.get_dom_capabilities(arch, machine)
@@ -1437,13 +1658,21 @@ def create_instance(request, compute_id, arch, machine):
meta_prealloc = True
if instances:
if data["name"] in instances:
raise libvirtError(_("A virtual machine with this name already exists"))
raise libvirtError(
_("A virtual machine with this name already exists")
)
if Instance.objects.filter(name__exact=data["name"]):
raise libvirtError(_("There is an instance with same name. Remove it and try again!"))
raise libvirtError(
_(
"There is an instance with same name. Remove it and try again!"
)
)

if data["hdd_size"]:
if not data["mac"]:
raise libvirtError(_("No Virtual Machine MAC has been entered"))
raise libvirtError(
_("No Virtual Machine MAC has been entered")
)
else:
path = conn.create_volume(
data["storage"],
@@ -1471,10 +1700,14 @@ def create_instance(request, compute_id, arch, machine):

elif data["template"]:
templ_path = conn.get_volume_path(data["template"])
dest_vol = conn.get_volume_path(data["name"] + ".img", data["storage"])
dest_vol = conn.get_volume_path(
data["name"] + ".img", data["storage"]
)
if dest_vol:
raise libvirtError(
_("Image has already exist. Please check volumes or change instance name")
_(
"Image has already exist. Please check volumes or change instance name"
)
)
else:
clone_path = conn.clone_from_template(
@@ -1501,15 +1734,21 @@ def create_instance(request, compute_id, arch, machine):
is_disk_created = True
else:
if not data["images"]:
raise libvirtError(_("First you need to create or select an image"))
raise libvirtError(
_("First you need to create or select an image")
)
else:
for idx, vol in enumerate(data["images"].split(",")):
path = conn.get_volume_path(vol)
volume = dict()
volume["path"] = path
volume["type"] = conn.get_volume_format_type(path)
volume["device"] = request.POST.get("device" + str(idx), "")
volume["bus"] = request.POST.get("bus" + str(idx), "")
volume["device"] = request.POST.get(
"device" + str(idx), ""
)
volume["bus"] = request.POST.get(
"bus" + str(idx), ""
)
if volume["bus"] == "scsi":
volume["scsi_model"] = default_scsi_disk_model
volume["cache_mode"] = data["cache_mode"]
@@ -1560,12 +1799,21 @@ def create_instance(request, compute_id, arch, machine):
add_cdrom=data["add_cdrom"],
add_input=data["add_input"],
)
create_instance = Instance(compute_id=compute_id, name=data["name"], uuid=uuid)
create_instance = Instance(
compute_id=compute_id, name=data["name"], uuid=uuid
)
create_instance.save()
msg = _("Instance is created")
messages.success(request, msg)
addlogmsg(request.user.username, create_instance.compute.name, create_instance.name, msg)
return redirect(reverse("instances:instance", args=[create_instance.id]))
addlogmsg(
request.user.username,
create_instance.compute.name,
create_instance.name,
msg,
)
return redirect(
reverse("instances:instance", args=[create_instance.id])
)
except libvirtError as lib_err:
if data["hdd_size"] or len(volume_list) > 0:
if is_disk_created: