diff --git a/agent/conf/agent.properties b/agent/conf/agent.properties index 0dc5b8211e0d..7a74c908135d 100644 --- a/agent/conf/agent.properties +++ b/agent/conf/agent.properties @@ -78,6 +78,14 @@ zone=default # Generated with "uuidgen". local.storage.uuid= +# Enable TLS for image server transfers. The keys are read from: +# cert file = /etc/cloudstack/agent/cloud.crt +# key file = /etc/cloudstack/agent/cloud.key +image.server.tls.enabled=true + +# The Address for the network interface that the image server listens on. If not specified, it will listen on the Management network. +#image.server.listen.address= + # Location for KVM virtual router scripts. # The path defined in this property is relative to the directory "/usr/share/cloudstack-common/". domr.scripts.dir=scripts/network/domr/kvm diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java index 3364f9708cf5..ec60b5416055 100644 --- a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java +++ b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java @@ -123,6 +123,20 @@ public class AgentProperties{ */ public static final Property LOCAL_STORAGE_PATH = new Property<>("local.storage.path", "/var/lib/libvirt/images/"); + /** + * Enables TLS on the KVM image server transfer endpoint.
+ * Data type: Boolean.
+ * Default value: true + */ + public static final Property IMAGE_SERVER_TLS_ENABLED = new Property<>("image.server.tls.enabled", true); + + /** + * The IP address that the KVM image server listens on.
+ * Data type: String.
+ * Default value: null + */ + public static final Property IMAGE_SERVER_LISTEN_ADDRESS = new Property<>("image.server.listen.address", null, String.class); + /** * Directory where Qemu sockets are placed.
* These sockets are for the Qemu Guest Agent and SSVM provisioning.
diff --git a/api/src/main/java/com/cloud/storage/VolumeApiService.java b/api/src/main/java/com/cloud/storage/VolumeApiService.java index 1a9bcc6ee98b..b74f230d2fba 100644 --- a/api/src/main/java/com/cloud/storage/VolumeApiService.java +++ b/api/src/main/java/com/cloud/storage/VolumeApiService.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.Map; +import com.cloud.dc.DataCenter; import com.cloud.exception.ResourceAllocationException; import com.cloud.offering.DiskOffering; import com.cloud.user.Account; @@ -70,6 +71,10 @@ public interface VolumeApiService { */ Volume allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationException; + Volume allocVolume(long ownerId, Long zoneId, Long diskOfferingId, Long vmId, Long snapshotId, String name, + Long cmdSize, Boolean displayVolume, Long cmdMinIops, Long cmdMaxIops, String customId) + throws ResourceAllocationException; + /** * Creates the volume based on the given criteria * @@ -80,6 +85,8 @@ public interface VolumeApiService { */ Volume createVolume(CreateVolumeCmd cmd); + Volume createVolume(long volumeId, Long vmId, Long snapshotId, Long storageId, Boolean display); + /** * Resizes the volume based on the given criteria * @@ -203,4 +210,6 @@ Volume updateVolume(long volumeId, String path, String state, Long storageId, Pair checkAndRepairVolume(CheckAndRepairVolumeCmd cmd) throws ResourceAllocationException; Long getVolumePhysicalSize(Storage.ImageFormat format, String path, String chainInfo); + + Long getCustomDiskOfferingIdForVolumeUpload(Account owner, DataCenter zone); } diff --git a/api/src/main/java/com/cloud/user/AccountService.java b/api/src/main/java/com/cloud/user/AccountService.java index 4145e2b89eb3..f0640abf8793 100644 --- a/api/src/main/java/com/cloud/user/AccountService.java +++ b/api/src/main/java/com/cloud/user/AccountService.java @@ -88,10 +88,14 @@ User createUser(String userName, String password, String firstName, String lastN Account getActiveAccountById(long accountId); + 
Account getActiveAccountByUuid(String accountUuid); + Account getAccount(long accountId); User getActiveUser(long userId); + User getOneActiveUserForAccount(Account account); + User getUserIncludingRemoved(long userId); boolean isRootAdmin(Long accountId); diff --git a/api/src/main/java/com/cloud/vm/VmDetailConstants.java b/api/src/main/java/com/cloud/vm/VmDetailConstants.java index 9e56bf4f17b2..33cc6da70812 100644 --- a/api/src/main/java/com/cloud/vm/VmDetailConstants.java +++ b/api/src/main/java/com/cloud/vm/VmDetailConstants.java @@ -130,4 +130,10 @@ public interface VmDetailConstants { String EXTERNAL_DETAIL_PREFIX = "External:"; String CLOUDSTACK_VM_DETAILS = "cloudstack.vm.details"; String CLOUDSTACK_VLAN = "cloudstack.vlan"; + + // KVM Checkpoints related + String ACTIVE_CHECKPOINT_ID = "active.checkpoint.id"; + String ACTIVE_CHECKPOINT_CREATE_TIME = "active.checkpoint.create.time"; + String LAST_CHECKPOINT_ID = "last.checkpoint.id"; + String LAST_CHECKPOINT_CREATE_TIME = "last.checkpoint.create.time"; } diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 05c6098bc726..aede52ed5c9a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -77,6 +77,7 @@ public class ApiConstants { public static final String BOOTABLE = "bootable"; public static final String BIND_DN = "binddn"; public static final String BIND_PASSWORD = "bindpass"; + public static final String BLANK_INSTANCE = "blankinstance"; public static final String BUS_ADDRESS = "busaddress"; public static final String BYTES_READ_RATE = "bytesreadrate"; public static final String BYTES_READ_RATE_MAX = "bytesreadratemax"; @@ -216,6 +217,7 @@ public class ApiConstants { public static final String DOMAIN_PATH = "domainpath"; public static final String DOMAIN_ID = "domainid"; public static final String DOMAIN__ID = "domainId"; + 
public static final String DUMMY = "dummy"; public static final String DURATION = "duration"; public static final String ELIGIBLE = "eligible"; public static final String EMAIL = "email"; @@ -331,6 +333,7 @@ public class ApiConstants { public static final String IS_2FA_VERIFIED = "is2faverified"; public static final String IS_2FA_MANDATED = "is2famandated"; + public static final String IS_ACTIVE = "isactive"; public static final String IS_ASYNC = "isasync"; public static final String IP_AVAILABLE = "ipavailable"; public static final String IP_LIMIT = "iplimit"; diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiServerService.java b/api/src/main/java/org/apache/cloudstack/api/ApiServerService.java index 18c96c371591..1ee41ac86c22 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiServerService.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiServerService.java @@ -21,8 +21,11 @@ import javax.servlet.http.HttpSession; +import org.apache.cloudstack.context.CallContext; + import com.cloud.domain.Domain; import com.cloud.exception.CloudAuthenticationException; +import com.cloud.user.Account; import com.cloud.user.UserAccount; public interface ApiServerService { @@ -52,4 +55,20 @@ public ResponseObject loginUser(HttpSession session, String username, String pas String getDomainId(Map params); boolean isPostRequestsAndTimestampsEnforced(); + + AsyncCmdResult processAsyncCmd(BaseAsyncCmd cmdObj, Map params, CallContext ctx, Long callerUserId, Account caller) throws Exception; + + class AsyncCmdResult { + public final Long objectId; + public final String objectUuid; + public final BaseAsyncCmd asyncCmd; + public final long jobId; + + public AsyncCmdResult(Long objectId, String objectUuid, BaseAsyncCmd asyncCmd, long jobId) { + this.objectId = objectId; + this.objectUuid = objectUuid; + this.asyncCmd = asyncCmd; + this.jobId = jobId; + } + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/CreateImageTransferCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/CreateImageTransferCmd.java new file mode 100644 index 000000000000..eeb63b985d5c --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/CreateImageTransferCmd.java @@ -0,0 +1,98 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.api.command.admin.backup; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.command.admin.AdminCmd; +import org.apache.cloudstack.api.response.BackupResponse; +import org.apache.cloudstack.api.response.ImageTransferResponse; +import org.apache.cloudstack.api.response.VolumeResponse; +import org.apache.cloudstack.backup.ImageTransfer; +import org.apache.cloudstack.backup.KVMBackupExportService; +import org.apache.cloudstack.context.CallContext; + +import com.cloud.utils.EnumUtils; + +@APICommand(name = "createImageTransfer", + description = "Create image transfer for a disk in backup. 
This API is intended for testing only and is disabled by default.", + responseObject = ImageTransferResponse.class, + since = "4.23.0", + authorized = {RoleType.Admin}) +public class CreateImageTransferCmd extends BaseCmd implements AdminCmd { + + @Inject + private KVMBackupExportService kvmBackupExportService; + + @Parameter(name = ApiConstants.BACKUP_ID, + type = CommandType.UUID, + entityType = BackupResponse.class, + description = "ID of the backup") + private Long backupId; + + @Parameter(name = ApiConstants.VOLUME_ID, + type = CommandType.UUID, + entityType = VolumeResponse.class, + required = true, + description = "ID of the disk/volume") + private Long volumeId; + + @Parameter(name = ApiConstants.DIRECTION, + type = CommandType.STRING, + required = true, + description = "Direction of the transfer: upload, download") + private String direction; + + @Parameter(name = ApiConstants.FORMAT, + type = CommandType.STRING, + description = "Format of the image: cow/raw. Currently only raw is supported for download. 
Defaults to raw if not provided") + private String format; + + public Long getBackupId() { + return backupId; + } + + public Long getVolumeId() { + return volumeId; + } + + public ImageTransfer.Direction getDirection() { + return ImageTransfer.Direction.valueOf(direction); + } + + public ImageTransfer.Format getFormat() { + return EnumUtils.getEnum(ImageTransfer.Format.class, format); + } + + @Override + public void execute() { + ImageTransferResponse response = kvmBackupExportService.createImageTransfer(this); + response.setResponseName(getCommandName()); + setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/DeleteVmCheckpointCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/DeleteVmCheckpointCmd.java new file mode 100644 index 000000000000..d0e17e86d427 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/DeleteVmCheckpointCmd.java @@ -0,0 +1,85 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package org.apache.cloudstack.api.command.admin.backup; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.command.admin.AdminCmd; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.backup.KVMBackupExportService; +import org.apache.cloudstack.context.CallContext; + +@APICommand(name = "deleteVirtualMachineCheckpoint", + description = "Delete a VM checkpoint. This API is intended for testing only and is disabled by default.", + responseObject = SuccessResponse.class, + since = "4.23.0", + authorized = {RoleType.Admin}) +public class DeleteVmCheckpointCmd extends BaseCmd implements AdminCmd { + + @Inject + private KVMBackupExportService kvmBackupExportService; + + @Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID, + type = CommandType.UUID, + entityType = UserVmResponse.class, + required = true, + description = "ID of the VM") + private Long vmId; + + @Parameter(name = "checkpointid", + type = CommandType.STRING, + required = true, + description = "Checkpoint ID") + private String checkpointId; + + public Long getVmId() { + return vmId; + } + + public String getCheckpointId() { + return checkpointId; + } + + public void setVmId(Long vmId) { + this.vmId = vmId; + } + + public void setCheckpointId(String checkpointId) { + this.checkpointId = checkpointId; + } + + @Override + public void execute() { + boolean result = kvmBackupExportService.deleteVmCheckpoint(this); + SuccessResponse response = new SuccessResponse(getCommandName()); + response.setSuccess(result); + response.setResponseName(getCommandName()); + setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + return 
CallContext.current().getCallingAccount().getId(); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/FinalizeBackupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/FinalizeBackupCmd.java new file mode 100644 index 000000000000..45173f8668ee --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/FinalizeBackupCmd.java @@ -0,0 +1,103 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package org.apache.cloudstack.api.command.admin.backup; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.admin.AdminCmd; +import org.apache.cloudstack.api.response.BackupResponse; +import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.backup.BackupManager; +import org.apache.cloudstack.backup.KVMBackupExportService; +import org.apache.cloudstack.context.CallContext; + +import com.cloud.event.EventTypes; + +@APICommand(name = "finalizeBackup", + description = "Finalize a VM backup session. This API is intended for testing only and is disabled by default.", + responseObject = BackupResponse.class, + since = "4.23.0", + authorized = {RoleType.Admin}) +public class FinalizeBackupCmd extends BaseAsyncCmd implements AdminCmd { + + @Inject + private KVMBackupExportService kvmBackupExportService; + + @Inject + private BackupManager backupManager; + + @Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID, + type = CommandType.UUID, + entityType = UserVmResponse.class, + required = true, + description = "ID of the VM") + private Long vmId; + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + entityType = BackupResponse.class, + required = true, + description = "ID of the backup") + private Long backupId; + + public Long getVmId() { + return vmId; + } + + public Long getBackupId() { + return backupId; + } + + @Override + public void execute() { + Backup backup = kvmBackupExportService.finalizeBackup(this); + + if (backup == null) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create Backup"); + } + + 
BackupResponse response = backupManager.createBackupResponse(backup, null); + + response.setResponseName(getCommandName()); + setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + + @Override + public String getEventType() { + return EventTypes.EVENT_VM_BACKUP_CREATE; + } + + @Override + public String getEventDescription() { + return "Finalizing backup " + backupId; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/FinalizeImageTransferCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/FinalizeImageTransferCmd.java new file mode 100644 index 000000000000..d483f78b4228 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/FinalizeImageTransferCmd.java @@ -0,0 +1,67 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package org.apache.cloudstack.api.command.admin.backup; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.command.admin.AdminCmd; +import org.apache.cloudstack.api.response.ImageTransferResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.backup.KVMBackupExportService; +import org.apache.cloudstack.context.CallContext; + +@APICommand(name = "finalizeImageTransfer", + description = "Finalize an image transfer. This API is intended for testing only and is disabled by default.", + responseObject = SuccessResponse.class, + since = "4.23.0", + authorized = {RoleType.Admin}) +public class FinalizeImageTransferCmd extends BaseCmd implements AdminCmd { + + @Inject + private KVMBackupExportService kvmBackupExportService; + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + entityType = ImageTransferResponse.class, + required = true, + description = "ID of the image transfer") + private Long imageTransferId; + + public Long getImageTransferId() { + return imageTransferId; + } + + @Override + public void execute() { + boolean result = kvmBackupExportService.finalizeImageTransfer(this); + SuccessResponse response = new SuccessResponse(getCommandName()); + response.setSuccess(result); + response.setResponseName(getCommandName()); + setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListImageTransfersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListImageTransfersCmd.java new file mode 100644 index 000000000000..2565ef241a6b --- /dev/null +++
b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListImageTransfersCmd.java @@ -0,0 +1,79 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.api.command.admin.backup; + +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.command.admin.AdminCmd; +import org.apache.cloudstack.api.response.BackupResponse; +import org.apache.cloudstack.api.response.ImageTransferResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.backup.KVMBackupExportService; +import org.apache.cloudstack.context.CallContext; + +@APICommand(name = "listImageTransfers", + description = "List image transfers for a backup. 
This API is intended for testing only and is disabled by default.", + responseObject = ImageTransferResponse.class, + since = "4.23.0", + authorized = {RoleType.Admin}) +public class ListImageTransfersCmd extends BaseListCmd implements AdminCmd { + + @Inject + private KVMBackupExportService kvmBackupExportService; + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + entityType = ImageTransferResponse.class, + description = "ID of the Image Transfer") + private Long id; + + @Parameter(name = ApiConstants.BACKUP_ID, + type = CommandType.UUID, + entityType = BackupResponse.class, + description = "ID of the backup") + private Long backupId; + + public Long getId() { + return id; + } + + public Long getBackupId() { + return backupId; + } + + @Override + public void execute() { + List responses = kvmBackupExportService.listImageTransfers(this); + ListResponse response = new ListResponse<>(); + response.setResponses(responses); + response.setResponseName(getCommandName()); + setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListVmCheckpointsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListVmCheckpointsCmd.java new file mode 100644 index 000000000000..a61661e982de --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListVmCheckpointsCmd.java @@ -0,0 +1,69 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. 
You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.api.command.admin.backup; + +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.command.admin.AdminCmd; +import org.apache.cloudstack.api.response.CheckpointResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.backup.KVMBackupExportService; + +@APICommand(name = "listVirtualMachineCheckpoints", + description = "List checkpoints for a VM. 
This API is intended for testing only and is disabled by default.", + responseObject = CheckpointResponse.class, + since = "4.23.0", + authorized = {RoleType.Admin}) +public class ListVmCheckpointsCmd extends BaseListCmd implements AdminCmd { + + @Inject + private KVMBackupExportService kvmBackupExportService; + + @Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID, + type = CommandType.UUID, + entityType = UserVmResponse.class, + required = true, + description = "ID of the VM") + private Long vmId; + + public Long getVmId() { + return vmId; + } + + @Override + public void execute() { + List responses = kvmBackupExportService.listVmCheckpoints(this); + ListResponse response = new ListResponse<>(); + response.setResponses(responses); + response.setResponseName(getCommandName()); + setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + return 0; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/StartBackupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/StartBackupCmd.java new file mode 100644 index 000000000000..a5c4773c0fc4 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/StartBackupCmd.java @@ -0,0 +1,120 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. 
See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.api.command.admin.backup; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCreateCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.admin.AdminCmd; +import org.apache.cloudstack.api.response.BackupResponse; +import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.backup.BackupManager; +import org.apache.cloudstack.backup.KVMBackupExportService; +import org.apache.cloudstack.context.CallContext; + +import com.cloud.event.EventTypes; + +@APICommand(name = "startBackup", + description = "Start a VM backup session. 
This API is intended for testing only and is disabled by default.", + responseObject = BackupResponse.class, + since = "4.23.0", + authorized = {RoleType.Admin}) + public class StartBackupCmd extends BaseAsyncCreateCmd implements AdminCmd { + + @Inject + private KVMBackupExportService kvmBackupExportService; + + @Inject + private BackupManager backupManager; + + @Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID, + type = CommandType.UUID, + entityType = UserVmResponse.class, + required = true, + description = "ID of the VM") + private Long vmId; + + @Parameter(name = ApiConstants.NAME, + type = CommandType.STRING, + description = "the name of the backup") + private String name; + + @Parameter(name = ApiConstants.DESCRIPTION, + type = CommandType.STRING, + description = "the description for the backup") + private String description; + + public Long getVmId() { + return vmId; + } + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + @Override + public void execute() { + try { + Backup backup = kvmBackupExportService.startBackup(this); + BackupResponse response = backupManager.createBackupResponse(backup, null); + + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (Exception e) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); + } + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + @Override + public void create() { + Backup backup = kvmBackupExportService.createBackup(this); + + if (backup != null) { + setEntityId(backup.getId()); + setEntityUuid(backup.getUuid()); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create Backup"); + } + } + + @Override + public String getEventType() { + return EventTypes.EVENT_VM_BACKUP_CREATE; + } + + @Override + public String getEventDescription() { + return "Starting backup for Instance " + vmId; + } +} 
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java index e11d20d06466..0e5d598505fe 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java @@ -85,6 +85,8 @@ public class AssignVMCmd extends BaseCmd { "In case no security groups are provided the Instance is part of the default security group.") private List securityGroupIdList; + private boolean skipNetwork = false; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -113,6 +115,34 @@ public List getSecurityGroupIdList() { return securityGroupIdList; } + public boolean isSkipNetwork() { + return skipNetwork; + } + + ///////////////////////////////////////////////////// + /////////////////// Setters ///////////////////////// + ///////////////////////////////////////////////////// + + public void setVirtualMachineId(Long virtualMachineId) { + this.virtualMachineId = virtualMachineId; + } + + public void setAccountName(String accountName) { + this.accountName = accountName; + } + + public void setDomainId(Long domainId) { + this.domainId = domainId; + } + + public void setProjectId(Long projectId) { + this.projectId = projectId; + } + + public void setSkipNetwork(boolean skipNetwork) { + this.skipNetwork = skipNetwork; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/DeployVMCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/DeployVMCmdByAdmin.java index e64c8b3f46c6..5760bd25a366 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/DeployVMCmdByAdmin.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/DeployVMCmdByAdmin.java @@ -48,4 +48,12 @@ public Long getPodId() { public Long getClusterId() { return clusterId; } + + ///////////////////////////////////////////////////// + ////////////////// Setters ////////////////////////// + ///////////////////////////////////////////////////// + + public void setClusterId(Long clusterId) { + this.clusterId = clusterId; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java index 5c5c8776bce3..164a97891bc8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java @@ -193,6 +193,18 @@ public Boolean getGpuEnabled() { return gpuEnabled; } + public void setZoneId(Long zoneId) { + this.zoneId = zoneId; + } + + public void setCpuNumber(Integer cpuNumber) { + this.cpuNumber = cpuNumber; + } + + public void setMemory(Integer memory) { + this.memory = memory; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddNicToVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddNicToVMCmd.java index 6347c38811e8..f6ef955956f6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddNicToVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddNicToVMCmd.java @@ -100,6 +100,26 @@ public String getMacAddress() { return NetUtils.standardizeMacAddress(macaddr); } + public void setVmId(Long vmId) { + this.vmId = vmId; + } + + public void 
setNetworkId(Long netId) { + this.netId = netId; + } + + public void setIpaddr(String ipaddr) { + this.ipaddr = ipaddr; + } + + public void setMacAddress(String macaddr) { + this.macaddr = macaddr; + } + + public void setDhcpOptions(Map dhcpOptions) { + this.dhcpOptions = dhcpOptions; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/BaseDeployVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/BaseDeployVMCmd.java index 8c29d7338b85..0fffefaee3fc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/BaseDeployVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/BaseDeployVMCmd.java @@ -61,10 +61,10 @@ import com.cloud.network.Network.IpAddresses; import com.cloud.offering.DiskOffering; import com.cloud.template.VirtualMachineTemplate; +import com.cloud.utils.net.Dhcp; import com.cloud.utils.net.NetUtils; import com.cloud.vm.VmDetailConstants; import com.cloud.vm.VmDiskInfo; -import com.cloud.utils.net.Dhcp; public abstract class BaseDeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityGroupAction, UserCmd { @@ -75,13 +75,13 @@ public abstract class BaseDeployVMCmd extends BaseAsyncCreateCustomIdCmd impleme ///////////////////////////////////////////////////// @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, required = true, description = "availability zone for the virtual machine") - private Long zoneId; + protected Long zoneId; @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "host name for the virtual machine", validations = {ApiArgValidator.RFCComplianceDomainName}) - private String name; + protected String name; @Parameter(name = ApiConstants.DISPLAY_NAME, type = CommandType.STRING, description = "an optional user 
generated name for the virtual machine") - private String displayName; + protected String displayName; @Parameter(name=ApiConstants.PASSWORD, type=CommandType.STRING, description="The password of the virtual machine. If null, a random password will be generated for the VM.", since="4.19.0.0") @@ -89,21 +89,21 @@ public abstract class BaseDeployVMCmd extends BaseAsyncCreateCustomIdCmd impleme //Owner information @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "an optional account for the virtual machine. Must be used with domainId.") - private String accountName; + protected String accountName; @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, description = "an optional domainId for the virtual machine. If the account parameter is used, domainId must also be used. If account is NOT provided then virtual machine will be assigned to the caller account and domain.") - private Long domainId; + protected Long domainId; //Network information //@ACL(accessType = AccessType.UseEntry) @Parameter(name = ApiConstants.NETWORK_IDS, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = NetworkResponse.class, description = "list of network ids used by virtual machine. Can't be specified with ipToNetworkList parameter") - private List networkIds; + protected List networkIds; @Parameter(name = ApiConstants.BOOT_TYPE, type = CommandType.STRING, required = false, description = "Guest VM Boot option either custom[UEFI] or default boot [BIOS]. Not applicable with VMware if the template is marked as deploy-as-is, as we honour what is defined in the template.", since = "4.14.0.0") - private String bootType; + protected String bootType; @Parameter(name = ApiConstants.BOOT_MODE, type = CommandType.STRING, required = false, description = "Boot Mode [Legacy] or [Secure] Applicable when Boot Type Selected is UEFI, otherwise Legacy only for BIOS. 
Not applicable with VMware if the template is marked as deploy-as-is, as we honour what is defined in the template.", since = "4.14.0.0") - private String bootMode; + protected String bootMode; @Parameter(name = ApiConstants.BOOT_INTO_SETUP, type = CommandType.BOOLEAN, required = false, description = "Boot into hardware setup or not (ignored if startVm = false, only valid for vmware)", since = "4.15.0.0") private Boolean bootIntoSetup; @@ -138,7 +138,7 @@ public abstract class BaseDeployVMCmd extends BaseAsyncCreateCustomIdCmd impleme @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, description = "the hypervisor on which to deploy the virtual machine. " + "The parameter is required and respected only when hypervisor info is not set on the ISO/Template passed to the call") - private String hypervisor; + protected String hypervisor; @Parameter(name = ApiConstants.USER_DATA, type = CommandType.STRING, description = "an optional binary data that can be sent to the virtual machine upon a successful deployment. " + @@ -147,10 +147,10 @@ public abstract class BaseDeployVMCmd extends BaseAsyncCreateCustomIdCmd impleme "Using HTTP POST (via POST body), you can send up to 1MB of data after base64 encoding. 
" + "You also need to change vm.userdata.max.length value", length = 1048576) - private String userData; + protected String userData; @Parameter(name = ApiConstants.USER_DATA_ID, type = CommandType.UUID, entityType = UserDataResponse.class, description = "the ID of the Userdata", since = "4.18") - private Long userdataId; + protected Long userdataId; @Parameter(name = ApiConstants.USER_DATA_DETAILS, type = CommandType.MAP, description = "used to specify the parameters values for the variables in userdata.", since = "4.18") private Map userdataDetails; @@ -189,10 +189,10 @@ public abstract class BaseDeployVMCmd extends BaseAsyncCreateCustomIdCmd impleme private String macAddress; @Parameter(name = ApiConstants.KEYBOARD, type = CommandType.STRING, description = "an optional keyboard device type for the virtual machine. valid value can be one of de,de-ch,es,es-latam,fi,fr,fr-be,fr-ch,is,it,jp,nl-be,no,pt,uk,us") - private String keyboard; + protected String keyboard; @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "Deploy vm for the project") - private Long projectId; + protected Long projectId; @Parameter(name = ApiConstants.START_VM, type = CommandType.BOOLEAN, description = "true if start vm after creating; defaulted to true if not specified") private Boolean startVm; @@ -200,7 +200,7 @@ public abstract class BaseDeployVMCmd extends BaseAsyncCreateCustomIdCmd impleme @ACL @Parameter(name = ApiConstants.AFFINITY_GROUP_IDS, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = AffinityGroupResponse.class, description = "comma separated list of affinity groups id that are going to be applied to the virtual machine." 
+ " Mutually exclusive with affinitygroupnames parameter") - private List affinityGroupIdList; + protected List affinityGroupIdList; @ACL @Parameter(name = ApiConstants.AFFINITY_GROUP_NAMES, type = CommandType.LIST, collectionType = CommandType.STRING, entityType = AffinityGroupResponse.class, description = "comma separated list of affinity groups names that are going to be applied to the virtual machine." @@ -208,10 +208,10 @@ public abstract class BaseDeployVMCmd extends BaseAsyncCreateCustomIdCmd impleme private List affinityGroupNameList; @Parameter(name = ApiConstants.DISPLAY_VM, type = CommandType.BOOLEAN, since = "4.2", description = "an optional field, whether to the display the vm to the end user or not.", authorized = {RoleType.Admin}) - private Boolean displayVm; + protected Boolean displayVm; @Parameter(name = ApiConstants.DETAILS, type = CommandType.MAP, since = "4.3", description = "used to specify the custom parameters. 'extraconfig' is not allowed to be passed in details") - private Map details; + protected Map details; @Parameter(name = ApiConstants.DEPLOYMENT_PLANNER, type = CommandType.STRING, description = "Deployment planner to use for vm allocation. 
Available to ROOT admin only", since = "4.4", authorized = { RoleType.Admin }) private String deploymentPlanner; @@ -225,7 +225,7 @@ public abstract class BaseDeployVMCmd extends BaseAsyncCreateCustomIdCmd impleme private Map dataDiskTemplateToDiskOfferingList; @Parameter(name = ApiConstants.EXTRA_CONFIG, type = CommandType.STRING, since = "4.12", description = "an optional URL encoded string that can be passed to the virtual machine upon successful deployment", length = 5120) - private String extraConfig; + protected String extraConfig; @Parameter(name = ApiConstants.COPY_IMAGE_TAGS, type = CommandType.BOOLEAN, since = "4.13", description = "if true the image tags (if any) will be copied to the VM, default value is false") private Boolean copyImageTags; @@ -798,6 +798,7 @@ public IoDriverPolicy getIoDriverPolicy() { } return null; } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java index 050592b97a3b..13baf0fe4ccc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java @@ -16,10 +16,11 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vm; +import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.stream.Stream; -import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.api.ACL; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -40,6 +41,7 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.uservm.UserVm; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; @APICommand(name = "deployVirtualMachine", description = "Creates and automatically starts an Instance based on a service offering, disk offering, and Template.", responseObject = UserVmResponse.class, responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, @@ -64,6 +66,10 @@ public class DeployVMCmd extends BaseDeployVMCmd { @Parameter(name = ApiConstants.SNAPSHOT_ID, type = CommandType.UUID, entityType = SnapshotResponse.class, since = "4.21") private Long snapshotId; + @Parameter(name = "blank", type = CommandType.BOOLEAN, since = "4.22.1") + private Boolean blankInstance; + + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -88,6 +94,107 @@ public boolean isVolumeOrSnapshotProvided() { return volumeId != null || snapshotId != null; } + public boolean isBlankInstance() { + return Boolean.TRUE.equals(blankInstance); + } + + + + ///////////////////////////////////////////////////// + ////////////////// Setters ////////////////////////// + ///////////////////////////////////////////////////// + public void setZoneId(Long zoneId) { + this.zoneId = zoneId; + } + + public void setName(String name) { + this.name = name; + } + + public void setDisplayName(String displayName) { + this.displayName = displayName; + } + + public void 
setAccountName(String accountName) { + this.accountName = accountName; + } + + public void setDomainId(Long domainId) { + this.domainId = domainId; + } + + public void setNetworkIds(List networkIds) { + this.networkIds = networkIds; + } + + public void setBootType(String bootType) { + this.bootType = bootType; + } + + public void setBootMode(String bootMode) { + this.bootMode = bootMode; + } + + public void setHypervisor(String hypervisor) { + this.hypervisor = hypervisor; + } + + public void setUserData(String userData) { + this.userData = userData; + } + + public void setKeyboard(String keyboard) { + this.keyboard = keyboard; + } + + public void setProjectId(Long projectId) { + this.projectId = projectId; + } + + public void setDisplayVm(Boolean displayVm) { + this.displayVm = displayVm; + } + + public void setUserDataId(Long userDataId) { + this.userdataId = userDataId; + } + + public void setAffinityGroupIds(List ids) { + this.affinityGroupIdList = ids; + } + + public void setDetails(Map details) { + this.details = details; + } + + public void setExtraConfig(String extraConfig) { + this.extraConfig = extraConfig; + } + + public void setDynamicScalingEnabled(Boolean dynamicScalingEnabled) { + this.dynamicScalingEnabled = dynamicScalingEnabled; + } + + public void setServiceOfferingId(Long serviceOfferingId) { + this.serviceOfferingId = serviceOfferingId; + } + + public void setTemplateId(Long templateId) { + this.templateId = templateId; + } + + public void setVolumeId(Long volumeId) { + this.volumeId = volumeId; + } + + public void setSnapshotId(Long snapshotId) { + this.snapshotId = snapshotId; + } + + public void setBlankInstance(boolean blankInstance) { + this.blankInstance = blankInstance; + } + @Override public void execute() { UserVm result; @@ -132,7 +239,7 @@ public void execute() { @Override public void create() throws ResourceAllocationException { - if (Stream.of(templateId, snapshotId, volumeId).filter(Objects::nonNull).count() != 1) { + if 
(!isBlankInstance() && Stream.of(templateId, snapshotId, volumeId).filter(Objects::nonNull).count() != 1) { throw new CloudRuntimeException("Please provide only one of the following parameters - template ID, volume ID or snapshot ID"); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AssignVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AssignVolumeCmd.java index f39853512281..f50abaf73c96 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AssignVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AssignVolumeCmd.java @@ -70,6 +70,21 @@ public Long getProjectid() { return projectid; } + ///////////////////////////////////////////////////// + /////////////////// Setter/////////////////////////// + ///////////////////////////////////////////////////// + public void setVolumeId(Long volumeId) { + this.volumeId = volumeId; + } + + public void setAccountId(Long accountId) { + this.accountId = accountId; + } + + public void setProjectId(Long projectid) { + this.projectid = projectid; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java index 5bcf3a141178..15926c55e873 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java @@ -32,6 +32,7 @@ import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.api.response.SnapshotResponse; +import org.apache.cloudstack.api.response.StoragePoolResponse; import 
org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; @@ -109,6 +110,12 @@ public class CreateVolumeCmd extends BaseAsyncCreateCustomIdCmd implements UserC description = "The ID of the Instance; to be used with snapshot Id, Instance to which the volume gets attached after creation") private Long virtualMachineId; + @Parameter(name = ApiConstants.STORAGE_ID, + type = CommandType.UUID, + entityType = StoragePoolResponse.class, + description = "Storage pool ID to create the volume in. Exclusive with SnapshotId parameter.") + private Long storageId; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -153,6 +160,13 @@ private Long getProjectId() { return projectId; } + public Long getStorageId() { + if (snapshotId != null && storageId != null) { + throw new IllegalArgumentException("StorageId parameter cannot be specified with the SnapshotId parameter."); + } + return storageId; + } + public Boolean getDisplayVolume() { return displayVolume; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/BackupResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/BackupResponse.java index b855bfe40b8d..f1564843ae36 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/BackupResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/BackupResponse.java @@ -127,6 +127,18 @@ public class BackupResponse extends BaseResponse { @Param(description = "Indicates whether the VM from which the backup was taken is expunged or not", since = "4.22.0") private Boolean isVmExpunged; + @SerializedName("from_checkpoint_id") + @Param(description = "Previous active checkpoint id for incremental backups", since = "4.22.0") + private String fromCheckpointId; + + @SerializedName("to_checkpoint_id") + @Param(description = 
"Next checkpoint id for incremental backups", since = "4.22.0") + private String toCheckpointId; + + @SerializedName(ApiConstants.HOST_ID) + @Param(description = "Host ID where the backup is running", since = "4.22.0") + private String hostId; + public String getId() { return id; } @@ -314,4 +326,28 @@ public void setVmOfferingRemoved(Boolean vmOfferingRemoved) { public void setVmExpunged(Boolean isVmExpunged) { this.isVmExpunged = isVmExpunged; } + + public void setFromCheckpointId(String fromCheckpointId) { + this.fromCheckpointId = fromCheckpointId; + } + + public String getFromCheckpointId() { + return this.fromCheckpointId; + } + + public void setToCheckpointId(String toCheckpointId) { + this.toCheckpointId = toCheckpointId; + } + + public String getToCheckpointId() { + return this.toCheckpointId; + } + + public void setHostId(String hostId) { + this.hostId = hostId; + } + + public String getHostId() { + return this.hostId; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/CheckpointResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/CheckpointResponse.java new file mode 100644 index 000000000000..2bec7711064f --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/CheckpointResponse.java @@ -0,0 +1,53 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. 
See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.api.response; + +import java.util.Date; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +public class CheckpointResponse extends BaseResponse { + + @SerializedName(ApiConstants.ID) + @Param(description = "the checkpoint ID") + private String id; + + @SerializedName(ApiConstants.CREATED) + @Param(description = "the checkpoint creation time") + private Date created; + + @SerializedName(ApiConstants.IS_ACTIVE) + @Param(description = "whether this is the active checkpoint") + private Boolean isActive; + + public void setId(String id) { + this.id = id; + } + + public void setCreated(Date created) { + this.created = created; + } + + public void setIsActive(Boolean isActive) { + this.isActive = isActive; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ImageTransferResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ImageTransferResponse.java new file mode 100644 index 000000000000..8a24ed3966f2 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/ImageTransferResponse.java @@ -0,0 +1,112 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. 
You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.api.response; + +import java.util.Date; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; +import org.apache.cloudstack.backup.ImageTransfer; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +@EntityReference(value = ImageTransfer.class) +public class ImageTransferResponse extends BaseResponse { + + @SerializedName(ApiConstants.ID) + @Param(description = "the ID of the image transfer") + private String id; + + @SerializedName("backupid") + @Param(description = "the backup ID") + private String backupId; + + @SerializedName("vmid") + @Param(description = "the VM ID") + private String vmId; + + @SerializedName(ApiConstants.VOLUME_ID) + @Param(description = "the disk/volume ID") + private String diskId; + + @SerializedName("devicename") + @Param(description = "the device name (vda, vdb, etc)") + private String deviceName; + + @SerializedName("transferurl") + @Param(description = "the transfer URL") + private String transferUrl; + + @SerializedName("phase") + @Param(description = "the transfer phase") + private String phase; + + @SerializedName("direction") + @Param(description = "the image transfer direction: upload / download") + private String direction; + + @SerializedName("progress") + @Param(description = "progress in percentage for the upload image transfer") + private Integer progress; + + @SerializedName(ApiConstants.CREATED) + @Param(description = "the date created") + private 
Date created; + + public void setId(String id) { + this.id = id; + } + + public void setBackupId(String backupId) { + this.backupId = backupId; + } + + public void setVmId(String vmId) { + this.vmId = vmId; + } + + public void setDiskId(String diskId) { + this.diskId = diskId; + } + + public void setDeviceName(String deviceName) { + this.deviceName = deviceName; + } + + public void setTransferUrl(String transferUrl) { + this.transferUrl = transferUrl; + } + + public void setPhase(String phase) { + this.phase = phase; + } + + public void setDirection(String direction) { + this.direction = direction; + } + + public void setProgress(Integer progress) { + this.progress = progress; + } + + public void setCreated(Date created) { + this.created = created; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/backup/Backup.java b/api/src/main/java/org/apache/cloudstack/backup/Backup.java index 951af9180e7f..42afc7f196ce 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/Backup.java +++ b/api/src/main/java/org/apache/cloudstack/backup/Backup.java @@ -30,8 +30,16 @@ public interface Backup extends ControlledEntity, InternalIdentity, Identity { + String getFromCheckpointId(); + + String getToCheckpointId(); + + Long getCheckpointCreateTime(); + + Long getHostId(); + enum Status { - Allocated, Queued, BackingUp, BackedUp, Error, Failed, Restoring, Removed, Expunged + Allocated, Queued, BackingUp, ReadyForTransfer, FinalizingTransfer, BackedUp, Error, Failed, Restoring, Removed, Expunged } class Metric { diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java index 6c0121a3e4d8..f3bd535a6b87 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java @@ -58,7 +58,7 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer ConfigKey BackupProviderPlugin = new 
ValidatedConfigKey<>("Advanced", String.class, "backup.framework.provider.plugin", "dummy", - "The backup and recovery provider plugin. Valid plugin values: dummy, veeam, networker and nas", + "The backup and recovery provider plugin. Valid plugin values: dummy, veeam, networker, nas", true, ConfigKey.Scope.Zone, BackupFrameworkEnabled.key(), value -> validateBackupProviderConfig((String)value)); ConfigKey BackupSyncPollingInterval = new ConfigKey<>("Advanced", Long.class, diff --git a/api/src/main/java/org/apache/cloudstack/backup/ImageTransfer.java b/api/src/main/java/org/apache/cloudstack/backup/ImageTransfer.java new file mode 100644 index 000000000000..f7fe1e9c2bb2 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/backup/ImageTransfer.java @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.backup; + +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.api.InternalIdentity; + +public interface ImageTransfer extends ControlledEntity, InternalIdentity { + long getDataCenterId(); + + public enum Direction { + upload, download + } + + public enum Format { + raw, + cow + } + + public enum Backend { + nbd, + file + } + + public enum Phase { + initializing, transferring, finished, failed + } + + String getUuid(); + + Long getBackupId(); + + long getDiskId(); + + long getHostId(); + + String getTransferUrl(); + + Phase getPhase(); + + Direction getDirection(); + + Backend getBackend(); + + String getSignedTicketId(); +} diff --git a/api/src/main/java/org/apache/cloudstack/backup/KVMBackupExportService.java b/api/src/main/java/org/apache/cloudstack/backup/KVMBackupExportService.java new file mode 100644 index 000000000000..51e52c85ec34 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/backup/KVMBackupExportService.java @@ -0,0 +1,105 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package org.apache.cloudstack.backup; + +import java.util.List; + +import org.apache.cloudstack.api.command.admin.backup.CreateImageTransferCmd; +import org.apache.cloudstack.api.command.admin.backup.DeleteVmCheckpointCmd; +import org.apache.cloudstack.api.command.admin.backup.FinalizeBackupCmd; +import org.apache.cloudstack.api.command.admin.backup.FinalizeImageTransferCmd; +import org.apache.cloudstack.api.command.admin.backup.ListImageTransfersCmd; +import org.apache.cloudstack.api.command.admin.backup.ListVmCheckpointsCmd; +import org.apache.cloudstack.api.command.admin.backup.StartBackupCmd; +import org.apache.cloudstack.api.response.CheckpointResponse; +import org.apache.cloudstack.api.response.ImageTransferResponse; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; + +import com.cloud.utils.component.PluggableService; + +/** + * Service for Creating Backups and ImageTransfer sessions which will be consumed by an external orchestrator. 
+ */ +public interface KVMBackupExportService extends Configurable, PluggableService { + + ConfigKey ImageTransferPollingInterval = new ConfigKey<>("Advanced", Long.class, + "image.transfer.polling.interval", + "10", + "The image transfer progress polling interval in seconds.", true, ConfigKey.Scope.Global); + + ConfigKey ImageTransferIdleTimeoutSeconds = new ConfigKey<>("Advanced", Integer.class, + "image.transfer.idle.timeout.seconds", + "600", + "Seconds since last completed HTTP request to an image transfer before the image server unregisters it (idle timeout).", + true, ConfigKey.Scope.Zone); + + ConfigKey ExposeKVMBackupExportServiceApis = new ConfigKey<>("Advanced", Boolean.class, + "expose.kvm.backup.export.service.apis", + "false", + "Enable to expose APIs for testing the KVM Backup Export Service.", true, ConfigKey.Scope.Global); + /** + * Creates a backup session for a VM + */ + Backup createBackup(StartBackupCmd cmd); + + /** + * Start a backup session for a VM + * Creates a new checkpoint and starts NBD server for pull-mode backup + */ + Backup startBackup(StartBackupCmd cmd); + + /** + * Finalize a backup session + * Stops NBD server, updates checkpoint tracking, deletes old checkpoints + */ + Backup finalizeBackup(FinalizeBackupCmd cmd); + + /** + * Create an image transfer object for a disk + * Registers NBD endpoint with ImageIO (stubbed for POC) + */ + ImageTransferResponse createImageTransfer(CreateImageTransferCmd cmd); + + ImageTransfer createImageTransfer(long volumeId, Long backupId, ImageTransfer.Direction direction, ImageTransfer.Format format); + + boolean cancelImageTransfer(long imageTransferId); + + /** + * Finalize an image transfer + * Marks transfer as complete (NBD is closed globally in finalize backup) + */ + boolean finalizeImageTransfer(FinalizeImageTransferCmd cmd); + + boolean finalizeImageTransfer(long imageTransferId); + + /** + * List image transfers for a backup + */ + List listImageTransfers(ListImageTransfersCmd cmd); + + 
/** + * List checkpoints for a VM + */ + List listVmCheckpoints(ListVmCheckpointsCmd cmd); + + /** + * Delete a VM checkpoint (no-op for normal flow, kept for API parity) + */ + boolean deleteVmCheckpoint(DeleteVmCheckpointCmd cmd); +} diff --git a/client/pom.xml b/client/pom.xml index b8dffe65d4fb..88a32c6f6461 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -612,6 +612,11 @@ cloud-plugin-backup-nas ${project.version} + + org.apache.cloudstack + cloud-plugin-integrations-veeam-control-service + ${project.version} + org.apache.cloudstack cloud-plugin-integrations-kubernetes-service diff --git a/core/src/main/java/org/apache/cloudstack/backup/CreateImageTransferAnswer.java b/core/src/main/java/org/apache/cloudstack/backup/CreateImageTransferAnswer.java new file mode 100644 index 000000000000..34cf6d4ca34c --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/CreateImageTransferAnswer.java @@ -0,0 +1,56 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Answer; + +public class CreateImageTransferAnswer extends Answer { + private String imageTransferId; + private String transferUrl; + + public CreateImageTransferAnswer() { + } + + public CreateImageTransferAnswer(CreateImageTransferCommand cmd, boolean success, String details) { + super(cmd, success, details); + } + + public CreateImageTransferAnswer(CreateImageTransferCommand cmd, boolean success, String details, + String imageTransferId, String transferUrl) { + super(cmd, success, details); + this.imageTransferId = imageTransferId; + this.transferUrl = transferUrl; + } + + public String getImageTransferId() { + return imageTransferId; + } + + public void setImageTransferId(String imageTransferId) { + this.imageTransferId = imageTransferId; + } + + public String getTransferUrl() { + return transferUrl; + } + + public void setTransferUrl(String transferUrl) { + this.transferUrl = transferUrl; + } + +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/CreateImageTransferCommand.java b/core/src/main/java/org/apache/cloudstack/backup/CreateImageTransferCommand.java new file mode 100644 index 000000000000..95b56c9a9c38 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/CreateImageTransferCommand.java @@ -0,0 +1,94 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. 
You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; + +public class CreateImageTransferCommand extends Command { + private String transferId; + private String exportName; + private String socket; + private String direction; + private String checkpointId; + private String file; + private ImageTransfer.Backend backend; + private int idleTimeoutSeconds; + + public CreateImageTransferCommand() { + } + + private CreateImageTransferCommand(String transferId, String direction, String socket, int idleTimeoutSeconds) { + this.transferId = transferId; + this.direction = direction; + this.socket = socket; + this.idleTimeoutSeconds = idleTimeoutSeconds; + } + + public CreateImageTransferCommand(String transferId, String direction, String exportName, String socket, String checkpointId, int idleTimeoutSeconds) { + this(transferId, direction, socket, idleTimeoutSeconds); + this.backend = ImageTransfer.Backend.nbd; + this.exportName = exportName; + this.checkpointId = checkpointId; + } + + public CreateImageTransferCommand(String transferId, String direction, String socket, String file, int idleTimeoutSeconds) { + this(transferId, direction, socket, idleTimeoutSeconds); + if (ImageTransfer.Direction.download.toString().equals(direction)) { + throw new IllegalArgumentException("File backend is only supported for upload"); + } + this.backend = ImageTransfer.Backend.file; + this.file = file; + } + + public String getExportName() { + return exportName; + } + + public String getSocket() { + return socket; + } + + public String getFile() { + return file;
+ } + + public ImageTransfer.Backend getBackend() { + return backend; + } + + public String getTransferId() { + return transferId; + } + + @Override + public boolean executeInSequence() { + return true; + } + + public String getDirection() { + return direction; + } + + public String getCheckpointId() { + return checkpointId; + } + + public int getIdleTimeoutSeconds() { + return idleTimeoutSeconds; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/DeleteVmCheckpointCommand.java b/core/src/main/java/org/apache/cloudstack/backup/DeleteVmCheckpointCommand.java new file mode 100644 index 000000000000..81cf6c1abfcc --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/DeleteVmCheckpointCommand.java @@ -0,0 +1,60 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package org.apache.cloudstack.backup; + +import java.util.Map; + +import com.cloud.agent.api.Command; + +public class DeleteVmCheckpointCommand extends Command { + private String vmName; + private String checkpointId; + private Map diskPathUuidMap; + private boolean stoppedVM; + + public DeleteVmCheckpointCommand() { + } + + public DeleteVmCheckpointCommand(String vmName, String checkpointId, Map diskPathUuidMap, boolean stoppedVM) { + this.vmName = vmName; + this.checkpointId = checkpointId; + this.diskPathUuidMap = diskPathUuidMap; + this.stoppedVM = stoppedVM; + } + + public String getVmName() { + return vmName; + } + + public String getCheckpointId() { + return checkpointId; + } + + public Map getDiskPathUuidMap() { + return diskPathUuidMap; + } + + public boolean isStoppedVM() { + return stoppedVM; + } + + @Override + public boolean executeInSequence() { + return true; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/FinalizeImageTransferCommand.java b/core/src/main/java/org/apache/cloudstack/backup/FinalizeImageTransferCommand.java new file mode 100644 index 000000000000..84d9b1ff8186 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/FinalizeImageTransferCommand.java @@ -0,0 +1,40 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. 
See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; + +public class FinalizeImageTransferCommand extends Command { + private String transferId; + + public FinalizeImageTransferCommand() { + } + + public FinalizeImageTransferCommand(String transferId) { + this.transferId = transferId; + } + + public String getTransferId() { + return transferId; + } + + @Override + public boolean executeInSequence() { + return true; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/GetImageTransferProgressAnswer.java b/core/src/main/java/org/apache/cloudstack/backup/GetImageTransferProgressAnswer.java new file mode 100644 index 000000000000..5b5713f4683a --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/GetImageTransferProgressAnswer.java @@ -0,0 +1,47 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package org.apache.cloudstack.backup; + +import java.util.Map; + +import com.cloud.agent.api.Answer; + +public class GetImageTransferProgressAnswer extends Answer { + private Map progressMap; // transferId -> progress percentage (0-100) + + public GetImageTransferProgressAnswer() { + } + + public GetImageTransferProgressAnswer(GetImageTransferProgressCommand cmd, boolean success, String details) { + super(cmd, success, details); + } + + public GetImageTransferProgressAnswer(GetImageTransferProgressCommand cmd, boolean success, String details, + Map progressMap) { + super(cmd, success, details); + this.progressMap = progressMap; + } + + public Map getProgressMap() { + return progressMap; + } + + public void setProgressMap(Map progressMap) { + this.progressMap = progressMap; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/GetImageTransferProgressCommand.java b/core/src/main/java/org/apache/cloudstack/backup/GetImageTransferProgressCommand.java new file mode 100644 index 000000000000..2391f957f51f --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/GetImageTransferProgressCommand.java @@ -0,0 +1,67 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package org.apache.cloudstack.backup; + +import java.util.List; +import java.util.Map; + +import com.cloud.agent.api.Command; + +public class GetImageTransferProgressCommand extends Command { + private List transferIds; + private Map volumePaths; // transferId -> volume path + private Map volumeSizes; // transferId -> volume size + + public GetImageTransferProgressCommand() { + } + + public GetImageTransferProgressCommand(List transferIds, Map volumePaths, Map volumeSizes) { + this.transferIds = transferIds; + this.volumePaths = volumePaths; + this.volumeSizes = volumeSizes; + } + + public List getTransferIds() { + return transferIds; + } + + public void setTransferIds(List transferIds) { + this.transferIds = transferIds; + } + + public Map getVolumePaths() { + return volumePaths; + } + + public void setVolumePaths(Map volumePaths) { + this.volumePaths = volumePaths; + } + + public Map getVolumeSizes() { + return volumeSizes; + } + + public void setVolumeSizes(Map volumeSizes) { + this.volumeSizes = volumeSizes; + } + + @Override + public boolean executeInSequence() { + return false; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/StartBackupAnswer.java b/core/src/main/java/org/apache/cloudstack/backup/StartBackupAnswer.java new file mode 100644 index 000000000000..d7cbf097df90 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/StartBackupAnswer.java @@ -0,0 +1,44 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. 
You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Answer; + +public class StartBackupAnswer extends Answer { + private Long checkpointCreateTime; + + public StartBackupAnswer() { + } + + public StartBackupAnswer(StartBackupCommand cmd, boolean success, String details) { + super(cmd, success, details); + } + + public StartBackupAnswer(StartBackupCommand cmd, boolean success, String details, Long checkpointCreateTime) { + super(cmd, success, details); + this.checkpointCreateTime = checkpointCreateTime; + } + + public Long getCheckpointCreateTime() { + return checkpointCreateTime; + } + + public void setCheckpointCreateTime(Long checkpointCreateTime) { + this.checkpointCreateTime = checkpointCreateTime; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/StartBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/StartBackupCommand.java new file mode 100644 index 000000000000..0fc7d4e26b33 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/StartBackupCommand.java @@ -0,0 +1,83 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. 
You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.backup; + +import java.util.Map; + +import com.cloud.agent.api.Command; + +public class StartBackupCommand extends Command { + private String vmName; + private String toCheckpointId; + private String fromCheckpointId; + private Long fromCheckpointCreateTime; + private String socket; + private Map diskPathUuidMap; + private boolean stoppedVM; + + public StartBackupCommand() { + } + + public StartBackupCommand(String vmName, String toCheckpointId, String fromCheckpointId, Long fromCheckpointCreateTime, + String socket, Map diskPathUuidMap, boolean stoppedVM) { + this.vmName = vmName; + this.toCheckpointId = toCheckpointId; + this.fromCheckpointId = fromCheckpointId; + this.fromCheckpointCreateTime = fromCheckpointCreateTime; + this.socket = socket; + this.diskPathUuidMap = diskPathUuidMap; + this.stoppedVM = stoppedVM; + } + + public String getVmName() { + return vmName; + } + + public String getToCheckpointId() { + return toCheckpointId; + } + + public String getFromCheckpointId() { + return fromCheckpointId; + } + + public Long getFromCheckpointCreateTime() { + return fromCheckpointCreateTime; + } + + public String getSocket() { + return socket; + } + + public Map getDiskPathUuidMap() { + return diskPathUuidMap; + } + + public boolean isIncremental() { + return fromCheckpointId != null && !fromCheckpointId.isEmpty(); + } + + public boolean isStoppedVM() { + return stoppedVM; + } + + @Override + public boolean executeInSequence() { + return true; + } +} diff --git 
a/core/src/main/java/org/apache/cloudstack/backup/StartNBDServerAnswer.java b/core/src/main/java/org/apache/cloudstack/backup/StartNBDServerAnswer.java new file mode 100644 index 000000000000..d8c78d3c8807 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/StartNBDServerAnswer.java @@ -0,0 +1,56 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Answer; + +public class StartNBDServerAnswer extends Answer { + private String imageTransferId; + private String transferUrl; + + public StartNBDServerAnswer() { + } + + public StartNBDServerAnswer(StartNBDServerCommand cmd, boolean success, String details) { + super(cmd, success, details); + } + + public StartNBDServerAnswer(StartNBDServerCommand cmd, boolean success, String details, + String imageTransferId, String transferUrl) { + super(cmd, success, details); + this.imageTransferId = imageTransferId; + this.transferUrl = transferUrl; + } + + public String getImageTransferId() { + return imageTransferId; + } + + public void setImageTransferId(String imageTransferId) { + this.imageTransferId = imageTransferId; + } + + public String getTransferUrl() { + return transferUrl; + } + + public void setTransferUrl(String transferUrl) { + this.transferUrl = transferUrl; + } + +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/StartNBDServerCommand.java b/core/src/main/java/org/apache/cloudstack/backup/StartNBDServerCommand.java new file mode 100644 index 000000000000..67a858af7f00 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/StartNBDServerCommand.java @@ -0,0 +1,70 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. 
See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; + +public class StartNBDServerCommand extends Command { + private String transferId; + private String exportName; + private String volumePath; + private String socket; + private String direction; + private String fromCheckpointId; + + public StartNBDServerCommand() { + } + + protected StartNBDServerCommand(String transferId, String exportName, String volumePath, String socket, String direction, String fromCheckpointId) { + this.transferId = transferId; + this.socket = socket; + this.exportName = exportName; + this.volumePath = volumePath; + this.direction = direction; + this.fromCheckpointId = fromCheckpointId; + } + + public String getExportName() { + return exportName; + } + + public String getSocket() { + return socket; + } + + public String getTransferId() { + return transferId; + } + + @Override + public boolean executeInSequence() { + return true; + } + + public String getVolumePath() { + return volumePath; + } + + public String getDirection() { + return direction; + } + + public String getFromCheckpointId() { + return fromCheckpointId; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/StopBackupAnswer.java b/core/src/main/java/org/apache/cloudstack/backup/StopBackupAnswer.java new file mode 100644 index 000000000000..ce977f31e005 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/StopBackupAnswer.java @@ -0,0 +1,30 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. 
You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Answer; + +public class StopBackupAnswer extends Answer { + + public StopBackupAnswer() { + } + + public StopBackupAnswer(StopBackupCommand cmd, boolean success, String details) { + super(cmd, success, details); + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/StopBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/StopBackupCommand.java new file mode 100644 index 000000000000..d3055021e9de --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/StopBackupCommand.java @@ -0,0 +1,52 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; + +public class StopBackupCommand extends Command { + private String vmName; + private Long vmId; + private Long backupId; + + public StopBackupCommand() { + } + + public StopBackupCommand(String vmName, Long vmId, Long backupId) { + this.vmName = vmName; + this.vmId = vmId; + this.backupId = backupId; + } + + public String getVmName() { + return vmName; + } + + public Long getVmId() { + return vmId; + } + + public Long getBackupId() { + return backupId; + } + + @Override + public boolean executeInSequence() { + return true; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/StopNBDServerCommand.java b/core/src/main/java/org/apache/cloudstack/backup/StopNBDServerCommand.java new file mode 100644 index 000000000000..d75168a22eb2 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/StopNBDServerCommand.java @@ -0,0 +1,46 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; + +public class StopNBDServerCommand extends Command { + private String transferId; + private String direction; + + public StopNBDServerCommand() { + } + + public StopNBDServerCommand(String transferId, String direction) { + this.transferId = transferId; + this.direction = direction; + } + + public String getTransferId() { + return transferId; + } + + public String getDirection() { + return direction; + } + + @Override + public boolean executeInSequence() { + return true; + } +} diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index b20c06fc2c31..2baf675a2579 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -50,7 +50,6 @@ import javax.naming.ConfigurationException; import javax.persistence.EntityExistsException; - import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; @@ -303,8 +302,8 @@ import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.UserVmDao; -import com.cloud.vm.dao.VMInstanceDetailsDao; import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.vm.dao.VMInstanceDetailsDao; import com.cloud.vm.snapshot.VMSnapshotManager; import com.cloud.vm.snapshot.VMSnapshotVO; import com.cloud.vm.snapshot.dao.VMSnapshotDao; @@ -577,7 +576,13 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t logger.debug("Allocating disks for {}", persistedVm); - allocateRootVolume(persistedVm, template, rootDiskOfferingInfo, owner, rootDiskSizeFinal, volume, snapshot); + if (_userVmMgr.isBlankInstance(template)) { + logger.debug("Template is a dummy 
template for hypervisor {}, skipping volume allocation", hyperType); + return; + } else { + allocateRootVolume(persistedVm, template, rootDiskOfferingInfo, owner, rootDiskSizeFinal, volume, snapshot); + } + // Create new Volume context and inject event resource type, id and details to generate VOLUME.CREATE event for the ROOT disk. CallContext volumeContext = CallContext.register(CallContext.current(), ApiCommandResourceType.Volume); diff --git a/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml b/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml index 17c5002c718b..49c668f50e8b 100644 --- a/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml +++ b/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml @@ -88,6 +88,7 @@ + diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java index 6cfd2608f5de..76509d2a6d1e 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java @@ -23,6 +23,7 @@ import com.cloud.dc.ClusterVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.Pair; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDao; public interface ClusterDao extends GenericDao { @@ -61,4 +62,6 @@ public interface ClusterDao extends GenericDao { List listDistinctStorageAccessGroups(String name, String keyword); List listEnabledClusterIdsByZoneHypervisorArch(Long zoneId, HypervisorType hypervisorType, CPU.CPUArch arch); + + List listByHypervisorType(HypervisorType hypervisorType, Filter filter); } diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java 
index c63af0a237ba..1e36e0a780dd 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java @@ -38,6 +38,7 @@ import com.cloud.org.Grouping; import com.cloud.org.Managed; import com.cloud.utils.Pair; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.JoinBuilder; @@ -413,4 +414,11 @@ public List listEnabledClusterIdsByZoneHypervisorArch(Long zoneId, Hypervi } return customSearch(sc, null); } + + @Override + public List listByHypervisorType(HypervisorType hypervisorType, Filter filter) { + SearchCriteria sc = ZoneHyTypeSearch.create(); + sc.setParameters("hypervisorType", hypervisorType.toString()); + return listBy(sc, filter); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDao.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDao.java index fdca6e43f00f..57b98335a280 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDao.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDao.java @@ -24,6 +24,7 @@ import com.cloud.network.Network.GuestType; import com.cloud.network.Network.State; import com.cloud.network.Networks.TrafficType; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.fsm.StateDao; @@ -96,8 +97,13 @@ public interface NetworkDao extends GenericDao, StateDao serviceProviderMap); + List listByZoneAndTrafficType(long zoneId, TrafficType trafficType, Filter filter); + List listByZoneAndTrafficType(long zoneId, TrafficType trafficType); + List listByTrafficTypeAndOwners(final TrafficType trafficType, List accountIds, + List domainIds, Filter filter); + void setCheckForGc(long networkId); int getNetworkCountByNetworkOffId(long networkOfferingId); diff --git 
a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java index 9f7ffabac930..a1ab1d1ef93a 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java @@ -29,9 +29,9 @@ import javax.inject.Inject; import javax.persistence.TableGenerator; -import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.api.ApiConstants; +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import com.cloud.network.Network; @@ -63,6 +63,7 @@ import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.SequenceFetcher; import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; @Component @@ -632,12 +633,41 @@ public List listBy(final long accountId, final long dataCenterId, fin } @Override - public List listByZoneAndTrafficType(final long zoneId, final TrafficType trafficType) { + public List listByZoneAndTrafficType(final long zoneId, final TrafficType trafficType, Filter filter) { final SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("datacenter", zoneId); sc.setParameters("trafficType", trafficType); - return listBy(sc, null); + return listBy(sc, filter); + } + + @Override + public List listByZoneAndTrafficType(final long zoneId, final TrafficType trafficType) { + return listByZoneAndTrafficType(zoneId, trafficType, null); + } + + @Override + public List listByTrafficTypeAndOwners(final TrafficType trafficType, List accountIds, + List domainIds, Filter filter) { + SearchBuilder sb = createSearchBuilder(); + sb.and("trafficType", sb.entity().getTrafficType(), Op.EQ); + boolean accountIdsNotEmpty = CollectionUtils.isNotEmpty(accountIds); + boolean domainIdsNotEmpty = 
CollectionUtils.isNotEmpty(domainIds); + if (accountIdsNotEmpty || domainIdsNotEmpty) { + sb.and().op("account", sb.entity().getAccountId(), SearchCriteria.Op.IN); + sb.or("domain", sb.entity().getDomainId(), SearchCriteria.Op.IN); + sb.cp(); + } + sb.done(); + final SearchCriteria sc = sb.create(); + sc.setParameters("trafficType", trafficType); + if (accountIdsNotEmpty) { + sc.setParameters("account", accountIds.toArray()); + } + if (domainIdsNotEmpty) { + sc.setParameters("domain", domainIds.toArray()); + } + return listBy(sc, filter); } @Override diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java index 4c9f906b68a9..aec06d6d0003 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java @@ -106,4 +106,6 @@ public interface VMTemplateDao extends GenericDao, StateDao< VMTemplateVO findActiveSystemTemplateByHypervisorArchAndUrlPath(HypervisorType hypervisorType, CPU.CPUArch arch, String urlPathSuffix); + + VMTemplateVO findByAccountAndName(Long accountId, String templateName); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java index 9b5d0edc599d..8c6e3fe0983f 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -945,4 +945,12 @@ public boolean updateState( } return rows > 0; } + + @Override + public VMTemplateVO findByAccountAndName(Long accountId, String templateName) { + SearchCriteria sc = NameAccountIdSearch.create(); + sc.setParameters("name", templateName); + sc.setParameters("accountId", accountId); + return findOneBy(sc); + } } diff --git a/engine/schema/src/main/java/com/cloud/tags/dao/ResourceTagDao.java 
b/engine/schema/src/main/java/com/cloud/tags/dao/ResourceTagDao.java index bacb09b98793..034ea61ee0e9 100644 --- a/engine/schema/src/main/java/com/cloud/tags/dao/ResourceTagDao.java +++ b/engine/schema/src/main/java/com/cloud/tags/dao/ResourceTagDao.java @@ -20,11 +20,13 @@ import java.util.Map; import java.util.Set; +import org.apache.cloudstack.api.response.ResourceTagResponse; + import com.cloud.server.ResourceTag; import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.ResourceTagVO; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDao; -import org.apache.cloudstack.api.response.ResourceTagResponse; public interface ResourceTagDao extends GenericDao { @@ -60,4 +62,9 @@ public interface ResourceTagDao extends GenericDao { void removeByResourceIdAndKey(long resourceId, ResourceObjectType resourceType, String key); List listByResourceUuid(String resourceUuid); + + List listByResourceTypeKeyAndOwners(ResourceObjectType resourceType, String key, + List accountIds, List domainIds, Filter filter); + + ResourceTagVO findByResourceTypeKeyAndValue(ResourceObjectType resourceType, String key, String value); } diff --git a/engine/schema/src/main/java/com/cloud/tags/dao/ResourceTagsDaoImpl.java b/engine/schema/src/main/java/com/cloud/tags/dao/ResourceTagsDaoImpl.java index cc9d99e6ab16..b82dd5ec3dec 100644 --- a/engine/schema/src/main/java/com/cloud/tags/dao/ResourceTagsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/tags/dao/ResourceTagsDaoImpl.java @@ -16,18 +16,20 @@ // under the License. 
package com.cloud.tags.dao; -import java.util.List; -import java.util.Set; -import java.util.Map; import java.util.HashMap; import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; import org.apache.cloudstack.api.response.ResourceTagResponse; +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import com.cloud.server.ResourceTag; import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.ResourceTagVO; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -120,4 +122,46 @@ public List listByResourceUuid(String resourceUuid) { sc.setParameters("resourceUuid", resourceUuid); return listBy(sc); } + + @Override + public List listByResourceTypeKeyAndOwners(ResourceObjectType resourceType, String key, + List accountIds, List domainIds, + Filter filter) { + SearchBuilder sb = createSearchBuilder(); + sb.and("resourceType", sb.entity().getResourceType(), Op.EQ); + sb.and("key", sb.entity().getKey(), Op.EQ); + boolean accountIdsNotEmpty = CollectionUtils.isNotEmpty(accountIds); + boolean domainIdsNotEmpty = CollectionUtils.isNotEmpty(domainIds); + if (accountIdsNotEmpty || domainIdsNotEmpty) { + sb.and().op("account", sb.entity().getAccountId(), SearchCriteria.Op.IN); + sb.or("domain", sb.entity().getDomainId(), SearchCriteria.Op.IN); + sb.cp(); + } + sb.done(); + final SearchCriteria sc = sb.create(); + sc.setParameters("resourceType", resourceType); + sc.setParameters("key", key); + if (accountIdsNotEmpty) { + sc.setParameters("account", accountIds.toArray()); + } + if (domainIdsNotEmpty) { + sc.setParameters("domain", domainIds.toArray()); + } + return listBy(sc, filter); + } + + @Override + public ResourceTagVO findByResourceTypeKeyAndValue(ResourceObjectType resourceType, String key, + String value) { + SearchBuilder sb = createSearchBuilder(); + 
sb.and("resourceType", sb.entity().getResourceType(), Op.EQ); + sb.and("key", sb.entity().getKey(), Op.EQ); + sb.and("value", sb.entity().getValue(), Op.EQ); + sb.done(); + final SearchCriteria sc = sb.create(); + sc.setParameters("resourceType", resourceType); + sc.setParameters("key", key); + sc.setParameters("value", value); + return findOneBy(sc); + } } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java index 23541c2431e7..06ae01e92fa0 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java @@ -192,4 +192,6 @@ List searchRemovedByRemoveDate(final Date startDate, final Date en int getVmCountByOfferingNotInDomain(Long serviceOfferingId, List domainIds); List listByIdsIncludingRemoved(List ids); + + List listIdsByHostIdForVolumeStats(long hostIds); } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index 589a63ea0d84..96b073522247 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -1296,4 +1296,20 @@ public List listByIdsIncludingRemoved(List ids) { sc.setParameters("ids", ids.toArray()); return listIncludingRemovedBy(sc); } + + @Override + public List listIdsByHostIdForVolumeStats(long hostId) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); + sb.and().op("host", sb.entity().getHostId(), SearchCriteria.Op.EQ); + sb.or().op("hostNull", sb.entity().getHostId(), Op.NULL); + sb.and("lastHost", sb.entity().getLastHostId(), SearchCriteria.Op.EQ); + sb.cp(); + sb.cp(); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("host", hostId); + sc.setParameters("lastHost", hostId); + return customSearch(sc, null); + } } 
diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java index 0f8a10fb7be6..d589f9e6bef8 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java @@ -103,6 +103,18 @@ public class BackupVO implements Backup { @Column(name = "backup_schedule_id") private Long backupScheduleId; + @Column(name = "from_checkpoint_id") + private String fromCheckpointId; + + @Column(name = "to_checkpoint_id") + private String toCheckpointId; + + @Column(name = "checkpoint_create_time") + private Long checkpointCreateTime; + + @Column(name = "host_id") + private Long hostId; + @Transient Map details; @@ -288,4 +300,40 @@ public Long getBackupScheduleId() { public void setBackupScheduleId(Long backupScheduleId) { this.backupScheduleId = backupScheduleId; } + + @Override + public String getFromCheckpointId() { + return fromCheckpointId; + } + + public void setFromCheckpointId(String fromCheckpointId) { + this.fromCheckpointId = fromCheckpointId; + } + + @Override + public String getToCheckpointId() { + return toCheckpointId; + } + + public void setToCheckpointId(String toCheckpointId) { + this.toCheckpointId = toCheckpointId; + } + + @Override + public Long getCheckpointCreateTime() { + return checkpointCreateTime; + } + + public void setCheckpointCreateTime(Long checkpointCreateTime) { + this.checkpointCreateTime = checkpointCreateTime; + } + + @Override + public Long getHostId() { + return hostId; + } + + public void setHostId(Long hostId) { + this.hostId = hostId; + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/ImageTransferVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/ImageTransferVO.java new file mode 100644 index 000000000000..c391eae2e86b --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/ImageTransferVO.java 
@@ -0,0 +1,254 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.backup; + +import java.util.Date; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.Temporal; +import javax.persistence.TemporalType; + +@Entity +@Table(name = "image_transfer") +public class ImageTransferVO implements ImageTransfer { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "backup_id") + private Long backupId; + + @Column(name = "disk_id") + private long diskId; + + @Column(name = "host_id") + private long hostId; + + @Column(name = "socket") + private String socket; + + @Column(name = "file") + private String file; + + @Column(name = "transfer_url") + private String transferUrl; + + @Enumerated(value = EnumType.STRING) + @Column(name = "phase") + private Phase phase; + + @Enumerated(value = EnumType.STRING) + @Column(name = "direction") + 
private Direction direction; + + @Enumerated(value = EnumType.STRING) + @Column(name = "backend") + private Backend backend; + + @Column(name = "signed_ticket_id") + private String signedTicketId; + + @Column(name = "progress") + private Integer progress; + + @Column(name = "account_id") + Long accountId; + + @Column(name = "domain_id") + Long domainId; + + @Column(name = "data_center_id") + Long dataCenterId; + + @Column(name = "created") + @Temporal(value = TemporalType.TIMESTAMP) + private Date created; + + @Column(name = "updated") + @Temporal(value = TemporalType.TIMESTAMP) + private Date updated; + + @Column(name = "removed") + @Temporal(value = TemporalType.TIMESTAMP) + private Date removed; + + public ImageTransferVO() { + } + + private ImageTransferVO(String uuid, long diskId, long hostId, Phase phase, Direction direction, Long accountId, Long domainId, Long dataCenterId) { + this.uuid = uuid; + this.diskId = diskId; + this.hostId = hostId; + this.phase = phase; + this.direction = direction; + this.accountId = accountId; + this.domainId = domainId; + this.dataCenterId = dataCenterId; + this.created = new Date(); + } + + public ImageTransferVO(String uuid, Long backupId, long diskId, long hostId, String socket, Phase phase, Direction direction, Long accountId, Long domainId, Long dataCenterId) { + this(uuid, diskId, hostId, phase, direction, accountId, domainId, dataCenterId); + this.backupId = backupId; + this.socket = socket; + this.backend = Backend.nbd; + } + + public ImageTransferVO(String uuid, long diskId, long hostId, String file, Phase phase, Direction direction, Long accountId, Long domainId, Long dataCenterId) { + this(uuid, diskId, hostId, phase, direction, accountId, domainId, dataCenterId); + this.file = file; + this.backend = Backend.file; + } + + @Override + public long getId() { + return id; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public Long getBackupId() { + return backupId; + } + + public void 
setBackupId(long backupId) { + this.backupId = backupId; + } + + @Override + public long getDiskId() { + return diskId; + } + + public void setDiskId(long diskId) { + this.diskId = diskId; + } + + @Override + public long getHostId() { + return hostId; + } + + public void setHostId(long hostId) { + this.hostId = hostId; + } + + public void setSocket(String socket) { + this.socket = socket; + } + + @Override + public String getTransferUrl() { + return transferUrl; + } + + public void setTransferUrl(String transferUrl) { + this.transferUrl = transferUrl; + } + + @Override + public Phase getPhase() { + return phase; + } + + public void setPhase(Phase phase) { + this.phase = phase; + this.updated = new Date(); + } + + @Override + public Direction getDirection() { + return direction; + } + + public void setDirection(Direction direction) { + this.direction = direction; + } + + @Override + public Backend getBackend() { + return backend; + } + + @Override + public String getSignedTicketId() { + return signedTicketId; + } + + public void setSignedTicketId(String signedTicketId) { + this.signedTicketId = signedTicketId; + } + + public Integer getProgress() { + return progress; + } + + public void setProgress(Integer progress) { + this.progress = progress; + this.updated = new Date(); + } + + @Override + public Class getEntityType() { + return ImageTransfer.class; + } + + @Override + public String getName() { + return null; + } + + @Override + public long getDomainId() { + return domainId; + } + + @Override + public long getAccountId() { + return accountId; + } + + @Override + public long getDataCenterId() { + return dataCenterId; + } + + public Date getCreated() { + return created; + } + + public Date getUpdated() { + return updated; + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/ImageTransferDao.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/ImageTransferDao.java new file mode 100644 index 000000000000..fab28dbc3421 --- 
/dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/ImageTransferDao.java @@ -0,0 +1,35 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.backup.dao; + +import java.util.List; + +import org.apache.cloudstack.backup.ImageTransfer; +import org.apache.cloudstack.backup.ImageTransferVO; + +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GenericDao; + +public interface ImageTransferDao extends GenericDao { + List listByBackupId(Long backupId); + ImageTransferVO findByUuid(String uuid); + ImageTransferVO findByVolume(Long volumeId); + ImageTransferVO findUnfinishedByVolume(Long volumeId); + List listByPhaseAndDirection(ImageTransfer.Phase phase, ImageTransfer.Direction direction); + List listByOwners(List accountIds, List domainIds, Filter filter); +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/ImageTransferDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/ImageTransferDaoImpl.java new file mode 100644 index 000000000000..3e1f6b513a58 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/ImageTransferDaoImpl.java @@ -0,0 +1,129 @@ +//Licensed to the Apache Software Foundation (ASF) under 
one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.backup.dao; + +import java.util.List; + +import javax.annotation.PostConstruct; + +import org.apache.cloudstack.backup.ImageTransfer; +import org.apache.cloudstack.backup.ImageTransferVO; +import org.apache.commons.collections.CollectionUtils; +import org.springframework.stereotype.Component; + +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@Component +public class ImageTransferDaoImpl extends GenericDaoBase implements ImageTransferDao { + + private SearchBuilder backupIdSearch; + private SearchBuilder uuidSearch; + private SearchBuilder volumeSearch; + private SearchBuilder volumeUnfinishedSearch; + private SearchBuilder phaseDirectionSearch; + + public ImageTransferDaoImpl() { + } + + @PostConstruct + protected void init() { + backupIdSearch = createSearchBuilder(); + backupIdSearch.and("backupId", backupIdSearch.entity().getBackupId(), SearchCriteria.Op.EQ); + backupIdSearch.done(); + + uuidSearch = createSearchBuilder(); + uuidSearch.and("uuid", uuidSearch.entity().getUuid(), SearchCriteria.Op.EQ); + uuidSearch.done(); + + volumeSearch = createSearchBuilder(); + volumeSearch.and("volumeId", 
volumeSearch.entity().getDiskId(), SearchCriteria.Op.EQ); + volumeSearch.done(); + + volumeUnfinishedSearch = createSearchBuilder(); + volumeUnfinishedSearch.and("volumeId", volumeUnfinishedSearch.entity().getDiskId(), SearchCriteria.Op.EQ); + volumeUnfinishedSearch.and("phase", volumeUnfinishedSearch.entity().getPhase(), SearchCriteria.Op.NEQ); + volumeUnfinishedSearch.done(); + + phaseDirectionSearch = createSearchBuilder(); + phaseDirectionSearch.and("phase", phaseDirectionSearch.entity().getPhase(), SearchCriteria.Op.EQ); + phaseDirectionSearch.and("direction", phaseDirectionSearch.entity().getDirection(), SearchCriteria.Op.EQ); + phaseDirectionSearch.done(); + } + + @Override + public List listByBackupId(Long backupId) { + SearchCriteria sc = backupIdSearch.create(); + sc.setParameters("backupId", backupId); + return listBy(sc); + } + + @Override + public ImageTransferVO findByUuid(String uuid) { + SearchCriteria sc = uuidSearch.create(); + sc.setParameters("uuid", uuid); + return findOneBy(sc); + } + + @Override + public ImageTransferVO findByVolume(Long volumeId) { + SearchCriteria sc = volumeSearch.create(); + sc.setParameters("volumeId", volumeId); + return findOneBy(sc); + } + + @Override + public ImageTransferVO findUnfinishedByVolume(Long volumeId) { + SearchCriteria sc = volumeUnfinishedSearch.create(); + sc.setParameters("volumeId", volumeId); + sc.setParameters("phase", ImageTransferVO.Phase.finished.toString()); + return findOneBy(sc); + } + + @Override + public List listByPhaseAndDirection(ImageTransfer.Phase phase, ImageTransfer.Direction direction) { + SearchCriteria sc = phaseDirectionSearch.create(); + sc.setParameters("phase", phase); + sc.setParameters("direction", direction); + return listBy(sc); + } + + @Override + public List listByOwners(List accountIds, List domainIds, Filter filter) { + SearchBuilder sb = createSearchBuilder(); + boolean accountIdsNotEmpty = CollectionUtils.isNotEmpty(accountIds); + boolean domainIdsNotEmpty = 
CollectionUtils.isNotEmpty(domainIds); + if (accountIdsNotEmpty || domainIdsNotEmpty) { + sb.and().op("account", sb.entity().getAccountId(), SearchCriteria.Op.IN); + sb.or("domain", sb.entity().getDomainId(), SearchCriteria.Op.IN); + sb.cp(); + } + sb.done(); + final SearchCriteria sc = sb.create(); + if (accountIdsNotEmpty) { + sc.setParameters("account", accountIds.toArray()); + } + if (domainIdsNotEmpty) { + sc.setParameters("domain", domainIds.toArray()); + } + + return listBy(sc, filter); + } +} diff --git a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml index edc14d9fa0cc..fda874745dfa 100644 --- a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml +++ b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml @@ -273,6 +273,7 @@ + diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql b/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql index 858c46a7c1ee..fbb2fd079f9c 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql @@ -19,7 +19,6 @@ -- Schema upgrade from 4.21.0.0 to 4.22.0.0 --; - -- health check status as enum CALL `cloud`.`IDEMPOTENT_CHANGE_COLUMN`('router_health_check', 'check_result', 'check_result', 'varchar(16) NOT NULL COMMENT "check executions result: SUCCESS, FAILURE, WARNING, UNKNOWN"'); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42210to42300.sql b/engine/schema/src/main/resources/META-INF/db/schema-42210to42300.sql index 4cb9eb7cb2c4..47b28964acdc 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42210to42300.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42210to42300.sql @@ -117,3 +117,38 @@ CALL 
`cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.vpc_offerings','conserve_mode', 'tin --- Disable/enable NICs CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.nics','enabled', 'TINYINT(1) NOT NULL DEFAULT 1 COMMENT ''Indicates whether the NIC is enabled or not'' '); + +-- Add checkpoint tracking fields to backups table for incremental backup support +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backups', 'from_checkpoint_id', 'VARCHAR(255) DEFAULT NULL COMMENT "Previous active checkpoint id for incremental backups"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backups', 'to_checkpoint_id', 'VARCHAR(255) DEFAULT NULL COMMENT "New checkpoint id created for the next incremental backup"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backups', 'checkpoint_create_time', 'BIGINT DEFAULT NULL COMMENT "Checkpoint creation timestamp from libvirt"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backups', 'host_id', 'BIGINT UNSIGNED DEFAULT NULL COMMENT "Host where backup is running"'); + +-- Create image_transfer table for per-disk image transfers +CREATE TABLE IF NOT EXISTS `cloud`.`image_transfer`( + `id` bigint unsigned NOT NULL auto_increment COMMENT 'id', + `uuid` varchar(40) NOT NULL COMMENT 'uuid', + `account_id` bigint unsigned NOT NULL COMMENT 'Account ID', + `domain_id` bigint unsigned NOT NULL COMMENT 'Domain ID', + `data_center_id` bigint unsigned NOT NULL COMMENT 'Data Center ID', + `backup_id` bigint unsigned COMMENT 'Backup ID', + `disk_id` bigint unsigned NOT NULL COMMENT 'Disk/Volume ID', + `host_id` bigint unsigned NOT NULL COMMENT 'Host ID', + `transfer_url` varchar(255) COMMENT 'ImageIO transfer URL', + `file` varchar(255) COMMENT 'File for the file backend', + `phase` varchar(20) NOT NULL COMMENT 'Transfer phase: initializing, transferring, finished, failed', + `socket` varchar(255) COMMENT 'Unix socket for nbd backend', + `direction` varchar(20) NOT NULL COMMENT 'Direction: upload, download', + `backend` varchar(20) NOT NULL COMMENT 'Backend: nbd, file', + `progress` 
int COMMENT 'Transfer progress percentage (0-100)', + `signed_ticket_id` varchar(255) COMMENT 'Signed ticket ID from ImageIO', + `created` datetime NOT NULL COMMENT 'date created', + `updated` datetime COMMENT 'date updated if not null', + `removed` datetime COMMENT 'date removed if not null', + PRIMARY KEY (`id`), + UNIQUE KEY `uuid` (`uuid`), + CONSTRAINT `fk_image_transfer__backup_id` FOREIGN KEY (`backup_id`) REFERENCES `backups`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_image_transfer__disk_id` FOREIGN KEY (`disk_id`) REFERENCES `volumes`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_image_transfer__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, + INDEX `i_image_transfer__backup_id`(`backup_id`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.user_vm_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.user_vm_view.sql index 6f31fc17bce7..db3fd8be4841 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.user_vm_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.user_vm_view.sql @@ -56,6 +56,7 @@ SELECT `vm_instance`.`display_vm` AS `display_vm`, `vm_instance`.`delete_protection` AS `delete_protection`, `guest_os`.`uuid` AS `guest_os_uuid`, + `guest_os`.`display_name` AS `guest_os_display_name`, `vm_instance`.`pod_id` AS `pod_id`, `host_pod_ref`.`uuid` AS `pod_uuid`, `vm_instance`.`private_ip_address` AS `private_ip_address`, diff --git a/engine/userdata/src/main/java/org/apache/cloudstack/userdata/UserDataManagerImpl.java b/engine/userdata/src/main/java/org/apache/cloudstack/userdata/UserDataManagerImpl.java index 7c5692564c99..c9c48dbb179f 100644 --- a/engine/userdata/src/main/java/org/apache/cloudstack/userdata/UserDataManagerImpl.java +++ b/engine/userdata/src/main/java/org/apache/cloudstack/userdata/UserDataManagerImpl.java @@ -119,10 +119,10 @@ public String validateUserData(String userData, BaseCmd.HTTPMethod 
httpmethod) { byte[] decodedUserData = null; // If GET, use 4K. If POST, support up to 1M. - if (httpmethod.equals(BaseCmd.HTTPMethod.GET)) { - decodedUserData = validateAndDecodeByHTTPMethod(userData, MAX_HTTP_GET_LENGTH, BaseCmd.HTTPMethod.GET); - } else if (httpmethod.equals(BaseCmd.HTTPMethod.POST)) { + if (BaseCmd.HTTPMethod.POST.equals(httpmethod)) { decodedUserData = validateAndDecodeByHTTPMethod(userData, MAX_HTTP_POST_LENGTH, BaseCmd.HTTPMethod.POST); + } else { + decodedUserData = validateAndDecodeByHTTPMethod(userData, MAX_HTTP_GET_LENGTH, BaseCmd.HTTPMethod.GET); } // Re-encode so that the '=' paddings are added if necessary since 'isBase64' does not require it, but python does on the VR. diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDao.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDao.java index 9f7a4ad6e058..9aba2ba97fdc 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDao.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDao.java @@ -50,4 +50,6 @@ public interface AsyncJobDao extends GenericDao { // Returns the number of pending jobs for the given Management server msids. // NOTE: This is the msid and NOT the id long countPendingNonPseudoJobs(Long... msIds); + + List listPendingJobIdsForAccount(long accountId); } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java index a2f1f36b8637..1dfb1738f0eb 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java @@ -266,4 +266,14 @@ public long countPendingJobs(String havingInfo, String... 
cmds) { List results = customSearch(sc, null); return results.get(0); } + + @Override + public List listPendingJobIdsForAccount(long accountId) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.and("accountId", sb.entity().getAccountId(), SearchCriteria.Op.EQ); + sb.selectFields(sb.entity().getId()); + SearchCriteria sc = sb.create(); + sc.setParameters("accountId", accountId); + return customSearch(sc, null); + } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/ImageServerControlSocket.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/ImageServerControlSocket.java new file mode 100644 index 000000000000..2e9852f7bc1e --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/ImageServerControlSocket.java @@ -0,0 +1,123 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package com.cloud.hypervisor.kvm.resource; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import com.cloud.utils.script.OutputInterpreter; +import com.cloud.utils.script.Script; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; + +/** + * Communicates with the cloudstack-image-server control socket via socat. + * + * Protocol: newline-delimited JSON over a Unix domain socket. + * Actions: register, unregister, status. + */ +public class ImageServerControlSocket { + private static final Logger LOGGER = LogManager.getLogger(ImageServerControlSocket.class); + static final String CONTROL_SOCKET_PATH = "/var/run/cloudstack/image-server.sock"; + private static final Gson GSON = new GsonBuilder().create(); + + private ImageServerControlSocket() { + } + + /** + * Send a JSON message to the image server control socket and return the + * parsed response, or null on communication failure. 
+ */ + static JsonObject sendMessage(Map message) { + String json = GSON.toJson(message); + Script script = new Script("/bin/bash", LOGGER); + script.add("-c"); + script.add(String.format("echo '%s' | socat -t5 - UNIX-CONNECT:%s", + json.replace("'", "'\\''"), CONTROL_SOCKET_PATH)); + OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser(); + String result = script.execute(parser); + if (result != null) { + LOGGER.error("Control socket communication failed: {}", result); + return null; + } + String output = parser.getLines(); + if (output == null || output.trim().isEmpty()) { + LOGGER.error("Empty response from control socket"); + return null; + } + try { + return JsonParser.parseString(output.trim()).getAsJsonObject(); + } catch (Exception e) { + LOGGER.error("Failed to parse control socket response: {}", output, e); + return null; + } + } + + /** + * Register a transfer config with the image server. + * @return true if the server accepted the registration. + */ + public static boolean registerTransfer(String transferId, Map config) { + Map msg = new HashMap<>(); + msg.put("action", "register"); + msg.put("transfer_id", transferId); + msg.put("config", config); + JsonObject resp = sendMessage(msg); + if (resp == null) { + return false; + } + return "ok".equals(resp.has("status") ? resp.get("status").getAsString() : null); + } + + /** + * Unregister a transfer from the image server. + * @return the number of remaining active transfers, or -1 on error. + */ + public static int unregisterTransfer(String transferId) { + Map msg = new HashMap<>(); + msg.put("action", "unregister"); + msg.put("transfer_id", transferId); + JsonObject resp = sendMessage(msg); + if (resp == null) { + return -1; + } + if (!"ok".equals(resp.has("status") ? resp.get("status").getAsString() : null)) { + return -1; + } + return resp.has("active_transfers") ? 
resp.get("active_transfers").getAsInt() : -1; + } + + /** + * Check whether the image server control socket is responsive. + * @return true if the server responded with status "ok". + */ + public static boolean isReady() { + Map msg = new HashMap<>(); + msg.put("action", "status"); + JsonObject resp = sendMessage(msg); + if (resp == null) { + return false; + } + return "ok".equals(resp.has("status") ? resp.get("status").getAsString() : null); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 46cf1da461e7..08d84bb8d6a9 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -382,6 +382,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv public static final String CHECKPOINT_DELETE_COMMAND = "virsh checkpoint-delete --domain %s --checkpointname %s --metadata"; + public static final int IMAGE_SERVER_DEFAULT_PORT = 54322; + public static final String IMAGE_SERVER_SYSTEMD_UNIT_NAME = "cloudstack-image-server"; + protected int qcow2DeltaMergeTimeout; private String modifyVlanPath; @@ -395,6 +398,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv private String heartBeatPath; private String vmActivityCheckPath; private String nasBackupPath; + private String imageServerPath; + private boolean imageServerTlsEnabled = false; + private String imageServerListenAddress; private String securityGroupPath; private String ovsPvlanDhcpHostPath; private String ovsPvlanVmPath; @@ -809,6 +815,18 @@ public String getNasBackupPath() { return nasBackupPath; } + public String getImageServerPath() { + return imageServerPath; + } + + public boolean isImageServerTlsEnabled() { + 
return imageServerTlsEnabled; + } + + public String getImageServerListenAddress() { + return imageServerListenAddress; + } + public String getOvsPvlanDhcpHostPath() { return ovsPvlanDhcpHostPath; } @@ -1027,6 +1045,9 @@ public boolean configure(final String name, final Map params) th cachePath = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.HOST_CACHE_LOCATION); + imageServerTlsEnabled = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.IMAGE_SERVER_TLS_ENABLED); + imageServerListenAddress = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.IMAGE_SERVER_LISTEN_ADDRESS); + params.put("domr.scripts.dir", domrScriptsDir); virtRouterResource = new VirtualRoutingResource(this); @@ -1095,6 +1116,12 @@ public boolean configure(final String name, final Map params) th throw new ConfigurationException("Unable to find nasbackup.sh"); } + String imageServerMain = Script.findScript(kvmScriptsDir, "imageserver/__main__.py"); + if (imageServerMain == null) { + throw new ConfigurationException("Unable to find imageserver package"); + } + imageServerPath = new File(imageServerMain).getParent(); + createTmplPath = Script.findScript(storageScriptsDir, "createtmplt.sh"); if (createTmplPath == null) { throw new ConfigurationException("Unable to find the createtmplt.sh"); @@ -5227,6 +5254,24 @@ public void removeCheckpointsOnVm(String vmName, String volumeUuid, List logger.debug("Removed all checkpoints of volume [{}] on VM [{}].", volumeUuid, vmName); } + public Map getDiskPathLabelMap(String vmName) { + try { + Connect conn = LibvirtConnection.getConnectionByVmName(vmName); + List disks = getDisks(conn, vmName); + Map diskPathLabelMap = new HashMap<>(); + for (DiskDef disk : disks) { + if (disk.getDeviceType() != DeviceType.DISK) { + continue; + } + diskPathLabelMap.put(disk.getDiskPath(), disk.getDiskLabel()); + } + return diskPathLabelMap; + } catch (LibvirtException e) { + logger.error("Failed to get disk path label map for VM [{}] due to: [{}].", 
vmName, e.getMessage(), e); + throw new CloudRuntimeException(e); + } + } + public boolean recreateCheckpointsOnVm(List volumes, String vmName, Connect conn) { logger.debug("Trying to recreate checkpoints on VM [{}] with volumes [{}].", vmName, volumes); try { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateImageTransferCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateImageTransferCommandWrapper.java new file mode 100644 index 000000000000..7cf05da9b211 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateImageTransferCommandWrapper.java @@ -0,0 +1,178 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package com.cloud.hypervisor.kvm.resource.wrapper; + +import java.io.File; +import java.util.HashMap; +import java.util.Map; + +import org.apache.cloudstack.backup.CreateImageTransferAnswer; +import org.apache.cloudstack.backup.CreateImageTransferCommand; +import org.apache.cloudstack.backup.ImageTransfer; +import org.apache.cloudstack.storage.resource.IpTablesHelper; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.ImageServerControlSocket; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.StringUtils; +import com.cloud.utils.script.Script; + +@ResourceWrapper(handles = CreateImageTransferCommand.class) +public class LibvirtCreateImageTransferCommandWrapper extends CommandWrapper { + protected Logger logger = LogManager.getLogger(getClass()); + + private static final String IMAGE_SERVER_TLS_CERT_FILE = "/etc/cloudstack/agent/cloud.crt"; + private static final String IMAGE_SERVER_TLS_KEY_FILE = "/etc/cloudstack/agent/cloud.key"; + + private void resetService(String unitName) { + Script resetScript = new Script("/bin/bash", logger); + resetScript.add("-c"); + resetScript.add(String.format("systemctl reset-failed %s || true", unitName)); + resetScript.execute(); + } + + private static String shellQuote(String value) { + return "'" + value.replace("'", "'\\''") + "'"; + } + + private boolean startImageServerIfNotRunning(int imageServerPort, String listenAddress, LibvirtComputingResource resource) { + final String imageServerPackageDir = resource.getImageServerPath(); + final String imageServerParentDir = new File(imageServerPackageDir).getParent(); + final String imageServerModuleName = new File(imageServerPackageDir).getName(); + final boolean tlsEnabled = resource.isImageServerTlsEnabled(); + String unitName = 
resource.IMAGE_SERVER_SYSTEMD_UNIT_NAME; + + Script checkScript = new Script("/bin/bash", logger); + checkScript.add("-c"); + checkScript.add(String.format("systemctl is-active --quiet %s", unitName)); + String checkResult = checkScript.execute(); + if (checkResult == null && ImageServerControlSocket.isReady()) { + return true; + } + + resetService(unitName); + if (checkResult != null) { + StringBuilder systemdRunCmd = new StringBuilder(String.format( + "systemd-run --unit=%s --property=Restart=no --property=WorkingDirectory=%s /usr/bin/python3 -m %s --listen %s --port %d", + unitName, shellQuote(imageServerParentDir), imageServerModuleName, shellQuote(listenAddress), imageServerPort)); + + if (tlsEnabled) { + systemdRunCmd.append(" --tls-enabled"); + systemdRunCmd.append(" --tls-cert-file ").append(IMAGE_SERVER_TLS_CERT_FILE); + systemdRunCmd.append(" --tls-key-file ").append(IMAGE_SERVER_TLS_KEY_FILE); + } + + Script startScript = new Script("/bin/bash", logger); + startScript.add("-c"); + startScript.add(systemdRunCmd.toString()); + String startResult = startScript.execute(); + + if (startResult != null) { + logger.error(String.format("Failed to start the Image server: %s", startResult)); + return false; + } + } + + int maxWaitSeconds = 10; + int pollIntervalMs = 1000; + int maxAttempts = (maxWaitSeconds * 1000) / pollIntervalMs; + boolean serverReady = false; + + for (int attempt = 0; attempt < maxAttempts; attempt++) { + if (ImageServerControlSocket.isReady()) { + serverReady = true; + logger.info(String.format("Image server control socket is ready (attempt %d)", attempt + 1)); + break; + } + try { + Thread.sleep(pollIntervalMs); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return false; + } + } + + if (!serverReady) { + logger.error(String.format("Image server control socket not ready within %d seconds", maxWaitSeconds)); + return false; + } + + String rule = String.format("-p tcp -m state --state NEW -m tcp --dport %d -j 
ACCEPT", imageServerPort); + IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN, true, rule, + String.format("Error in opening up image server port %d", imageServerPort)); + + return true; + } + + public Answer execute(CreateImageTransferCommand cmd, LibvirtComputingResource resource) { + final String transferId = cmd.getTransferId(); + ImageTransfer.Backend backend = cmd.getBackend(); + + if (StringUtils.isBlank(transferId)) { + return new CreateImageTransferAnswer(cmd, false, "transferId is empty."); + } + + final Map payload = new HashMap<>(); + payload.put("backend", backend.toString()); + payload.put("idle_timeout_seconds", cmd.getIdleTimeoutSeconds()); + + if (backend == ImageTransfer.Backend.file) { + final String filePath = cmd.getFile(); + if (StringUtils.isBlank(filePath)) { + return new CreateImageTransferAnswer(cmd, false, "file path is empty for file backend."); + } + payload.put("file", filePath); + } else { + String socket = cmd.getSocket(); + final String exportName = cmd.getExportName(); + if (StringUtils.isBlank(socket)) { + return new CreateImageTransferAnswer(cmd, false, "Empty socket."); + } + if (StringUtils.isBlank(exportName)) { + return new CreateImageTransferAnswer(cmd, false, "exportName is empty."); + } + payload.put("socket", "/tmp/imagetransfer/" + socket + ".sock"); + payload.put("export", exportName); + String checkpointId = cmd.getCheckpointId(); + if (checkpointId != null) { + payload.put("export_bitmap", cmd.getCheckpointId()); + } + } + + final int imageServerPort = LibvirtComputingResource.IMAGE_SERVER_DEFAULT_PORT; + String listenAddress = resource.getImageServerListenAddress(); + if (StringUtils.isBlank(listenAddress)) { + listenAddress = resource.getPrivateIp(); + } + if (!startImageServerIfNotRunning(imageServerPort, listenAddress, resource)) { + return new CreateImageTransferAnswer(cmd, false, "Failed to start image server."); + } + + if (!ImageServerControlSocket.registerTransfer(transferId, payload)) { + return 
new CreateImageTransferAnswer(cmd, false, "Failed to register transfer with image server."); + } + + final String transferScheme = resource.isImageServerTlsEnabled() ? "https" : "http"; + final String transferUrl = String.format("%s://%s:%d/images/%s", transferScheme, listenAddress, imageServerPort, transferId); + return new CreateImageTransferAnswer(cmd, true, "Image transfer prepared on KVM host.", transferId, transferUrl); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteVmCheckpointCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteVmCheckpointCommandWrapper.java new file mode 100644 index 000000000000..edd1e09287e9 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteVmCheckpointCommandWrapper.java @@ -0,0 +1,80 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package com.cloud.hypervisor.kvm.resource.wrapper; + +import java.util.Map; + +import org.apache.cloudstack.backup.DeleteVmCheckpointCommand; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.script.Script; + +@ResourceWrapper(handles = DeleteVmCheckpointCommand.class) +public class LibvirtDeleteVmCheckpointCommandWrapper extends CommandWrapper { + + @Override + public Answer execute(DeleteVmCheckpointCommand cmd, LibvirtComputingResource resource) { + if (cmd.isStoppedVM()) { + return deleteBitmapsOnDisks(cmd); + } + return deleteDomainCheckpoint(cmd); + } + + private Answer deleteDomainCheckpoint(DeleteVmCheckpointCommand cmd) { + String vmName = cmd.getVmName(); + String checkpointId = cmd.getCheckpointId(); + String virshCmd = String.format("virsh checkpoint-delete %s %s", vmName, checkpointId); + Script script = new Script("/bin/bash"); + script.add("-c"); + script.add(virshCmd); + String result = script.execute(); + if (result != null) { + return new Answer(cmd, false, "Failed to delete checkpoint: " + result); + } + return new Answer(cmd, true, "Checkpoint deleted"); + } + + /** + * Stopped VM: persistent bitmaps on disk images ({@code qemu-img bitmap --remove}), matching {@link LibvirtStartBackupCommandWrapper} bitmap --add. 
+ */ + private Answer deleteBitmapsOnDisks(DeleteVmCheckpointCommand cmd) { + String checkpointId = cmd.getCheckpointId(); + Map diskPathUuidMap = cmd.getDiskPathUuidMap(); + if (diskPathUuidMap == null || diskPathUuidMap.isEmpty()) { + return new Answer(cmd, false, "No disks provided for bitmap removal"); + } + for (Map.Entry entry : diskPathUuidMap.entrySet()) { + String diskPath = entry.getKey(); + Script script = new Script("sudo"); + script.add("qemu-img"); + script.add("bitmap"); + script.add("--remove"); + script.add(diskPath); + script.add(checkpointId); + String result = script.execute(); + if (result != null) { + return new Answer(cmd, false, + "Failed to remove bitmap " + checkpointId + " from disk " + diskPath + ": " + result); + } + } + return new Answer(cmd, true, "Checkpoint bitmap removed from disks"); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFinalizeImageTransferCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFinalizeImageTransferCommandWrapper.java new file mode 100644 index 000000000000..3d9f6563d5eb --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFinalizeImageTransferCommandWrapper.java @@ -0,0 +1,101 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. 
You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import org.apache.cloudstack.backup.FinalizeImageTransferCommand; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.ImageServerControlSocket; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.StringUtils; +import com.cloud.utils.script.Script; + +@ResourceWrapper(handles = FinalizeImageTransferCommand.class) +public class LibvirtFinalizeImageTransferCommandWrapper extends CommandWrapper { + protected Logger logger = LogManager.getLogger(getClass()); + private void resetService(String unitName) { + Script resetScript = new Script("/bin/bash", logger); + resetScript.add("-c"); + resetScript.add(String.format("systemctl reset-failed %s || true", unitName)); + resetScript.execute(); + } + + private boolean stopImageServer(int imageServerPort, LibvirtComputingResource resource) { + String unitName = resource.IMAGE_SERVER_SYSTEMD_UNIT_NAME; + + Script checkScript = new Script("/bin/bash", logger); + checkScript.add("-c"); + checkScript.add(String.format("systemctl is-active --quiet %s", unitName)); + String checkResult = checkScript.execute(); + if (checkResult != null) { + logger.info("Image server not running, resetting failed state"); + resetService(unitName); + removeFirewallRule(imageServerPort); + return true; + } + + Script stopScript = new 
Script("/bin/bash", logger); + stopScript.add("-c"); + stopScript.add(String.format("systemctl stop %s", unitName)); + stopScript.execute(); + resetService(unitName); + logger.info("Image server {} stopped", unitName); + + removeFirewallRule(imageServerPort); + + return true; + } + + private void removeFirewallRule(int port) { + String rule = String.format("-p tcp -m state --state NEW -m tcp --dport %d -j ACCEPT", port); + Script removeScript = new Script("/bin/bash", logger); + removeScript.add("-c"); + removeScript.add(String.format("iptables -D INPUT %s || true", rule)); + String result = removeScript.execute(); + if (result != null && !result.isEmpty() && !result.contains("iptables: Bad rule")) { + logger.debug("Firewall rule removal result for port {}: {}", port, result); + } else { + logger.info("Firewall rule removed for port {} (or did not exist)", port); + } + } + + public Answer execute(FinalizeImageTransferCommand cmd, LibvirtComputingResource resource) { + final String transferId = cmd.getTransferId(); + final int imageServerPort = LibvirtComputingResource.IMAGE_SERVER_DEFAULT_PORT; + if (StringUtils.isBlank(transferId)) { + return new Answer(cmd, false, "transferId is empty."); + } + + int activeTransfers = ImageServerControlSocket.unregisterTransfer(transferId); + if (activeTransfers < 0) { + logger.warn("Could not reach image server to unregister transfer {}; assuming server is down", transferId); + stopImageServer(imageServerPort, resource); + return new Answer(cmd, true, "Image transfer finalized (server unreachable, forced stop)."); + } + + if (activeTransfers == 0) { + stopImageServer(imageServerPort, resource); + } + + return new Answer(cmd, true, "Image transfer finalized."); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetImageTransferProgressCommandWrapper.java 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetImageTransferProgressCommandWrapper.java new file mode 100644 index 000000000000..7e0cbf2934db --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetImageTransferProgressCommandWrapper.java @@ -0,0 +1,95 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package com.cloud.hypervisor.kvm.resource.wrapper; + +import java.io.File; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.backup.GetImageTransferProgressAnswer; +import org.apache.cloudstack.backup.GetImageTransferProgressCommand; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; + +@ResourceWrapper(handles = GetImageTransferProgressCommand.class) +public class LibvirtGetImageTransferProgressCommandWrapper extends CommandWrapper { + protected Logger logger = LogManager.getLogger(getClass()); + + @Override + public Answer execute(GetImageTransferProgressCommand cmd, LibvirtComputingResource resource) { + try { + List transferIds = cmd.getTransferIds(); + Map volumePaths = cmd.getVolumePaths(); + Map volumeSizes = cmd.getVolumeSizes(); + Map progressMap = new HashMap<>(); + + if (transferIds == null || transferIds.isEmpty()) { + return new GetImageTransferProgressAnswer(cmd, true, "No transfers to check", progressMap); + } + + for (String transferId : transferIds) { + String volumePath = volumePaths.get(transferId); + Long volumeSize = volumeSizes.get(transferId); + + if (volumePath == null || volumeSize == null || volumeSize == 0) { + logger.warn("Missing volume path or size for transferId: {}", transferId); + progressMap.put(transferId, null); + continue; + } + + try { + File file = new File(volumePath); + if (!file.exists()) { + logger.warn("Volume file does not exist: {}", volumePath); + progressMap.put(transferId, null); + continue; + } + + long currentSize = file.length(); + + if (volumePath.endsWith(".qcow2") || volumePath.endsWith(".qcow")) { + try { + currentSize = KVMPhysicalDisk.getVirtualSizeFromFile(volumePath); 
+ } catch (Exception e) { + logger.warn("Failed to get virtual size for qcow2 file: {}, using physical size", volumePath, e); + } + } + progressMap.put(transferId, currentSize); + logger.debug("Transfer {} progress, current: {})", transferId, currentSize, volumeSize); + + } catch (Exception e) { + logger.error("Error getting progress for transferId: {}, path: {}", transferId, volumePath, e); + progressMap.put(transferId, null); + } + } + + return new GetImageTransferProgressAnswer(cmd, true, "Progress retrieved successfully", progressMap); + + } catch (Exception e) { + logger.error("Error executing GetImageTransferProgressCommand", e); + return new GetImageTransferProgressAnswer(cmd, false, "Error getting transfer progress: " + e.getMessage()); + } + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartBackupCommandWrapper.java new file mode 100644 index 000000000000..2e7c8c5ae98c --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartBackupCommandWrapper.java @@ -0,0 +1,221 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. 
See the License for the +//specific language governing permissions and limitations +//under the License. + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import java.io.File; +import java.io.FileWriter; +import java.util.Map; + +import org.apache.cloudstack.backup.StartBackupAnswer; +import org.apache.cloudstack.backup.StartBackupCommand; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.StringUtils; +import com.cloud.utils.script.Script; + +@ResourceWrapper(handles = StartBackupCommand.class) +public class LibvirtStartBackupCommandWrapper extends CommandWrapper { + protected Logger logger = LogManager.getLogger(getClass()); + + @Override + public Answer execute(StartBackupCommand cmd, LibvirtComputingResource resource) { + if (cmd.isStoppedVM()) { + return handleStoppedVmBackup(cmd, cmd.getToCheckpointId()); + } + return handleRunningVmBackup(cmd, resource); + } + + public Answer handleRunningVmBackup(StartBackupCommand cmd, LibvirtComputingResource resource) { + String vmName = cmd.getVmName(); + String toCheckpointId = cmd.getToCheckpointId(); + String fromCheckpointId = cmd.getFromCheckpointId(); + Long fromCheckpointCreateTime = cmd.getFromCheckpointCreateTime(); + String socket = cmd.getSocket(); + + try { + if (StringUtils.isNotBlank(fromCheckpointId)) { + Answer redefineAnswer = ensureFromCheckpointExists(cmd, fromCheckpointId, fromCheckpointCreateTime); + if (redefineAnswer != null) { + return redefineAnswer; + } + } + + File dir = new File("/tmp/imagetransfer"); + if (!dir.exists()) { + dir.mkdirs(); + } + + // Create backup XML + String backupXml = createBackupXml(cmd, fromCheckpointId, socket, resource); + String checkpointXml = createCheckpointXml(toCheckpointId); + + // Write XMLs to temp files + 
File backupXmlFile = File.createTempFile("backup-", ".xml"); + File checkpointXmlFile = File.createTempFile("checkpoint-", ".xml"); + + try (FileWriter writer = new FileWriter(backupXmlFile)) { + writer.write(backupXml); + } + try (FileWriter writer = new FileWriter(checkpointXmlFile)) { + writer.write(checkpointXml); + } + + // Execute virsh backup-begin + String backupCmd = String.format("virsh backup-begin %s %s --checkpointxml %s", + vmName, backupXmlFile.getAbsolutePath(), checkpointXmlFile.getAbsolutePath()); + + Script script = new Script("/bin/bash"); + script.add("-c"); + script.add(backupCmd); + String result = script.execute(); + + backupXmlFile.delete(); + checkpointXmlFile.delete(); + + if (result != null) { + return new StartBackupAnswer(cmd, false, "Backup begin failed: " + result); + } + + long checkpointCreateTime = getCheckpointCreateTime(); + return new StartBackupAnswer(cmd, true, "Backup started successfully", checkpointCreateTime); + + } catch (Exception e) { + return new StartBackupAnswer(cmd, false, "Error starting backup: " + e.getMessage()); + } + } + + private Answer ensureFromCheckpointExists(StartBackupCommand cmd, String fromCheckpointId, Long fromCheckpointCreateTime) { + String vmName = cmd.getVmName(); + Script dumpScript = new Script("/bin/bash"); + dumpScript.add("-c"); + dumpScript.add(String.format("virsh checkpoint-dumpxml --domain %s --checkpointname %s --no-domain", + vmName, fromCheckpointId)); + if (dumpScript.execute() == null) { + return null; + } + if (fromCheckpointCreateTime == null) { + return new StartBackupAnswer(cmd, false, "From checkpoint create time is null for checkpoint " + fromCheckpointId); + } + + String redefineXml = createCheckpointXmlForRedefine(fromCheckpointId, fromCheckpointCreateTime); + File redefineFile; + try { + redefineFile = File.createTempFile("checkpoint-redefine-", ".xml"); + } catch (Exception e) { + return new StartBackupAnswer(cmd, false, "Failed to create temp file for checkpoint 
redefine: " + e.getMessage()); + } + try (FileWriter writer = new FileWriter(redefineFile)) { + writer.write(redefineXml); + } catch (Exception e) { + redefineFile.delete(); + return new StartBackupAnswer(cmd, false, "Failed to write checkpoint redefine XML: " + e.getMessage()); + } + String createCmd = String.format(LibvirtComputingResource.CHECKPOINT_CREATE_COMMAND, vmName, redefineFile.getAbsolutePath()); + Script createScript = new Script("/bin/bash"); + createScript.add("-c"); + createScript.add(createCmd); + String result = createScript.execute(); + redefineFile.delete(); + if (result != null) { + return new StartBackupAnswer(cmd, false, "Failed to redefine from-checkpoint " + fromCheckpointId + ": " + result); + } + return null; + } + + private String createCheckpointXmlForRedefine(String checkpointName, Long createTime) { + StringBuilder xml = new StringBuilder(); + xml.append("\n"); + xml.append(" ").append(checkpointName).append("\n"); + xml.append(" ").append(createTime).append("\n"); + xml.append(""); + return xml.toString(); + } + + private String createBackupXml(StartBackupCommand cmd, String fromCheckpointId, String socket, LibvirtComputingResource resource) { + StringBuilder xml = new StringBuilder(); + xml.append("\n"); + + if (StringUtils.isNotBlank(fromCheckpointId)) { + xml.append(" ").append(fromCheckpointId).append("\n"); + } + + xml.append(String.format(" \n", socket)); + + xml.append(" \n"); + + Map diskPathUuidMap = cmd.getDiskPathUuidMap(); + Map diskPathLabelMap = resource.getDiskPathLabelMap(cmd.getVmName()); + + for (Map.Entry entry : diskPathLabelMap.entrySet()) { + if (!diskPathUuidMap.containsKey(entry.getKey())) { + continue; + } + String diskName = entry.getValue(); + String export = diskPathUuidMap.get(entry.getKey()); + String scratchFile = "/var/tmp/scratch-" + export + ".qcow2"; + xml.append(" \n"); + xml.append(" \n"); + xml.append(" \n"); + } + + xml.append(" \n"); + xml.append(""); + + return xml.toString(); + } + + private 
String createCheckpointXml(String checkpointId) { + return "\n" + + " " + checkpointId + "\n" + + ""; + } + + private Answer handleStoppedVmBackup(StartBackupCommand cmd, String toCheckpointId) { + String vmName = cmd.getVmName(); + Map diskPathUuidMap = cmd.getDiskPathUuidMap(); + for (Map.Entry entry : diskPathUuidMap.entrySet()) { + String diskPath = entry.getKey(); + Script script = new Script("sudo"); + script.add("qemu-img"); + script.add("bitmap"); + script.add("--add"); + script.add(diskPath); + script.add(toCheckpointId); + String result = script.execute(); + if (result != null) { + return new StartBackupAnswer(cmd, false, + "Failed to add bitmap " + toCheckpointId + " to disk " + diskPath + ": " + result); + } + } + long checkpointCreateTime = getCheckpointCreateTime(); + return new StartBackupAnswer(cmd, true, "Stopped VM backup: checkpoint bitmap added successfully", + checkpointCreateTime); + } + + private long getCheckpointCreateTime() { + return System.currentTimeMillis() / 1000; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartNBDServerCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartNBDServerCommandWrapper.java new file mode 100644 index 000000000000..56d5945ced11 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartNBDServerCommandWrapper.java @@ -0,0 +1,128 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. 
You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import java.io.File; + +import org.apache.cloudstack.backup.StartNBDServerAnswer; +import org.apache.cloudstack.backup.StartNBDServerCommand; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.StringUtils; +import com.cloud.utils.script.Script; + +@ResourceWrapper(handles = StartNBDServerCommand.class) +public class LibvirtStartNBDServerCommandWrapper extends CommandWrapper { + protected Logger logger = LogManager.getLogger(getClass()); + + @Override + public Answer execute(StartNBDServerCommand cmd, LibvirtComputingResource resource) { + String volumePath = cmd.getVolumePath(); + String socket = cmd.getSocket(); + String exportName = cmd.getExportName(); + String transferId = cmd.getTransferId(); + + if (StringUtils.isBlank(volumePath)) { + return new StartNBDServerAnswer(cmd, false, "Volume path is required for the nbd server"); + } + if (StringUtils.isBlank(exportName)) { + return new StartNBDServerAnswer(cmd, false, "Export name is required for the nbd server"); + } + if (StringUtils.isBlank(socket)) { + return new StartNBDServerAnswer(cmd, false, "Socket is required for the nbd server"); + } + + String unitName = "qemu-nbd-" + transferId.hashCode(); + + Script checkScript = new Script("/bin/bash", logger); + checkScript.add("-c"); + 
checkScript.add(String.format("systemctl is-active --quiet %s", unitName)); + String checkResult = checkScript.execute(); + if (checkResult == null) { + return new StartNBDServerAnswer(cmd, false, "A qemu-nbd service is already running on the port."); + } + + File dir = new File("/tmp/imagetransfer"); + if (!dir.exists()) { + dir.mkdirs(); + } + + String socketName = "/tmp/imagetransfer/" + socket + ".sock"; + // --persistent: Don't stop the service when the last client disconnects. + // --shared=NUM: Allow up to NUM clients to share the device (default 1), 0 for unlimited. Number of parallel connections is managed by the image server. + String systemdRunCmd = String.format( + "systemd-run --unit=%s --property=Restart=no qemu-nbd --export-name %s --socket %s --persistent --shared=0 %s %s %s", + unitName, + exportName, + socketName, + cmd.getFromCheckpointId() != null ? "-B " + cmd.getFromCheckpointId() : "", + cmd.getDirection().equals("download") ? "--read-only" : "", + volumePath + ); + + + Script startScript = new Script("/bin/bash", logger); + startScript.add("-c"); + startScript.add(systemdRunCmd); + String startResult = startScript.execute(); + + if (startResult != null) { + logger.error(String.format("Failed to start qemu-nbd service: %s", startResult)); + return new StartNBDServerAnswer(cmd, false, "Failed to start qemu-nbd service: " + startResult); + } + + // Wait with timeout until the service is up + int maxWaitSeconds = 10; + int pollIntervalMs = 1000; + int maxAttempts = (maxWaitSeconds * 1000) / pollIntervalMs; + boolean serviceActive = false; + + for (int attempt = 0; attempt < maxAttempts; attempt++) { + Script verifyScript = new Script("/bin/bash", logger); + verifyScript.add("-c"); + verifyScript.add(String.format("systemctl is-active --quiet %s", unitName)); + String verifyResult = verifyScript.execute(); + if (verifyResult == null) { + serviceActive = true; + logger.info(String.format("qemu-nbd service %s is now active (attempt %d)", unitName, 
attempt + 1)); + break; + } + try { + Thread.sleep(pollIntervalMs); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return new StartNBDServerAnswer(cmd, false, "Interrupted while waiting for qemu-nbd service to start"); + } + } + + if (!serviceActive) { + logger.error(String.format("qemu-nbd service %s failed to become active within %d seconds", unitName, maxWaitSeconds)); + return new StartNBDServerAnswer(cmd, false, + String.format("qemu-nbd service failed to start within %d seconds", maxWaitSeconds)); + } + + String transferUrl = String.format("nbd+unix:///%s", cmd.getSocket()); + return new StartNBDServerAnswer(cmd, true, "qemu-nbd service started for upload", + transferId, transferUrl); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStopBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStopBackupCommandWrapper.java new file mode 100644 index 000000000000..1185d89bc0b3 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStopBackupCommandWrapper.java @@ -0,0 +1,69 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+ +package com.cloud.hypervisor.kvm.resource.wrapper; + +import org.apache.cloudstack.backup.StopBackupAnswer; +import org.apache.cloudstack.backup.StopBackupCommand; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.libvirt.Connect; +import org.libvirt.Domain; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.resource.LibvirtConnection; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.script.Script; + +@ResourceWrapper(handles = StopBackupCommand.class) +public class LibvirtStopBackupCommandWrapper extends CommandWrapper { + protected Logger logger = LogManager.getLogger(getClass()); + + @Override + public Answer execute(StopBackupCommand cmd, LibvirtComputingResource resource) { + String vmName = cmd.getVmName(); + + try { + Connect conn = LibvirtConnection.getConnection(); + Domain dm = conn.domainLookupByName(vmName); + + if (dm == null) { + return new StopBackupAnswer(cmd, false, "Domain not found: " + vmName); + } + + // Execute virsh domjobabort + String abortCmd = String.format("virsh domjobabort %s", vmName); + + Script script = new Script("/bin/bash"); + script.add("-c"); + script.add(abortCmd); + String result = script.execute(); + + if (result != null && !result.isEmpty()) { + // Job abort may fail if no job is running, which is acceptable + logger.debug("domjobabort result: " + result); + } + + return new StopBackupAnswer(cmd, true, "Backup stopped successfully"); + + } catch (Exception e) { + return new StopBackupAnswer(cmd, false, "Error stopping backup: " + e.getMessage()); + } + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStopNBDServerCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStopNBDServerCommandWrapper.java new file mode 
100644 index 000000000000..57c7ebb706bc --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStopNBDServerCommandWrapper.java @@ -0,0 +1,72 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import org.apache.cloudstack.backup.StopNBDServerCommand; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.script.Script; + +@ResourceWrapper(handles = StopNBDServerCommand.class) +public class LibvirtStopNBDServerCommandWrapper extends CommandWrapper { + protected Logger logger = LogManager.getLogger(getClass()); + + private void resetService(String unitName) { + Script resetScript = new Script("/bin/bash", logger); + resetScript.add("-c"); + resetScript.add(String.format("systemctl reset-failed %s || true", unitName)); + resetScript.execute(); + } + + @Override + public Answer execute(StopNBDServerCommand cmd, LibvirtComputingResource resource) { + try { + String unitName = "qemu-nbd-" + 
cmd.getTransferId().hashCode(); + + // Check if the service is running + Script checkScript = new Script("/bin/bash", logger); + checkScript.add("-c"); + checkScript.add(String.format("systemctl is-active --quiet %s", unitName)); + String checkResult = checkScript.execute(); + if (checkResult != null) { + // Service is not running, but still reset-failed to clear any stale state + logger.info(String.format("qemu-nbd service %s is not running, resetting failed state", unitName)); + resetService(unitName); + return new Answer(cmd, true, "Image transfer finalized"); + } + + // Stop the systemd service + Script stopScript = new Script("/bin/bash", logger); + stopScript.add("-c"); + stopScript.add(String.format("systemctl stop %s", unitName)); + stopScript.execute(); + resetService(unitName); + + return new Answer(cmd, true, "Image transfer finalized"); + + } catch (Exception e) { + logger.error("Error finalizing image transfer for upload", e); + return new Answer(cmd, false, "Error finalizing image transfer: " + e.getMessage()); + } + } +} diff --git a/plugins/integrations/veeam-control-service/pom.xml b/plugins/integrations/veeam-control-service/pom.xml new file mode 100644 index 000000000000..4b1b1f4501aa --- /dev/null +++ b/plugins/integrations/veeam-control-service/pom.xml @@ -0,0 +1,61 @@ + + + 4.0.0 + cloud-plugin-integrations-veeam-control-service + Apache CloudStack Plugin - Veeam Control Service + + org.apache.cloudstack + cloudstack-plugins + 4.23.0.0-SNAPSHOT + ../../pom.xml + + + + org.apache.cloudstack + cloud-utils + ${project.version} + + + org.apache.cloudstack + cloud-api + ${project.version} + + + org.apache.cloudstack + cloud-engine-schema + ${project.version} + + + org.eclipse.jetty + jetty-server + + + org.eclipse.jetty + jetty-servlet + ${cs.jetty.version} + + + com.fasterxml.jackson.dataformat + jackson-dataformat-xml + ${cs.jackson.version} + + + diff --git 
a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/RouteHandler.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/RouteHandler.java new file mode 100644 index 000000000000..693bfb287c68 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/RouteHandler.java @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam; + +import java.io.BufferedReader; +import java.io.IOException; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.cloudstack.veeam.utils.Negotiation; +import org.apache.logging.log4j.Logger; + +import com.cloud.utils.component.Adapter; + +public interface RouteHandler extends Adapter { + default int priority() { return 0; } + boolean canHandle(String method, String path) throws IOException; + void handle(HttpServletRequest req, HttpServletResponse resp, String path, Negotiation.OutFormat outFormat, VeeamControlServlet io) + throws IOException; + + default String getSanitizedPath(String path) { + // remove query params if exists + int qIdx = path.indexOf('?'); + if (qIdx != -1) { + return path.substring(0, qIdx); + } + return path; + } + + static String getRequestData(HttpServletRequest req, Logger logger) { + String data = RouteHandler.getRequestData(req); + logger.info("Received method: {} request. 
Request-data: {}", req.getMethod(), data); + return data; + } + + static String getRequestData(HttpServletRequest req) { + String contentType = req.getContentType(); + if (contentType == null) { + return null; + } + String mime = contentType.split(";")[0].trim().toLowerCase(); + if (!"application/json".equals(mime) && !"application/x-www-form-urlencoded".equals(mime)) { + return null; + } + try { + StringBuilder data = new StringBuilder(); + String line; + try (BufferedReader reader = req.getReader()) { + while ((line = reader.readLine()) != null) { + data.append(line); + } + } + return data.toString(); + } catch (IOException ignored) { + return null; + } + } + + static boolean isRequestAsync(HttpServletRequest req) { + String asyncStr = req.getParameter("async"); + return Boolean.TRUE.toString().equals(asyncStr); + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/VeeamControlServer.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/VeeamControlServer.java new file mode 100644 index 000000000000..a70babe9b279 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/VeeamControlServer.java @@ -0,0 +1,209 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam; + +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.Enumeration; +import java.util.List; + +import javax.servlet.DispatcherType; +import javax.servlet.http.HttpServletRequest; + +import org.apache.cloudstack.utils.server.ServerPropertiesUtil; +import org.apache.cloudstack.veeam.api.ApiRouteHandler; +import org.apache.cloudstack.veeam.filter.AllowedClientCidrsFilter; +import org.apache.cloudstack.veeam.filter.BearerOrBasicAuthFilter; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.eclipse.jetty.server.Handler; +import org.eclipse.jetty.server.HttpConfiguration; +import org.eclipse.jetty.server.HttpConnectionFactory; +import org.eclipse.jetty.server.RequestLog; +import org.eclipse.jetty.server.SecureRequestCustomizer; +import org.eclipse.jetty.server.Server; +import org.eclipse.jetty.server.ServerConnector; +import org.eclipse.jetty.server.SslConnectionFactory; +import org.eclipse.jetty.server.handler.HandlerList; +import org.eclipse.jetty.server.handler.RequestLogHandler; +import org.eclipse.jetty.servlet.FilterHolder; +import org.eclipse.jetty.servlet.ServletContextHandler; +import org.eclipse.jetty.servlet.ServletHolder; +import org.eclipse.jetty.util.ssl.SslContextFactory; +import org.jetbrains.annotations.NotNull; + +public class VeeamControlServer { + private static final Logger LOGGER = LogManager.getLogger(VeeamControlServer.class); + + private final VeeamControlService veeamControlService; + private Server server; + private List routeHandlers; + + public VeeamControlServer(List routeHandlers, VeeamControlService veeamControlService) { + this.routeHandlers = new ArrayList<>(routeHandlers); + this.routeHandlers.sort((a, b) -> 
Integer.compare(b.priority(), a.priority())); + this.veeamControlService = veeamControlService; + } + + public void startIfEnabled() throws Exception { + final boolean enabled = VeeamControlService.Enabled.value(); + if (!enabled) { + LOGGER.info("Veeam Control API server is disabled"); + return; + } + + final String keystorePath = ServerPropertiesUtil.getKeystoreFile(); + final String keystorePassword = ServerPropertiesUtil.getKeystorePassword(); + final String keyManagerPassword = ServerPropertiesUtil.getKeystorePassword(); + final boolean sslConfigured = StringUtils.isNotEmpty(keystorePath) && + StringUtils.isNotEmpty(keystorePassword) && + StringUtils.isNotEmpty(keyManagerPassword) && + Files.exists(Paths.get(keystorePath)); + final String bind = VeeamControlService.BindAddress.value(); + final int port = VeeamControlService.Port.value(); + String ctxPath = VeeamControlService.ContextPath.value(); + LOGGER.info("Veeam Control server - bind: {}, port: {}, context: {} with {} handlers", bind, port, ctxPath, + routeHandlers != null ? 
routeHandlers.size() : 0); + + + server = new Server(); + + if (sslConfigured) { + final SslContextFactory.Server sslContextFactory = new SslContextFactory.Server(); + sslContextFactory.setKeyStorePath(keystorePath); + sslContextFactory.setKeyStorePassword(keystorePassword); + sslContextFactory.setKeyManagerPassword(keyManagerPassword); + + final HttpConfiguration https = new HttpConfiguration(); + https.setSecureScheme("https"); + https.setSecurePort(port); + https.addCustomizer(new SecureRequestCustomizer()); + + final ServerConnector httpsConnector = new ServerConnector( + server, + new SslConnectionFactory(sslContextFactory, "http/1.1"), + new HttpConnectionFactory(https) + ); + httpsConnector.setHost(bind); + httpsConnector.setPort(port); + server.addConnector(httpsConnector); + + LOGGER.info("Veeam Control API server HTTPS enabled on {}:{}", bind, port); + } else { + final HttpConfiguration http = new HttpConfiguration(); + final ServerConnector httpConnector = new ServerConnector(server, new HttpConnectionFactory(http)); + httpConnector.setHost(bind); + httpConnector.setPort(port); + server.addConnector(httpConnector); + + LOGGER.warn("Veeam Control API server HTTPS is NOT configured (missing keystore path/passwords). 
" + + "Starting HTTP on {}:{} instead.", bind, port); + } + + final ServletContextHandler ctx = + new ServletContextHandler(ServletContextHandler.NO_SESSIONS); + ctx.setContextPath(ctxPath); + + // CIDR filter for all routes + AllowedClientCidrsFilter cidrFilter = new AllowedClientCidrsFilter(veeamControlService); + FilterHolder cidrHolder = new FilterHolder(cidrFilter); + ctx.addFilter(cidrHolder, ApiRouteHandler.BASE_ROUTE + "/*", EnumSet.of(DispatcherType.REQUEST)); + + // Bearer or Basic Auth for all routes + BearerOrBasicAuthFilter authFilter = new BearerOrBasicAuthFilter(veeamControlService); + FilterHolder authHolder = new FilterHolder(authFilter); + ctx.addFilter(authHolder, ApiRouteHandler.BASE_ROUTE + "/*", EnumSet.of(DispatcherType.REQUEST)); + + // Front controller servlet + ctx.addServlet(new ServletHolder(new VeeamControlServlet(routeHandlers)), "/*"); + + // Create a RequestLog that logs every request handled by the server (all contexts/paths) + server.setHandler(buildContextHandler(ctx)); + + server.start(); + + LOGGER.info("Started Veeam Control API server on {}:{} with context {}", bind, port, ctxPath); + } + + @NotNull + private static Handler buildContextHandler(ServletContextHandler ctx) { + // Handler for root ('/') path + final ServletContextHandler root = new ServletContextHandler(ServletContextHandler.NO_SESSIONS); + root.setContextPath("/"); + root.addServlet(new ServletHolder(new javax.servlet.http.HttpServlet() { + private static final long serialVersionUID = 1L; + + @Override + protected void doGet(javax.servlet.http.HttpServletRequest req, javax.servlet.http.HttpServletResponse resp) + throws java.io.IOException { + resp.setContentType("text/plain"); + resp.setStatus(javax.servlet.http.HttpServletResponse.SC_OK); + resp.getWriter().println("Veeam Control API"); + } + + @Override + protected void doPost(javax.servlet.http.HttpServletRequest req, javax.servlet.http.HttpServletResponse resp) + throws java.io.IOException { + doGet(req, 
resp); + } + }), "/*"); + + final RequestLog requestLog = (request, response) -> { + final String uri = request.getRequestURI() + + (request.getQueryString() != null ? "?" + request.getQueryString() : ""); + LOGGER.info("Request - remoteAddr: {}, method: {}, uri: {}, headers: {}, status: {}", + request.getRemoteAddr(), + request.getMethod(), + uri, + dumpRequestHeaders(request), + response.getStatus()); + }; + + final RequestLogHandler requestLogHandler = new RequestLogHandler(); + requestLogHandler.setRequestLog(requestLog); + + // Attach both the configured context and the root handler; keep ctx first so contextPath has priority + final HandlerList handlers = new HandlerList(); + handlers.setHandlers(new Handler[] { ctx, root }); + requestLogHandler.setHandler(handlers); + return requestLogHandler; + } + + public void stop() throws Exception { + if (server != null) { + server.stop(); + server = null; + } + } + + private static String dumpRequestHeaders(HttpServletRequest request) { + final StringBuilder sb = new StringBuilder(); + final Enumeration headerNames = request.getHeaderNames(); + while (headerNames.hasMoreElements()) { + final String name = headerNames.nextElement(); + final Enumeration values = request.getHeaders(name); + while (values.hasMoreElements()) { + sb.append(name).append("=").append(values.nextElement()).append("; "); + } + } + return sb.toString(); + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/VeeamControlService.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/VeeamControlService.java new file mode 100644 index 000000000000..159d7eead066 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/VeeamControlService.java @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
package org.apache.cloudstack.veeam;

import java.util.List;

import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.utils.CloudStackVersion;

import com.cloud.utils.component.PluggableService;

/**
 * Entry point of the Veeam Integration REST API plugin. Holds the plugin's
 * configuration keys and the operations the embedded HTTP server needs:
 * the client CIDR allow-list and Basic-auth credential validation.
 */
public interface VeeamControlService extends PluggableService, Configurable {
    String PLUGIN_NAME = "CloudStack Veeam Control Service";

    ConfigKey<Boolean> Enabled = new ConfigKey<>("Advanced", Boolean.class, "integration.veeam.control.enabled",
            "false", "Enable the Veeam Integration REST API server", false);
    ConfigKey<String> BindAddress = new ConfigKey<>("Advanced", String.class, "integration.veeam.control.bind.address",
            "127.0.0.1", "Bind address for Veeam Integration REST API server", false);
    ConfigKey<Integer> Port = new ConfigKey<>("Advanced", Integer.class, "integration.veeam.control.port",
            "8090", "Port for Veeam Integration REST API server", false);
    ConfigKey<String> ContextPath = new ConfigKey<>("Advanced", String.class, "integration.veeam.control.context.path",
            "/ovirt-engine", "Context path for Veeam Integration REST API server", false);
    ConfigKey<String> Username = new ConfigKey<>("Secure", String.class, "integration.veeam.control.api.username",
            "veeam", "Username for Basic Auth on Veeam Integration REST API server", true);
    ConfigKey<String> Password = new ConfigKey<>("Secure", String.class, "integration.veeam.control.api.password",
            "change-me", "Password for Basic Auth on Veeam Integration REST API server", true);
    ConfigKey<String> ServiceAccountId = new ConfigKey<>("Advanced", String.class,
            "integration.veeam.control.service.account", "",
            "ID of the service account used to perform operations on resources. " +
                    "Preferably an admin-level account with permissions to access resources across the environment " +
                    "and optionally assign them to other users.",
            true);
    ConfigKey<Boolean> InstanceRestoreAssignOwner = new ConfigKey<>("Advanced", Boolean.class,
            "integration.veeam.instance.restore.assign.owner",
            "false", "Attempt to assign restored Instance to the owner based on OVF and network " +
                    "details. If the assignment fails or set to false then the Instance will remain owned by the service " +
                    "account", true);
    // Fixed: description previously had an unbalanced quote around the example value.
    ConfigKey<String> AllowedClientCidrs = new ConfigKey<>("Advanced", String.class,
            "integration.veeam.control.allowed.client.cidrs",
            "", "Comma-separated list of CIDR blocks representing clients allowed to access the API. " +
                    "If empty, all clients will be allowed. Example: '192.168.1.1/24,192.168.2.100/32'", true);

    /**
     * @return validated client CIDRs allowed to reach the API; an empty list
     *         means all clients are allowed
     */
    List<String> getAllowedClientCidrs();

    /**
     * @param username the username presented by the client
     * @param password the password presented by the client
     * @return true when the supplied credentials match the configured ones
     */
    boolean validateCredentials(String username, String password);

    /** @return the plugin jar's Implementation-Version, or null when not running from a packaged jar. */
    static String getPackageVersion() {
        return VeeamControlService.class.getPackage().getImplementationVersion();
    }

    /** @return the parsed CloudStack version of this plugin, or null when absent or unparseable. */
    static CloudStackVersion getCSVersion() {
        try {
            return CloudStackVersion.parse(getPackageVersion());
        } catch (Exception e) {
            // The manifest version is legitimately missing in dev runs; treat as unknown.
            return null;
        }
    }
}
package org.apache.cloudstack.veeam;

import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;

import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.utils.cache.SingleCache;
import org.apache.cloudstack.veeam.utils.DataUtil;
import org.apache.commons.lang3.StringUtils;

import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.net.NetUtils;

/**
 * Default implementation of {@link VeeamControlService}: owns the embedded
 * Jetty server lifecycle, caches the client CIDR allow-list, and validates
 * API credentials in constant time.
 */
public class VeeamControlServiceImpl extends ManagerBase implements VeeamControlService {

    private List<RouteHandler> routeHandlers;
    private VeeamControlServer veeamControlServer;
    // Caches the parsed allow-list so the config value is not re-parsed per request.
    private SingleCache<List<String>> allowedClientCidrsCache;

    /**
     * Parses the {@code AllowedClientCidrs} setting into a list of valid IPv4
     * CIDRs. Blank config yields an empty list (meaning "allow all"); invalid
     * entries are logged and skipped rather than failing the whole list.
     */
    protected List<String> getAllowedClientCidrsInternal() {
        final String allowedClientCidrsStr = AllowedClientCidrs.value();
        if (StringUtils.isBlank(allowedClientCidrsStr)) {
            return Collections.emptyList();
        }
        // Sanitize and remove any incorrect CIDR entries
        return List.of(allowedClientCidrsStr.split(",")).stream()
                .map(String::trim)
                .filter(StringUtils::isNotBlank)
                .filter(cidr -> {
                    boolean valid = NetUtils.isValidIp4Cidr(cidr);
                    if (!valid) {
                        logger.warn("Invalid CIDR entry '{}' in allowed client CIDRs, ignoring", cidr);
                    }
                    return valid;
                }).collect(Collectors.toList());
    }

    public List<RouteHandler> getRouteHandlers() {
        return routeHandlers;
    }

    public void setRouteHandlers(final List<RouteHandler> routeHandlers) {
        this.routeHandlers = routeHandlers;
    }

    @Override
    public List<String> getAllowedClientCidrs() {
        return allowedClientCidrsCache.get();
    }

    @Override
    public boolean validateCredentials(String username, String password) {
        // Constant-time comparison prevents timing side channels on the credentials.
        return DataUtil.constantTimeEquals(Username.value(), username) &&
                DataUtil.constantTimeEquals(Password.value(), password);
    }

    @Override
    public boolean start() {
        allowedClientCidrsCache = new SingleCache<>(30, this::getAllowedClientCidrsInternal);
        veeamControlServer = new VeeamControlServer(getRouteHandlers(), this);
        try {
            veeamControlServer.startIfEnabled();
        } catch (Exception e) {
            logger.error("Failed to start Veeam Control API server, continuing without it", e);
        }
        return true;
    }

    @Override
    public boolean stop() {
        if (veeamControlServer == null) {
            // start() never ran (or the context is shutting down early); nothing to stop.
            return true;
        }
        try {
            veeamControlServer.stop();
        } catch (Exception e) {
            logger.error("Failed to stop Veeam Control API server cleanly", e);
        }
        return true;
    }

    @Override
    public List<Class<?>> getCommands() {
        return List.of();
    }

    @Override
    public String getConfigComponentName() {
        return VeeamControlService.class.getSimpleName();
    }

    @Override
    public ConfigKey<?>[] getConfigKeys() {
        return new ConfigKey<?>[] {
                Enabled,
                BindAddress,
                Port,
                ContextPath,
                Username,
                Password,
                ServiceAccountId,
                InstanceRestoreAssignOwner,
                AllowedClientCidrs
        };
    }
}
package org.apache.cloudstack.veeam;

import java.io.IOException;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.cloudstack.veeam.utils.Mapper;
import org.apache.cloudstack.veeam.utils.Negotiation;
import org.apache.cloudstack.veeam.utils.ResponseWriter;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

/**
 * Front controller for the Veeam Control API: negotiates the response format,
 * dispatches each request to the first {@link RouteHandler} that accepts the
 * method/path pair, and translates {@link Error} faults into fault responses.
 */
public class VeeamControlServlet extends HttpServlet {
    private static final Logger LOGGER = LogManager.getLogger(VeeamControlServlet.class);
    // Verbose per-request header dump; off by default, flip for local debugging only.
    private static final boolean LOG_REQUESTS = false;

    private final ResponseWriter writer;
    private final Mapper mapper;
    private final List<RouteHandler> routeHandlers;

    public VeeamControlServlet(List<RouteHandler> routeHandlers) {
        this.routeHandlers = routeHandlers;
        mapper = new Mapper();
        writer = new ResponseWriter(mapper);
    }

    public ResponseWriter getWriter() {
        return writer;
    }

    public Mapper getMapper() {
        return mapper;
    }

    /**
     * Single entry point for all HTTP methods: routes "/" to the info endpoint,
     * everything else to the first matching route handler, and 404s the rest.
     */
    @Override
    protected void service(HttpServletRequest req, HttpServletResponse resp) throws IOException, ServletException {
        String method = req.getMethod();
        String path = normalize(req.getPathInfo());
        Negotiation.OutFormat outFormat = Negotiation.responseFormat(req);

        LOGGER.info("Received {} request for {} with out format: {}", method, path, outFormat);

        logRequest(req, method, path);

        try {
            if ("/".equals(path)) {
                handleRoot(req, resp, outFormat);
                return;
            }

            if (CollectionUtils.isNotEmpty(this.routeHandlers)) {
                for (RouteHandler handler : this.routeHandlers) {
                    if (handler.canHandle(method, path)) {
                        handler.handle(req, resp, path, outFormat, this);
                        return;
                    }
                }
            }
            notFound(resp, null, outFormat);
        } catch (Error e) {
            writer.writeFault(resp, e.status, e.message, null, outFormat);
        }
    }

    /** Best-effort debug dump of the full request; never throws. */
    private static void logRequest(HttpServletRequest req, String method, String path) {
        if (!LOG_REQUESTS) {
            return;
        }
        try {
            StringBuilder details = new StringBuilder();
            details.append("Request details: Method: ").append(method).append(", Path: ").append(path);
            details.append(", Query: ").append(req.getQueryString() == null ? "" : req.getQueryString());
            details.append(", Headers: ");
            Enumeration<String> headerNames = req.getHeaderNames();
            while (headerNames != null && headerNames.hasMoreElements()) {
                String name = headerNames.nextElement();
                details.append(name).append("=").append(req.getHeader(name)).append("; ");
            }
            LOGGER.debug(details.toString());
        } catch (Exception e) {
            LOGGER.debug("Failed to capture request details", e);
        }
    }

    /** Maps a null/blank servlet pathInfo to "/" so routing always sees a path. */
    private String normalize(String pathInfo) {
        if (pathInfo == null || pathInfo.isBlank()) {
            return "/";
        }
        return pathInfo;
    }

    /**
     * Handles GET/POST on the API root with a small name/version document.
     * Other methods are rejected with a 400 fault (405 is intentionally not
     * part of this API's fault vocabulary for the root path).
     */
    protected void handleRoot(HttpServletRequest req, HttpServletResponse resp, Negotiation.OutFormat outFormat)
            throws IOException {

        String method = req.getMethod();
        if (!"GET".equals(method) && !"POST".equals(method)) {
            throw Error.badRequest("Unsupported method for root: " + method);
        }

        // Implementation-Version is null outside a packaged jar, and Map.of()
        // rejects null values — substitute a placeholder instead of throwing NPE.
        String version = this.getClass().getPackage().getImplementationVersion();
        writer.write(resp, 200, Map.of(
                "name", VeeamControlService.PLUGIN_NAME,
                "pluginVersion", version != null ? version : "unknown"), outFormat);
    }

    /** Writes a 405 fault and advertises the allowed methods via the Allow header. */
    public void methodNotAllowed(final HttpServletResponse resp, final String allow, final Negotiation.OutFormat outFormat) throws IOException {
        resp.setHeader("Allow", allow);
        writer.writeFault(resp, HttpServletResponse.SC_METHOD_NOT_ALLOWED, "Method Not Allowed", "Allowed methods: " + allow, outFormat);
    }

    /** Writes a 400 fault with the given detail. */
    public void badRequest(final HttpServletResponse resp, String detail, Negotiation.OutFormat outFormat) throws IOException {
        writer.writeFault(resp, HttpServletResponse.SC_BAD_REQUEST, "Bad request", detail, outFormat);
    }

    /** Writes a 404 fault with the given detail. */
    public void notFound(final HttpServletResponse resp, String detail, Negotiation.OutFormat outFormat) throws IOException {
        writer.writeFault(resp, HttpServletResponse.SC_NOT_FOUND, "Not found", detail, outFormat);
    }

    /**
     * Unchecked fault carrying an HTTP status; caught by {@link #service} and
     * rendered as a fault response. NOTE(review): the name shadows
     * {@code java.lang.Error}; kept for compatibility with existing handlers,
     * but qualify {@code java.lang.Error} explicitly if ever needed here.
     */
    public static class Error extends RuntimeException {
        final int status;
        final String message;
        public Error(int status, String message) {
            super(message);
            this.status = status;
            this.message = message;
        }
        public static Error badRequest(String msg) { return new Error(400, msg); }
        public static Error unauthorized(String msg) { return new Error(401, msg); }
    }
}
package org.apache.cloudstack.veeam.adapter;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

import org.apache.cloudstack.api.BaseCmd;

/**
 * Declares that an adapter method is backed by a CloudStack API command.
 * {@code ApiAccessInterceptor} reads this at runtime to perform the same
 * API-level access check the named command would undergo, and to register a
 * service-account call context around the invocation.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface ApiAccess {
    /** The CloudStack command class whose access rules gate the annotated method. */
    Class<? extends BaseCmd> command();
}
package org.apache.cloudstack.veeam.adapter;

import java.lang.reflect.Method;

import javax.inject.Inject;

import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.context.CallContext;

import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.user.User;
import com.cloud.utils.Pair;

/**
 * AOP interceptor enforcing {@link ApiAccess}: before an annotated adapter
 * method runs it checks that the service account may invoke the backing
 * CloudStack API, registers a {@link CallContext} for the service user, and
 * always unregisters it afterwards. Methods without the annotation (on either
 * the declaring type or the concrete class) proceed untouched.
 */
public class ApiAccessInterceptor implements MethodInterceptor {
    @Inject
    AccountManager accountManager;

    @Override
    public Object invoke(MethodInvocation invocation) throws Throwable {
        final Object target = invocation.getThis();
        if (target == null) {
            return invocation.proceed();
        }

        final Method invoked = invocation.getMethod();
        // The annotation may live on the concrete class rather than the declaring interface.
        ApiAccess apiAccess = invoked.getAnnotation(ApiAccess.class);
        if (apiAccess == null) {
            final Method concrete = target.getClass().getMethod(invoked.getName(), invoked.getParameterTypes());
            apiAccess = concrete.getAnnotation(ApiAccess.class);
        }
        if (apiAccess == null) {
            return invocation.proceed();
        }

        final ServerAdapter adapter = (ServerAdapter) target;
        final Pair<User, Account> serviceUserAccount = adapter.getServiceAccount();
        final String apiName = BaseCmd.getCommandNameByClass(apiAccess.command());

        accountManager.checkApiAccess(serviceUserAccount.second(), apiName);

        CallContext.register(serviceUserAccount.first(), serviceUserAccount.second());
        try {
            return invocation.proceed();
        } finally {
            CallContext.unregister();
        }
    }
}
license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.adapter; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.Role; +import org.apache.cloudstack.acl.RolePermissionEntity; +import org.apache.cloudstack.acl.RoleService; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.acl.Rule; +import org.apache.cloudstack.acl.SecurityChecker; +import org.apache.cloudstack.affinity.AffinityGroupVO; +import org.apache.cloudstack.affinity.dao.AffinityGroupDao; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiServerService; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.command.admin.backup.CreateImageTransferCmd; +import org.apache.cloudstack.api.command.admin.backup.DeleteVmCheckpointCmd; +import org.apache.cloudstack.api.command.admin.backup.FinalizeBackupCmd; +import 
org.apache.cloudstack.api.command.admin.backup.FinalizeImageTransferCmd; +import org.apache.cloudstack.api.command.admin.backup.ListImageTransfersCmd; +import org.apache.cloudstack.api.command.admin.backup.ListVmCheckpointsCmd; +import org.apache.cloudstack.api.command.admin.backup.StartBackupCmd; +import org.apache.cloudstack.api.command.admin.cluster.ListClustersCmd; +import org.apache.cloudstack.api.command.admin.host.ListHostsCmd; +import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd; +import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; +import org.apache.cloudstack.api.command.admin.vm.DeployVMCmdByAdmin; +import org.apache.cloudstack.api.command.user.backup.ListBackupsCmd; +import org.apache.cloudstack.api.command.user.job.ListAsyncJobsCmd; +import org.apache.cloudstack.api.command.user.job.QueryAsyncJobResultCmd; +import org.apache.cloudstack.api.command.user.network.ListNetworksCmd; +import org.apache.cloudstack.api.command.user.offering.ListServiceOfferingsCmd; +import org.apache.cloudstack.api.command.user.tag.ListTagsCmd; +import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; +import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; +import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd; +import org.apache.cloudstack.api.command.user.vm.ListNicsCmd; +import org.apache.cloudstack.api.command.user.vm.ListVMsCmd; +import org.apache.cloudstack.api.command.user.vm.StartVMCmd; +import org.apache.cloudstack.api.command.user.vm.StopVMCmd; +import org.apache.cloudstack.api.command.user.vm.UpdateVMCmd; +import org.apache.cloudstack.api.command.user.vmsnapshot.CreateVMSnapshotCmd; +import org.apache.cloudstack.api.command.user.vmsnapshot.DeleteVMSnapshotCmd; +import org.apache.cloudstack.api.command.user.vmsnapshot.ListVMSnapshotCmd; +import org.apache.cloudstack.api.command.user.vmsnapshot.RevertToVMSnapshotCmd; +import org.apache.cloudstack.api.command.user.volume.AssignVolumeCmd; +import 
org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.DeleteVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.DestroyVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.ListVolumesCmd; +import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.UpdateVolumeCmd; +import org.apache.cloudstack.api.command.user.zone.ListZonesCmd; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; +import org.apache.cloudstack.backup.BackupVO; +import org.apache.cloudstack.backup.ImageTransfer.Direction; +import org.apache.cloudstack.backup.ImageTransfer.Format; +import org.apache.cloudstack.backup.ImageTransferVO; +import org.apache.cloudstack.backup.KVMBackupExportService; +import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.ImageTransferDao; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.framework.jobs.dao.AsyncJobDao; +import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; +import org.apache.cloudstack.query.QueryService; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.veeam.VeeamControlService; +import org.apache.cloudstack.veeam.api.TagsRouteHandler; +import org.apache.cloudstack.veeam.api.converter.AsyncJobJoinVOToJobConverter; +import org.apache.cloudstack.veeam.api.converter.BackupVOToBackupConverter; +import org.apache.cloudstack.veeam.api.converter.ClusterVOToClusterConverter; +import org.apache.cloudstack.veeam.api.converter.DataCenterJoinVOToDataCenterConverter; +import 
org.apache.cloudstack.veeam.api.converter.HostJoinVOToHostConverter; +import org.apache.cloudstack.veeam.api.converter.ImageTransferVOToImageTransferConverter; +import org.apache.cloudstack.veeam.api.converter.NetworkVOToNetworkConverter; +import org.apache.cloudstack.veeam.api.converter.NetworkVOToVnicProfileConverter; +import org.apache.cloudstack.veeam.api.converter.NicVOToNicConverter; +import org.apache.cloudstack.veeam.api.converter.ResourceTagVOToTagConverter; +import org.apache.cloudstack.veeam.api.converter.StoreVOToStorageDomainConverter; +import org.apache.cloudstack.veeam.api.converter.UserVmJoinVOToVmConverter; +import org.apache.cloudstack.veeam.api.converter.UserVmVOToCheckpointConverter; +import org.apache.cloudstack.veeam.api.converter.VmSnapshotVOToSnapshotConverter; +import org.apache.cloudstack.veeam.api.converter.VolumeJoinVOToDiskConverter; +import org.apache.cloudstack.veeam.api.dto.Backup; +import org.apache.cloudstack.veeam.api.dto.BaseDto; +import org.apache.cloudstack.veeam.api.dto.Checkpoint; +import org.apache.cloudstack.veeam.api.dto.Cluster; +import org.apache.cloudstack.veeam.api.dto.DataCenter; +import org.apache.cloudstack.veeam.api.dto.Disk; +import org.apache.cloudstack.veeam.api.dto.DiskAttachment; +import org.apache.cloudstack.veeam.api.dto.Host; +import org.apache.cloudstack.veeam.api.dto.ImageTransfer; +import org.apache.cloudstack.veeam.api.dto.Job; +import org.apache.cloudstack.veeam.api.dto.Network; +import org.apache.cloudstack.veeam.api.dto.Nic; +import org.apache.cloudstack.veeam.api.dto.OvfXmlUtil; +import org.apache.cloudstack.veeam.api.dto.ResourceAction; +import org.apache.cloudstack.veeam.api.dto.Snapshot; +import org.apache.cloudstack.veeam.api.dto.StorageDomain; +import org.apache.cloudstack.veeam.api.dto.Tag; +import org.apache.cloudstack.veeam.api.dto.Vm; +import org.apache.cloudstack.veeam.api.dto.VmAction; +import org.apache.cloudstack.veeam.api.dto.VnicProfile; +import 
org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.StringUtils; +import org.jetbrains.annotations.NotNull; + +import com.cloud.api.query.dao.AsyncJobJoinDao; +import com.cloud.api.query.dao.DataCenterJoinDao; +import com.cloud.api.query.dao.HostJoinDao; +import com.cloud.api.query.dao.StoragePoolJoinDao; +import com.cloud.api.query.dao.UserVmJoinDao; +import com.cloud.api.query.dao.VolumeJoinDao; +import com.cloud.api.query.vo.AsyncJobJoinVO; +import com.cloud.api.query.vo.DataCenterJoinVO; +import com.cloud.api.query.vo.HostJoinVO; +import com.cloud.api.query.vo.StoragePoolJoinVO; +import com.cloud.api.query.vo.UserVmJoinVO; +import com.cloud.api.query.vo.VolumeJoinVO; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.domain.Domain; +import com.cloud.domain.dao.DomainDao; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.network.NetworkModel; +import com.cloud.network.Networks; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.offering.ServiceOffering; +import com.cloud.org.Grouping; +import com.cloud.projects.Project; +import com.cloud.projects.ProjectManager; +import com.cloud.server.ResourceTag; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.Storage; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeApiService; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VMTemplateDao; +import 
com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.tags.ResourceTagVO; +import com.cloud.tags.dao.ResourceTagDao; +import com.cloud.user.Account; +import com.cloud.user.AccountService; +import com.cloud.user.DomainService; +import com.cloud.user.User; +import com.cloud.user.UserAccount; +import com.cloud.user.UserDataVO; +import com.cloud.user.dao.UserDataDao; +import com.cloud.uservm.UserVm; +import com.cloud.utils.EnumUtils; +import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.db.Filter; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.NicVO; +import com.cloud.vm.UserVmManager; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VmDetailConstants; +import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDetailsDao; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; + +// ToDo: check access for list APIs when not ROOT admin + +public class ServerAdapter extends ManagerBase { + private static final String SERVICE_ACCOUNT_NAME = "veemserviceuser"; + private static final String SERVICE_ACCOUNT_ROLE_NAME = "Veeam Service Role"; + private static final String SERVICE_ACCOUNT_FIRST_NAME = "Veeam"; + private static final String SERVICE_ACCOUNT_LAST_NAME = "Service User"; + private static final List> SERVICE_ACCOUNT_ROLE_ALLOWED_APIS = Arrays.asList( + QueryAsyncJobResultCmd.class, + ListVMsCmd.class, + DeployVMCmd.class, + StartVMCmd.class, + StopVMCmd.class, + DestroyVMCmd.class, + ListVolumesCmd.class, + CreateVolumeCmd.class, + DeleteVolumeCmd.class, + AttachVolumeCmd.class, + DetachVolumeCmd.class, + ResizeVolumeCmd.class, + ListNetworksCmd.class + ); + private static final List SUPPORTED_STORAGE_TYPES = Arrays.asList( + Storage.StoragePoolType.Filesystem, + 
Storage.StoragePoolType.NetworkFilesystem, + Storage.StoragePoolType.SharedMountPoint + ); + private static final String VM_TA_KEY = "veeam_tag"; + private static final String WORKER_VM_GUEST_CPU_MODE = "host-passthrough"; + + @Inject + RoleService roleService; + + @Inject + AccountService accountService; + + @Inject + DataCenterDao dataCenterDao; + + @Inject + DataCenterJoinDao dataCenterJoinDao; + + @Inject + StoragePoolJoinDao storagePoolJoinDao; + + @Inject + ClusterDao clusterDao; + + @Inject + HostJoinDao hostJoinDao; + + @Inject + NetworkDao networkDao; + + @Inject + UserVmDao userVmDao; + + @Inject + UserVmJoinDao userVmJoinDao; + + @Inject + VMInstanceDetailsDao vmInstanceDetailsDao; + + @Inject + VolumeDao volumeDao; + + @Inject + VolumeJoinDao volumeJoinDao; + + @Inject + VolumeDetailsDao volumeDetailsDao; + + @Inject + VolumeApiService volumeApiService; + + @Inject + PrimaryDataStoreDao primaryDataStoreDao; + + @Inject + ImageTransferDao imageTransferDao; + + @Inject + KVMBackupExportService kvmBackupExportService; + + @Inject + QueryService queryService; + + @Inject + ServiceOfferingDao serviceOfferingDao; + + @Inject + VMTemplateDao templateDao; + + @Inject + UserVmManager userVmManager; + + @Inject + NicDao nicDao; + + @Inject + ApiServerService apiServerService; + + @Inject + AsyncJobDao asyncJobDao; + + @Inject + AsyncJobJoinDao asyncJobJoinDao; + + @Inject + VMSnapshotDao vmSnapshotDao; + + @Inject + BackupDao backupDao; + + @Inject + ResourceTagDao resourceTagDao; + + @Inject + NetworkModel networkModel; + + @Inject + ProjectManager projectManager; + + @Inject + AffinityGroupDao affinityGroupDao; + + @Inject + UserDataDao userDataDao; + + @Inject + DomainService domainService; + + @Inject + DomainDao domainDao; + + protected static Tag getDummyTagByName(String name) { + Tag tag = new Tag(); + String id = UUID.nameUUIDFromBytes(String.format("veeam:%s", name.toLowerCase()).getBytes()).toString(); + tag.setId(id); + tag.setName(name); + 
tag.setDescription(String.format("Default %s tag", name.toLowerCase())); + tag.setHref(VeeamControlService.ContextPath.value() + TagsRouteHandler.BASE_ROUTE + "/" + id); + tag.setParent(ResourceTagVOToTagConverter.getRootTagRef()); + return tag; + } + + protected static Map getDummyTags() { + Map tags = new HashMap<>(); + Tag rootTag = ResourceTagVOToTagConverter.getRootTag(); + tags.put(rootTag.getId(), rootTag); + return tags; + } + + protected Role createServiceAccountRole() { + Role role = roleService.createRole(SERVICE_ACCOUNT_ROLE_NAME, RoleType.User, + SERVICE_ACCOUNT_ROLE_NAME, false); + for (Class allowedApi : SERVICE_ACCOUNT_ROLE_ALLOWED_APIS) { + final String apiName = BaseCmd.getCommandNameByClass(allowedApi); + roleService.createRolePermission(role, new Rule(apiName), RolePermissionEntity.Permission.ALLOW, + String.format("Allow %s", apiName)); + } + roleService.createRolePermission(role, new Rule("*"), RolePermissionEntity.Permission.DENY, + "Deny all"); + logger.debug("Created default role for Veeam service account in projects: {}", role); + return role; + } + + protected Role getServiceAccountRole() { + List roles = roleService.findRolesByName(SERVICE_ACCOUNT_ROLE_NAME); + if (CollectionUtils.isNotEmpty(roles)) { + Role role = roles.get(0); + logger.debug("Found default role for Veeam service account in projects: {}", role); + return role; + } + return createServiceAccountRole(); + } + + protected UserAccount createServiceAccount() { + CallContext.register(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM); + try { + Role role = getServiceAccountRole(); + UserAccount userAccount = accountService.createUserAccount(SERVICE_ACCOUNT_NAME, + UUID.randomUUID().toString(), SERVICE_ACCOUNT_FIRST_NAME, + SERVICE_ACCOUNT_LAST_NAME, null, null, SERVICE_ACCOUNT_NAME, Account.Type.NORMAL, role.getId(), + 1L, null, null, null, null, User.Source.NATIVE); + logger.debug("Created Veeam service account: {}", userAccount); + return userAccount; + } finally { + 
CallContext.unregister(); + } + } + + protected Pair getDefaultServiceAccount() { + UserAccount userAccount = accountService.getActiveUserAccount(SERVICE_ACCOUNT_NAME, 1L); + if (userAccount == null) { + userAccount = createServiceAccount(); + } else { + logger.debug("Veeam service user account found: {}", userAccount); + } + return new Pair<>(accountService.getActiveUser(userAccount.getId()), + accountService.getActiveAccountById(userAccount.getAccountId())); + } + + protected void waitForJobCompletion(long jobId) { + long timeoutNanos = TimeUnit.MINUTES.toNanos(5); + final long deadline = System.nanoTime() + timeoutNanos; + long sleepMillis = 500; + while (true) { + AsyncJobVO job = asyncJobDao.findById(jobId); + if (job == null) { + logger.warn("Async job with ID {} not found", jobId); + return; + } + if (job.getStatus() == AsyncJobVO.Status.SUCCEEDED || job.getStatus() == AsyncJobVO.Status.FAILED) { + return; + } + if (System.nanoTime() > deadline) { + logger.warn("Timed out waiting for {} completion", job); + } + try { + Thread.sleep(sleepMillis); + // back off gradually to reduce DB pressure + sleepMillis = Math.min(5000, sleepMillis + 500); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.warn("Interrupted while waiting for async job completion"); + } + } + } + + protected void waitForJobCompletion(AsyncJobJoinVO job) { + if (job == null) { + logger.warn("Async job not found"); + return; + } + if (job.getStatus() == AsyncJobVO.Status.SUCCEEDED.ordinal() || + job.getStatus() == AsyncJobVO.Status.FAILED.ordinal()) { + logger.warn("Async job with ID {} already completed with status {}", job.getId(), job.getStatus()); + } + waitForJobCompletion(job.getId()); + } + + protected ApiServerService.AsyncCmdResult processAsyncCmdWithContext(BaseAsyncCmd cmd, Map params) + throws Exception { + final CallContext ctx = CallContext.current(); + final long callerUserId = ctx.getCallingUserId(); + final Account caller = 
ctx.getCallingAccount(); + return apiServerService.processAsyncCmd(cmd, params, ctx, callerUserId, caller); + } + + protected Account getOwnerForInstanceCreation(Vm request) { + if (!VeeamControlService.InstanceRestoreAssignOwner.value()) { + return null; + } + String accountUuid = request.getAccountId(); + if (StringUtils.isBlank(accountUuid)) { + return null; + } + Account account = accountService.getActiveAccountByUuid(accountUuid); + if (account == null) { + logger.warn("Account with ID {} not found, unable to determine owner for VM creation request", accountUuid); + return null; + } + return account; + } + + protected Ternary getOwnerDetailsForInstanceCreation(Account account) { + if (account == null) { + return new Ternary<>(null, null, null); + } + String accountName = account.getAccountName(); + Long projectId = null; + if (Account.Type.PROJECT.equals(account.getType())) { + Project project = projectManager.findByProjectAccountId(account.getId()); + if (project == null) { + logger.warn("Project for {} not found, unable to determine owner for VM creation request", account); + return new Ternary<>(null, null, null); + } + projectId = project.getId(); + accountName = null; + } + return new Ternary<>(account.getDomainId(), accountName, projectId); + } + + protected Pair, String> getResourceOwnerFilters() { + final Account caller = CallContext.current().getCallingAccount(); + final Account.Type type = caller.getType(); + if (Account.Type.ADMIN.equals(type)) { + return new Pair<>(null, null); + } + List permittedAccountIds = null; + String domainPath = null; + if (Account.Type.DOMAIN_ADMIN.equals(type) || Account.Type.NORMAL.equals(type)) { + permittedAccountIds = projectManager.listPermittedProjectAccounts(caller.getId()); + permittedAccountIds.add(caller.getId()); + } + if (Account.Type.DOMAIN_ADMIN.equals(type)) { + Domain domain = domainService.getDomain(caller.getDomainId()); + if (domain == null) { + throw new InvalidParameterValueException("Invalid service 
account specified"); + } + domainPath = domain.getPath(); + } + if (Account.Type.PROJECT.equals(type)) { + Project project = projectManager.findByProjectAccountId(caller.getId()); + if (project == null) { + throw new InvalidParameterValueException("Invalid service account specified"); + } + permittedAccountIds = new ArrayList<>(); + permittedAccountIds.add(caller.getId()); + } + return new Pair<>(permittedAccountIds, domainPath); + } + + protected Pair, List> getResourceOwnerFiltersWithDomainIds() { + Pair, String> filters = getResourceOwnerFilters(); + if (StringUtils.isNotBlank(filters.second())) { + return new Pair<>(filters.first(), domainDao.getDomainChildrenIds(filters.second())); + } + return new Pair<>(filters.first(), null); + } + + protected ServiceOfferingVO getServiceOfferingFromRequest(com.cloud.dc.DataCenter zone, Account account, + String uuid, int cpu, int memory) { + if (StringUtils.isBlank(uuid)) { + return null; + } + ServiceOfferingVO offering = serviceOfferingDao.findByUuid(uuid); + if (offering == null) { + logger.warn("Service offering with ID {} linked with the VM request not found", uuid); + return null; + } + try { + accountService.checkAccess(account, offering, zone); + } catch (PermissionDeniedException e) { + logger.warn("Service offering with ID {} linked with the VM request is not accessible for the account {}. Offering: {}, zone: {}", + uuid, account, offering, zone); + return null; + } + if (!offering.isCustomized() && (offering.getCpu() != cpu || offering.getRamSize() != memory)) { + logger.warn("Service offering with ID {} linked with the VM request has different CPU or memory than requested. 
Offering: {}, requested CPU: {}, requested memory: {}", + uuid, offering, cpu, memory); + return null; + } + if (offering.isCustomized()) { + Map params = Map.of( + VmDetailConstants.CPU_NUMBER, String.valueOf(cpu), + VmDetailConstants.MEMORY, String.valueOf(memory) + ); + try { + userVmManager.validateCustomParameters(offering, params); + offering.setCpu(cpu); + offering.setRamSize(memory); + } catch (InvalidParameterValueException e) { + logger.warn("Service offering with ID {} linked with the VM request is customized but does not support requested CPU or memory. Offering: {}, requested CPU: {}, requested memory: {}", + uuid, offering, cpu, memory); + return null; + } + } + return offering; + } + + protected ServiceOffering getServiceOfferingIdForVmCreation(com.cloud.dc.DataCenter zone, Account account, + String serviceOfferingUuid, int cpu, int memory) { + ServiceOfferingVO offering = getServiceOfferingFromRequest(zone, account, serviceOfferingUuid, cpu, memory); + if (offering != null) { + return offering; + } + ListServiceOfferingsCmd cmd = new ListServiceOfferingsCmd(); + ComponentContext.inject(cmd); + cmd.setZoneId(zone.getId()); + cmd.setCpuNumber(cpu); + cmd.setMemory(memory); + ListResponse offerings = queryService.searchForServiceOfferings(cmd); + if (offerings.getResponses().isEmpty()) { + return null; + } + String uuid = offerings.getResponses().get(0).getId(); + offering = serviceOfferingDao.findByUuid(uuid); + if (offering.isCustomized()) { + offering.setCpu(cpu); + offering.setRamSize(memory); + } + return offering; + } + + protected VMTemplateVO getTemplateForInstanceCreation(String templateUuid) { + if (StringUtils.isBlank(templateUuid)) { + return null; + } + VMTemplateVO template = templateDao.findByUuid(templateUuid); + if (template == null) { + logger.warn("Template with ID {} not found, VM will be created with default template", templateUuid); + return null; + } + return template; + } + + protected Vm createInstance(com.cloud.dc.DataCenter 
zone, Long clusterId, Account owner, Long domainId, + String accountName, Long projectId, String name, String displayName, String serviceOfferingUuid, + int cpu, int memory, String templateUuid, String userdata, ApiConstants.BootType bootType, + ApiConstants.BootMode bootMode, String affinityGroupId, String userDataId, Map details) { + Account account = owner != null ? owner : CallContext.current().getCallingAccount(); + ServiceOffering serviceOffering = getServiceOfferingIdForVmCreation(zone, account, serviceOfferingUuid, cpu, + memory); + if (serviceOffering == null) { + throw new CloudRuntimeException("No service offering found for VM creation with specified CPU and memory"); + } + DeployVMCmdByAdmin cmd = new DeployVMCmdByAdmin(); + cmd.setHttpMethod(BaseCmd.HTTPMethod.POST.name()); + ComponentContext.inject(cmd); + cmd.setZoneId(zone.getId()); + cmd.setClusterId(clusterId); + if (domainId != null && StringUtils.isNotEmpty(accountName)) { + cmd.setDomainId(domainId); + cmd.setAccountName(accountName); + } + if (projectId != null) { + cmd.setProjectId(projectId); + } + cmd.setName(name); + if (displayName != null) { + cmd.setDisplayName(displayName); + } + cmd.setServiceOfferingId(serviceOffering.getId()); + if (StringUtils.isNotEmpty(userdata)) { + cmd.setUserData(Base64.getEncoder().encodeToString(userdata.getBytes(StandardCharsets.UTF_8))); + } + if (bootType != null) { + cmd.setBootType(bootType.toString()); + } + if (bootMode != null) { + cmd.setBootMode(bootMode.toString()); + } + VMTemplateVO template = getTemplateForInstanceCreation(templateUuid); + if (template != null) { + cmd.setTemplateId(template.getId()); + } + if (StringUtils.isNotBlank(affinityGroupId)) { + AffinityGroupVO group = affinityGroupDao.findByUuid(affinityGroupId); + if (group == null) { + logger.warn("Failed to find affinity group with ID {} specified in Instance creation request, " + + "skipping affinity group assignment", affinityGroupId); + } else { + 
cmd.setAffinityGroupIds(List.of(group.getId())); + } + } + if (StringUtils.isNotBlank(userDataId)) { + UserDataVO userData = userDataDao.findByUuid(userDataId); + if (userData == null) { + logger.warn("Failed to find userdata with ID {} specified in Instance creation request, " + + "skipping userdata assignment", userDataId); + } else { + cmd.setUserDataId(userData.getId()); + } + } + cmd.setHypervisor(Hypervisor.HypervisorType.KVM.name()); + Map instanceDetails = getDetailsForInstanceCreation(userdata, serviceOffering, details); + if (MapUtils.isNotEmpty(instanceDetails)) { + Map> map = new HashMap<>(); + map.put(0, instanceDetails); + cmd.setDetails(map); + } + cmd.setBlankInstance(true); + try { + UserVm vm = userVmManager.createVirtualMachine(cmd); + vm = userVmManager.finalizeCreateVirtualMachine(vm.getId()); + UserVmJoinVO vo = userVmJoinDao.findById(vm.getId()); + return UserVmJoinVOToVmConverter.toVm(vo, this::getHostById, this::getDetailsByInstanceId, + this::listTagsByInstanceId, this::listDiskAttachmentsByInstanceId, this::listNicsByInstance, false); + } catch (InsufficientCapacityException | ResourceUnavailableException | ResourceAllocationException | CloudRuntimeException e) { + throw new CloudRuntimeException("Failed to create VM: " + e.getMessage(), e); + } + } + + @NotNull + protected static Map getDetailsForInstanceCreation(String userdata, ServiceOffering serviceOffering, + Map existingDetails) { + Map details = new HashMap<>(); + List detailsTobeSkipped = List.of( + ApiConstants.BootType.BIOS.toString(), + ApiConstants.BootType.UEFI.toString()); + if (MapUtils.isNotEmpty(existingDetails)) { + for (Map.Entry entry : existingDetails.entrySet()) { + if (detailsTobeSkipped.contains(entry.getKey())) { + continue; + } + details.put(entry.getKey(), entry.getValue()); + } + } + if (StringUtils.isNotEmpty(userdata)) { + // Assumption: Only worker VM will have userdata and it needs CPU mode + details.put(VmDetailConstants.GUEST_CPU_MODE, 
WORKER_VM_GUEST_CPU_MODE); + } + if (serviceOffering.isCustomized()) { + details.put(VmDetailConstants.CPU_NUMBER, String.valueOf(serviceOffering.getCpu())); + details.put(VmDetailConstants.MEMORY, String.valueOf(serviceOffering.getRamSize())); + if (serviceOffering.getSpeed() == null && !details.containsKey(VmDetailConstants.CPU_SPEED)) { + details.put(VmDetailConstants.CPU_SPEED, String.valueOf(1000)); + } + } + return details; + } + + protected static long getProvisionedSizeInGb(String sizeStr) { + long provisionedSizeInGb; + try { + provisionedSizeInGb = Long.parseLong(sizeStr); + } catch (NumberFormatException ex) { + throw new InvalidParameterValueException("Invalid provisioned size: " + sizeStr); + } + if (provisionedSizeInGb <= 0) { + throw new InvalidParameterValueException("Provisioned size must be greater than zero"); + } + // round-up provisionedSizeInGb to the next whole GB + long GB = 1024L * 1024L * 1024L; + provisionedSizeInGb = Math.max(1L, (provisionedSizeInGb + GB - 1) / GB); + return provisionedSizeInGb; + } + + protected Long getVolumePhysicalSize(VolumeJoinVO vo) { + return volumeApiService.getVolumePhysicalSize(vo.getFormat(), vo.getPath(), vo.getChainInfo()); + } + + @NotNull + protected Disk createDisk(Account serviceAccount, StoragePoolVO pool, String name, Long diskOfferingId, long sizeInGb, Long initialSize) { + Volume volume; + try { + volume = volumeApiService.allocVolume(serviceAccount.getId(), pool.getDataCenterId(), diskOfferingId, null, + null, name, sizeInGb, null, null, null, null); + } catch (ResourceAllocationException e) { + throw new CloudRuntimeException(e.getMessage(), e); + } + if (volume == null) { + throw new CloudRuntimeException("Failed to create volume"); + } + volume = volumeApiService.createVolume(volume.getId(), null, null, pool.getId(), true); + if (initialSize != null) { + volumeDetailsDao.addDetail(volume.getId(), ApiConstants.VIRTUAL_SIZE, String.valueOf(initialSize), true); + } + + // Implementation for 
creating a Disk resource + return VolumeJoinVOToDiskConverter.toDisk(volumeJoinDao.findById(volume.getId()), this::getVolumePhysicalSize); + } + + protected List listNicsByInstance(final long instanceId, final String instanceUuid) { + List nics = nicDao.listByVmId(instanceId); + return NicVOToNicConverter.toNicList(nics, instanceUuid, this::getNetworkById); + } + + protected List listNicsByInstance(final UserVmJoinVO vo) { + return listNicsByInstance(vo.getId(), vo.getUuid()); + } + + protected boolean accountCannotAccessNetwork(NetworkVO networkVO, long accountId) { + Account account = accountService.getActiveAccountById(accountId); + try { + networkModel.checkNetworkPermissions(account, networkVO); + return false; + } catch (CloudRuntimeException e) { + logger.debug("{} cannot access {}: {}", account, networkVO, e.getMessage()); + } + return true; + } + + protected void assignVmToAccount(UserVmVO vmVO, long accountId) { + Account account = accountService.getActiveAccountById(accountId); + if (account == null) { + throw new InvalidParameterValueException("Account with ID " + accountId + " not found"); + } + try { + AssignVMCmd cmd = new AssignVMCmd(); + ComponentContext.inject(cmd); + cmd.setVirtualMachineId(vmVO.getId()); + cmd.setDomainId(account.getDomainId()); + if (Account.Type.PROJECT.equals(account.getType())) { + Project project = projectManager.findByProjectAccountId(account.getId()); + if (project == null) { + throw new InvalidParameterValueException("Project for " + account + " not found"); + } + cmd.setProjectId(project.getId()); + } else { + cmd.setAccountName(account.getAccountName()); + } + cmd.setSkipNetwork(true); + userVmManager.moveVmToUser(cmd); + } catch (ResourceAllocationException | CloudRuntimeException | ResourceUnavailableException | + InsufficientCapacityException e) { + logger.error("Failed to assign {} to {}: {}", vmVO, account, e.getMessage(), e); + } + } + + protected ImageTransfer createImageTransfer(Long backupId, Long volumeId, 
Direction direction, Format format) { + org.apache.cloudstack.backup.ImageTransfer imageTransfer = + kvmBackupExportService.createImageTransfer(volumeId, backupId, direction, format); + ImageTransferVO imageTransferVO = imageTransferDao.findById(imageTransfer.getId()); + return ImageTransferVOToImageTransferConverter.toImageTransfer(imageTransferVO, this::getHostById, + this::getVolumeById); + } + + protected DataCenterJoinVO getZoneById(Long zoneId) { + if (zoneId == null) { + return null; + } + return dataCenterJoinDao.findById(zoneId); + } + + protected HostJoinVO getHostById(Long hostId) { + if (hostId == null) { + return null; + } + return hostJoinDao.findById(hostId); + } + + protected VolumeJoinVO getVolumeById(Long volumeId) { + if (volumeId == null) { + return null; + } + return volumeJoinDao.findById(volumeId); + } + + protected NetworkVO getNetworkById(Long networkId) { + if (networkId == null) { + return null; + } + return networkDao.findById(networkId); + } + + protected Map getDetailsByInstanceId(Long instanceId) { + return vmInstanceDetailsDao.listDetailsKeyPairs(instanceId, true); + } + + public Pair getServiceAccount() { + String serviceAccountUuid = VeeamControlService.ServiceAccountId.value(); + if (StringUtils.isEmpty(serviceAccountUuid)) { + throw new CloudRuntimeException("Service account is not configured, unable to proceed"); + } + Account account = accountService.getActiveAccountByUuid(serviceAccountUuid); + if (account == null) { + throw new CloudRuntimeException("Service account with ID " + serviceAccountUuid + " not found, unable to proceed"); + } + User user = accountService.getOneActiveUserForAccount(account); + if (user == null) { + throw new CloudRuntimeException("No active user found for service account with ID " + serviceAccountUuid); + } + return new Pair<>(user, account); + } + + @Override + public boolean start() { + getServiceAccount(); + return true; + } + + @ApiAccess(command = ListZonesCmd.class) + public List 
listAllDataCenters(Long offset, Long limit) { + Filter filter = new Filter(DataCenterJoinVO.class, "id", true, offset, limit); + final List clusters = dataCenterJoinDao.listAll(filter); + return DataCenterJoinVOToDataCenterConverter.toDCList(clusters); + } + + @ApiAccess(command = ListZonesCmd.class) + public DataCenter getDataCenter(String uuid) { + final DataCenterJoinVO vo = dataCenterJoinDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("DataCenter with ID " + uuid + " not found"); + } + return DataCenterJoinVOToDataCenterConverter.toDataCenter(vo); + } + + @ApiAccess(command = ListStoragePoolsCmd.class) + public List listStorageDomainsByDcId(final String uuid, final Long offset, final Long limit) { + final DataCenterVO dataCenterVO = dataCenterDao.findByUuid(uuid); + if (dataCenterVO == null) { + throw new InvalidParameterValueException("DataCenter with ID " + uuid + " not found"); + } + Filter filter = new Filter(StoragePoolJoinVO.class, "id", true, offset, limit); + List storagePoolVOS = storagePoolJoinDao.listByZoneAndType(dataCenterVO.getId(), + SUPPORTED_STORAGE_TYPES, filter); + return StoreVOToStorageDomainConverter.toStorageDomainListFromPools(storagePoolVOS); + } + + @ApiAccess(command = ListNetworksCmd.class) + public List listNetworksByDcId(final String uuid, final Long offset, final Long limit) { + final DataCenterJoinVO dataCenterVO = dataCenterJoinDao.findByUuid(uuid); + if (dataCenterVO == null) { + throw new InvalidParameterValueException("DataCenter with ID " + uuid + " not found"); + } + Filter filter = new Filter(NetworkVO.class, "id", true, offset, limit); + List networks = networkDao.listByZoneAndTrafficType(dataCenterVO.getId(), Networks.TrafficType.Guest, filter); + return NetworkVOToNetworkConverter.toNetworkList(networks, (dcId) -> dataCenterVO); + } + + @ApiAccess(command = ListClustersCmd.class) + public List listAllClusters(Long offset, Long limit) { + Filter filter = new Filter(ClusterVO.class, 
"id", true, offset, limit); + final List clusters = clusterDao.listByHypervisorType(Hypervisor.HypervisorType.KVM, filter); + return ClusterVOToClusterConverter.toClusterList(clusters, this::getZoneById); + } + + @ApiAccess(command = ListClustersCmd.class) + public Cluster getCluster(String uuid) { + final ClusterVO vo = clusterDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("Cluster with ID " + uuid + " not found"); + } + return ClusterVOToClusterConverter.toCluster(vo, this::getZoneById); + } + + @ApiAccess(command = ListHostsCmd.class) + public List listAllHosts(Long offset, Long limit) { + Filter filter = new Filter(HostJoinVO.class, "id", true, offset, limit); + final List hosts = hostJoinDao.listRoutingHostsByHypervisor(Hypervisor.HypervisorType.KVM, filter); + return HostJoinVOToHostConverter.toHostList(hosts); + } + + @ApiAccess(command = ListHostsCmd.class) + public Host getHost(String uuid) { + final HostJoinVO vo = hostJoinDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("Host with ID " + uuid + " not found"); + } + return HostJoinVOToHostConverter.toHost(vo); + } + + @ApiAccess(command = ListNetworksCmd.class) + public List listAllNetworks(Long offset, Long limit) { + Filter filter = new Filter(NetworkVO.class, "id", true, offset, limit); + Pair, List> ownerDetails = getResourceOwnerFiltersWithDomainIds(); + final List networks = networkDao.listByTrafficTypeAndOwners(Networks.TrafficType.Guest, + ownerDetails.first(), ownerDetails.second(), filter); + return NetworkVOToNetworkConverter.toNetworkList(networks, this::getZoneById); + } + + @ApiAccess(command = ListNetworksCmd.class) + public Network getNetwork(String uuid) { + final NetworkVO vo = networkDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("Network with ID " + uuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), null, false, vo); + return 
NetworkVOToNetworkConverter.toNetwork(vo, this::getZoneById); + } + + @ApiAccess(command = ListNetworksCmd.class) + public List listAllVnicProfiles(Long offset, Long limit) { + Filter filter = new Filter(NetworkVO.class, "id", true, offset, limit); + Pair, List> ownerDetails = getResourceOwnerFiltersWithDomainIds(); + final List networks = networkDao.listByTrafficTypeAndOwners(Networks.TrafficType.Guest, + ownerDetails.first(), ownerDetails.second(), filter); + return NetworkVOToVnicProfileConverter.toVnicProfileList(networks, this::getZoneById); + } + + @ApiAccess(command = ListNetworksCmd.class) + public VnicProfile getVnicProfile(String uuid) { + final NetworkVO vo = networkDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("Nic profile with ID " + uuid + " not found"); + } + return NetworkVOToVnicProfileConverter.toVnicProfile(vo, this::getZoneById); + } + + @ApiAccess(command = ListVMsCmd.class) + public List listAllInstances(boolean includeTags, boolean includeDisks, boolean includeNics, + boolean allContent, Long offset, Long limit) { + Filter filter = new Filter(UserVmJoinVO.class, "id", true, offset, limit); + Pair, String> ownerDetails = getResourceOwnerFilters(); + List vms = userVmJoinDao.listByHypervisorTypeAndOwners(Hypervisor.HypervisorType.KVM, + ownerDetails.first(), ownerDetails.second(), filter); + return UserVmJoinVOToVmConverter.toVmList(vms, + this::getHostById, + this::getDetailsByInstanceId, + includeTags ? this::listTagsByInstanceId : null, + includeDisks ? this::listDiskAttachmentsByInstanceId : null, + includeNics ? 
this::listNicsByInstance : null, + allContent); + } + + @ApiAccess(command = ListVMsCmd.class) + public Vm getInstance(String uuid, boolean includeTags, boolean includeDisks, boolean includeNics, + boolean allContent) { + UserVmJoinVO vo = userVmJoinDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("VM with ID " + uuid + " not found"); + } + return UserVmJoinVOToVmConverter.toVm(vo, + this::getHostById, + this::getDetailsByInstanceId, + includeTags ? this::listTagsByInstanceId : null, + includeDisks ? this::listDiskAttachmentsByInstanceId : null, + includeNics ? this::listNicsByInstance : null, + allContent); + } + + @ApiAccess(command = DeployVMCmd.class) + public Vm createInstance(Vm request) { + if (request == null) { + throw new InvalidParameterValueException("Request disk data is empty"); + } + OvfXmlUtil.updateFromConfiguration(request); + String name = request.getName(); + if (StringUtils.isBlank(name)) { + throw new InvalidParameterValueException("Invalid name specified for the VM"); + } + String displayName = name; + name = name.replace("_", "-"); + Long zoneId = null; + Long clusterId = null; + if (request.getCluster() != null && StringUtils.isNotEmpty(request.getCluster().getId())) { + ClusterVO clusterVO = clusterDao.findByUuid(request.getCluster().getId()); + if (clusterVO != null) { + zoneId = clusterVO.getDataCenterId(); + clusterId = clusterVO.getId(); + } + } + if (zoneId == null) { + throw new InvalidParameterValueException("Failed to determine datacenter for VM creation request"); + } + DataCenterVO zone = dataCenterDao.findById(zoneId); + if (zone == null) { + throw new InvalidParameterValueException("DataCenter could not be determined for the request"); + } + Integer cpu = null; + try { + cpu = Integer.valueOf(request.getCpu().getTopology().getSockets()); + } catch (Exception ignored) { + } + if (cpu == null) { + throw new InvalidParameterValueException("CPU topology sockets must be specified"); + } + Long 
memory = null; + try { + memory = Long.valueOf(request.getMemory()); + } catch (Exception ignored) { + } + if (memory == null) { + throw new InvalidParameterValueException("Memory must be specified"); + } + int memoryMB = (int)(memory / (1024L * 1024L)); + String userdata = null; + if (request.getInitialization() != null) { + userdata = request.getInitialization().getCustomScript(); + } + Pair bootOptions = Vm.Bios.retrieveBootOptions(request.getBios()); + Account owner = getOwnerForInstanceCreation(request); + Ternary ownerDetails = getOwnerDetailsForInstanceCreation(owner); + String serviceOfferingUuid = null; + if (request.getCpuProfile() != null && StringUtils.isNotEmpty(request.getCpuProfile().getId())) { + serviceOfferingUuid = request.getCpuProfile().getId(); + } + String templateUuid = null; + if (request.getTemplate() != null && StringUtils.isNotEmpty(request.getTemplate().getId())) { + templateUuid = request.getTemplate().getId(); + } + return createInstance(zone, clusterId, owner, ownerDetails.first(), ownerDetails.second(), + ownerDetails.third(), name, displayName, serviceOfferingUuid, cpu, memoryMB, templateUuid, + userdata, bootOptions.first(), bootOptions.second(), request.getAffinityGroupId(), + request.getUserDataId(), request.getDetails()); + } + + @ApiAccess(command = UpdateVMCmd.class) + public Vm updateInstance(String uuid, Vm request) { + logger.warn("Received request to update VM with ID {}. 
No action, returning existing VM data.", uuid); + return getInstance(uuid, false, false, false, false); + } + + @ApiAccess(command = DestroyVMCmd.class) + public VmAction deleteInstance(String uuid, boolean async) { + UserVmVO vo = userVmDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("VM with ID " + uuid + " not found"); + } + try { + DestroyVMCmd cmd = new DestroyVMCmd(); + cmd.setHttpMethod(BaseCmd.HTTPMethod.POST.name()); + ComponentContext.inject(cmd); + Map params = new HashMap<>(); + params.put(ApiConstants.ID, vo.getUuid()); + params.put(ApiConstants.EXPUNGE, Boolean.TRUE.toString()); + ApiServerService.AsyncCmdResult result = processAsyncCmdWithContext(cmd, params); + AsyncJobJoinVO jobVo = asyncJobJoinDao.findById(result.jobId); + if (jobVo == null) { + throw new CloudRuntimeException("Failed to find job for VM deletion"); + } + if (!async) { + waitForJobCompletion(jobVo); + } + return AsyncJobJoinVOToJobConverter.toVmAction(jobVo, userVmJoinDao.findById(vo.getId())); + } catch (Exception e) { + throw new CloudRuntimeException("Failed to delete VM: " + e.getMessage(), e); + } + } + + @ApiAccess(command = StartVMCmd.class) + public VmAction startInstance(String uuid, boolean async) { + UserVmVO vo = userVmDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("VM with ID " + uuid + " not found"); + } + try { + StartVMCmd cmd = new StartVMCmd(); + cmd.setHttpMethod(BaseCmd.HTTPMethod.POST.name()); + ComponentContext.inject(cmd); + Map params = new HashMap<>(); + params.put(ApiConstants.ID, vo.getUuid()); + ApiServerService.AsyncCmdResult result = processAsyncCmdWithContext(cmd, params); + AsyncJobJoinVO jobVo = asyncJobJoinDao.findById(result.jobId); + if (jobVo == null) { + throw new CloudRuntimeException("Failed to find job for VM start"); + } + if (!async) { + waitForJobCompletion(jobVo); + } + return AsyncJobJoinVOToJobConverter.toVmAction(jobVo, userVmJoinDao.findById(vo.getId())); + } 
catch (Exception e) { + throw new CloudRuntimeException("Failed to start VM: " + e.getMessage(), e); + } + } + + @ApiAccess(command = StopVMCmd.class) + public VmAction stopInstance(String uuid, boolean async) { + UserVmVO vo = userVmDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("VM with ID " + uuid + " not found"); + } + try { + StopVMCmd cmd = new StopVMCmd(); + cmd.setHttpMethod(BaseCmd.HTTPMethod.POST.name()); + ComponentContext.inject(cmd); + Map params = new HashMap<>(); + params.put(ApiConstants.ID, vo.getUuid()); + params.put(ApiConstants.FORCED, Boolean.TRUE.toString()); + ApiServerService.AsyncCmdResult result = processAsyncCmdWithContext(cmd, params); + AsyncJobJoinVO jobVo = asyncJobJoinDao.findById(result.jobId); + if (jobVo == null) { + throw new CloudRuntimeException("Failed to find job for VM stop"); + } + if (!async) { + waitForJobCompletion(jobVo); + } + return AsyncJobJoinVOToJobConverter.toVmAction(jobVo, userVmJoinDao.findById(vo.getId())); + } catch (Exception e) { + throw new CloudRuntimeException("Failed to stop VM: " + e.getMessage(), e); + } + } + + @ApiAccess(command = StopVMCmd.class) + public VmAction shutdownInstance(String uuid, boolean async) { + UserVmVO vo = userVmDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("VM with ID " + uuid + " not found"); + } + try { + StopVMCmd cmd = new StopVMCmd(); + cmd.setHttpMethod(BaseCmd.HTTPMethod.POST.name()); + ComponentContext.inject(cmd); + Map params = new HashMap<>(); + params.put(ApiConstants.ID, vo.getUuid()); + params.put(ApiConstants.FORCED, Boolean.FALSE.toString()); + ApiServerService.AsyncCmdResult result = processAsyncCmdWithContext(cmd, params); + AsyncJobJoinVO jobVo = asyncJobJoinDao.findById(result.jobId); + if (jobVo == null) { + throw new CloudRuntimeException("Failed to find job for VM shutdown"); + } + if (!async) { + waitForJobCompletion(jobVo); + } + return 
AsyncJobJoinVOToJobConverter.toVmAction(jobVo, userVmJoinDao.findById(vo.getId())); + } catch (Exception e) { + throw new CloudRuntimeException("Failed to shutdown VM: " + e.getMessage(), e); + } + } + + @ApiAccess(command = ListTagsCmd.class) + protected List listTagsByInstanceId(final long instanceId) { + ResourceTag vmResourceTag = resourceTagDao.findByKey(instanceId, + ResourceTag.ResourceObjectType.UserVm, VM_TA_KEY); + List tags = new ArrayList<>(); + if (vmResourceTag instanceof ResourceTagVO) { + tags.add((ResourceTagVO)vmResourceTag); + } else { + tags.add(resourceTagDao.findById(vmResourceTag.getId())); + } + return ResourceTagVOToTagConverter.toTags(tags); + } + + @ApiAccess(command = ListVolumesCmd.class) + protected List listDiskAttachmentsByInstanceId(final long instanceId) { + List kvmVolumes = volumeJoinDao.listByInstanceId(instanceId); + return VolumeJoinVOToDiskConverter.toDiskAttachmentList(kvmVolumes, this::getVolumePhysicalSize); + } + + @ApiAccess(command = ListVolumesCmd.class) + public List listDiskAttachmentsByInstanceUuid(final String uuid) { + UserVmVO vo = userVmDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("VM with ID " + uuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), null, false, vo); + return listDiskAttachmentsByInstanceId(vo.getId()); + } + + @ApiAccess(command = ListVolumesCmd.class) + public List listAllDisks(Long offset, Long limit) { + Filter filter = new Filter(VolumeJoinVO.class, "id", true, offset, limit); + Pair, String> ownerDetails = getResourceOwnerFilters(); + List kvmVolumes = volumeJoinDao.listByHypervisorTypeAndOwners(Hypervisor.HypervisorType.KVM, + ownerDetails.first(), ownerDetails.second(), filter); + return VolumeJoinVOToDiskConverter.toDiskList(kvmVolumes, this::getVolumePhysicalSize); + } + + @ApiAccess(command = ListVolumesCmd.class) + public Disk getDisk(String uuid) { + VolumeVO vo = volumeDao.findByUuid(uuid); + if (vo 
== null) { + throw new InvalidParameterValueException("Disk with ID " + uuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), null, false, vo); + return VolumeJoinVOToDiskConverter.toDisk(volumeJoinDao.findByUuid(uuid), this::getVolumePhysicalSize); + } + + protected void assignVolumeToAccount(VolumeVO volumeVO, long accountId) { + Account account = accountService.getActiveAccountById(accountId); + if (account == null) { + throw new InvalidParameterValueException("Account with ID " + accountId + " not found"); + } + try { + AssignVolumeCmd cmd = new AssignVolumeCmd(); + ComponentContext.inject(cmd); + Map params = new HashMap<>(); + cmd.setVolumeId(volumeVO.getId()); + params.put(ApiConstants.VOLUME_ID, volumeVO.getUuid()); + if (Account.Type.PROJECT.equals(account.getType())) { + Project project = projectManager.findByProjectAccountId(account.getId()); + if (project == null) { + throw new InvalidParameterValueException("Project for " + account + " not found"); + } + cmd.setProjectId(project.getId()); + params.put(ApiConstants.PROJECT_ID, project.getUuid()); + } else { + cmd.setAccountId(account.getId()); + params.put(ApiConstants.ACCOUNT_ID, account.getUuid()); + } + cmd.setFullUrlParams(params); + volumeApiService.assignVolumeToAccount(cmd); + } catch (ResourceAllocationException | CloudRuntimeException e) { + logger.error("Failed to assign {} to {}: {}", volumeVO, account, e.getMessage(), e); + } + } + + @ApiAccess(command = AttachVolumeCmd.class) + public DiskAttachment attachInstanceDisk(final String vmUuid, final DiskAttachment request) { + UserVmVO vmVo = userVmDao.findByUuid(vmUuid); + if (vmVo == null) { + throw new InvalidParameterValueException("VM with ID " + vmUuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, + false, vmVo); + if (request == null || request.getDisk() == null || 
StringUtils.isEmpty(request.getDisk().getId())) { + throw new InvalidParameterValueException("Request disk data is empty"); + } + VolumeVO volumeVO = volumeDao.findByUuid(request.getDisk().getId()); + if (volumeVO == null) { + throw new InvalidParameterValueException("Disk with ID " + request.getDisk().getId() + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, + false, volumeVO); + if (vmVo.getAccountId() != volumeVO.getAccountId()) { + if (VeeamControlService.InstanceRestoreAssignOwner.value()) { + assignVolumeToAccount(volumeVO, vmVo.getAccountId()); + } else { + throw new PermissionDeniedException("Disk with ID " + request.getDisk().getId() + + " belongs to a different account and cannot be attached to the VM"); + } + } + Long deviceId = null; + List volumes = volumeDao.findUsableVolumesForInstance(vmVo.getId()); + if (CollectionUtils.isEmpty(volumes)) { + deviceId = 0L; + } + Volume volume = volumeApiService.attachVolumeToVM(vmVo.getId(), volumeVO.getId(), deviceId, false); + VolumeJoinVO attachedVolumeVO = volumeJoinDao.findById(volume.getId()); + return VolumeJoinVOToDiskConverter.toDiskAttachment(attachedVolumeVO, this::getVolumePhysicalSize); + } + + @ApiAccess(command = CreateVolumeCmd.class) + public Disk createDisk(Disk request) { + if (request == null) { + throw new InvalidParameterValueException("Request disk data is empty"); + } + String name = request.getName(); + if (request.getStorageDomains() == null || CollectionUtils.isEmpty(request.getStorageDomains().getItems()) || + request.getStorageDomains().getItems().size() > 1) { + throw new InvalidParameterValueException("Exactly one storage domain must be specified"); + } + StorageDomain domain = request.getStorageDomains().getItems().get(0); + if (domain == null || domain.getId() == null) { + throw new InvalidParameterValueException("Storage domain ID must be specified"); + } + StoragePoolVO pool = 
primaryDataStoreDao.findByUuid(domain.getId()); + if (pool == null) { + throw new InvalidParameterValueException("Storage domain with ID " + domain.getId() + " not found"); + } + String sizeStr = request.getProvisionedSize(); + if (StringUtils.isBlank(sizeStr)) { + throw new InvalidParameterValueException("Provisioned size must be specified"); + } + long provisionedSizeInGb = getProvisionedSizeInGb(sizeStr); + Long initialSize = null; + if (StringUtils.isNotBlank(request.getInitialSize())) { + try { + initialSize = Long.parseLong(request.getInitialSize()); + } catch (NumberFormatException ignored) {} + } + Account caller = CallContext.current().getCallingAccount(); + DataCenterVO zone = dataCenterDao.findById(pool.getDataCenterId()); + if (zone == null || !Grouping.AllocationState.Enabled.equals(zone.getAllocationState())) { + throw new InvalidParameterValueException("Datacenter for the specified storage domain is not found or not active"); + } + Long diskOfferingId = volumeApiService.getCustomDiskOfferingIdForVolumeUpload(caller, zone); + if (diskOfferingId == null) { + throw new CloudRuntimeException("Failed to find custom offering for disk in zone " + zone.getName()); + } + return createDisk(caller, pool, name, diskOfferingId, provisionedSizeInGb, initialSize); + } + + @ApiAccess(command = DestroyVolumeCmd.class) + public void deleteDisk(String uuid) { + VolumeVO vo = volumeDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("Disk with ID " + uuid + " not found"); + } + volumeApiService.deleteVolume(vo.getId(), accountService.getSystemAccount()); + } + + @ApiAccess(command = UpdateVolumeCmd.class) + public Disk updateDisk(String uuid, Disk request) { + VolumeVO vo = volumeDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("Disk with ID " + uuid + " not found"); + } + logger.warn("Update disk is not implemented, returning disk ID: {} as it is", uuid); + return getDisk(uuid); + } + + @ApiAccess(command = 
UpdateVolumeCmd.class) + public Disk copyDisk(String uuid) { + throw new InvalidParameterValueException("Copy Disk with ID " + uuid + " not implemented"); + } + + @ApiAccess(command = UpdateVolumeCmd.class) + public Disk reduceDisk(String uuid) { + throw new InvalidParameterValueException("Reduce Disk with ID " + uuid + " not implemented"); + } + + @ApiAccess(command = ListNicsCmd.class) + public List listNicsByInstanceUuid(final String uuid) { + UserVmVO vo = userVmDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("VM with ID " + uuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), null, false, vo); + return listNicsByInstance(vo.getId(), vo.getUuid()); + } + + @ApiAccess(command = AddNicToVMCmd.class) + public Nic attachInstanceNic(final String vmUuid, final Nic request) { + UserVmVO vmVo = userVmDao.findByUuid(vmUuid); + if (vmVo == null) { + throw new InvalidParameterValueException("VM with ID " + vmUuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, + false, vmVo); + if (request == null || request.getVnicProfile() == null || StringUtils.isEmpty(request.getVnicProfile().getId())) { + throw new InvalidParameterValueException("Request nic data is empty"); + } + NetworkVO networkVO = networkDao.findByUuid(request.getVnicProfile().getId()); + if (networkVO == null) { + throw new InvalidParameterValueException("VNic profile " + request.getVnicProfile().getId() + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, + false, networkVO); + if (vmVo.getAccountId() != networkVO.getAccountId() && + networkVO.getAccountId() != Account.ACCOUNT_ID_SYSTEM && + VeeamControlService.InstanceRestoreAssignOwner.value() && + accountCannotAccessNetwork(networkVO, vmVo.getAccountId())) { + assignVmToAccount(vmVo, networkVO.getAccountId()); + 
} + AddNicToVMCmd cmd = new AddNicToVMCmd(); + ComponentContext.inject(cmd); + cmd.setVmId(vmVo.getId()); + cmd.setNetworkId(networkVO.getId()); + if (request.getMac() != null && StringUtils.isNotBlank(request.getMac().getAddress())) { + cmd.setMacAddress(request.getMac().getAddress()); + } + userVmManager.addNicToVirtualMachine(cmd); + NicVO nic = nicDao.findByInstanceIdAndNetworkIdIncludingRemoved(networkVO.getId(), vmVo.getId()); + if (nic == null) { + throw new CloudRuntimeException("Failed to attach NIC to VM"); + } + return NicVOToNicConverter.toNic(nic, vmUuid, this::getNetworkById); + } + + @ApiAccess(command = ListImageTransfersCmd.class) + public List listAllImageTransfers(Long offset, Long limit) { + Filter filter = new Filter(ImageTransferVO.class, "id", true, offset, limit); + Pair, List> ownerDetails = getResourceOwnerFiltersWithDomainIds(); + List imageTransfers = imageTransferDao.listByOwners(ownerDetails.first(), + ownerDetails.second(), filter); + return ImageTransferVOToImageTransferConverter.toImageTransferList(imageTransfers, this::getHostById, this::getVolumeById); + } + + @ApiAccess(command = ListImageTransfersCmd.class) + public ImageTransfer getImageTransfer(String uuid) { + ImageTransferVO vo = imageTransferDao.findByUuidIncludingRemoved(uuid); + if (vo == null) { + throw new InvalidParameterValueException("Image transfer with ID " + uuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), null, false, vo); + return ImageTransferVOToImageTransferConverter.toImageTransfer(vo, this::getHostById, this::getVolumeById); + } + + @ApiAccess(command = CreateImageTransferCmd.class) + public ImageTransfer createImageTransfer(ImageTransfer request) { + if (request == null) { + throw new InvalidParameterValueException("Request image transfer data is empty"); + } + if (request.getDisk() == null || StringUtils.isBlank(request.getDisk().getId())) { + throw new InvalidParameterValueException("Disk ID must be 
specified"); + } + VolumeJoinVO volumeVO = volumeJoinDao.findByUuid(request.getDisk().getId()); + if (volumeVO == null) { + throw new InvalidParameterValueException("Disk with ID " + request.getDisk().getId() + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), null, false, + volumeVO); + Direction direction = EnumUtils.getEnum(Direction.class, request.getDirection()); + if (direction == null) { + throw new InvalidParameterValueException("Invalid or missing direction"); + } + Format format = EnumUtils.getEnum(Format.class, request.getFormat()); + Long backupId = null; + if (request.getBackup() != null && StringUtils.isNotBlank(request.getBackup().getId())) { + BackupVO backupVO = backupDao.findByUuid(request.getBackup().getId()); + if (backupVO == null) { + throw new InvalidParameterValueException("Backup with ID " + request.getBackup().getId() + " not found"); + } + backupId = backupVO.getId(); + } + return createImageTransfer(backupId, volumeVO.getId(), direction, format); + } + + @ApiAccess(command = FinalizeImageTransferCmd.class) + public boolean cancelImageTransfer(String uuid) { + ImageTransferVO vo = imageTransferDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("Image transfer with ID " + uuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, vo); + return kvmBackupExportService.cancelImageTransfer(vo.getId()); + } + + @ApiAccess(command = FinalizeImageTransferCmd.class) + public boolean finalizeImageTransfer(String uuid) { + ImageTransferVO vo = imageTransferDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("Image transfer with ID " + uuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, vo); + return kvmBackupExportService.finalizeImageTransfer(vo.getId()); + } + + 
@ApiAccess(command = ListAsyncJobsCmd.class) + public List listPendingJobs() { + List jobIds = asyncJobDao.listPendingJobIdsForAccount(CallContext.current().getCallingAccountId()); + List jobJoinVOs = asyncJobJoinDao.listByIds(jobIds); + return AsyncJobJoinVOToJobConverter.toJobList(jobJoinVOs); + } + + @ApiAccess(command = ListAsyncJobsCmd.class) + public Job getJob(String uuid) { + final AsyncJobJoinVO vo = asyncJobJoinDao.findByUuidIncludingRemoved(uuid); + if (vo == null) { + throw new InvalidParameterValueException("Job with ID " + uuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), null, false, vo); + return AsyncJobJoinVOToJobConverter.toJob(vo); + } + + @ApiAccess(command = ListVMSnapshotCmd.class) + public List listSnapshotsByInstanceUuid(final String uuid) { + UserVmVO vo = userVmDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("VM with ID " + uuid + " not found"); + } + List snapshots = vmSnapshotDao.findByVm(vo.getId()); + return VmSnapshotVOToSnapshotConverter.toSnapshotList(snapshots, vo.getUuid()); + } + + @ApiAccess(command = CreateVMSnapshotCmd.class) + public Snapshot createInstanceSnapshot(final String vmUuid, final Snapshot request) { + UserVmVO vmVo = userVmDao.findByUuid(vmUuid); + if (vmVo == null) { + throw new InvalidParameterValueException("VM with ID " + vmUuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, + false, vmVo); + try { + CreateVMSnapshotCmd cmd = new CreateVMSnapshotCmd(); + ComponentContext.inject(cmd); + Map params = new HashMap<>(); + params.put(ApiConstants.VIRTUAL_MACHINE_ID, vmVo.getUuid()); + params.put(ApiConstants.VM_SNAPSHOT_DESCRIPTION, request.getDescription()); + params.put(ApiConstants.VM_SNAPSHOT_MEMORY, String.valueOf(Boolean.parseBoolean(request.getPersistMemorystate()))); + ApiServerService.AsyncCmdResult result = 
processAsyncCmdWithContext(cmd, params); + if (result.objectId == null) { + throw new CloudRuntimeException("No snapshot ID returned"); + } + VMSnapshotVO vo = vmSnapshotDao.findById(result.objectId); + if (vo == null) { + throw new CloudRuntimeException("Snapshot not found"); + } + return VmSnapshotVOToSnapshotConverter.toSnapshot(vo, vmVo.getUuid()); + } catch (Exception e) { + throw new CloudRuntimeException("Failed to create snapshot: " + e.getMessage(), e); + } + } + + @ApiAccess(command = ListVMSnapshotCmd.class) + public Snapshot getSnapshot(String uuid) { + VMSnapshotVO vo = vmSnapshotDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("Snapshot with ID " + uuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), null, false, vo); + UserVmVO vm = userVmDao.findById(vo.getVmId()); + return VmSnapshotVOToSnapshotConverter.toSnapshot(vo, vm.getUuid()); + } + + public ResourceAction deleteSnapshot(String uuid, boolean async) { + ResourceAction action = null; + VMSnapshotVO vo = vmSnapshotDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("Snapshot with ID " + uuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, + false, vo); + try { + DeleteVMSnapshotCmd cmd = new DeleteVMSnapshotCmd(); + ComponentContext.inject(cmd); + Map params = new HashMap<>(); + params.put(ApiConstants.VM_SNAPSHOT_ID, vo.getUuid()); + ApiServerService.AsyncCmdResult result = processAsyncCmdWithContext(cmd, params); + AsyncJobJoinVO jobVo = asyncJobJoinDao.findById(result.jobId); + if (jobVo == null) { + throw new CloudRuntimeException("Failed to find job for snapshot deletion"); + } + if (!async) { + waitForJobCompletion(jobVo); + } + action = AsyncJobJoinVOToJobConverter.toAction(jobVo); + } catch (Exception e) { + throw new CloudRuntimeException("Failed to delete snapshot: " + e.getMessage(), e); 
+ } + return action; + } + + @ApiAccess(command = RevertToVMSnapshotCmd.class) + public ResourceAction revertInstanceToSnapshot(String uuid, boolean async) { + ResourceAction action = null; + VMSnapshotVO vo = vmSnapshotDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("Snapshot with ID " + uuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, + false, vo); + try { + RevertToVMSnapshotCmd cmd = new RevertToVMSnapshotCmd(); + ComponentContext.inject(cmd); + Map params = new HashMap<>(); + params.put(ApiConstants.VM_SNAPSHOT_ID, vo.getUuid()); + ApiServerService.AsyncCmdResult result = processAsyncCmdWithContext(cmd, params); + AsyncJobJoinVO jobVo = asyncJobJoinDao.findById(result.jobId); + if (jobVo == null) { + throw new CloudRuntimeException("Failed to find job for snapshot revert"); + } + if (!async) { + waitForJobCompletion(jobVo); + } + action = AsyncJobJoinVOToJobConverter.toAction(jobVo); + } catch (Exception e) { + throw new CloudRuntimeException("Failed to revert to snapshot: " + e.getMessage(), e); + } + return action; + } + + @ApiAccess(command = ListBackupsCmd.class) + public List listBackupsByInstanceUuid(final String uuid) { + UserVmVO vo = userVmDao.findByUuid(uuid); + if (vo == null) { + throw new InvalidParameterValueException("VM with ID " + uuid + " not found"); + } + List backups = backupDao.searchByVmIds(List.of(vo.getId())); + return BackupVOToBackupConverter.toBackupList(backups, id -> vo, this::getHostById); + } + + protected void validateInstanceStorage(UserVmVO vm) { + List volumes = volumeDao.findUsableVolumesForInstance(vm.getId()); + List storageIds = volumes.stream().map(VolumeVO::getPoolId).distinct().collect(Collectors.toList()); + List pools = primaryDataStoreDao.listByIds(storageIds); + pools.stream().filter(p -> !SUPPORTED_STORAGE_TYPES.contains(p.getPoolType())) + .findAny().ifPresent(p -> { + throw new 
InvalidParameterValueException("VM is using storage pool " + p.getName() + + " of type " + p.getPoolType() + + " which is not supported for backup operations"); + }); + } + + @ApiAccess(command = StartBackupCmd.class) + public Backup createInstanceBackup(final String vmUuid, final Backup request) { + UserVmVO vmVo = userVmDao.findByUuid(vmUuid); + if (vmVo == null) { + throw new InvalidParameterValueException("VM with ID " + vmUuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, + false, vmVo); + validateInstanceStorage(vmVo); + try { + StartBackupCmd cmd = new StartBackupCmd(); + ComponentContext.inject(cmd); + Map params = new HashMap<>(); + params.put(ApiConstants.VIRTUAL_MACHINE_ID, vmVo.getUuid()); + params.put(ApiConstants.NAME, request.getName()); + params.put(ApiConstants.DESCRIPTION, request.getDescription()); + ApiServerService.AsyncCmdResult result = processAsyncCmdWithContext(cmd, params); + if (result == null || result.objectId == null) { + throw new CloudRuntimeException("Unexpected backup ID returned"); + } + BackupVO vo = backupDao.findById(result.objectId); + if (vo == null) { + throw new CloudRuntimeException("Backup not found"); + } + return BackupVOToBackupConverter.toBackup(vo, id -> vmVo, this::getHostById, this::getBackupDisks); + } catch (Exception e) { + throw new CloudRuntimeException("Failed to create backup: " + e.getMessage(), e); + } + } + + @ApiAccess(command = ListBackupsCmd.class) + public Backup getBackup(String uuid) { + BackupVO vo = backupDao.findByUuidIncludingRemoved(uuid); + if (vo == null) { + throw new InvalidParameterValueException("Backup with ID " + uuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), null, false, vo); + return BackupVOToBackupConverter.toBackup(vo, id -> userVmDao.findById(id), this::getHostById, + this::getBackupDisks); + } + + @ApiAccess(command = ListBackupsCmd.class) + 
public List listDisksByBackupUuid(final String uuid) { + throw new InvalidParameterValueException("List Backup Disks with ID " + uuid + " not implemented"); + // This won't be feasible with current structure + } + + @ApiAccess(command = FinalizeBackupCmd.class) + public Backup finalizeBackup(final String vmUuid, final String backupUuid) { + UserVmVO vm = userVmDao.findByUuid(vmUuid); + if (vm == null) { + throw new InvalidParameterValueException("Instance with ID " + vmUuid + " not found"); + } + BackupVO backup = backupDao.findByUuid(backupUuid); + if (backup == null) { + throw new InvalidParameterValueException("Backup with ID " + backupUuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, + false, backup); + try { + FinalizeBackupCmd cmd = new FinalizeBackupCmd(); + ComponentContext.inject(cmd); + Map params = new HashMap<>(); + params.put(ApiConstants.VIRTUAL_MACHINE_ID, vm.getUuid()); + params.put(ApiConstants.ID, backup.getUuid()); + ApiServerService.AsyncCmdResult result = processAsyncCmdWithContext(cmd, params); + if (result == null) { + throw new CloudRuntimeException("Failed to finalize backup"); + } + backup = backupDao.findByIdIncludingRemoved(backup.getId()); + return BackupVOToBackupConverter.toBackup(backup, id -> vm, this::getHostById, this::getBackupDisks); + } catch (Exception e) { + throw new CloudRuntimeException("Failed to finalize backup: " + e.getMessage(), e); + } + } + + @ApiAccess(command = ListBackupsCmd.class) + protected List getBackupDisks(final BackupVO backup) { + List volumeInfos = backup.getBackedUpVolumes(); + if (CollectionUtils.isEmpty(volumeInfos)) { + return Collections.emptyList(); + } + return VolumeJoinVOToDiskConverter.toDiskListFromVolumeInfos(volumeInfos); + } + + @ApiAccess(command = ListVmCheckpointsCmd.class) + public List listCheckpointsByInstanceUuid(final String uuid) { + UserVmVO vo = userVmDao.findByUuid(uuid); + if (vo == null) 
{ + throw new InvalidParameterValueException("VM with ID " + uuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), null, false, vo); + Map details = vmInstanceDetailsDao.listDetailsKeyPairs(vo.getId()); + Checkpoint checkpoint = UserVmVOToCheckpointConverter.toCheckpoint( + details.get(VmDetailConstants.ACTIVE_CHECKPOINT_ID), + details.get(VmDetailConstants.ACTIVE_CHECKPOINT_CREATE_TIME)); + if (checkpoint == null) { + return Collections.emptyList(); + } + return List.of(checkpoint); + } + + @ApiAccess(command = DeleteVmCheckpointCmd.class) + public void deleteCheckpoint(String vmUuid, String checkpointId) { + UserVmVO vo = userVmDao.findByUuid(vmUuid); + if (vo == null) { + throw new InvalidParameterValueException("VM with ID " + vmUuid + " not found"); + } + accountService.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, vo); + Map details = vmInstanceDetailsDao.listDetailsKeyPairs(vo.getId()); + if (!Objects.equals(details.get(VmDetailConstants.ACTIVE_CHECKPOINT_ID), checkpointId)) { + logger.warn("Checkpoint ID {} does not match active checkpoint for VM {}", checkpointId, vmUuid); + return; + } + try { + DeleteVmCheckpointCmd cmd = new DeleteVmCheckpointCmd(); + ComponentContext.inject(cmd); + cmd.setVmId(vo.getId()); + cmd.setCheckpointId(checkpointId); + kvmBackupExportService.deleteVmCheckpoint(cmd); + } catch (Exception e) { + throw new CloudRuntimeException("Failed to delete checkpoint: " + e.getMessage(), e); + } + } + + @ApiAccess(command = ListTagsCmd.class) + public List listAllTags(final Long offset, final Long limit) { + List tags = new ArrayList<>(getDummyTags().values()); + Filter filter = new Filter(ResourceTagVO.class, "id", true, offset, limit); + Pair, List> ownerDetails = getResourceOwnerFiltersWithDomainIds(); + List vmResourceTags = resourceTagDao.listByResourceTypeKeyAndOwners( + ResourceTag.ResourceObjectType.UserVm, VM_TA_KEY, 
ownerDetails.first(), ownerDetails.second(), filter); + if (CollectionUtils.isNotEmpty(vmResourceTags)) { + tags.addAll(ResourceTagVOToTagConverter.toTags(vmResourceTags)); + } + return tags; + } + + @ApiAccess(command = ListTagsCmd.class) + public Tag getTag(String uuid) { + if (BaseDto.ZERO_UUID.equals(uuid)) { + return ResourceTagVOToTagConverter.getRootTag(); + } + Tag tag = getDummyTags().get(uuid); + if (tag == null) { + ResourceTagVO resourceTagVO = resourceTagDao.findByResourceTypeKeyAndValue( + ResourceTag.ResourceObjectType.UserVm, VM_TA_KEY, uuid); + accountService.checkAccess(CallContext.current().getCallingAccount(), null, false, + resourceTagVO); + if (resourceTagVO != null) { + tag = ResourceTagVOToTagConverter.toTag(resourceTagVO); + } + } + if (tag == null) { + throw new InvalidParameterValueException("Tag with ID " + uuid + " not found"); + } + return tag; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/ApiRouteHandler.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/ApiRouteHandler.java new file mode 100644 index 000000000000..be71164d672b --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/ApiRouteHandler.java @@ -0,0 +1,129 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.cloudstack.veeam.RouteHandler; +import org.apache.cloudstack.veeam.VeeamControlService; +import org.apache.cloudstack.veeam.VeeamControlServlet; +import org.apache.cloudstack.veeam.adapter.ServerAdapter; +import org.apache.cloudstack.veeam.api.dto.Api; +import org.apache.cloudstack.veeam.api.dto.ApiSummary; +import org.apache.cloudstack.veeam.api.dto.EmptyElement; +import org.apache.cloudstack.veeam.api.dto.Link; +import org.apache.cloudstack.veeam.api.dto.ProductInfo; +import org.apache.cloudstack.veeam.api.dto.Ref; +import org.apache.cloudstack.veeam.api.dto.SummaryCount; +import org.apache.cloudstack.veeam.api.dto.Version; +import org.apache.cloudstack.veeam.utils.Negotiation; + +import com.cloud.utils.UuidUtils; +import com.cloud.utils.component.ManagerBase; + +public class ApiRouteHandler extends ManagerBase implements RouteHandler { + public static final String BASE_ROUTE = "/api"; + + @Inject + ServerAdapter serverAdapter; + + @Override + public boolean canHandle(String method, String path) { + return getSanitizedPath(path).startsWith("/api"); + } + + @Override + public void handle(HttpServletRequest req, HttpServletResponse resp, String path, Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + final 
String sanitizedPath = getSanitizedPath(path); + if (sanitizedPath.equals(BASE_ROUTE)) { + handleRootApiRequest(req, resp, outFormat, io); + return; + } + io.notFound(resp, null, outFormat); + } + + private void handleRootApiRequest(HttpServletRequest req, HttpServletResponse resp, Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + io.getWriter().write(resp, HttpServletResponse.SC_OK, + createApiObject(VeeamControlService.ContextPath.value() + BASE_ROUTE), + outFormat); + } + + protected Api createApiObject(String basePath) { + Api api = new Api(); + + /* ---------------- Links ---------------- */ + List links = new ArrayList<>(); + add(links, basePath + "/clusters", "clusters"); + add(links, basePath + "/clusters?search={query}", "clusters/search"); + add(links, basePath + "/datacenters", "datacenters"); + add(links, basePath + "/datacenters?search={query}", "datacenters/search"); + add(links, basePath + "/hosts", "hosts"); + add(links, basePath + "/hosts?search={query}", "hosts/search"); + add(links, basePath + "/networks", "networks"); + add(links, basePath + "/networks?search={query}", "networks/search"); + add(links, basePath + "/storagedomains", "storagedomains"); + add(links, basePath + "/storagedomains?search={query}", "storagedomains/search"); + add(links, basePath + "/vms", "vms"); + add(links, basePath + "/vms?search={query}", "vms/search"); + add(links, basePath + "/disks", "disks"); + add(links, basePath + "/disks?search={query}", "disks/search"); + + api.setLink(links); + + /* ---------------- Engine backup ---------------- */ + api.setEngineBackup(new EmptyElement()); + + /* ---------------- Product info ---------------- */ + ProductInfo productInfo = new ProductInfo(); + productInfo.setInstanceId(UuidUtils.nameUUIDFromBytes( + VeeamControlService.BindAddress.value().getBytes(StandardCharsets.UTF_8)).toString()); + productInfo.name = VeeamControlService.PLUGIN_NAME; + + productInfo.version = 
Version.fromPackageAndCSVersion(true); + api.setProductInfo(productInfo); + + /* ---------------- Summary ---------------- */ + ApiSummary summary = new ApiSummary(); + summary.setHosts(new SummaryCount(1, 1)); + summary.setStorageDomains(new SummaryCount(1, 2)); + summary.setUsers(new SummaryCount(1, 1)); + summary.setVms(new SummaryCount(1, 8)); + api.setSummary(summary); + + /* ---------------- Time ---------------- */ + api.setTime(System.currentTimeMillis()); + + /* ---------------- Users ---------------- */ + String userId = serverAdapter.getServiceAccount().first().getUuid(); + api.setAuthenticatedUser(Ref.of(basePath + "/users/" + userId, userId)); + api.setEffectiveUser(Ref.of(basePath + "/users/" + userId, userId)); + + return api; + } + + private static void add(List links, String href, String rel) { + links.add(Link.of(href, rel)); + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/ClustersRouteHandler.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/ClustersRouteHandler.java new file mode 100644 index 000000000000..f4107ff3735e --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/ClustersRouteHandler.java @@ -0,0 +1,110 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api; + +import java.io.IOException; +import java.util.List; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.cloudstack.veeam.RouteHandler; +import org.apache.cloudstack.veeam.VeeamControlServlet; +import org.apache.cloudstack.veeam.adapter.ServerAdapter; +import org.apache.cloudstack.veeam.api.dto.Cluster; +import org.apache.cloudstack.veeam.api.dto.NamedList; +import org.apache.cloudstack.veeam.api.request.ListQuery; +import org.apache.cloudstack.veeam.utils.Negotiation; +import org.apache.cloudstack.veeam.utils.PathUtil; +import org.apache.commons.collections.CollectionUtils; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.utils.component.ManagerBase; + +public class ClustersRouteHandler extends ManagerBase implements RouteHandler { + public static final String BASE_ROUTE = "/api/clusters"; + + @Inject + ServerAdapter serverAdapter; + + @Override + public boolean start() { + return true; + } + + @Override + public int priority() { + return 5; + } + + @Override + public boolean canHandle(String method, String path) { + return getSanitizedPath(path).startsWith(BASE_ROUTE); + } + + @Override + public void handle(HttpServletRequest req, HttpServletResponse resp, String path, Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + final String method = req.getMethod(); + if (!"GET".equalsIgnoreCase(method)) 
{ + io.methodNotAllowed(resp, "GET", outFormat); + return; + } + final String sanitizedPath = getSanitizedPath(path); + if (sanitizedPath.equals(BASE_ROUTE)) { + handleGet(req, resp, outFormat, io); + return; + } + + List idAndSubPath = PathUtil.extractIdAndSubPath(sanitizedPath, BASE_ROUTE); + if (CollectionUtils.isNotEmpty(idAndSubPath)) { + String id = idAndSubPath.get(0); + if (idAndSubPath.size() == 1) { + handleGetById(id, resp, outFormat, io); + return; + } + } + + io.notFound(resp, null, outFormat); + } + + protected void handleGet(final HttpServletRequest req, final HttpServletResponse resp, + Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + try { + ListQuery query = ListQuery.fromRequest(req); + final List result = serverAdapter.listAllClusters(query.getOffset(), query.getLimit()); + NamedList response = NamedList.of("cluster", result); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (PermissionDeniedException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handleGetById(final String id, final HttpServletResponse resp, final Negotiation.OutFormat outFormat, + final VeeamControlServlet io) throws IOException { + try { + Cluster response = serverAdapter.getCluster(id); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (InvalidParameterValueException e) { + io.notFound(resp, e.getMessage(), outFormat); + } catch (PermissionDeniedException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/DataCentersRouteHandler.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/DataCentersRouteHandler.java new file mode 100644 index 000000000000..a06af4f24429 --- /dev/null +++ 
b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/DataCentersRouteHandler.java @@ -0,0 +1,151 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api; + +import java.io.IOException; +import java.util.List; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.cloudstack.veeam.RouteHandler; +import org.apache.cloudstack.veeam.VeeamControlServlet; +import org.apache.cloudstack.veeam.adapter.ServerAdapter; +import org.apache.cloudstack.veeam.api.dto.DataCenter; +import org.apache.cloudstack.veeam.api.dto.NamedList; +import org.apache.cloudstack.veeam.api.dto.Network; +import org.apache.cloudstack.veeam.api.dto.StorageDomain; +import org.apache.cloudstack.veeam.api.request.ListQuery; +import org.apache.cloudstack.veeam.utils.Negotiation; +import org.apache.cloudstack.veeam.utils.PathUtil; +import org.apache.commons.collections.CollectionUtils; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.utils.component.ManagerBase; +import 
com.cloud.utils.exception.CloudRuntimeException; + +public class DataCentersRouteHandler extends ManagerBase implements RouteHandler { + public static final String BASE_ROUTE = "/api/datacenters"; + + @Inject + ServerAdapter serverAdapter; + + @Override + public boolean start() { + return true; + } + + @Override + public int priority() { + return 5; + } + + @Override + public boolean canHandle(String method, String path) { + return getSanitizedPath(path).startsWith(BASE_ROUTE); + } + + @Override + public void handle(HttpServletRequest req, HttpServletResponse resp, String path, Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + final String method = req.getMethod(); + if (!"GET".equalsIgnoreCase(method)) { + io.methodNotAllowed(resp, "GET", outFormat); + return; + } + final String sanitizedPath = getSanitizedPath(path); + if (sanitizedPath.equals(BASE_ROUTE)) { + handleGet(req, resp, outFormat, io); + return; + } + + List idAndSubPath = PathUtil.extractIdAndSubPath(sanitizedPath, BASE_ROUTE); + if (CollectionUtils.isNotEmpty(idAndSubPath)) { + String id = idAndSubPath.get(0); + if (idAndSubPath.size() == 1) { + handleGetById(id, resp, outFormat, io); + return; + } else if (idAndSubPath.size() == 2) { + String subPath = idAndSubPath.get(1); + if ("storagedomains".equals(subPath)) { + handleGetStorageDomainsByDcId(id, req, resp, outFormat, io); + return; + } + if ("networks".equals(subPath)) { + handleGetNetworksByDcId(id, req, resp, outFormat, io); + return; + } + } + } + + io.notFound(resp, null, outFormat); + } + + protected void handleGet(final HttpServletRequest req, final HttpServletResponse resp, + Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + ListQuery query = ListQuery.fromRequest(req); + final List result = serverAdapter.listAllDataCenters(query.getOffset(), query.getLimit()); + NamedList response = NamedList.of("data_center", result); + io.getWriter().write(resp, HttpServletResponse.SC_OK, 
response, outFormat); + } + + protected void handleGetById(final String id, final HttpServletResponse resp, final Negotiation.OutFormat outFormat, + final VeeamControlServlet io) throws IOException { + try { + DataCenter response = serverAdapter.getDataCenter(id); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (InvalidParameterValueException e) { + io.notFound(resp, e.getMessage(), outFormat); + } catch (CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handleGetStorageDomainsByDcId(final String id, final HttpServletRequest req, + final HttpServletResponse resp, final Negotiation.OutFormat outFormat, final VeeamControlServlet io) + throws IOException { + try { + ListQuery query = ListQuery.fromRequest(req); + List storageDomains = serverAdapter.listStorageDomainsByDcId(id, query.getOffset(), + query.getMax()); + NamedList response = NamedList.of("storage_domain", storageDomains); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (InvalidParameterValueException e) { + io.notFound(resp, e.getMessage(), outFormat); + } catch (PermissionDeniedException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handleGetNetworksByDcId(final String id, final HttpServletRequest req, + final HttpServletResponse resp, final Negotiation.OutFormat outFormat, final VeeamControlServlet io) + throws IOException { + try { + ListQuery query = ListQuery.fromRequest(req); + List networks = serverAdapter.listNetworksByDcId(id, query.getOffset(), + query.getMax()); + NamedList response = NamedList.of("network", networks); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (InvalidParameterValueException e) { + io.notFound(resp, e.getMessage(), outFormat); + } catch (PermissionDeniedException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } +} diff --git 
a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/DisksRouteHandler.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/DisksRouteHandler.java new file mode 100644 index 000000000000..d12745769e1b --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/DisksRouteHandler.java @@ -0,0 +1,201 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api; + +import java.io.IOException; +import java.util.List; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.cloudstack.veeam.RouteHandler; +import org.apache.cloudstack.veeam.VeeamControlServlet; +import org.apache.cloudstack.veeam.adapter.ServerAdapter; +import org.apache.cloudstack.veeam.api.dto.Disk; +import org.apache.cloudstack.veeam.api.dto.NamedList; +import org.apache.cloudstack.veeam.api.request.ListQuery; +import org.apache.cloudstack.veeam.utils.Negotiation; +import org.apache.cloudstack.veeam.utils.PathUtil; +import org.apache.commons.collections.CollectionUtils; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.exception.CloudRuntimeException; +import com.fasterxml.jackson.core.JsonProcessingException; + +public class DisksRouteHandler extends ManagerBase implements RouteHandler { + public static final String BASE_ROUTE = "/api/disks"; + + @Inject + ServerAdapter serverAdapter; + + @Override + public boolean start() { + return true; + } + + @Override + public int priority() { + return 5; + } + + @Override + public boolean canHandle(String method, String path) { + return getSanitizedPath(path).startsWith(BASE_ROUTE); + } + + @Override + public void handle(HttpServletRequest req, HttpServletResponse resp, String path, Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + final String method = req.getMethod(); + final String sanitizedPath = getSanitizedPath(path); + if (sanitizedPath.equals(BASE_ROUTE)) { + if ("GET".equalsIgnoreCase(method)) { + handleGet(req, resp, outFormat, io); + return; + } + if ("POST".equalsIgnoreCase(method)) { + handlePost(req, resp, outFormat, io); + return; + } + } + + List idAndSubPath = PathUtil.extractIdAndSubPath(sanitizedPath, BASE_ROUTE); + if 
(CollectionUtils.isNotEmpty(idAndSubPath)) { + String id = idAndSubPath.get(0); + if (idAndSubPath.size() == 1) { + if (!"GET".equalsIgnoreCase(method) && !"DELETE".equalsIgnoreCase(method) && + !"PUT".equalsIgnoreCase(method)) { + io.methodNotAllowed(resp, "GET, DELETE, PUT", outFormat); + return; + } + if ("GET".equalsIgnoreCase(method)) { + handleGetById(id, resp, outFormat, io); + return; + } + if ("DELETE".equalsIgnoreCase(method)) { + handleDeleteById(id, resp, outFormat, io); + return; + } + if ("PUT".equalsIgnoreCase(method)) { + handlePutById(id, req, resp, outFormat, io); + return; + } + } else if (idAndSubPath.size() == 2) { + String subPath = idAndSubPath.get(1); + if ("copy".equals(subPath)) { + if ("POST".equalsIgnoreCase(method)) { + handlePostDiskCopy(id, req, resp, outFormat, io); + } else { + io.methodNotAllowed(resp, "POST", outFormat); + } + return; + } else if ("reduce".equals(subPath)) { + if ("POST".equalsIgnoreCase(method)) { + handlePostDiskReduce(id, req, resp, outFormat, io); + } else { + io.methodNotAllowed(resp, "POST", outFormat); + } + return; + } + } + } + + io.notFound(resp, null, outFormat); + } + + protected void handleGet(final HttpServletRequest req, final HttpServletResponse resp, + Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + try { + ListQuery query = ListQuery.fromRequest(req); + final List result = serverAdapter.listAllDisks(query.getOffset(), query.getLimit()); + NamedList response = NamedList.of("disk", result); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handlePost(final HttpServletRequest req, final HttpServletResponse resp, + Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + String data = RouteHandler.getRequestData(req, logger); + try { + Disk request = io.getMapper().jsonMapper().readValue(data, Disk.class); 
+ Disk response = serverAdapter.createDisk(request); + io.getWriter().write(resp, HttpServletResponse.SC_CREATED, response, outFormat); + } catch (JsonProcessingException | CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handleGetById(final String id, final HttpServletResponse resp, final Negotiation.OutFormat outFormat, + final VeeamControlServlet io) throws IOException { + try { + Disk response = serverAdapter.getDisk(id); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (InvalidParameterValueException e) { + io.notFound(resp, e.getMessage(), outFormat); + } catch (CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handleDeleteById(final String id, final HttpServletResponse resp, final Negotiation.OutFormat outFormat, + final VeeamControlServlet io) throws IOException { + try { + serverAdapter.deleteDisk(id); + io.getWriter().write(resp, HttpServletResponse.SC_OK, "Deleted disk ID: " + id, outFormat); + } catch (CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handlePutById(final String id, final HttpServletRequest req, final HttpServletResponse resp, + final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException { + String data = RouteHandler.getRequestData(req, logger); + try { + Disk request = io.getMapper().jsonMapper().readValue(data, Disk.class); + Disk response = serverAdapter.updateDisk(id, request); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (JsonProcessingException | CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handlePostDiskCopy(final String id, final HttpServletRequest req, final HttpServletResponse resp, + final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException { + String data = 
RouteHandler.getRequestData(req, logger); + try { + Disk response = serverAdapter.copyDisk(id); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handlePostDiskReduce(final String id, final HttpServletRequest req, final HttpServletResponse resp, + final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException { + String data = RouteHandler.getRequestData(req, logger); + try { + Disk response = serverAdapter.reduceDisk(id); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/HostsRouteHandler.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/HostsRouteHandler.java new file mode 100644 index 000000000000..931291714c6c --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/HostsRouteHandler.java @@ -0,0 +1,110 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api; + +import java.io.IOException; +import java.util.List; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.cloudstack.veeam.RouteHandler; +import org.apache.cloudstack.veeam.VeeamControlServlet; +import org.apache.cloudstack.veeam.adapter.ServerAdapter; +import org.apache.cloudstack.veeam.api.dto.Host; +import org.apache.cloudstack.veeam.api.dto.NamedList; +import org.apache.cloudstack.veeam.api.request.ListQuery; +import org.apache.cloudstack.veeam.utils.Negotiation; +import org.apache.cloudstack.veeam.utils.PathUtil; +import org.apache.commons.collections.CollectionUtils; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.utils.component.ManagerBase; + +public class HostsRouteHandler extends ManagerBase implements RouteHandler { + public static final String BASE_ROUTE = "/api/hosts"; + + @Inject + ServerAdapter serverAdapter; + + @Override + public boolean start() { + return true; + } + + @Override + public int priority() { + return 5; + } + + @Override + public boolean canHandle(String method, String path) { + return getSanitizedPath(path).startsWith(BASE_ROUTE); + } + + @Override + public void handle(HttpServletRequest req, HttpServletResponse resp, String path, Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + final String method = req.getMethod(); + if (!"GET".equalsIgnoreCase(method)) { + io.methodNotAllowed(resp, "GET", outFormat); + return; + } + final String sanitizedPath = getSanitizedPath(path); + if (sanitizedPath.equals(BASE_ROUTE)) { + handleGet(req, resp, outFormat, io); + return; + } + + List idAndSubPath = PathUtil.extractIdAndSubPath(sanitizedPath, BASE_ROUTE); + if 
(CollectionUtils.isNotEmpty(idAndSubPath)) { + String id = idAndSubPath.get(0); + if (idAndSubPath.size() == 1) { + handleGetById(id, resp, outFormat, io); + return; + } + } + + io.notFound(resp, null, outFormat); + } + + protected void handleGet(final HttpServletRequest req, final HttpServletResponse resp, + Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + try { + ListQuery query = ListQuery.fromRequest(req); + final List result = serverAdapter.listAllHosts(query.getOffset(), query.getLimit()); + NamedList response = NamedList.of("host", result); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (PermissionDeniedException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handleGetById(final String id, final HttpServletResponse resp, final Negotiation.OutFormat outFormat, + final VeeamControlServlet io) throws IOException { + try { + Host response = serverAdapter.getHost(id); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (InvalidParameterValueException e) { + io.notFound(resp, e.getMessage(), outFormat); + } catch (PermissionDeniedException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/ImageTransfersRouteHandler.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/ImageTransfersRouteHandler.java new file mode 100644 index 000000000000..1a04e4028cf0 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/ImageTransfersRouteHandler.java @@ -0,0 +1,162 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api; + +import java.io.IOException; +import java.util.List; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.cloudstack.veeam.RouteHandler; +import org.apache.cloudstack.veeam.VeeamControlServlet; +import org.apache.cloudstack.veeam.adapter.ServerAdapter; +import org.apache.cloudstack.veeam.api.dto.ImageTransfer; +import org.apache.cloudstack.veeam.api.dto.NamedList; +import org.apache.cloudstack.veeam.api.request.ListQuery; +import org.apache.cloudstack.veeam.utils.Negotiation; +import org.apache.cloudstack.veeam.utils.PathUtil; +import org.apache.commons.collections.CollectionUtils; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.exception.CloudRuntimeException; +import com.fasterxml.jackson.core.JsonProcessingException; + +public class ImageTransfersRouteHandler extends ManagerBase implements RouteHandler { + public static final String BASE_ROUTE = "/api/imagetransfers"; + + @Inject + ServerAdapter serverAdapter; + + @Override + public boolean start() { + return true; + } + + @Override + public int priority() { + return 5; + } + + @Override + public boolean canHandle(String method, String path) { + return getSanitizedPath(path).startsWith(BASE_ROUTE); + } + + 
@Override + public void handle(HttpServletRequest req, HttpServletResponse resp, String path, Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + final String method = req.getMethod(); + final String sanitizedPath = getSanitizedPath(path); + if (sanitizedPath.equals(BASE_ROUTE)) { + if ("GET".equalsIgnoreCase(method)) { + handleGet(req, resp, outFormat, io); + return; + } + if ("POST".equalsIgnoreCase(method)) { + handlePost(req, resp, outFormat, io); + return; + } + } + List idAndSubPath = PathUtil.extractIdAndSubPath(sanitizedPath, BASE_ROUTE); + if (CollectionUtils.isNotEmpty(idAndSubPath)) { + String id = idAndSubPath.get(0); + if (idAndSubPath.size() == 1) { + if (!"GET".equalsIgnoreCase(method)) { + io.methodNotAllowed(resp, "GET", outFormat); + return; + } + handleGetById(id, resp, outFormat, io); + return; + } else if (idAndSubPath.size() == 2) { + if (!"POST".equalsIgnoreCase(method)) { + io.methodNotAllowed(resp, "POST", outFormat); + return; + } + String subPath = idAndSubPath.get(1); + if ("cancel".equals(subPath)) { + handleCancelById(id, resp, outFormat, io); + return; + } + if ("finalize".equals(subPath)) { + handleFinalizeById(id, resp, outFormat, io); + return; + } + } + } + + io.notFound(resp, null, outFormat); + } + + protected void handleGet(final HttpServletRequest req, final HttpServletResponse resp, + Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + try { + ListQuery query = ListQuery.fromRequest(req); + final List result = serverAdapter.listAllImageTransfers(query.getOffset(), query.getLimit()); + NamedList response = NamedList.of("image_transfer", result); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handlePost(final HttpServletRequest req, final HttpServletResponse resp, + Negotiation.OutFormat outFormat, VeeamControlServlet io) throws 
IOException { + String data = RouteHandler.getRequestData(req, logger); + try { + ImageTransfer request = io.getMapper().jsonMapper().readValue(data, ImageTransfer.class); + ImageTransfer response = serverAdapter.createImageTransfer(request); + io.getWriter().write(resp, HttpServletResponse.SC_CREATED, response, outFormat); + } catch (JsonProcessingException | CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handleGetById(final String id, final HttpServletResponse resp, final Negotiation.OutFormat outFormat, + final VeeamControlServlet io) throws IOException { + try { + ImageTransfer response = serverAdapter.getImageTransfer(id); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (InvalidParameterValueException e) { + io.notFound(resp, e.getMessage(), outFormat); + } catch (CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handleCancelById(final String id, final HttpServletResponse resp, final Negotiation.OutFormat outFormat, + final VeeamControlServlet io) throws IOException { + try { + serverAdapter.cancelImageTransfer(id); + io.getWriter().write(resp, HttpServletResponse.SC_OK, "Image transfer cancelled successfully", outFormat); + } catch (CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handleFinalizeById(final String id, final HttpServletResponse resp, final Negotiation.OutFormat outFormat, + final VeeamControlServlet io) throws IOException { + try { + serverAdapter.finalizeImageTransfer(id); + io.getWriter().write(resp, HttpServletResponse.SC_OK, "Image transfer finalized successfully", outFormat); + } catch (CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/JobsRouteHandler.java 
b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/JobsRouteHandler.java new file mode 100644 index 000000000000..95e4e3c9559a --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/JobsRouteHandler.java @@ -0,0 +1,108 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api; + +import java.io.IOException; +import java.util.List; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.cloudstack.veeam.RouteHandler; +import org.apache.cloudstack.veeam.VeeamControlServlet; +import org.apache.cloudstack.veeam.adapter.ServerAdapter; +import org.apache.cloudstack.veeam.api.dto.Job; +import org.apache.cloudstack.veeam.api.dto.NamedList; +import org.apache.cloudstack.veeam.utils.Negotiation; +import org.apache.cloudstack.veeam.utils.PathUtil; +import org.apache.commons.collections.CollectionUtils; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.exception.CloudRuntimeException; + +public class JobsRouteHandler extends ManagerBase implements RouteHandler { + public static final String BASE_ROUTE = "/api/jobs"; + + @Inject + ServerAdapter serverAdapter; + + @Override + public boolean start() { + return true; + } + + @Override + public int priority() { + return 5; + } + + @Override + public boolean canHandle(String method, String path) { + return getSanitizedPath(path).startsWith(BASE_ROUTE); + } + + @Override + public void handle(HttpServletRequest req, HttpServletResponse resp, String path, Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + final String method = req.getMethod(); + if (!"GET".equalsIgnoreCase(method)) { + io.methodNotAllowed(resp, "GET", outFormat); + return; + } + final String sanitizedPath = getSanitizedPath(path); + if (sanitizedPath.equals(BASE_ROUTE)) { + handleGet(req, resp, outFormat, io); + return; + } + + List idAndSubPath = PathUtil.extractIdAndSubPath(sanitizedPath, BASE_ROUTE); + if (CollectionUtils.isNotEmpty(idAndSubPath)) { + String id = idAndSubPath.get(0); + if (idAndSubPath.size() == 1) { + handleGetById(id, resp, outFormat, io); + return; + } + } + + 
io.notFound(resp, null, outFormat); + } + + protected void handleGet(final HttpServletRequest req, final HttpServletResponse resp, + Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + try { + final List result = serverAdapter.listPendingJobs(); + NamedList response = NamedList.of("job", result); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } + + protected void handleGetById(final String id, final HttpServletResponse resp, final Negotiation.OutFormat outFormat, + final VeeamControlServlet io) throws IOException { + try { + Job response = serverAdapter.getJob(id); + io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat); + } catch (InvalidParameterValueException e) { + io.notFound(resp, e.getMessage(), outFormat); + } catch (CloudRuntimeException e) { + io.badRequest(resp, e.getMessage(), outFormat); + } + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/NetworksRouteHandler.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/NetworksRouteHandler.java new file mode 100644 index 000000000000..2d1f0962c2b6 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/NetworksRouteHandler.java @@ -0,0 +1,110 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
package org.apache.cloudstack.veeam.api;

import java.io.IOException;
import java.util.List;

import javax.inject.Inject;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.cloudstack.veeam.RouteHandler;
import org.apache.cloudstack.veeam.VeeamControlServlet;
import org.apache.cloudstack.veeam.adapter.ServerAdapter;
import org.apache.cloudstack.veeam.api.dto.NamedList;
import org.apache.cloudstack.veeam.api.dto.Network;
import org.apache.cloudstack.veeam.api.request.ListQuery;
import org.apache.cloudstack.veeam.utils.Negotiation;
import org.apache.cloudstack.veeam.utils.PathUtil;
import org.apache.commons.collections.CollectionUtils;

import com.cloud.exception.InvalidParameterValueException;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.exception.CloudRuntimeException;

/**
 * Read-only route handler for the {@code /api/networks} endpoint.
 * Supports a paged listing and fetching a single network by id;
 * every non-GET method is rejected with 405.
 */
public class NetworksRouteHandler extends ManagerBase implements RouteHandler {
    public static final String BASE_ROUTE = "/api/networks";

    @Inject
    ServerAdapter serverAdapter;

    @Override
    public boolean start() {
        return true;
    }

    @Override
    public int priority() {
        return 5;
    }

    @Override
    public boolean canHandle(String method, String path) {
        // Claim every path under the base route; method filtering happens in handle().
        return getSanitizedPath(path).startsWith(BASE_ROUTE);
    }

    @Override
    public void handle(HttpServletRequest req, HttpServletResponse resp, String path, Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException {
        if (!"GET".equalsIgnoreCase(req.getMethod())) {
            // Endpoint is read-only.
            io.methodNotAllowed(resp, "GET", outFormat);
            return;
        }
        final String route = getSanitizedPath(path);
        if (BASE_ROUTE.equals(route)) {
            handleGet(req, resp, outFormat, io);
            return;
        }
        final List<String> segments = PathUtil.extractIdAndSubPath(route, BASE_ROUTE);
        if (CollectionUtils.isNotEmpty(segments) && segments.size() == 1) {
            handleGetById(segments.get(0), resp, outFormat, io);
            return;
        }
        // No deeper sub-resources exist for networks.
        io.notFound(resp, null, outFormat);
    }

    protected void handleGet(final HttpServletRequest req, final HttpServletResponse resp,
            Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException {
        try {
            // Paging (offset/limit) is parsed from the request's query parameters.
            final ListQuery paging = ListQuery.fromRequest(req);
            final List<Network> networks = serverAdapter.listAllNetworks(paging.getOffset(), paging.getLimit());
            io.getWriter().write(resp, HttpServletResponse.SC_OK, NamedList.of("network", networks), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    protected void handleGetById(final String id, final HttpServletResponse resp, final Negotiation.OutFormat outFormat,
            final VeeamControlServlet io) throws IOException {
        try {
            final Network network = serverAdapter.getNetwork(id);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, network, outFormat);
        } catch (InvalidParameterValueException e) {
            // Unknown id maps to 404.
            io.notFound(resp, e.getMessage(), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }
}
package org.apache.cloudstack.veeam.api;

import java.io.IOException;
import java.util.List;

import javax.inject.Inject;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.cloudstack.veeam.RouteHandler;
import org.apache.cloudstack.veeam.VeeamControlServlet;
import org.apache.cloudstack.veeam.adapter.ServerAdapter;
import org.apache.cloudstack.veeam.api.dto.NamedList;
import org.apache.cloudstack.veeam.api.dto.Tag;
import org.apache.cloudstack.veeam.api.request.ListQuery;
import org.apache.cloudstack.veeam.utils.Negotiation;
import org.apache.cloudstack.veeam.utils.PathUtil;
import org.apache.commons.collections.CollectionUtils;

import com.cloud.exception.InvalidParameterValueException;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.exception.CloudRuntimeException;

/**
 * Read-only route handler for the {@code /api/tags} endpoint.
 * Supports a paged listing and fetching a single tag by id;
 * every non-GET method is rejected with 405.
 */
public class TagsRouteHandler extends ManagerBase implements RouteHandler {
    public static final String BASE_ROUTE = "/api/tags";

    @Inject
    ServerAdapter serverAdapter;

    @Override
    public boolean start() {
        return true;
    }

    @Override
    public int priority() {
        return 5;
    }

    @Override
    public boolean canHandle(String method, String path) {
        // Claim every path under the base route; method filtering happens in handle().
        return getSanitizedPath(path).startsWith(BASE_ROUTE);
    }

    @Override
    public void handle(HttpServletRequest req, HttpServletResponse resp, String path, Negotiation.OutFormat outFormat,
            VeeamControlServlet io) throws IOException {
        if (!"GET".equalsIgnoreCase(req.getMethod())) {
            // Endpoint is read-only.
            io.methodNotAllowed(resp, "GET", outFormat);
            return;
        }
        final String route = getSanitizedPath(path);
        if (BASE_ROUTE.equals(route)) {
            handleGet(req, resp, outFormat, io);
            return;
        }
        final List<String> segments = PathUtil.extractIdAndSubPath(route, BASE_ROUTE);
        if (CollectionUtils.isNotEmpty(segments) && segments.size() == 1) {
            handleGetById(segments.get(0), resp, outFormat, io);
            return;
        }
        // No deeper sub-resources exist for tags.
        io.notFound(resp, null, outFormat);
    }

    protected void handleGet(final HttpServletRequest req, final HttpServletResponse resp,
            Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException {
        try {
            // Paging (offset/limit) is parsed from the request's query parameters.
            final ListQuery paging = ListQuery.fromRequest(req);
            final List<Tag> tags = serverAdapter.listAllTags(paging.getOffset(), paging.getLimit());
            io.getWriter().write(resp, HttpServletResponse.SC_OK, NamedList.of("tag", tags), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    protected void handleGetById(final String id, final HttpServletResponse resp, final Negotiation.OutFormat outFormat,
            final VeeamControlServlet io) throws IOException {
        try {
            final Tag tag = serverAdapter.getTag(id);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, tag, outFormat);
        } catch (InvalidParameterValueException e) {
            // Unknown id maps to 404.
            io.notFound(resp, e.getMessage(), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }
}
package org.apache.cloudstack.veeam.api;

import java.io.IOException;
import java.util.List;

import javax.inject.Inject;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.cloudstack.veeam.RouteHandler;
import org.apache.cloudstack.veeam.VeeamControlServlet;
import org.apache.cloudstack.veeam.adapter.ServerAdapter;
import org.apache.cloudstack.veeam.api.dto.Backup;
import org.apache.cloudstack.veeam.api.dto.Checkpoint;
import org.apache.cloudstack.veeam.api.dto.Disk;
import org.apache.cloudstack.veeam.api.dto.DiskAttachment;
import org.apache.cloudstack.veeam.api.dto.NamedList;
import org.apache.cloudstack.veeam.api.dto.Nic;
import org.apache.cloudstack.veeam.api.dto.ResourceAction;
import org.apache.cloudstack.veeam.api.dto.Snapshot;
import org.apache.cloudstack.veeam.api.dto.Vm;
import org.apache.cloudstack.veeam.api.dto.VmAction;
import org.apache.cloudstack.veeam.api.request.ListQuery;
import org.apache.cloudstack.veeam.utils.Negotiation;
import org.apache.cloudstack.veeam.utils.PathUtil;
import org.apache.commons.collections.CollectionUtils;

import com.cloud.exception.InvalidParameterValueException;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.exception.CloudRuntimeException;
import com.fasterxml.jackson.core.JsonProcessingException;

/**
 * Route handler for the {@code /api/vms} endpoint and its sub-resources.
 *
 * Routes (all relative to {@code /api/vms}):
 *   GET/POST                      — list / create instances
 *   GET/PUT/DELETE {id}           — fetch / update / delete one instance
 *   POST {id}/start|stop|shutdown — power actions
 *   GET/POST {id}/diskattachments, {id}/nics, {id}/snapshots, {id}/backups
 *   GET {id}/checkpoints
 *   GET/DELETE {id}/snapshots/{sid}; POST {id}/snapshots/{sid}/restore
 *   GET {id}/backups/{bid}; GET {id}/backups/{bid}/disks; POST {id}/backups/{bid}/finalize
 *   DELETE {id}/checkpoints/{cid}
 *
 * Error mapping: InvalidParameterValueException -> 404,
 * CloudRuntimeException and malformed JSON -> 400.
 */
public class VmsRouteHandler extends ManagerBase implements RouteHandler {
    public static final String BASE_ROUTE = "/api/vms";

    @Inject
    ServerAdapter serverAdapter;

    @Override
    public int priority() {
        return 5;
    }

    @Override
    public boolean canHandle(String method, String path) {
        // Claim every path under the base route; method filtering happens in handle().
        return getSanitizedPath(path).startsWith(BASE_ROUTE);
    }

    @Override
    public void handle(HttpServletRequest req, HttpServletResponse resp, String path, Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException {
        final String method = req.getMethod();
        final String sanitizedPath = getSanitizedPath(path);
        if (sanitizedPath.equals(BASE_ROUTE)) {
            if (!"GET".equalsIgnoreCase(method) && !"POST".equalsIgnoreCase(method) && !"DELETE".equalsIgnoreCase(method)) {
                io.methodNotAllowed(resp, "GET, POST, DELETE", outFormat);
                return;
            }
            if ("GET".equalsIgnoreCase(method)) {
                handleGet(req, resp, outFormat, io);
                return;
            }
            if ("POST".equalsIgnoreCase(method)) {
                handlePost(req, resp, outFormat, io);
                return;
            }
            // NOTE(review): DELETE is advertised as allowed on the collection but has no
            // handler here, so it falls through to 404 below — confirm this is intended.
        }

        // Split "/api/vms/{id}[/{sub}[/{subId}[/{action}]]]" into its path segments.
        List<String> idAndSubPath = PathUtil.extractIdAndSubPath(sanitizedPath, BASE_ROUTE);
        if (CollectionUtils.isNotEmpty(idAndSubPath)) {
            String id = idAndSubPath.get(0);
            if (idAndSubPath.size() == 1) {
                // /api/vms/{id}
                if (!"GET".equalsIgnoreCase(method) && !"PUT".equalsIgnoreCase(method) && !"DELETE".equalsIgnoreCase(method)) {
                    io.methodNotAllowed(resp, "GET, PUT, DELETE", outFormat);
                } else if ("GET".equalsIgnoreCase(method)) {
                    handleGetById(id, req, resp, outFormat, io);
                } else if ("PUT".equalsIgnoreCase(method)) {
                    handleUpdateById(id, req, resp, outFormat, io);
                } else if ("DELETE".equalsIgnoreCase(method)) {
                    handleDeleteById(id, req, resp, outFormat, io);
                }
                return;
            } else if (idAndSubPath.size() == 2) {
                // /api/vms/{id}/{subPath}
                String subPath = idAndSubPath.get(1);
                if ("start".equals(subPath)) {
                    if ("POST".equalsIgnoreCase(method)) {
                        handleStartVmById(id, req, resp, outFormat, io);
                    } else {
                        io.methodNotAllowed(resp, "POST", outFormat);
                    }
                    return;
                } else if ("stop".equals(subPath)) {
                    if ("POST".equalsIgnoreCase(method)) {
                        handleStopVmById(id, req, resp, outFormat, io);
                    } else {
                        io.methodNotAllowed(resp, "POST", outFormat);
                    }
                    return;
                } else if ("shutdown".equals(subPath)) {
                    if ("POST".equalsIgnoreCase(method)) {
                        handleShutdownVmById(id, req, resp, outFormat, io);
                    } else {
                        io.methodNotAllowed(resp, "POST", outFormat);
                    }
                    return;
                } else if ("diskattachments".equals(subPath)) {
                    if (!"GET".equalsIgnoreCase(method) && !"POST".equalsIgnoreCase(method)) {
                        io.methodNotAllowed(resp, "GET, POST", outFormat);
                    } else if ("GET".equalsIgnoreCase(method)) {
                        handleGetDiskAttachmentsByVmId(id, resp, outFormat, io);
                    } else if ("POST".equalsIgnoreCase(method)) {
                        handlePostDiskAttachmentForVmId(id, req, resp, outFormat, io);
                    }
                    return;
                } else if ("nics".equals(subPath)) {
                    if (!"GET".equalsIgnoreCase(method) && !"POST".equalsIgnoreCase(method)) {
                        io.methodNotAllowed(resp, "GET, POST", outFormat);
                    } else if ("GET".equalsIgnoreCase(method)) {
                        handleGetNicsByVmId(id, resp, outFormat, io);
                    } else if ("POST".equalsIgnoreCase(method)) {
                        handlePostNicForVmId(id, req, resp, outFormat, io);
                    }
                    return;
                } else if ("snapshots".equals(subPath)) {
                    if (!"GET".equalsIgnoreCase(method) && !"POST".equalsIgnoreCase(method)) {
                        io.methodNotAllowed(resp, "GET, POST", outFormat);
                    } else if ("GET".equalsIgnoreCase(method)) {
                        handleGetSnapshotsByVmId(id, resp, outFormat, io);
                    } else if ("POST".equalsIgnoreCase(method)) {
                        handlePostSnapshotForVmId(id, req, resp, outFormat, io);
                    }
                    return;
                } else if ("backups".equals(subPath)) {
                    if (!"GET".equalsIgnoreCase(method) && !"POST".equalsIgnoreCase(method)) {
                        io.methodNotAllowed(resp, "GET, POST", outFormat);
                    } else if ("GET".equalsIgnoreCase(method)) {
                        handleGetBackupsByVmId(id, resp, outFormat, io);
                    } else if ("POST".equalsIgnoreCase(method)) {
                        handlePostBackupForVmId(id, req, resp, outFormat, io);
                    }
                    return;
                } else if ("checkpoints".equals(subPath)) {
                    if ("GET".equalsIgnoreCase(method)) {
                        handleGetCheckpointsByVmId(id, resp, outFormat, io);
                    } else {
                        // Only GET is handled here, so advertise only GET in the 405
                        // (previously the Allow list claimed POST was supported too).
                        io.methodNotAllowed(resp, "GET", outFormat);
                    }
                    return;
                }
            } else if (idAndSubPath.size() == 3) {
                // /api/vms/{id}/{subPath}/{subId}
                String subPath = idAndSubPath.get(1);
                String subId = idAndSubPath.get(2);
                if ("snapshots".equals(subPath)) {
                    if (!"GET".equalsIgnoreCase(method) && !"DELETE".equalsIgnoreCase(method)) {
                        io.methodNotAllowed(resp, "GET, DELETE", outFormat);
                    } else if ("GET".equalsIgnoreCase(method)) {
                        handleGetSnapshotById(subId, resp, outFormat, io);
                    } else if ("DELETE".equalsIgnoreCase(method)) {
                        handleDeleteSnapshotById(subId, req, resp, outFormat, io);
                    }
                    return;
                } else if ("backups".equals(subPath)) {
                    if ("GET".equalsIgnoreCase(method)) {
                        handleGetBackupById(subId, resp, outFormat, io);
                    } else {
                        io.methodNotAllowed(resp, "GET", outFormat);
                    }
                    return;
                } else if ("checkpoints".equals(subPath)) {
                    if ("DELETE".equalsIgnoreCase(method)) {
                        handleDeleteCheckpoint(id, subId, resp, outFormat, io);
                    } else {
                        io.methodNotAllowed(resp, "DELETE", outFormat);
                    }
                    return;
                }
            } else if (idAndSubPath.size() == 4) {
                // /api/vms/{id}/{subPath}/{subId}/{action}
                String subPath = idAndSubPath.get(1);
                String subId = idAndSubPath.get(2);
                String action = idAndSubPath.get(3);
                if ("snapshots".equals(subPath) && "restore".equals(action)) {
                    if ("POST".equalsIgnoreCase(method)) {
                        handleRestoreSnapshotById(subId, req, resp, outFormat, io);
                    } else {
                        io.methodNotAllowed(resp, "POST", outFormat);
                    }
                    return;
                } else if ("backups".equals(subPath) && "disks".equals(action)) {
                    if ("GET".equalsIgnoreCase(method)) {
                        handleGetBackupDisksById(subId, req, resp, outFormat, io);
                    } else {
                        io.methodNotAllowed(resp, "GET", outFormat);
                    }
                    return;
                } else if ("backups".equals(subPath) && "finalize".equals(action)) {
                    if ("POST".equalsIgnoreCase(method)) {
                        handleFinalizeBackupById(id, subId, req, resp, outFormat, io);
                    } else {
                        io.methodNotAllowed(resp, "POST", outFormat);
                    }
                    return;
                }
            }
        }

        io.notFound(resp, null, outFormat);
    }

    /** GET /api/vms — paged listing; "follow" query controls which nested content is expanded. */
    protected void handleGet(final HttpServletRequest req, final HttpServletResponse resp,
            Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException {
        try {
            ListQuery query = ListQuery.fromRequest(req);
            final List<Vm> result = serverAdapter.listAllInstances(query.followContains("tags"),
                    query.followContains("disk_attachments.disk"),
                    query.followContains("nics.reporteddevices"),
                    query.isAllContent(),
                    query.getOffset(),
                    query.getLimit());
            NamedList response = NamedList.of("vm", result);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** POST /api/vms — create an instance from the JSON body; malformed JSON maps to 400. */
    protected void handlePost(final HttpServletRequest req, final HttpServletResponse resp,
            Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException {
        String data = RouteHandler.getRequestData(req, logger);
        try {
            Vm request = io.getMapper().jsonMapper().readValue(data, Vm.class);
            Vm response = serverAdapter.createInstance(request);
            io.getWriter().write(resp, HttpServletResponse.SC_CREATED, response, outFormat);
        } catch (JsonProcessingException | CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** GET /api/vms/{id} — fetch one instance with optional nested expansion. */
    protected void handleGetById(final String id, final HttpServletRequest req, final HttpServletResponse resp,
            final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException {
        try {
            ListQuery query = ListQuery.fromRequest(req);
            Vm response = serverAdapter.getInstance(id,
                    query.followContains("tags"),
                    query.followContains("disk_attachments.disk"),
                    query.followContains("nics.reporteddevices"),
                    query.isAllContent());
            io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat);
        } catch (InvalidParameterValueException e) {
            io.notFound(resp, e.getMessage(), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** PUT /api/vms/{id} — update an instance from the JSON body. */
    protected void handleUpdateById(final String id, final HttpServletRequest req, final HttpServletResponse resp, final Negotiation.OutFormat outFormat,
            final VeeamControlServlet io) throws IOException {
        String data = RouteHandler.getRequestData(req, logger);
        try {
            Vm request = io.getMapper().jsonMapper().readValue(data, Vm.class);
            Vm response = serverAdapter.updateInstance(id, request);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat);
        } catch (JsonProcessingException | CloudRuntimeException e) {
            // Also catch JsonProcessingException so a malformed body yields 400,
            // consistent with handlePost, instead of escaping as an IOException.
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** DELETE /api/vms/{id} — delete an instance, optionally asynchronously. */
    protected void handleDeleteById(final String id, final HttpServletRequest req, final HttpServletResponse resp,
            final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException {
        boolean async = RouteHandler.isRequestAsync(req);
        try {
            VmAction vm = serverAdapter.deleteInstance(id, async);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, vm, outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** POST /api/vms/{id}/start — power on; replies 202 Accepted. */
    protected void handleStartVmById(final String id, final HttpServletRequest req, final HttpServletResponse resp,
            final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException {
        boolean async = RouteHandler.isRequestAsync(req);
        try {
            VmAction vm = serverAdapter.startInstance(id, async);
            io.getWriter().write(resp, HttpServletResponse.SC_ACCEPTED, vm, outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** POST /api/vms/{id}/stop — power off; replies 202 Accepted. */
    protected void handleStopVmById(final String id, final HttpServletRequest req, final HttpServletResponse resp,
            final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException {
        boolean async = RouteHandler.isRequestAsync(req);
        try {
            VmAction vm = serverAdapter.stopInstance(id, async);
            io.getWriter().write(resp, HttpServletResponse.SC_ACCEPTED, vm, outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** POST /api/vms/{id}/shutdown — graceful shutdown; replies 202 Accepted. */
    protected void handleShutdownVmById(final String id, final HttpServletRequest req, final HttpServletResponse resp,
            final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException {
        boolean async = RouteHandler.isRequestAsync(req);
        try {
            VmAction vm = serverAdapter.shutdownInstance(id, async);
            io.getWriter().write(resp, HttpServletResponse.SC_ACCEPTED, vm, outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** GET /api/vms/{id}/diskattachments — list the instance's disk attachments. */
    protected void handleGetDiskAttachmentsByVmId(final String id, final HttpServletResponse resp,
            final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException {
        try {
            List<DiskAttachment> disks = serverAdapter.listDiskAttachmentsByInstanceUuid(id);
            NamedList response = NamedList.of("disk_attachment", disks);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat);
        } catch (InvalidParameterValueException e) {
            io.notFound(resp, e.getMessage(), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** POST /api/vms/{id}/diskattachments — attach a disk described by the JSON body. */
    protected void handlePostDiskAttachmentForVmId(final String id, final HttpServletRequest req,
            final HttpServletResponse resp, final Negotiation.OutFormat outFormat, final VeeamControlServlet io)
            throws IOException {
        String data = RouteHandler.getRequestData(req, logger);
        try {
            DiskAttachment request = io.getMapper().jsonMapper().readValue(data, DiskAttachment.class);
            DiskAttachment response = serverAdapter.attachInstanceDisk(id, request);
            io.getWriter().write(resp, HttpServletResponse.SC_CREATED, response, outFormat);
        } catch (JsonProcessingException | CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** GET /api/vms/{id}/nics — list the instance's NICs. */
    protected void handleGetNicsByVmId(final String id, final HttpServletResponse resp,
            final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException {
        try {
            List<Nic> nics = serverAdapter.listNicsByInstanceUuid(id);
            NamedList response = NamedList.of("nic", nics);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat);
        } catch (InvalidParameterValueException e) {
            io.notFound(resp, e.getMessage(), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** POST /api/vms/{id}/nics — attach a NIC described by the JSON body. */
    protected void handlePostNicForVmId(final String id, final HttpServletRequest req,
            final HttpServletResponse resp, final Negotiation.OutFormat outFormat, final VeeamControlServlet io)
            throws IOException {
        String data = RouteHandler.getRequestData(req, logger);
        try {
            Nic request = io.getMapper().jsonMapper().readValue(data, Nic.class);
            Nic response = serverAdapter.attachInstanceNic(id, request);
            io.getWriter().write(resp, HttpServletResponse.SC_CREATED, response, outFormat);
        } catch (JsonProcessingException | CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** GET /api/vms/{id}/snapshots — list the instance's snapshots. */
    protected void handleGetSnapshotsByVmId(final String id, final HttpServletResponse resp,
            final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException {
        try {
            List<Snapshot> snapshots = serverAdapter.listSnapshotsByInstanceUuid(id);
            NamedList response = NamedList.of("snapshot", snapshots);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat);
        } catch (InvalidParameterValueException e) {
            io.notFound(resp, e.getMessage(), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** POST /api/vms/{id}/snapshots — create a snapshot; replies 202 Accepted. */
    protected void handlePostSnapshotForVmId(final String id, final HttpServletRequest req,
            final HttpServletResponse resp, final Negotiation.OutFormat outFormat, final VeeamControlServlet io)
            throws IOException {
        String data = RouteHandler.getRequestData(req, logger);
        try {
            Snapshot request = io.getMapper().jsonMapper().readValue(data, Snapshot.class);
            Snapshot response = serverAdapter.createInstanceSnapshot(id, request);
            io.getWriter().write(resp, HttpServletResponse.SC_ACCEPTED, response, outFormat);
        } catch (JsonProcessingException | CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** GET /api/vms/{id}/snapshots/{sid} — fetch one snapshot. */
    protected void handleGetSnapshotById(final String id, final HttpServletResponse resp,
            final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException {
        try {
            Snapshot response = serverAdapter.getSnapshot(id);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat);
        } catch (InvalidParameterValueException e) {
            io.notFound(resp, e.getMessage(), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** DELETE /api/vms/{id}/snapshots/{sid} — 202 with the action when async work started, else 200. */
    protected void handleDeleteSnapshotById(final String id, final HttpServletRequest req,
            final HttpServletResponse resp, final Negotiation.OutFormat outFormat, final VeeamControlServlet io)
            throws IOException {
        boolean async = RouteHandler.isRequestAsync(req);
        try {
            ResourceAction action = serverAdapter.deleteSnapshot(id, async);
            if (action != null) {
                io.getWriter().write(resp, HttpServletResponse.SC_ACCEPTED, action, outFormat);
            } else {
                io.getWriter().write(resp, HttpServletResponse.SC_OK, null, outFormat);
            }
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** POST /api/vms/{id}/snapshots/{sid}/restore — revert the instance to the snapshot. */
    protected void handleRestoreSnapshotById(final String id, final HttpServletRequest req,
            final HttpServletResponse resp, final Negotiation.OutFormat outFormat, final VeeamControlServlet io)
            throws IOException {
        boolean async = RouteHandler.isRequestAsync(req);
        // Drain (and log) the request body; its payload is currently ignored —
        // the revert is driven entirely by the snapshot id in the path.
        RouteHandler.getRequestData(req, logger);
        try {
            ResourceAction response = serverAdapter.revertInstanceToSnapshot(id, async);
            io.getWriter().write(resp, HttpServletResponse.SC_ACCEPTED, response, outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** GET /api/vms/{id}/backups — list the instance's backups. */
    protected void handleGetBackupsByVmId(final String id, final HttpServletResponse resp,
            final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException {
        try {
            List<Backup> backups = serverAdapter.listBackupsByInstanceUuid(id);
            NamedList response = NamedList.of("backup", backups);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat);
        } catch (InvalidParameterValueException e) {
            io.notFound(resp, e.getMessage(), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** POST /api/vms/{id}/backups — create a backup from the JSON body. */
    protected void handlePostBackupForVmId(final String id, final HttpServletRequest req,
            final HttpServletResponse resp, final Negotiation.OutFormat outFormat, final VeeamControlServlet io)
            throws IOException {
        String data = RouteHandler.getRequestData(req, logger);
        try {
            Backup request = io.getMapper().jsonMapper().readValue(data, Backup.class);
            Backup response = serverAdapter.createInstanceBackup(id, request);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat);
        } catch (JsonProcessingException | CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** GET /api/vms/{id}/backups/{bid} — fetch one backup. */
    protected void handleGetBackupById(final String id, final HttpServletResponse resp,
            final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException {
        try {
            Backup response = serverAdapter.getBackup(id);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat);
        } catch (InvalidParameterValueException e) {
            io.notFound(resp, e.getMessage(), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** GET /api/vms/{id}/backups/{bid}/disks — list the disks captured by a backup. */
    protected void handleGetBackupDisksById(final String id, final HttpServletRequest req,
            final HttpServletResponse resp, final Negotiation.OutFormat outFormat, final VeeamControlServlet io)
            throws IOException {
        try {
            List<Disk> disks = serverAdapter.listDisksByBackupUuid(id);
            NamedList response = NamedList.of("disk", disks);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat);
        } catch (InvalidParameterValueException e) {
            io.notFound(resp, e.getMessage(), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** POST /api/vms/{id}/backups/{bid}/finalize — complete a backup. */
    protected void handleFinalizeBackupById(final String vmId, final String backupId, final HttpServletRequest req,
            final HttpServletResponse resp, final Negotiation.OutFormat outFormat, final VeeamControlServlet io)
            throws IOException {
        try {
            Backup backup = serverAdapter.finalizeBackup(vmId, backupId);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, backup, outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** GET /api/vms/{id}/checkpoints — list the instance's checkpoints. */
    protected void handleGetCheckpointsByVmId(final String id, final HttpServletResponse resp,
            final Negotiation.OutFormat outFormat, final VeeamControlServlet io) throws IOException {
        try {
            List<Checkpoint> checkpoints = serverAdapter.listCheckpointsByInstanceUuid(id);
            // NOTE(review): the wrapper name "checkpoints" is plural, unlike every other
            // NamedList in this class ("snapshot", "backup", ...) — confirm against clients.
            NamedList response = NamedList.of("checkpoints", checkpoints);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, response, outFormat);
        } catch (InvalidParameterValueException e) {
            io.notFound(resp, e.getMessage(), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    /** DELETE /api/vms/{id}/checkpoints/{cid} — remove a checkpoint; 200 with empty body. */
    protected void handleDeleteCheckpoint(final String vmId, final String checkpointId,
            final HttpServletResponse resp, final Negotiation.OutFormat outFormat, final VeeamControlServlet io)
            throws IOException {
        try {
            serverAdapter.deleteCheckpoint(vmId, checkpointId);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, null, outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }
}
package org.apache.cloudstack.veeam.api;

import java.io.IOException;
import java.util.List;

import javax.inject.Inject;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.cloudstack.veeam.RouteHandler;
import org.apache.cloudstack.veeam.VeeamControlServlet;
import org.apache.cloudstack.veeam.adapter.ServerAdapter;
import org.apache.cloudstack.veeam.api.dto.NamedList;
import org.apache.cloudstack.veeam.api.dto.VnicProfile;
import org.apache.cloudstack.veeam.api.request.ListQuery;
import org.apache.cloudstack.veeam.utils.Negotiation;
import org.apache.cloudstack.veeam.utils.PathUtil;
import org.apache.commons.collections.CollectionUtils;

import com.cloud.exception.InvalidParameterValueException;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.exception.CloudRuntimeException;

/**
 * Read-only route handler for the {@code /api/vnicprofiles} endpoint.
 * Supports a paged listing and fetching a single vNIC profile by id;
 * every non-GET method is rejected with 405.
 */
public class VnicProfilesRouteHandler extends ManagerBase implements RouteHandler {
    public static final String BASE_ROUTE = "/api/vnicprofiles";

    @Inject
    ServerAdapter serverAdapter;

    @Override
    public boolean start() {
        return true;
    }

    @Override
    public int priority() {
        return 5;
    }

    @Override
    public boolean canHandle(String method, String path) {
        // Claim every path under the base route; method filtering happens in handle().
        return getSanitizedPath(path).startsWith(BASE_ROUTE);
    }

    @Override
    public void handle(HttpServletRequest req, HttpServletResponse resp, String path, Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException {
        if (!"GET".equalsIgnoreCase(req.getMethod())) {
            // Endpoint is read-only.
            io.methodNotAllowed(resp, "GET", outFormat);
            return;
        }
        final String route = getSanitizedPath(path);
        if (BASE_ROUTE.equals(route)) {
            handleGet(req, resp, outFormat, io);
            return;
        }
        final List<String> segments = PathUtil.extractIdAndSubPath(route, BASE_ROUTE);
        if (CollectionUtils.isNotEmpty(segments) && segments.size() == 1) {
            handleGetById(segments.get(0), resp, outFormat, io);
            return;
        }
        // No deeper sub-resources exist for vNIC profiles.
        io.notFound(resp, null, outFormat);
    }

    protected void handleGet(final HttpServletRequest req, final HttpServletResponse resp,
            Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException {
        try {
            // Paging (offset/limit) is parsed from the request's query parameters.
            final ListQuery paging = ListQuery.fromRequest(req);
            final List<VnicProfile> profiles = serverAdapter.listAllVnicProfiles(paging.getOffset(), paging.getLimit());
            io.getWriter().write(resp, HttpServletResponse.SC_OK, NamedList.of("vnic_profile", profiles), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }

    protected void handleGetById(final String id, final HttpServletResponse resp, final Negotiation.OutFormat outFormat,
            final VeeamControlServlet io) throws IOException {
        try {
            final VnicProfile profile = serverAdapter.getVnicProfile(id);
            io.getWriter().write(resp, HttpServletResponse.SC_OK, profile, outFormat);
        } catch (InvalidParameterValueException e) {
            // Unknown id maps to 404.
            io.notFound(resp, e.getMessage(), outFormat);
        } catch (CloudRuntimeException e) {
            io.badRequest(resp, e.getMessage(), outFormat);
        }
    }
}
a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/AsyncJobJoinVOToJobConverter.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/AsyncJobJoinVOToJobConverter.java new file mode 100644 index 000000000000..c50f4a0ecfe6 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/AsyncJobJoinVOToJobConverter.java @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.converter; + +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +import org.apache.cloudstack.jobs.JobInfo; +import org.apache.cloudstack.veeam.VeeamControlService; +import org.apache.cloudstack.veeam.api.JobsRouteHandler; +import org.apache.cloudstack.veeam.api.dto.Job; +import org.apache.cloudstack.veeam.api.dto.Ref; +import org.apache.cloudstack.veeam.api.dto.ResourceAction; +import org.apache.cloudstack.veeam.api.dto.VmAction; + +import com.cloud.api.query.vo.AsyncJobJoinVO; +import com.cloud.api.query.vo.UserVmJoinVO; + +public class AsyncJobJoinVOToJobConverter { + + public static Job toJob(AsyncJobJoinVO vo) { + Job job = new Job(); + final String basePath = VeeamControlService.ContextPath.value(); + job.setId(vo.getUuid()); + job.setHref(basePath + JobsRouteHandler.BASE_ROUTE + "/" + vo.getUuid()); + job.setAutoCleared(Boolean.TRUE.toString()); + job.setExternal(Boolean.TRUE.toString()); + job.setLastUpdated(System.currentTimeMillis()); + job.setStartTime(vo.getCreated().getTime()); + JobInfo.Status status = JobInfo.Status.values()[vo.getStatus()]; + Long endTime = System.currentTimeMillis(); + if (status == JobInfo.Status.SUCCEEDED) { + job.setStatus("finished"); + job.setEndTime(System.currentTimeMillis()); + } else if (status == JobInfo.Status.FAILED) { + job.setStatus(status.name().toLowerCase()); + } else if (status == JobInfo.Status.CANCELLED) { + job.setStatus("aborted"); + } else { + job.setStatus("started"); + endTime = null; + } + if (endTime != null) { + job.setEndTime(endTime); + } + job.setOwner(Ref.of(basePath + "/api/users/" + vo.getUserUuid(), vo.getUserUuid())); + job.setDescription("Something"); + job.setLink(Collections.emptyList()); + return job; + } + + public static List toJobList(List vos) { + return vos.stream().map(AsyncJobJoinVOToJobConverter::toJob).collect(Collectors.toList()); + } + + protected static void fillAction(final ResourceAction 
action, final AsyncJobJoinVO vo) { + final String basePath = VeeamControlService.ContextPath.value(); + action.setJob(Ref.of(basePath + JobsRouteHandler.BASE_ROUTE + vo.getUuid(), vo.getUuid())); + action.setStatus("complete"); + } + + public static VmAction toVmAction(final AsyncJobJoinVO vo, final UserVmJoinVO vm) { + VmAction action = new VmAction(); + fillAction(action, vo); + action.setVm(UserVmJoinVOToVmConverter.toVm(vm, null, null, null, null, null, false)); + return action; + } + + public static ResourceAction toAction(final AsyncJobJoinVO vo) { + VmAction action = new VmAction(); + fillAction(action, vo); + return action; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/BackupVOToBackupConverter.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/BackupVOToBackupConverter.java new file mode 100644 index 000000000000..2f2b40908e89 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/BackupVOToBackupConverter.java @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.converter; + +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; + +import org.apache.cloudstack.backup.BackupVO; +import org.apache.cloudstack.veeam.VeeamControlService; +import org.apache.cloudstack.veeam.api.ApiRouteHandler; +import org.apache.cloudstack.veeam.api.VmsRouteHandler; +import org.apache.cloudstack.veeam.api.dto.Backup; +import org.apache.cloudstack.veeam.api.dto.Disk; +import org.apache.cloudstack.veeam.api.dto.Host; +import org.apache.cloudstack.veeam.api.dto.NamedList; +import org.apache.cloudstack.veeam.api.dto.Vm; + +import com.cloud.api.query.vo.HostJoinVO; +import com.cloud.vm.UserVmVO; + +public class BackupVOToBackupConverter { + + public static Backup toBackup(final BackupVO backupVO, final Function vmResolver, + final Function hostResolver, final Function> disksResolver) { + Backup backup = new Backup(); + final String basePath = VeeamControlService.ContextPath.value(); + backup.setHref(basePath + VmsRouteHandler.BASE_ROUTE + "/backups/" + backupVO.getUuid()); + backup.setId(backupVO.getUuid()); + backup.setName(backupVO.getName()); + backup.setDescription(backupVO.getDescription()); + backup.setCreationDate(backupVO.getDate().getTime()); + backup.setPhase(mapStatusToPhase(backupVO.getStatus())); + if (backupVO.getFromCheckpointId() != null) { + backup.setFromCheckpointId(backupVO.getFromCheckpointId()); + } + if (backupVO.getToCheckpointId() != null) { + backup.setToCheckpointId(backupVO.getToCheckpointId()); + } + if (vmResolver != null) { + final UserVmVO vmVO = vmResolver.apply(backupVO.getVmId()); + if (vmVO != null) { + backup.setVm(Vm.of(basePath + VmsRouteHandler.BASE_ROUTE + "/" + vmVO.getUuid(), vmVO.getUuid())); + } + } + if (backupVO.getHostId() != null && hostResolver != null) { + final HostJoinVO hostVO = hostResolver.apply(backupVO.getHostId()); + if (hostVO != null) { + backup.setHost(Host.of(basePath + ApiRouteHandler.BASE_ROUTE + "/" 
+ hostVO.getUuid(), hostVO.getUuid())); + } + } + if (disksResolver != null) { + List disks = disksResolver.apply(backupVO); + backup.setDisks(NamedList.of("disks", disks)); + } + return backup; + } + + public static List toBackupList(final List backupVOs, final Function vmResolver, + final Function hostResolver) { + return backupVOs + .stream() + .map(backupVO -> toBackup(backupVO, vmResolver, hostResolver, null)) + .collect(Collectors.toList()); + } + + private static String mapStatusToPhase(final BackupVO.Status status) { + switch (status) { + case Allocated: + case Queued: + return "initializing"; + case BackingUp: + return "starting"; + case ReadyForTransfer: + return "ready"; + case FinalizingTransfer: + return "finalizing"; + case Restoring: + case BackedUp: + return "succeeded"; + } + return "failed"; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/ClusterVOToClusterConverter.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/ClusterVOToClusterConverter.java new file mode 100644 index 000000000000..42b2233393da --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/ClusterVOToClusterConverter.java @@ -0,0 +1,155 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.veeam.api.converter;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import org.apache.cloudstack.veeam.VeeamControlService;
+import org.apache.cloudstack.veeam.api.ClustersRouteHandler;
+import org.apache.cloudstack.veeam.api.DataCentersRouteHandler;
+import org.apache.cloudstack.veeam.api.dto.Cluster;
+import org.apache.cloudstack.veeam.api.dto.Cpu;
+import org.apache.cloudstack.veeam.api.dto.Link;
+import org.apache.cloudstack.veeam.api.dto.Ref;
+import org.apache.cloudstack.veeam.api.dto.Version;
+import org.apache.cloudstack.veeam.api.dto.Vm;
+
+import com.cloud.api.query.vo.DataCenterJoinVO;
+import com.cloud.dc.ClusterVO;
+import com.cloud.utils.UuidUtils;
+
+/**
+ * Maps a CloudStack ClusterVO onto an oVirt-style Cluster DTO.
+ * Most feature flags below are fixed, best-effort defaults — they do not
+ * reflect actual CloudStack cluster state; only id/name/arch/zone are real.
+ */
+public class ClusterVOToClusterConverter {
+    // dataCenterResolver maps the cluster's zone id to its DataCenterJoinVO; may be null.
+    public static Cluster toCluster(final ClusterVO vo, final Function dataCenterResolver) {
+        final Cluster c = new Cluster();
+        final String basePath = VeeamControlService.ContextPath.value();
+        final String clusterId = vo.getUuid();
+        c.setId(clusterId);
+        c.setHref(basePath + ClustersRouteHandler.BASE_ROUTE + "/" + clusterId);
+
+        c.setName(vo.getName());
+
+        // Static capability flags — NOTE(review): hardcoded defaults, not derived
+        // from the actual cluster; confirm consumers tolerate these values.
+        c.setBallooningEnabled("true");
+        c.setBiosType(Vm.Bios.getDefault().getType());
+        c.setFipsMode("disabled");
+        c.setFirewallType("firewalld");
+        c.setGlusterService("false");
+        c.setHaReservation("false");
+        c.setSwitchType("legacy");
+        c.setThreadsAsCores("false");
+        c.setTrustedService("false");
+        c.setTunnelMigration("false");
+        c.setUpgradeInProgress("false");
+        c.setUpgradePercentComplete("0");
+        c.setVirtService("true");
+        c.setVncEncryption("false");
+
+        // --- cpu (best-effort defaults)
+        final Cpu cpu = new Cpu();
+        cpu.setArchitecture(vo.getArch().getType());
+        cpu.setType(vo.getArch().getType()); // replace if you can detect host cpu model
+        c.setCpu(cpu);
+
+        final Version ver = Version.fromPackageAndCSVersion(false);
+        c.setVersion(ver);
+
+        // --- ksm / memory policy (defaults)
+        c.setKsm(new Cluster.Ksm());
+        c.getKsm().enabled = "true";
+        c.getKsm().mergeAcrossNodes = "true";
+
+        c.setMemoryPolicy(new Cluster.MemoryPolicy());
+        c.getMemoryPolicy().overCommit = new Cluster.OverCommit();
+        c.getMemoryPolicy().overCommit.percent = "100";
+        c.getMemoryPolicy().transparentHugepages = new Cluster.TransparentHugepages();
+        c.getMemoryPolicy().transparentHugepages.enabled = "true";
+
+        // --- migration defaults
+        c.setMigration(new Cluster.Migration());
+        c.getMigration().autoConverge = "inherit";
+        c.getMigration().bandwidth = new Cluster.Bandwidth();
+        c.getMigration().bandwidth.assignmentMethod = "auto";
+        c.getMigration().compressed = "inherit";
+        c.getMigration().encrypted = "inherit";
+        c.getMigration().parallelMigrationsPolicy = "disabled";
+        // policy ref (dummy but valid shape)
+        c.getMigration().policy = Ref.of(basePath + "/migrationpolicies/" + stableUuid("migrationpolicy:default"),
+                stableUuid("migrationpolicy:default")
+        );
+
+        // --- rng sources
+        c.setRequiredRngSources(new Cluster.RequiredRngSources());
+        c.getRequiredRngSources().requiredRngSource = Collections.singletonList("urandom");
+
+        // --- error handling
+        c.setErrorHandling(new Cluster.ErrorHandling());
+        c.getErrorHandling().onError = "migrate";
+
+        // --- fencing policy defaults
+        c.setFencingPolicy(new Cluster.FencingPolicy());
+        c.getFencingPolicy().enabled = "true";
+        c.getFencingPolicy().skipIfConnectivityBroken = new Cluster.SkipIfConnectivityBroken();
+        c.getFencingPolicy().skipIfConnectivityBroken.enabled = "false";
+        c.getFencingPolicy().skipIfConnectivityBroken.threshold = "50";
+        c.getFencingPolicy().skipIfGlusterBricksUp = "false";
+        c.getFencingPolicy().skipIfGlusterQuorumNotMet = "false";
+        c.getFencingPolicy().skipIfSdActive = new Cluster.SkipIfSdActive();
+        c.getFencingPolicy().skipIfSdActive.enabled = "false";
+
+        // --- scheduling policy props (optional; dummy ok)
+        c.setCustomSchedulingPolicyProperties(new Cluster.CustomSchedulingPolicyProperties());
+        final Cluster.Property p1 = new Cluster.Property(); p1.name = "HighUtilization"; p1.value = "80";
+        final Cluster.Property p2 = new Cluster.Property(); p2.name = "CpuOverCommitDurationMinutes"; p2.value = "2";
+        c.getCustomSchedulingPolicyProperties().property = List.of(p1, p2);
+
+        // --- data_center ref mapping (CloudStack cluster -> pod -> zone)
+        if (dataCenterResolver != null) {
+            final DataCenterJoinVO zone = dataCenterResolver.apply(vo.getDataCenterId());
+            if (zone != null) {
+                c.setDataCenter(Ref.of(basePath + DataCentersRouteHandler.BASE_ROUTE + "/" + zone.getUuid(), zone.getUuid()));
+            }
+        }
+
+        // --- mac pool & scheduling policy refs (dummy but consistent)
+        c.setMacPool(Ref.of(basePath + "/macpools/" + stableUuid("macpool:default"),
+                stableUuid("macpool:default")));
+        c.setSchedulingPolicy(Ref.of(basePath + "/schedulingpolicies/" + stableUuid("schedpolicy:default"),
+                stableUuid("schedpolicy:default")));
+
+        // --- related links (optional)
+        c.setLink(List.of(
+                Link.of("networks", c.getHref() + "/networks")
+        ));
+
+        return c;
+    }
+
+    public static List toClusterList(final List voList,
+                                     final Function dataCenterResolver) {
+        return voList.stream()
+                .map(vo -> toCluster(vo, dataCenterResolver))
+                .collect(Collectors.toList());
+    }
+
+    private static String stableUuid(final String key) {
+        // deterministic UUID, so the same ClusterVO maps to same "ovirt id" every time
+        return UuidUtils.nameUUIDFromBytes(key.getBytes()).toString();
+    }
+}
diff --git
a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/DataCenterJoinVOToDataCenterConverter.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/DataCenterJoinVOToDataCenterConverter.java new file mode 100644 index 000000000000..659e0e1f5a83 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/DataCenterJoinVOToDataCenterConverter.java @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.converter; + +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +import org.apache.cloudstack.veeam.VeeamControlService; +import org.apache.cloudstack.veeam.api.DataCentersRouteHandler; +import org.apache.cloudstack.veeam.api.dto.DataCenter; +import org.apache.cloudstack.veeam.api.dto.Link; +import org.apache.cloudstack.veeam.api.dto.Ref; +import org.apache.cloudstack.veeam.api.dto.SupportedVersions; +import org.apache.cloudstack.veeam.api.dto.Version; + +import com.cloud.api.query.vo.DataCenterJoinVO; +import com.cloud.org.Grouping; + +public class DataCenterJoinVOToDataCenterConverter { + public static DataCenter toDataCenter(final DataCenterJoinVO zone) { + final String id = zone.getUuid(); + final String basePath = VeeamControlService.ContextPath.value(); + final String href = basePath + DataCentersRouteHandler.BASE_ROUTE + DataCentersRouteHandler.BASE_ROUTE + "/" + id; + + final DataCenter dc = new DataCenter(); + + // ---- Identity ---- + dc.setId(id); + dc.setHref(href); + dc.setName(zone.getName()); + dc.setDescription(zone.getDescription()); + + // ---- State ---- + dc.setStatus(Grouping.AllocationState.Enabled.equals(zone.getAllocationState()) ? 
"up" : "down"); + dc.setLocal("false"); + dc.setQuotaMode("disabled"); + dc.setStorageFormat("v5"); + + // ---- Versions ---- + final Version ver = Version.fromPackageAndCSVersion(false); + dc.setVersion(ver); + dc.setSupportedVersions(new SupportedVersions(List.of(ver))); + + // ---- mac_pool (static placeholder) ---- + dc.setMacPool(Ref.of(basePath + "/macpools/default", "default")); + + // ---- Related links ---- + dc.link = Arrays.asList( + Link.of(href + "/clusters", "clusters"), + Link.of(href + "/networks", "networks"), + Link.of(href + "/storagedomains", "storagedomains") + ); + + return dc; + } + + public static List toDCList(final List srcList) { + return srcList.stream() + .map(DataCenterJoinVOToDataCenterConverter::toDataCenter) + .collect(Collectors.toList()); + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/HostJoinVOToHostConverter.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/HostJoinVOToHostConverter.java new file mode 100644 index 000000000000..d8230fddc62a --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/HostJoinVOToHostConverter.java @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.veeam.api.converter;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import org.apache.cloudstack.veeam.VeeamControlService;
+import org.apache.cloudstack.veeam.api.ClustersRouteHandler;
+import org.apache.cloudstack.veeam.api.HostsRouteHandler;
+import org.apache.cloudstack.veeam.api.dto.Cpu;
+import org.apache.cloudstack.veeam.api.dto.Host;
+import org.apache.cloudstack.veeam.api.dto.Ref;
+import org.apache.cloudstack.veeam.api.dto.Topology;
+
+import com.cloud.api.query.vo.HostJoinVO;
+import com.cloud.host.Status;
+import com.cloud.resource.ResourceState;
+
+public class HostJoinVOToHostConverter {
+
+    /**
+     * Convert CloudStack HostJoinVO -> oVirt-like Host.
+     *
+     * Identity, address, status, cluster ref, CPU topology and memory come from
+     * the VO; the remaining flags (type, numa, kdump, ...) are fixed defaults.
+     */
+    public static Host toHost(final HostJoinVO vo) {
+        final Host h = new Host();
+
+        final String hostUuid = vo.getUuid();
+
+        h.setId(hostUuid);
+        final String basePath = VeeamControlService.ContextPath.value();
+        h.setHref(basePath + HostsRouteHandler.BASE_ROUTE + "/" + hostUuid);
+
+        // Fall back to a synthetic name when the VO carries none.
+        final String name = vo.getName() != null ? vo.getName() : ("host-" + hostUuid);
+        h.setName(name);
+
+        String addr = vo.getPrivateIpAddress();
+        h.setAddress(addr);
+
+        h.setStatus(mapStatus(vo));
+        h.setExternalStatus("ok");
+
+        // --- cluster ---
+        final String clusterUuid = vo.getClusterUuid();
+        h.setCluster(Ref.of(basePath + ClustersRouteHandler.BASE_ROUTE + "/" + clusterUuid, clusterUuid));
+
+        // --- CPU ---
+        // NOTE(review): assumes vo.getSpeed() fits in an int (Math.toIntExact
+        // throws otherwise) — presumably MHz; confirm the unit expected by Cpu.
+        final Cpu cpu = new Cpu();
+        cpu.setSpeed(String.valueOf(Math.toIntExact(vo.getSpeed())));
+        // Topology: sockets x cores, threads hardcoded to 1.
+        final Topology topo = new Topology(vo.getCpuSockets(), vo.getCpus(), 1);
+        cpu.setTopology(topo);
+        h.setCpu(cpu);
+
+        // --- Memory ---
+        h.setMemory(String.valueOf(vo.getTotalMemory()));
+        // Schedulable memory = total minus what is already used.
+        h.setMaxSchedulingMemory(String.valueOf(vo.getTotalMemory() - vo.getMemUsedCapacity()));
+
+        // Static defaults below — not derived from the VO.
+        h.setType("ovirt_node");
+        h.setAutoNumaStatus("unknown");
+        h.setKdumpStatus("disabled");
+        h.setNumaSupported("false");
+        h.setReinstallationRequired("false");
+        h.setUpdateAvailable("false");
+
+
+        h.setActions(null);
+        h.setLink(Collections.emptyList());
+
+        return h;
+    }
+
+    public static List toHostList(final List vos) {
+        return vos.stream().map(HostJoinVOToHostConverter::toHost).collect(Collectors.toList());
+    }
+
+    // Maintenance wins over agent status; "up" additionally requires the
+    // resource to be Enabled; anything else reads as "down".
+    private static String mapStatus(final HostJoinVO vo) {
+        if (vo.isInMaintenanceStates()) {
+            return "maintenance";
+        }
+        if (Status.Up.equals(vo.getStatus()) &&
+                ResourceState.Enabled.equals(vo.getResourceState())) {
+            return "up";
+        }
+        return "down";
+    }
+}
diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/ImageTransferVOToImageTransferConverter.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/ImageTransferVOToImageTransferConverter.java
new file mode 100644
index 000000000000..084f644d317f
--- /dev/null
+++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/ImageTransferVOToImageTransferConverter.java
@@ -0,0 +1,90 @@
+// Licensed to
the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.veeam.api.converter;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import org.apache.cloudstack.backup.ImageTransferVO;
+import org.apache.cloudstack.veeam.VeeamControlService;
+import org.apache.cloudstack.veeam.api.DisksRouteHandler;
+import org.apache.cloudstack.veeam.api.HostsRouteHandler;
+import org.apache.cloudstack.veeam.api.ImageTransfersRouteHandler;
+import org.apache.cloudstack.veeam.api.dto.ImageTransfer;
+import org.apache.cloudstack.veeam.api.dto.Link;
+import org.apache.cloudstack.veeam.api.dto.NamedList;
+import org.apache.cloudstack.veeam.api.dto.Ref;
+
+import com.cloud.api.query.vo.HostJoinVO;
+import com.cloud.api.query.vo.VolumeJoinVO;
+
+/**
+ * Maps an ImageTransferVO onto the oVirt-style ImageTransfer DTO, including
+ * host/disk references (via the supplied resolvers, which may be null) and
+ * the "cancel"/"finalize" action links.
+ */
+public class ImageTransferVOToImageTransferConverter {
+    public static ImageTransfer toImageTransfer(ImageTransferVO vo, final Function hostResolver,
+                                                final Function volumeResolver) {
+        ImageTransfer imageTransfer = new ImageTransfer();
+        final String basePath = VeeamControlService.ContextPath.value();
+        imageTransfer.setId(vo.getUuid());
+        imageTransfer.setHref(basePath + ImageTransfersRouteHandler.BASE_ROUTE + "/" + vo.getUuid());
+        // NOTE(review): a transfer at exactly 0% or 100% progress is reported
+        // inactive — confirm this matches the intended "active" semantics.
+        imageTransfer.setActive(Boolean.toString(vo.getProgress() != null && vo.getProgress() > 0 && vo.getProgress() < 100));
+        imageTransfer.setDirection(vo.getDirection().name());
+        imageTransfer.setFormat("cow");
+        imageTransfer.setInactivityTimeout(Integer.toString(3600));
+        // Default phase is the VO phase name; terminal phases are then remapped
+        // to the oVirt vocabulary below.
+        imageTransfer.setPhase(vo.getPhase().name());
+        if (org.apache.cloudstack.backup.ImageTransfer.Phase.finished.equals(vo.getPhase())) {
+            imageTransfer.setPhase("finished_success");
+        } else if (org.apache.cloudstack.backup.ImageTransfer.Phase.failed.equals(vo.getPhase())) {
+            imageTransfer.setPhase("finished_failed");
+        }
+        imageTransfer.setProxyUrl(vo.getTransferUrl());
+        imageTransfer.setShallow(Boolean.toString(false));
+        imageTransfer.setTimeoutPolicy("legacy");
+        imageTransfer.setTransferUrl(vo.getTransferUrl());
+        // NOTE(review): transferred byte count is hardcoded to 0 — wire up the
+        // real counter if the VO tracks it.
+        imageTransfer.setTransferred(Long.toString(0));
+        if (hostResolver != null) {
+            HostJoinVO hostVo = hostResolver.apply(vo.getHostId());
+            if (hostVo != null) {
+                imageTransfer.setHost(Ref.of(basePath + HostsRouteHandler.BASE_ROUTE + "/" + hostVo.getUuid(), hostVo.getUuid()));
+            }
+        }
+        if (volumeResolver != null) {
+            VolumeJoinVO volumeVo = volumeResolver.apply(vo.getDiskId());
+            if (volumeVo != null) {
+                imageTransfer.setDisk(Ref.of(basePath + DisksRouteHandler.BASE_ROUTE + "/" + volumeVo.getUuid(), volumeVo.getUuid()));
+                imageTransfer.setImage(Ref.of(null, volumeVo.getUuid()));
+            }
+        }
+        final List links = new ArrayList<>();
+        links.add(getLink(imageTransfer, "cancel"));
+        links.add(getLink(imageTransfer, "finalize"));
+        imageTransfer.setActions(NamedList.of("link", links));
+        return imageTransfer;
+    }
+
+    public static List toImageTransferList(List vos,
+                                           final Function hostResolver,
+                                           final Function volumeResolver) {
+        return vos.stream().map(vo -> toImageTransfer(vo, hostResolver, volumeResolver))
+                .collect(Collectors.toList());
+    }
+
+    // Builds an action link "<transfer href>/<rel>" with the given rel name.
+    private static Link getLink(ImageTransfer it, String rel) {
+        return Link.of(rel, it.getHref() + "/" + rel);
+    }
+}
diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/NetworkVOToNetworkConverter.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/NetworkVOToNetworkConverter.java new file mode 100644 index 000000000000..82198997e7db --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/NetworkVOToNetworkConverter.java @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package org.apache.cloudstack.veeam.api.converter;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import org.apache.cloudstack.veeam.VeeamControlService;
+import org.apache.cloudstack.veeam.api.DataCentersRouteHandler;
+import org.apache.cloudstack.veeam.api.NetworksRouteHandler;
+import org.apache.cloudstack.veeam.api.dto.NamedList;
+import org.apache.cloudstack.veeam.api.dto.Network;
+import org.apache.cloudstack.veeam.api.dto.Ref;
+
+import com.cloud.api.query.vo.DataCenterJoinVO;
+import com.cloud.network.dao.NetworkVO;
+
+/**
+ * Maps a CloudStack NetworkVO onto the oVirt-style Network DTO.
+ * The data-center reference is resolved from the network's zone id via the
+ * supplied resolver (may be null, in which case the ref is left unset).
+ */
+public class NetworkVOToNetworkConverter {
+    public static Network toNetwork(final NetworkVO vo, final Function dcResolver) {
+        final Network dto = new Network();
+
+        final String networkUuid = vo.getUuid();
+        dto.setId(networkUuid);
+        final String basePath = VeeamControlService.ContextPath.value();
+        dto.setHref(basePath + NetworksRouteHandler.BASE_ROUTE + "/" + networkUuid);
+
+        // Synthesize a name from the traffic type when the VO carries none.
+        String name = vo.getName() != null ? vo.getName() : vo.getTrafficType().name() + "-" + networkUuid;
+        dto.setName(name);
+        dto.setDescription(vo.getDisplayText());
+        dto.setComment("");
+
+        // MTU falls back to 0 when the VO does not define one.
+        dto.setMtu(String.valueOf(vo.getPrivateMtu() != null ? vo.getPrivateMtu() : 0));
+        dto.setPortIsolation("false");
+        dto.setStp("false");
+
+        dto.setUsages(NamedList.of("usage", List.of("vm")));
+
+        // Best-effort mapping for vdsm_name
+        dto.setVdsmName(dto.getName());
+
+        if (dcResolver != null) {
+            final DataCenterJoinVO dc = dcResolver.apply(vo.getDataCenterId());
+            if (dc != null) {
+                final String dcUuid = dc.getUuid();
+                if (dcUuid != null && !dcUuid.isEmpty()) {
+                    dto.setDataCenter(Ref.of(basePath + DataCentersRouteHandler.BASE_ROUTE + "/" + dcUuid, dcUuid));
+                }
+            }
+        }
+
+        dto.setLink(Collections.emptyList());
+
+        return dto;
+    }
+
+    public static List toNetworkList(final List vos,
+                                     final Function dcResolver) {
+        return vos.stream()
+                .map(vo -> toNetwork(vo, dcResolver))
+                .collect(Collectors.toList());
+    }
+}
diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/NetworkVOToVnicProfileConverter.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/NetworkVOToVnicProfileConverter.java
new file mode 100644
index 000000000000..af10d586c89a
--- /dev/null
+++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/NetworkVOToVnicProfileConverter.java
@@ -0,0 +1,65 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.
package org.apache.cloudstack.veeam.api.converter;

import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;

import org.apache.cloudstack.veeam.VeeamControlService;
import org.apache.cloudstack.veeam.api.DataCentersRouteHandler;
import org.apache.cloudstack.veeam.api.NetworksRouteHandler;
import org.apache.cloudstack.veeam.api.VnicProfilesRouteHandler;
import org.apache.cloudstack.veeam.api.dto.Ref;
import org.apache.cloudstack.veeam.api.dto.VnicProfile;

import com.cloud.api.query.vo.DataCenterJoinVO;
import com.cloud.network.dao.NetworkVO;

/**
 * Maps CloudStack {@link NetworkVO} entities onto oVirt-style {@link VnicProfile} DTOs.
 * CloudStack has no separate vNIC-profile concept, so the network itself (and its uuid)
 * stands in as the profile.
 */
public class NetworkVOToVnicProfileConverter {

    /**
     * Converts a single network into its vNIC-profile representation.
     *
     * @param vo         the CloudStack network record
     * @param dcResolver resolves a data-center id to its join VO; when {@code null}
     *                   the data-center reference is omitted
     * @return the populated vNIC-profile DTO
     */
    public static VnicProfile toVnicProfile(final NetworkVO vo, final Function<Long, DataCenterJoinVO> dcResolver) {
        final VnicProfile vnicProfile = new VnicProfile();

        final String networkUuid = vo.getUuid();
        final String basePath = VeeamControlService.ContextPath.value();
        // Fix: the original set the id twice; set it exactly once.
        vnicProfile.setId(networkUuid);
        vnicProfile.setHref(basePath + VnicProfilesRouteHandler.BASE_ROUTE + "/" + networkUuid);

        // Networks may be unnamed; fall back to "<trafficType>-<uuid>".
        String name = vo.getName() != null ? vo.getName() : vo.getTrafficType().name() + "-" + networkUuid;
        vnicProfile.setName(name);
        vnicProfile.setNetwork(Ref.of(basePath + NetworksRouteHandler.BASE_ROUTE + "/" + networkUuid, networkUuid));
        vnicProfile.setDescription(vo.getDisplayText());

        if (dcResolver != null) {
            final DataCenterJoinVO dc = dcResolver.apply(vo.getDataCenterId());
            if (dc != null) {
                final String dcUuid = dc.getUuid();
                if (dcUuid != null && !dcUuid.isEmpty()) {
                    vnicProfile.setDataCenter(Ref.of(basePath + DataCentersRouteHandler.BASE_ROUTE + "/" + dcUuid, dcUuid));
                }
            }
        }
        return vnicProfile;
    }

    /**
     * Bulk variant of {@link #toVnicProfile(NetworkVO, Function)}.
     *
     * @param vos        networks to convert
     * @param dcResolver resolves data-center ids; may be {@code null}
     * @return DTOs in the same order as the input list
     */
    public static List<VnicProfile> toVnicProfileList(final List<NetworkVO> vos, final Function<Long, DataCenterJoinVO> dcResolver) {
        return vos.stream()
                .map(vo -> toVnicProfile(vo, dcResolver))
                .collect(Collectors.toList());
    }
}
package org.apache.cloudstack.veeam.api.converter;

import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;

import org.apache.cloudstack.veeam.VeeamControlService;
import org.apache.cloudstack.veeam.api.VmsRouteHandler;
import org.apache.cloudstack.veeam.api.VnicProfilesRouteHandler;
import org.apache.cloudstack.veeam.api.dto.Ip;
import org.apache.cloudstack.veeam.api.dto.Mac;
import org.apache.cloudstack.veeam.api.dto.NamedList;
import org.apache.cloudstack.veeam.api.dto.Nic;
import org.apache.cloudstack.veeam.api.dto.Ref;
import org.apache.cloudstack.veeam.api.dto.ReportedDevice;
import org.apache.cloudstack.veeam.api.dto.Vm;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.jetbrains.annotations.NotNull;

import com.cloud.network.dao.NetworkVO;
import com.cloud.vm.NicVO;

/**
 * Maps CloudStack {@link NicVO} entities onto oVirt-style {@link Nic} DTOs,
 * including the nested reported-device / IP information.
 */
public class NicVOToNicConverter {
    private static final String DEFAULT_INTERFACE_TYPE = "virtio";
    private static final String DEFAULT_REPORTED_DEVICE_NAME = "eth0";

    /**
     * Converts a single NIC.
     *
     * @param vo              the CloudStack NIC record
     * @param vmUuid          uuid of the owning VM; may be blank, in which case
     *                        the VM reference (and VM-relative hrefs) are omitted
     * @param networkResolver resolves a network id to its VO for the vNIC-profile
     *                        reference; may be {@code null}
     * @return the populated NIC DTO
     */
    public static Nic toNic(final NicVO vo, final String vmUuid, final Function<Long, NetworkVO> networkResolver) {
        final String basePath = VeeamControlService.ContextPath.value();
        final Nic nic = new Nic();
        nic.setId(vo.getUuid());
        nic.setName(vo.getReserver());
        Mac mac = new Mac();
        mac.setAddress(vo.getMacAddress());
        nic.setMac(mac);
        nic.setLinked(Boolean.TRUE.toString());
        nic.setPlugged(Boolean.TRUE.toString());
        nic.setSynced(Boolean.TRUE.toString());
        if (StringUtils.isNotBlank(vmUuid)) {
            Vm vm = Vm.of(basePath + VmsRouteHandler.BASE_ROUTE + "/" + vmUuid, vmUuid);
            nic.setVm(vm);
            nic.setHref(vm.getHref() + "/nics/" + vo.getUuid());
        }
        nic.setInterfaceType(DEFAULT_INTERFACE_TYPE);
        ReportedDevice device = getReportedDevice(vo, mac, nic.getVm());
        nic.setReportedDevices(NamedList.of("reported_device", List.of(device)));
        if (networkResolver != null) {
            final NetworkVO network = networkResolver.apply(vo.getNetworkId());
            if (network != null) {
                nic.setVnicProfile(Ref.of(basePath + VnicProfilesRouteHandler.BASE_ROUTE + "/" + network.getUuid(), network.getUuid()));
            }
        }
        return nic;
    }

    /**
     * Builds the nested reported-device entry for a NIC.
     *
     * @param vo  the CloudStack NIC record
     * @param mac the MAC DTO already built for the NIC (shared, not copied)
     * @param vm  the owning VM reference; may be {@code null} when the caller has
     *            no VM uuid, in which case the VM-relative fields are left unset
     * @return the populated reported-device DTO (never {@code null})
     */
    @NotNull
    private static ReportedDevice getReportedDevice(NicVO vo, Mac mac, Vm vm) {
        ReportedDevice device = new ReportedDevice();
        device.setType("network");
        device.setId(vo.getUuid());
        device.setName(DEFAULT_REPORTED_DEVICE_NAME);
        device.setDescription(String.format("%s device", vo.getReserver()));
        device.setMac(mac);
        if (ObjectUtils.anyNotNull(vo.getIPv4Address(), vo.getIPv6Address())) {
            Ip ip = new Ip();
            // IPv4 takes precedence when both address families are present.
            if (vo.getIPv4Address() != null) {
                ip.setAddress(vo.getIPv4Address());
                ip.setGateway(vo.getIPv4Gateway());
                ip.setVersion("v4");
            } else if (vo.getIPv6Address() != null) {
                ip.setAddress(vo.getIPv6Address());
                ip.setGateway(vo.getIPv6Gateway());
                ip.setVersion("v6");
            }
            device.setIps(NamedList.of("ip", List.of(ip)));
        }
        // Fix: vm is null when the NIC is converted without a VM uuid; the original
        // dereferenced vm.getHref() unconditionally and threw an NPE in that case.
        if (vm != null) {
            device.setHref(vm.getHref() + "/reporteddevices/" + vo.getUuid());
            device.setVm(vm);
        }
        return device;
    }

    /**
     * Bulk variant of {@link #toNic(NicVO, String, Function)}.
     *
     * @param vos             NICs to convert
     * @param vmUuid          uuid of the owning VM; may be blank
     * @param networkResolver resolves network ids; may be {@code null}
     * @return DTOs in the same order as the input list
     */
    public static List<Nic> toNicList(final List<NicVO> vos, final String vmUuid, final Function<Long, NetworkVO> networkResolver) {
        return vos.stream()
                .map(vo -> toNic(vo, vmUuid, networkResolver))
                .collect(Collectors.toList());
    }
}
package org.apache.cloudstack.veeam.api.converter;

import java.util.List;
import java.util.stream.Collectors;

import org.apache.cloudstack.veeam.VeeamControlService;
import org.apache.cloudstack.veeam.api.TagsRouteHandler;
import org.apache.cloudstack.veeam.api.VmsRouteHandler;
import org.apache.cloudstack.veeam.api.dto.BaseDto;
import org.apache.cloudstack.veeam.api.dto.Ref;
import org.apache.cloudstack.veeam.api.dto.Tag;

import com.cloud.server.ResourceTag;
import com.cloud.tags.ResourceTagVO;

/**
 * Maps CloudStack {@link ResourceTagVO} entities onto oVirt-style {@link Tag} DTOs.
 * All converted tags are parented under a synthetic "root" tag with the zero uuid,
 * mirroring oVirt's tag hierarchy.
 */
public class ResourceTagVOToTagConverter {

    /**
     * Reference to the synthetic root tag (zero uuid).
     *
     * @return a {@link Ref} pointing at the root tag endpoint
     */
    public static Ref getRootTagRef() {
        String basePath = VeeamControlService.ContextPath.value();
        return Ref.of(basePath + TagsRouteHandler.BASE_ROUTE + "/" + BaseDto.ZERO_UUID, BaseDto.ZERO_UUID);
    }

    /**
     * Full DTO for the synthetic root tag.
     *
     * @return the root tag with id, name and href populated
     */
    public static Tag getRootTag() {
        Tag root = new Tag();
        root.setId(BaseDto.ZERO_UUID);
        root.setName("root");
        root.setHref(getRootTagRef().getHref());
        return root;
    }

    /**
     * Converts a single resource tag. The tag's value doubles as both the DTO id
     * and name; tags attached to user VMs additionally carry a VM reference.
     *
     * @param vo the CloudStack resource tag
     * @return the populated tag DTO, parented under the root tag
     */
    public static Tag toTag(ResourceTagVO vo) {
        final String basePath = VeeamControlService.ContextPath.value();
        final String tagId = vo.getValue();

        final Tag tag = new Tag();
        tag.setId(tagId);
        tag.setName(vo.getValue());
        tag.setDescription(String.format("Tag %s with value: %s", vo.getKey(), vo.getValue()));
        tag.setHref(basePath + TagsRouteHandler.BASE_ROUTE + "/" + tagId);
        // Only tags on user VMs carry a VM back-reference.
        if (ResourceTag.ResourceObjectType.UserVm.equals(vo.getResourceType())) {
            tag.setVm(Ref.of(basePath + VmsRouteHandler.BASE_ROUTE + "/" + vo.getResourceUuid(),
                    vo.getResourceUuid()));
        }
        tag.setParent(getRootTagRef());
        return tag;
    }

    /**
     * Bulk variant of {@link #toTag(ResourceTagVO)}.
     *
     * @param vos tags to convert
     * @return DTOs in the same order as the input list
     */
    public static List<Tag> toTags(List<ResourceTagVO> vos) {
        return vos.stream().map(ResourceTagVOToTagConverter::toTag).collect(Collectors.toList());
    }
}
package org.apache.cloudstack.veeam.api.converter;

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

import org.apache.cloudstack.veeam.VeeamControlService;
import org.apache.cloudstack.veeam.api.ApiRouteHandler;
import org.apache.cloudstack.veeam.api.DataCentersRouteHandler;
import org.apache.cloudstack.veeam.api.dto.DataCenter;
import org.apache.cloudstack.veeam.api.dto.Link;
import org.apache.cloudstack.veeam.api.dto.NamedList;
import org.apache.cloudstack.veeam.api.dto.Storage;
import org.apache.cloudstack.veeam.api.dto.StorageDomain;

import com.cloud.api.query.vo.ImageStoreJoinVO;
import com.cloud.api.query.vo.StoragePoolJoinVO;

/**
 * Maps CloudStack primary storage pools and secondary/image stores onto
 * oVirt-style {@link StorageDomain} DTOs (type {@code data} and {@code image}
 * respectively). Numeric attributes are serialised as strings to match the
 * oVirt wire format.
 */
public class StoreVOToStorageDomainConverter {

    /**
     * Primary storage -> oVirt storage_domain (type=data).
     *
     * @param pool the primary-storage join VO
     * @return the populated storage-domain DTO
     */
    public static StorageDomain toStorageDomain(final StoragePoolJoinVO pool) {
        final String basePath = VeeamControlService.ContextPath.value();

        final String id = pool.getUuid();

        StorageDomain sd = new StorageDomain();
        sd.setId(id);
        final String href = href(basePath, ApiRouteHandler.BASE_ROUTE + "/storagedomains/" + id);
        sd.setHref(href);

        sd.setName(pool.getName());

        // oVirt sample returns numbers as strings.
        sd.setAvailable(Long.toString(pool.getCapacityBytes() - pool.getUsedBytes()));
        sd.setUsed(Long.toString(pool.getUsedBytes()));
        sd.setCommitted(Long.toString(pool.getCapacityBytes()));

        sd.setType("data");
        sd.setStatus(mapPoolStatus(pool)); // "active"/"inactive"/"maintenance" (approx)
        sd.setMaster("true");  // CloudStack has no master-domain concept; stable default
        sd.setBackup("false");

        sd.setBlockSize("512"); // stable default unless it can be computed
        sd.setExternalStatus("ok");
        sd.setStorageFormat("v5");

        sd.setDiscardAfterDelete("false");
        sd.setWipeAfterDelete("false");
        sd.setSupportsDiscard("false");
        sd.setSupportsDiscardZeroesData("false");

        sd.setWarningLowSpaceIndicator("10");
        sd.setCriticalSpaceActionBlocker("5");

        // Nested storage block (best-effort type mapping).
        sd.setStorage(buildPrimaryStorage(pool));

        // Data-center attachment.
        String dcId = pool.getZoneUuid();
        DataCenter dc = new DataCenter();
        dc.setHref(href(basePath, DataCentersRouteHandler.BASE_ROUTE + "/" + dcId));
        dc.setId(dcId);
        sd.setDataCenters(NamedList.of("data_center", List.of(dc)));

        sd.setLink(defaultStorageDomainLinks(href, true, /*includeTemplates*/ true));

        return sd;
    }

    /**
     * Bulk variant of {@link #toStorageDomain(StoragePoolJoinVO)}.
     *
     * @param pools primary-storage pools to convert
     * @return DTOs in the same order as the input list
     */
    public static List<StorageDomain> toStorageDomainListFromPools(final List<StoragePoolJoinVO> pools) {
        return pools.stream().map(StoreVOToStorageDomainConverter::toStorageDomain).collect(Collectors.toList());
    }

    /**
     * Secondary/Image store -> oVirt storage_domain (type=image).
     *
     * @param store the image-store join VO
     * @return the populated storage-domain DTO
     */
    public static StorageDomain toStorageDomain(final ImageStoreJoinVO store) {
        final String basePath = VeeamControlService.ContextPath.value();

        final String id = store.getUuid();

        StorageDomain sd = new StorageDomain();
        sd.setId(id);
        final String href = href(basePath, ApiRouteHandler.BASE_ROUTE + "/storagedomains/" + id);
        sd.setHref(href);

        sd.setName(store.getName());

        // Image repositories usually lack capacity figures; keep "0" or omit (null),
        // matching oVirt's glance example which omitted available/used.
        sd.setCommitted("0");
        sd.setAvailable(null);
        sd.setUsed(null);

        sd.setType("image");
        sd.setStatus("unattached"); // matches the glance-like repo sample
        sd.setMaster("false");
        sd.setBackup("false");

        sd.setBlockSize("512");
        sd.setExternalStatus("ok");
        sd.setStorageFormat("v1");

        sd.setDiscardAfterDelete("false");
        sd.setWipeAfterDelete("false");
        sd.setSupportsDiscard("false");
        sd.setSupportsDiscardZeroesData("false");

        sd.setWarningLowSpaceIndicator("0");
        sd.setCriticalSpaceActionBlocker("0");

        sd.setStorage(buildImageStoreStorage(store));

        // Optionally include dc attachment.
        String dcId = store.getZoneUuid();
        DataCenter dc = new DataCenter();
        dc.setHref(href(basePath, DataCentersRouteHandler.BASE_ROUTE + "/" + dcId));
        dc.setId(dcId);
        sd.setDataCenters(NamedList.of("data_center", List.of(dc)));

        sd.setLink(defaultStorageDomainLinks(href, false, /*includeTemplates*/ false));

        return sd;
    }

    /**
     * Bulk variant of {@link #toStorageDomain(ImageStoreJoinVO)}.
     *
     * @param stores image stores to convert
     * @return DTOs in the same order as the input list
     */
    public static List<StorageDomain> toStorageDomainListFromStores(final List<ImageStoreJoinVO> stores) {
        return stores.stream().map(StoreVOToStorageDomainConverter::toStorageDomain).collect(Collectors.toList());
    }

    // ----------- Helpers -----------

    /** Builds the nested storage element for a primary pool (best-effort). */
    private static Storage buildPrimaryStorage(StoragePoolJoinVO pool) {
        Storage st = new Storage();
        st.setType(mapPrimaryStorageType(pool));

        // NOTE(review): address/path could be parsed from the pool URL
        // (e.g. nfs://10.0.32.4/path or 10.0.32.4:/path) if the VO exposes it;
        // until then keep empty strings like oVirt does.
        if ("nfs".equals(st.getType())) {
            st.setAddress("");
            st.setPath("");
            st.setMountOptions("");
            st.setNfsVersion("auto");
        }
        return st;
    }

    /** Builds the nested storage element for an image store (best-effort). */
    private static Storage buildImageStoreStorage(ImageStoreJoinVO store) {
        Storage st = new Storage();

        // Matches the oVirt sample: a glance store => type=glance. Map based on
        // provider/protocol if an "nfs" secondary mapping is desired instead.
        st.setType(mapImageStorageType(store));

        if ("nfs".equals(st.getType())) {
            st.setAddress("");
            st.setPath("");
            st.setMountOptions("");
            st.setNfsVersion("auto");
        }
        return st;
    }

    /**
     * Standard set of sub-resource links for a storage domain, in stable order.
     *
     * @param domainHref       href of the storage domain the links hang off
     * @param includeDisks     whether to expose disks/storageconnections rels
     * @param includeTemplates whether to expose templates/vms rels (else images)
     * @return the link list
     */
    private static List<Link> defaultStorageDomainLinks(String domainHref, boolean includeDisks, boolean includeTemplates) {
        List<Link> common = new ArrayList<>();
        common.add(Link.of("diskprofiles", href(domainHref, "/diskprofiles")));
        if (includeDisks) {
            common.add(Link.of("disks", href(domainHref, "/disks")));
            common.add(Link.of("storageconnections", href(domainHref, "/storageconnections")));
        }
        common.add(Link.of("permissions", href(domainHref, "/permissions")));
        if (includeTemplates) {
            common.add(Link.of("templates", href(domainHref, "/templates")));
            common.add(Link.of("vms", href(domainHref, "/vms")));
        } else {
            common.add(Link.of("images", href(domainHref, "/images")));
        }
        common.add(Link.of("disksnapshots", href(domainHref, "/disksnapshots")));
        return common;
    }

    /** Approximate mapping of the pool status to oVirt's active/maintenance/inactive. */
    private static String mapPoolStatus(StoragePoolJoinVO pool) {
        try {
            Object status = pool.getStatus(); // often StoragePoolStatus enum
            if (status != null) {
                String s = status.toString().toLowerCase();
                if (s.contains("up") || s.contains("enabled")) return "active";
                if (s.contains("maintenance")) return "maintenance";
            }
        } catch (Exception ignored) { }
        return "inactive";
    }

    /** Best-effort mapping of a CloudStack pool type to an oVirt storage type name. */
    private static String mapPrimaryStorageType(StoragePoolJoinVO pool) {
        try {
            Object t = pool.getPoolType(); // often StoragePoolType enum
            if (t != null) {
                String s = t.toString().toLowerCase();
                if (s.contains("networkfilesystem") || s.contains("nfs") || s.contains("sharedmountpoint")) return "nfs";
                if (s.contains("iscsi")) return "iscsi";
                if (s.contains("filesystem")) return "localfs";
                if (s.contains("rbd") || s.contains("ceph")) return "cinder"; // not perfect; stable choice
            }
        } catch (Exception ignored) { }
        return "unknown";
    }

    /** Best-effort mapping of an image-store provider to an oVirt storage type name. */
    private static String mapImageStorageType(ImageStoreJoinVO store) {
        // S3/NFS secondary stores may deserve a different mapping; the oVirt
        // sample uses "glance" for an image repo, so that is the default.
        try {
            String provider = store.getProviderName();
            if (provider != null && provider.toLowerCase().contains("glance")) return "glance";
        } catch (Exception ignored) { }
        return "glance";
    }

    /** Joins a base URL and path, avoiding a doubled slash. */
    private static String href(String baseUrl, String path) {
        if (baseUrl.endsWith("/")) baseUrl = baseUrl.substring(0, baseUrl.length() - 1);
        return baseUrl + path;
    }
}
package org.apache.cloudstack.veeam.api.converter;

import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.veeam.VeeamControlService;
import org.apache.cloudstack.veeam.api.ApiRouteHandler;
import org.apache.cloudstack.veeam.api.VmsRouteHandler;
import org.apache.cloudstack.veeam.api.dto.BaseDto;
import org.apache.cloudstack.veeam.api.dto.Cpu;
import org.apache.cloudstack.veeam.api.dto.DiskAttachment;
import org.apache.cloudstack.veeam.api.dto.NamedList;
import org.apache.cloudstack.veeam.api.dto.Nic;
import org.apache.cloudstack.veeam.api.dto.Os;
import org.apache.cloudstack.veeam.api.dto.OvfXmlUtil;
import org.apache.cloudstack.veeam.api.dto.Ref;
import org.apache.cloudstack.veeam.api.dto.Tag;
import org.apache.cloudstack.veeam.api.dto.Topology;
import org.apache.cloudstack.veeam.api.dto.Vm;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.StringUtils;

import com.cloud.api.query.vo.HostJoinVO;
import com.cloud.api.query.vo.UserVmJoinVO;
import com.cloud.vm.VirtualMachine;

/**
 * Maps CloudStack {@link UserVmJoinVO} records onto oVirt-style {@link Vm} DTOs.
 * Nested collections (disks, nics, tags, details) are supplied through resolver
 * functions so callers control which relations are loaded.
 */
public final class UserVmJoinVOToVmConverter {

    private UserVmJoinVOToVmConverter() {
    }

    /**
     * Convert CloudStack UserVmJoinVO -> oVirt-like Vm DTO.
     *
     * @param src             VM record to convert; {@code null} yields {@code null}
     * @param hostResolver    resolves a host id to its join VO; may be {@code null}
     * @param detailsResolver resolves VM id to its detail map; may be {@code null}
     * @param tagsResolver    resolves VM id to its tag DTOs; may be {@code null}
     * @param disksResolver   resolves VM id to disk attachments; may be {@code null}
     * @param nicsResolver    resolves the VM record to NIC DTOs; may be {@code null}
     * @param allContent      when true, embed the OVF configuration payload
     * @return the populated VM DTO, or {@code null} for a null input
     */
    public static Vm toVm(final UserVmJoinVO src,
                          final Function<Long, HostJoinVO> hostResolver,
                          final Function<Long, Map<String, String>> detailsResolver,
                          final Function<Long, List<Tag>> tagsResolver,
                          final Function<Long, List<DiskAttachment>> disksResolver,
                          final Function<UserVmJoinVO, List<Nic>> nicsResolver,
                          final boolean allContent) {
        if (src == null) {
            return null;
        }
        final String basePath = VeeamControlService.ContextPath.value();
        final Vm dst = new Vm();

        dst.setId(src.getUuid());
        dst.setName(StringUtils.firstNonBlank(src.getName(), src.getInstanceName()));
        // CloudStack has no VM "description"; displayName is the closest match.
        dst.setDescription(src.getDisplayName());
        dst.setHref(basePath + VmsRouteHandler.BASE_ROUTE + "/" + src.getUuid());
        dst.setStatus(mapStatus(src.getState()));
        dst.setCreationTime(src.getCreated().getTime());
        // Approximate start/stop timestamps from the last state-change time.
        final Date stateChangedAt = src.getLastUpdated() != null ? src.getLastUpdated() : src.getCreated();
        if ("down".equals(dst.getStatus())) {
            dst.setStopTime(stateChangedAt.getTime());
        }
        if ("up".equals(dst.getStatus())) {
            dst.setStartTime(stateChangedAt.getTime());
        }
        final Ref templateRef = buildRef(
                basePath + ApiRouteHandler.BASE_ROUTE,
                "templates",
                src.getTemplateUuid()
        );
        dst.setTemplate(templateRef);
        dst.setOriginalTemplate(templateRef);
        if (StringUtils.isNotBlank(src.getHostUuid())) {
            dst.setHost(buildRef(
                    basePath + ApiRouteHandler.BASE_ROUTE,
                    "hosts",
                    src.getHostUuid()));
        }
        // A resolver hit refines/overrides the host ref and adds the cluster ref;
        // a stopped VM falls back to its last known host.
        if (hostResolver != null) {
            final HostJoinVO hostVo = hostResolver.apply(src.getHostId() == null ? src.getLastHostId() : src.getHostId());
            if (hostVo != null) {
                dst.setHost(buildRef(
                        basePath + ApiRouteHandler.BASE_ROUTE,
                        "hosts",
                        hostVo.getUuid()));
                dst.setCluster(buildRef(
                        basePath + ApiRouteHandler.BASE_ROUTE,
                        "clusters",
                        hostVo.getClusterUuid()));
            }
        }

        // RAM size is in MiB; oVirt expects bytes serialised as a string.
        final String memoryBytes = String.valueOf(src.getRamSize() * 1024L * 1024L);
        dst.setMemory(memoryBytes);
        final Vm.MemoryPolicy memoryPolicy = new Vm.MemoryPolicy();
        memoryPolicy.setGuaranteed(memoryBytes);
        memoryPolicy.setMax(memoryBytes);
        memoryPolicy.setBallooning("false");
        dst.setMemoryPolicy(memoryPolicy);

        final Cpu cpu = new Cpu();
        cpu.setArchitecture(src.getArch());
        // Model vCPUs as sockets with one core and one thread each.
        cpu.setTopology(new Topology(src.getCpu(), 1, 1));
        dst.setCpu(cpu);

        final Os os = new Os();
        os.setType(src.getGuestOsDisplayName());
        final Os.Boot boot = new Os.Boot();
        boot.setDevices(NamedList.of("device", List.of("hd")));
        os.setBoot(boot);
        dst.setOs(os);

        final Vm.Bios bios = Vm.Bios.getDefault();
        Map<String, String> details = null;
        if (detailsResolver != null) {
            details = detailsResolver.apply(src.getId());
            // Switch the BIOS descriptor to UEFI when the VM detail says so.
            Vm.Bios.updateBios(bios, MapUtils.getString(details, ApiConstants.BootType.UEFI.toString()));
        }
        dst.setBios(bios);
        dst.setType("desktop");
        dst.setOrigin("ovirt");
        dst.setStateless("false");

        if (disksResolver != null) {
            dst.setDiskAttachments(NamedList.of("disk_attachment", disksResolver.apply(src.getId())));
        }

        if (nicsResolver != null) {
            dst.setNics(NamedList.of("nic", nicsResolver.apply(src)));
        }

        dst.setActions(NamedList.of("link", List.of(
                BaseDto.getActionLink("start", dst.getHref()),
                BaseDto.getActionLink("stop", dst.getHref()),
                BaseDto.getActionLink("shutdown", dst.getHref())
        )));
        dst.setLink(List.of(
                BaseDto.getActionLink("diskattachments", dst.getHref()),
                BaseDto.getActionLink("nics", dst.getHref()),
                BaseDto.getActionLink("reporteddevices", dst.getHref()),
                BaseDto.getActionLink("snapshots", dst.getHref())
        ));
        if (tagsResolver != null) {
            dst.setTags(NamedList.of("tag", tagsResolver.apply(src.getId())));
        }
        // The service offering stands in for an oVirt CPU profile.
        dst.setCpuProfile(Ref.of(
                basePath + ApiRouteHandler.BASE_ROUTE + "/cpuprofiles/" + src.getServiceOfferingUuid(),
                src.getServiceOfferingUuid()));
        if (allContent) {
            dst.setInitialization(getOvfInitialization(dst, src));
        }

        dst.setAccountId(src.getAccountUuid());
        dst.setAffinityGroupId(src.getAffinityGroupUuid());
        dst.setUserDataId(src.getUserDataUuid());
        dst.setDetails(details);

        return dst;
    }

    /** Wraps the VM's OVF XML into the oVirt initialization/configuration element. */
    private static Vm.Initialization getOvfInitialization(Vm vm, UserVmJoinVO vo) {
        final Vm.Initialization.Configuration configuration = new Vm.Initialization.Configuration();
        configuration.setType("ovf");
        configuration.setData(OvfXmlUtil.toXml(vm, vo));

        final Vm.Initialization initialization = new Vm.Initialization();
        initialization.setConfiguration(configuration);
        return initialization;
    }

    /**
     * Bulk variant of {@link #toVm}.
     *
     * @return DTOs in the same order as the input list
     */
    public static List<Vm> toVmList(final List<UserVmJoinVO> srcList,
                                    final Function<Long, HostJoinVO> hostResolver,
                                    final Function<Long, Map<String, String>> detailsResolver,
                                    final Function<Long, List<Tag>> tagsResolver,
                                    final Function<Long, List<DiskAttachment>> disksResolver,
                                    final Function<UserVmJoinVO, List<Nic>> nicsResolver,
                                    final boolean allContent) {
        return srcList.stream()
                .map(v -> toVm(v, hostResolver, detailsResolver, tagsResolver, disksResolver, nicsResolver, allContent))
                .collect(Collectors.toList());
    }

    /** Collapses the CloudStack VM state to oVirt's binary up/down status. */
    private static String mapStatus(final VirtualMachine.State state) {
        if (Arrays.asList(
                VirtualMachine.State.Running,
                VirtualMachine.State.Migrating,
                VirtualMachine.State.Restoring).contains(state)) {
            return "up";
        }
        return "down";
    }

    /** Builds a Ref for {@code baseHref/suffix/id}; returns null for a blank id. */
    private static Ref buildRef(final String baseHref, final String suffix, final String id) {
        if (StringUtils.isBlank(id)) {
            return null;
        }
        return Ref.of((baseHref != null) ? (baseHref + "/" + suffix + "/" + id) : null, id);
    }
}
NumbersUtil.parseLong(createTimeStr, 0L) : 0L; + if (createTimeSeconds > 0) { + checkpoint.setCreationDate(String.valueOf(Instant.ofEpochSecond(createTimeSeconds).toEpochMilli())); + } else { + checkpoint.setCreationDate(String.valueOf(System.currentTimeMillis())); + } + checkpoint.setState("created"); + return checkpoint; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/VmSnapshotVOToSnapshotConverter.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/VmSnapshotVOToSnapshotConverter.java new file mode 100644 index 000000000000..4dbc71505d79 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/converter/VmSnapshotVOToSnapshotConverter.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
package org.apache.cloudstack.veeam.api.converter;

import java.util.List;
import java.util.stream.Collectors;

import org.apache.cloudstack.veeam.VeeamControlService;
import org.apache.cloudstack.veeam.api.VmsRouteHandler;
import org.apache.cloudstack.veeam.api.dto.BaseDto;
import org.apache.cloudstack.veeam.api.dto.NamedList;
import org.apache.cloudstack.veeam.api.dto.Snapshot;
import org.apache.cloudstack.veeam.api.dto.Vm;

import com.cloud.vm.snapshot.VMSnapshot;
import com.cloud.vm.snapshot.VMSnapshotVO;

/**
 * Maps CloudStack {@link VMSnapshotVO} records onto oVirt-style {@link Snapshot}
 * DTOs nested under a VM resource.
 */
public class VmSnapshotVOToSnapshotConverter {

    /**
     * Converts a single VM snapshot.
     *
     * @param vmSnapshotVO the CloudStack VM-snapshot record
     * @param vmUuid       uuid of the owning VM (used for the nested hrefs)
     * @return the populated snapshot DTO
     */
    public static Snapshot toSnapshot(final VMSnapshotVO vmSnapshotVO, String vmUuid) {
        final String basePath = VeeamControlService.ContextPath.value();
        final String snapshotUuid = vmSnapshotVO.getUuid();
        final String vmHref = basePath + VmsRouteHandler.BASE_ROUTE + "/" + vmUuid;

        final Snapshot snapshot = new Snapshot();
        snapshot.setId(snapshotUuid);
        snapshot.setHref(vmHref + "/snapshots/" + snapshotUuid);
        snapshot.setVm(Vm.of(vmHref, vmUuid));
        snapshot.setDescription(vmSnapshotVO.getDescription());
        snapshot.setSnapshotType("active");
        snapshot.setDate(vmSnapshotVO.getCreated().getTime());
        // Memory state is persisted only for disk-and-memory snapshots.
        snapshot.setPersistMemorystate(String.valueOf(VMSnapshotVO.Type.DiskAndMemory.equals(vmSnapshotVO.getType())));
        // Anything not yet Ready is reported as locked (in-progress/transitional).
        snapshot.setSnapshotStatus(VMSnapshot.State.Ready.equals(vmSnapshotVO.getState()) ? "ok" : "locked");
        snapshot.setActions(NamedList.of("link", List.of(BaseDto.getActionLink("restore", snapshot.getHref()))));
        return snapshot;
    }

    /**
     * Bulk variant of {@link #toSnapshot(VMSnapshotVO, String)}.
     *
     * @param vmSnapshotVOList snapshots to convert
     * @param vmUuid           uuid of the owning VM
     * @return DTOs in the same order as the input list
     */
    public static List<Snapshot> toSnapshotList(final List<VMSnapshotVO> vmSnapshotVOList, final String vmUuid) {
        return vmSnapshotVOList.stream()
                .map(v -> toSnapshot(v, vmUuid))
                .collect(Collectors.toList());
    }
}
+ +package org.apache.cloudstack.veeam.api.converter; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; + +import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.veeam.VeeamControlService; +import org.apache.cloudstack.veeam.api.ApiRouteHandler; +import org.apache.cloudstack.veeam.api.DisksRouteHandler; +import org.apache.cloudstack.veeam.api.VmsRouteHandler; +import org.apache.cloudstack.veeam.api.dto.Disk; +import org.apache.cloudstack.veeam.api.dto.DiskAttachment; +import org.apache.cloudstack.veeam.api.dto.Link; +import org.apache.cloudstack.veeam.api.dto.NamedList; +import org.apache.cloudstack.veeam.api.dto.Ref; +import org.apache.cloudstack.veeam.api.dto.StorageDomain; +import org.apache.cloudstack.veeam.api.dto.Vm; + +import com.cloud.api.query.vo.VolumeJoinVO; +import com.cloud.storage.Storage; +import com.cloud.storage.Volume; + +public class VolumeJoinVOToDiskConverter { + public static Disk toDisk(final VolumeJoinVO vol, final Function physicalSizeResolver) { + final Disk disk = new Disk(); + final String basePath = VeeamControlService.ContextPath.value(); + final String apiBasePath = basePath + ApiRouteHandler.BASE_ROUTE; + final String diskId = vol.getUuid(); + final String diskHref = basePath + DisksRouteHandler.BASE_ROUTE + "/" + diskId; + + disk.setId(diskId); + disk.setHref(diskHref); + disk.setBootable(String.valueOf(Volume.Type.ROOT.equals(vol.getVolumeType()))); + + // Names + disk.setName(vol.getName()); + disk.setAlias(vol.getName()); + disk.setDescription(vol.getName()); + + // Sizes (bytes) + final long size = vol.getSize(); + final long actualSize = vol.getVolumeStoreSize(); + + disk.setProvisionedSize(String.valueOf(size)); + disk.setActualSize(String.valueOf(actualSize)); + disk.setTotalSize(String.valueOf(size)); + Long physicalSize = null; + if (physicalSizeResolver != null) { + physicalSize = physicalSizeResolver.apply(vol); + } + if 
(physicalSize != null) { + disk.setActualSize(String.valueOf(physicalSize)); + } + + // Disk format + disk.setFormat(mapFormat(vol.getFormat())); + disk.setQcowVersion("qcow2_v3"); + + // Content & storage + disk.setContentType("data"); + disk.setStorageType("image"); + disk.setSparse("true"); + disk.setShareable("false"); + + // Status + disk.setStatus(mapStatus(vol.getState())); + + // Backup-related flags (safe defaults) + disk.setBackup("none"); + disk.setPropagateErrors("false"); + disk.setWipeAfterDelete("false"); + + // Image ID (best-effort) + disk.setImageId(vol.getPath()); // acceptable placeholder + + // Disk profile (optional) + disk.setDiskProfile(Ref.of( + apiBasePath + "/diskprofiles/" + vol.getDiskOfferingUuid(), + String.valueOf(vol.getDiskOfferingUuid()) + )); + + // Storage domains + if (vol.getPoolUuid() != null) { + StorageDomain sd = new StorageDomain(); + sd.setHref(apiBasePath + "/storagedomains/" + vol.getPoolUuid()); + sd.setId(vol.getPoolUuid()); + disk.setStorageDomains(NamedList.of("storage_domain", List.of(sd))); + } + + // Links + disk.setLink(List.of( + Link.of("disksnapshots", diskHref + "/disksnapshots") + )); + + return disk; + } + + public static List toDiskList(final List srcList, + final Function physicalSizeResolver) { + return srcList.stream() + .map(vo -> toDisk(vo, physicalSizeResolver)) + .collect(Collectors.toList()); + } + + public static List toDiskListFromVolumeInfos(final List volumeInfos) { + List disks = new ArrayList<>(); + for (Backup.VolumeInfo volumeInfo : volumeInfos) { + Disk disk = new Disk(); + disk.setId(volumeInfo.getUuid()); + disk.setName(volumeInfo.getUuid()); + disk.setProvisionedSize(String.valueOf(volumeInfo.getSize())); + disk.setActualSize(String.valueOf(volumeInfo.getSize())); + disk.setTotalSize(String.valueOf(volumeInfo.getSize())); + disk.setBootable(String.valueOf(Volume.Type.ROOT.equals(volumeInfo.getType()))); + disks.add(disk); + } + return disks; + } + + public static DiskAttachment 
toDiskAttachment(final VolumeJoinVO vol, + final Function physicalSizeResolver) { + final DiskAttachment da = new DiskAttachment(); + final String basePath = VeeamControlService.ContextPath.value(); + + final String diskAttachmentId = vol.getUuid(); + da.setVm(Vm.of(basePath + VmsRouteHandler.BASE_ROUTE + "/" + vol.getVmUuid(), vol.getVmUuid())); + + da.setId(diskAttachmentId); + da.setHref(da.getVm().getHref() + "/diskattachments/" + diskAttachmentId); + + // Links + da.setDisk(toDisk(vol, physicalSizeResolver)); + + // Properties + da.setActive("true"); + da.setBootable(String.valueOf(Volume.Type.ROOT.equals(vol.getVolumeType()))); + da.setIface("virtio_scsi"); + da.setLogicalName(vol.getName()); + da.setReadOnly("false"); + da.setPassDiscard("false"); + + return da; + } + + public static List toDiskAttachmentList(final List srcList, + final Function physicalSizeResolver) { + return srcList.stream() + .map(vo -> toDiskAttachment(vo, physicalSizeResolver)) + .collect(Collectors.toList()); + } + + private static String mapFormat(final Storage.ImageFormat format) { + if (format == null) { + return "cow"; + } + switch (format) { + case RAW: + return "raw"; + case QCOW2: + default: + return "cow"; + } + } + + private static String mapStatus(final Volume.State state) { + if (state == null) { + return "ok"; + } + switch (state) { + case Ready: + case Allocated: + return "ok"; + default: + return "locked"; + } + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Api.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Api.java new file mode 100644 index 000000000000..93ae93b26d72 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Api.java @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements.
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; + +/** + * Root response for GET /ovirt-engine/api + */ +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class Api { + + @JacksonXmlElementWrapper(useWrapping = false) + private List link; + private EmptyElement engineBackup; + private ProductInfo productInfo; + private SpecialObjects specialObjects; + private ApiSummary summary; + private Long time; + private Ref authenticatedUser; + private Ref effectiveUser; + + public List getLink() { + return link; + } + + public void setLink(List link) { + this.link = link; + } + + public EmptyElement getEngineBackup() { + return engineBackup; + } + + public void setEngineBackup(EmptyElement engineBackup) { + this.engineBackup = engineBackup; + } + + public ProductInfo getProductInfo() { + return productInfo; + } + + public void setProductInfo(ProductInfo productInfo) { + this.productInfo = productInfo; + } + + public SpecialObjects getSpecialObjects() { + return specialObjects; + } + + public void setSpecialObjects(SpecialObjects specialObjects) { + this.specialObjects = specialObjects; + } + + public ApiSummary 
getSummary() { + return summary; + } + + public void setSummary(ApiSummary summary) { + this.summary = summary; + } + + public Long getTime() { + return time; + } + + public void setTime(Long time) { + this.time = time; + } + + public Ref getAuthenticatedUser() { + return authenticatedUser; + } + + public void setAuthenticatedUser(Ref authenticatedUser) { + this.authenticatedUser = authenticatedUser; + } + + public Ref getEffectiveUser() { + return effectiveUser; + } + + public void setEffectiveUser(Ref effectiveUser) { + this.effectiveUser = effectiveUser; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ApiSummary.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ApiSummary.java new file mode 100644 index 000000000000..a81c2a1d2745 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ApiSummary.java @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class ApiSummary { + + private SummaryCount hosts; + private SummaryCount storageDomains; + private SummaryCount users; + private SummaryCount vms; + + public SummaryCount getHosts() { + return hosts; + } + + public void setHosts(SummaryCount hosts) { + this.hosts = hosts; + } + + public SummaryCount getStorageDomains() { + return storageDomains; + } + + public void setStorageDomains(SummaryCount storageDomains) { + this.storageDomains = storageDomains; + } + + public SummaryCount getUsers() { + return users; + } + + public void setUsers(SummaryCount users) { + this.users = users; + } + + public SummaryCount getVms() { + return vms; + } + + public void setVms(SummaryCount vms) { + this.vms = vms; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Backup.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Backup.java new file mode 100644 index 000000000000..b337541bf5ca --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Backup.java @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +public class Backup extends BaseDto { + + private String name; + private String description; + private Long creationDate; + private Vm vm; + private Host host; + private String phase; + private String fromCheckpointId; + private String toCheckpointId; + private NamedList disks; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public Long getCreationDate() { + return creationDate; + } + + public void setCreationDate(Long creationDate) { + this.creationDate = creationDate; + } + + public Vm getVm() { + return vm; + } + + public void setVm(Vm vm) { + this.vm = vm; + } + + public Host getHost() { + return host; + } + + public void setHost(Host host) { + this.host = host; + } + + public String getPhase() { + return phase; + } + + public void setPhase(String phase) { + this.phase = phase; + } + + public String getFromCheckpointId() { + return fromCheckpointId; + } + + public void setFromCheckpointId(String fromCheckpointId) { + this.fromCheckpointId = fromCheckpointId; + } + + public String getToCheckpointId() { + return toCheckpointId; + } + + public void setToCheckpointId(String toCheckpointId) { + this.toCheckpointId = toCheckpointId; + } + + public NamedList getDisks() { + return disks; + } + + public void setDisks(NamedList disks) { + this.disks = disks; + } +} diff --git 
a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/BaseDto.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/BaseDto.java new file mode 100644 index 000000000000..0b260a5cdcd7 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/BaseDto.java @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public class BaseDto { + + public static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000"; + + private String href; + private String id; + + public String getHref() { + return href; + } + + public void setHref(String href) { + this.href = href; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public static Link getActionLink(final String action, final String baseHref) { + return Link.of(action, baseHref + "/" + action); + } + + protected static T withHrefAndId(T dto, String href, String id) { + dto.setHref(href); + dto.setId(id); + return dto; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Certificate.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Certificate.java new file mode 100644 index 000000000000..12e99159bfc9 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Certificate.java @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public class Certificate { + private String organization; + private String subject; + + public String getOrganization() { + return organization; + } + + public void setOrganization(String organization) { + this.organization = organization; + } + + public String getSubject() { + return subject; + } + + public void setSubject(String subject) { + this.subject = subject; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Checkpoint.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Checkpoint.java new file mode 100644 index 000000000000..763875535904 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Checkpoint.java @@ -0,0 +1,76 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +public class Checkpoint extends BaseDto { + + private String name; + private String description; + private String creationDate; + private Vm vm; + private String state; + private String parentId; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public String getCreationDate() { + return creationDate; + } + + public void setCreationDate(String creationDate) { + this.creationDate = creationDate; + } + + public Vm getVm() { + return vm; + } + + public void setVm(Vm vm) { + this.vm = vm; + } + + public String getState() { + return state; + } + + public void setState(String state) { + this.state = state; + } + + public String getParentId() { + return parentId; + } + + public void setParentId(String parentId) { + this.parentId = parentId; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Cluster.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Cluster.java new file mode 100644 index 000000000000..db0cd8be6eab --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Cluster.java @@ -0,0 +1,408 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +@JsonInclude(JsonInclude.Include.NON_NULL) +@JacksonXmlRootElement(localName = "cluster") +public final class Cluster extends BaseDto { + + private String name; + private String description; + private String comment; + private String ballooningEnabled; + private String biosType; + private Cpu cpu; + private CustomSchedulingPolicyProperties customSchedulingPolicyProperties; + private ErrorHandling errorHandling; + private FencingPolicy fencingPolicy; + private String fipsMode; // "disabled" + private String firewallType; // "firewalld" + private String glusterService; + private String haReservation; + private Ksm ksm; + private String logMaxMemoryUsedThreshold; + private String logMaxMemoryUsedThresholdType; + private MemoryPolicy memoryPolicy; + private Migration migration; + private RequiredRngSources requiredRngSources; + private String switchType; + private String threadsAsCores; + private String trustedService; + private String tunnelMigration; + private String upgradeInProgress; + private String upgradePercentComplete; + private Version version; + private String virtService; + private String vncEncryption; + private Ref dataCenter; + private Ref macPool; + private Ref schedulingPolicy; + private NamedList actions; + @JacksonXmlElementWrapper(useWrapping = 
false) + private List link; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + public String getBallooningEnabled() { + return ballooningEnabled; + } + + public void setBallooningEnabled(String ballooningEnabled) { + this.ballooningEnabled = ballooningEnabled; + } + + public String getBiosType() { + return biosType; + } + + public void setBiosType(String biosType) { + this.biosType = biosType; + } + + public Cpu getCpu() { + return cpu; + } + + public void setCpu(Cpu cpu) { + this.cpu = cpu; + } + + public CustomSchedulingPolicyProperties getCustomSchedulingPolicyProperties() { + return customSchedulingPolicyProperties; + } + + public void setCustomSchedulingPolicyProperties(CustomSchedulingPolicyProperties customSchedulingPolicyProperties) { + this.customSchedulingPolicyProperties = customSchedulingPolicyProperties; + } + + public ErrorHandling getErrorHandling() { + return errorHandling; + } + + public void setErrorHandling(ErrorHandling errorHandling) { + this.errorHandling = errorHandling; + } + + public FencingPolicy getFencingPolicy() { + return fencingPolicy; + } + + public void setFencingPolicy(FencingPolicy fencingPolicy) { + this.fencingPolicy = fencingPolicy; + } + + public String getFipsMode() { + return fipsMode; + } + + public void setFipsMode(String fipsMode) { + this.fipsMode = fipsMode; + } + + public String getFirewallType() { + return firewallType; + } + + public void setFirewallType(String firewallType) { + this.firewallType = firewallType; + } + + public String getGlusterService() { + return glusterService; + } + + public void setGlusterService(String glusterService) { + this.glusterService = 
glusterService; + } + + public String getHaReservation() { + return haReservation; + } + + public void setHaReservation(String haReservation) { + this.haReservation = haReservation; + } + + public Ksm getKsm() { + return ksm; + } + + public void setKsm(Ksm ksm) { + this.ksm = ksm; + } + + public String getLogMaxMemoryUsedThreshold() { + return logMaxMemoryUsedThreshold; + } + + public void setLogMaxMemoryUsedThreshold(String logMaxMemoryUsedThreshold) { + this.logMaxMemoryUsedThreshold = logMaxMemoryUsedThreshold; + } + + public String getLogMaxMemoryUsedThresholdType() { + return logMaxMemoryUsedThresholdType; + } + + public void setLogMaxMemoryUsedThresholdType(String logMaxMemoryUsedThresholdType) { + this.logMaxMemoryUsedThresholdType = logMaxMemoryUsedThresholdType; + } + + public MemoryPolicy getMemoryPolicy() { + return memoryPolicy; + } + + public void setMemoryPolicy(MemoryPolicy memoryPolicy) { + this.memoryPolicy = memoryPolicy; + } + + public Migration getMigration() { + return migration; + } + + public void setMigration(Migration migration) { + this.migration = migration; + } + + public RequiredRngSources getRequiredRngSources() { + return requiredRngSources; + } + + public void setRequiredRngSources(RequiredRngSources requiredRngSources) { + this.requiredRngSources = requiredRngSources; + } + + public String getSwitchType() { + return switchType; + } + + public void setSwitchType(String switchType) { + this.switchType = switchType; + } + + public String getThreadsAsCores() { + return threadsAsCores; + } + + public void setThreadsAsCores(String threadsAsCores) { + this.threadsAsCores = threadsAsCores; + } + + public String getTrustedService() { + return trustedService; + } + + public void setTrustedService(String trustedService) { + this.trustedService = trustedService; + } + + public String getTunnelMigration() { + return tunnelMigration; + } + + public void setTunnelMigration(String tunnelMigration) { + this.tunnelMigration = tunnelMigration; + } + + 
public String getUpgradeInProgress() { + return upgradeInProgress; + } + + public void setUpgradeInProgress(String upgradeInProgress) { + this.upgradeInProgress = upgradeInProgress; + } + + public String getUpgradePercentComplete() { + return upgradePercentComplete; + } + + public void setUpgradePercentComplete(String upgradePercentComplete) { + this.upgradePercentComplete = upgradePercentComplete; + } + + public Version getVersion() { + return version; + } + + public void setVersion(Version version) { + this.version = version; + } + + public String getVirtService() { + return virtService; + } + + public void setVirtService(String virtService) { + this.virtService = virtService; + } + + public String getVncEncryption() { + return vncEncryption; + } + + public void setVncEncryption(String vncEncryption) { + this.vncEncryption = vncEncryption; + } + + public Ref getDataCenter() { + return dataCenter; + } + + public void setDataCenter(Ref dataCenter) { + this.dataCenter = dataCenter; + } + + public Ref getMacPool() { + return macPool; + } + + public void setMacPool(Ref macPool) { + this.macPool = macPool; + } + + public Ref getSchedulingPolicy() { + return schedulingPolicy; + } + + public void setSchedulingPolicy(Ref schedulingPolicy) { + this.schedulingPolicy = schedulingPolicy; + } + + public NamedList getActions() { + return actions; + } + + public void setActions(NamedList actions) { + this.actions = actions; + } + + public List getLink() { + return link; + } + + public void setLink(List link) { + this.link = link; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public static final class CustomSchedulingPolicyProperties { + @JacksonXmlElementWrapper(useWrapping = false) + public List property; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public static final class Property { + public String name; + public String value; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public static final class ErrorHandling { + public String onError; // "migrate" + 
} + + @JsonInclude(JsonInclude.Include.NON_NULL) + public static final class FencingPolicy { + public String enabled; + public SkipIfConnectivityBroken skipIfConnectivityBroken; + public String skipIfGlusterBricksUp; + public String skipIfGlusterQuorumNotMet; + public SkipIfSdActive skipIfSdActive; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public static final class SkipIfConnectivityBroken { + public String enabled; + public String threshold; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public static final class SkipIfSdActive { + public String enabled; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public static final class Ksm { + public String enabled; + public String mergeAcrossNodes; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public static final class MemoryPolicy { + public OverCommit overCommit; + public TransparentHugepages transparentHugepages; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public static final class OverCommit { + public String percent; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public static final class TransparentHugepages { + public String enabled; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public static final class Migration { + public String autoConverge; + public Bandwidth bandwidth; + public String compressed; + public String encrypted; + public String parallelMigrationsPolicy; + public Ref policy; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public static final class Bandwidth { + public String assignmentMethod; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public static final class RequiredRngSources { + @JacksonXmlElementWrapper(useWrapping = false) + public List requiredRngSource; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Cpu.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Cpu.java new file mode 100644 index 
000000000000..3dce4931c848 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Cpu.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class Cpu { + private String name; + private String speed; + private String architecture; + private String type; + private Topology topology; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getSpeed() { + return speed; + } + + public void setSpeed(String speed) { + this.speed = speed; + } + + public String getArchitecture() { + return architecture; + } + + public void setArchitecture(String architecture) { + this.architecture = architecture; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public Topology getTopology() { + return topology; + } + + public void setTopology(Topology topology) { + this.topology = topology; + } +} diff --git 
a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/DataCenter.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/DataCenter.java new file mode 100644 index 000000000000..52f6a6c279f1 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/DataCenter.java @@ -0,0 +1,129 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +@JsonInclude(JsonInclude.Include.NON_NULL) +@JacksonXmlRootElement(localName = "data_center") +public final class DataCenter extends BaseDto { + private String local; + private String quotaMode; + private String status; + private String storageFormat; + private SupportedVersions supportedVersions; + private Version version; + private Ref macPool; + private NamedList actions; + private String name; + private String description; + @JacksonXmlElementWrapper(useWrapping = false) + public List link; + + public String getLocal() { + return local; + } + + public void setLocal(String local) { + this.local = local; + } + + public String getQuotaMode() { + return quotaMode; + } + + public void setQuotaMode(String quotaMode) { + this.quotaMode = quotaMode; + } + + public String getStatus() { + return status; + } + + public void setStatus(String status) { + this.status = status; + } + + public String getStorageFormat() { + return storageFormat; + } + + public void setStorageFormat(String storageFormat) { + this.storageFormat = storageFormat; + } + + public SupportedVersions getSupportedVersions() { + return supportedVersions; + } + + public void setSupportedVersions(SupportedVersions supportedVersions) { + this.supportedVersions = supportedVersions; + } + + public Version getVersion() { + return version; + } + + public void setVersion(Version version) { + this.version = version; + } + + public Ref getMacPool() { + return macPool; + } + + public void setMacPool(Ref macPool) { + this.macPool = macPool; + } + + public NamedList getActions() { + return actions; + } + + public void setActions(NamedList actions) { + this.actions = actions; + } + + public String getName() { + return name; + } + 
+ public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public List getLink() { + return link; + } + + public void setLink(List link) { + this.link = link; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Disk.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Disk.java new file mode 100644 index 000000000000..c9a19794c189 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Disk.java @@ -0,0 +1,247 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +@JsonInclude(JsonInclude.Include.NON_NULL) +@JacksonXmlRootElement(localName = "disk") +public final class Disk extends BaseDto { + + private String bootable; + private String actualSize; + private String alias; + private String backup; + private String contentType; + private String format; + private String imageId; + private String propagateErrors; + private String initialSize; + private String provisionedSize; + private String qcowVersion; + private String shareable; + private String sparse; + private String status; + private String storageType; + private String totalSize; + private String wipeAfterDelete; + private Ref diskProfile; + private Ref quota; + private NamedList storageDomains; + private NamedList actions; + private String name; + private String description; + @JacksonXmlElementWrapper(useWrapping = false) + private List link; + + public String getBootable() { + return bootable; + } + + public void setBootable(String bootable) { + this.bootable = bootable; + } + + public String getActualSize() { + return actualSize; + } + + public void setActualSize(String actualSize) { + this.actualSize = actualSize; + } + + public String getAlias() { + return alias; + } + + public void setAlias(String alias) { + this.alias = alias; + } + + public String getBackup() { + return backup; + } + + public void setBackup(String backup) { + this.backup = backup; + } + + public String getContentType() { + return contentType; + } + + public void setContentType(String contentType) { + this.contentType = contentType; + } + + public String getFormat() { + return format; + } + + public void setFormat(String format) { + this.format = format; + } + + public String getImageId() { + return 
imageId; + } + + public void setImageId(String imageId) { + this.imageId = imageId; + } + + public String getPropagateErrors() { + return propagateErrors; + } + + public void setPropagateErrors(String propagateErrors) { + this.propagateErrors = propagateErrors; + } + + public String getInitialSize() { + return initialSize; + } + + public void setInitialSize(String initialSize) { + this.initialSize = initialSize; + } + + public String getProvisionedSize() { + return provisionedSize; + } + + public void setProvisionedSize(String provisionedSize) { + this.provisionedSize = provisionedSize; + } + + public String getQcowVersion() { + return qcowVersion; + } + + public void setQcowVersion(String qcowVersion) { + this.qcowVersion = qcowVersion; + } + + public String getShareable() { + return shareable; + } + + public void setShareable(String shareable) { + this.shareable = shareable; + } + + public String getSparse() { + return sparse; + } + + public void setSparse(String sparse) { + this.sparse = sparse; + } + + public String getStatus() { + return status; + } + + public void setStatus(String status) { + this.status = status; + } + + public String getStorageType() { + return storageType; + } + + public void setStorageType(String storageType) { + this.storageType = storageType; + } + + public String getTotalSize() { + return totalSize; + } + + public void setTotalSize(String totalSize) { + this.totalSize = totalSize; + } + + public String getWipeAfterDelete() { + return wipeAfterDelete; + } + + public void setWipeAfterDelete(String wipeAfterDelete) { + this.wipeAfterDelete = wipeAfterDelete; + } + + public Ref getDiskProfile() { + return diskProfile; + } + + public void setDiskProfile(Ref diskProfile) { + this.diskProfile = diskProfile; + } + + public Ref getQuota() { + return quota; + } + + public void setQuota(Ref quota) { + this.quota = quota; + } + + public NamedList getStorageDomains() { + return storageDomains; + } + + public void setStorageDomains(NamedList 
storageDomains) { + this.storageDomains = storageDomains; + } + + public NamedList getActions() { + return actions; + } + + public void setActions(NamedList actions) { + this.actions = actions; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public List getLink() { + return link; + } + + public void setLink(List link) { + this.link = link; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/DiskAttachment.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/DiskAttachment.java new file mode 100644 index 000000000000..f22168342e35 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/DiskAttachment.java @@ -0,0 +1,111 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class DiskAttachment extends BaseDto { + + private String active; + private String bootable; + @JsonProperty("interface") + private String iface; // virtio_scsi etc + private String logicalName; + private String passDiscard; + private String readOnly; + private String usesScsiReservation; + private Disk disk; + private Vm vm; + + public DiskAttachment() { + } + + public String getActive() { + return active; + } + + public void setActive(String active) { + this.active = active; + } + + public String getBootable() { + return bootable; + } + + public void setBootable(String bootable) { + this.bootable = bootable; + } + + public String getIface() { + return iface; + } + + public void setIface(String iface) { + this.iface = iface; + } + + public String getLogicalName() { + return logicalName; + } + + public void setLogicalName(String logicalName) { + this.logicalName = logicalName; + } + + public String getPassDiscard() { + return passDiscard; + } + + public void setPassDiscard(String passDiscard) { + this.passDiscard = passDiscard; + } + + public String getReadOnly() { + return readOnly; + } + + public void setReadOnly(String readOnly) { + this.readOnly = readOnly; + } + + public String getUsesScsiReservation() { + return usesScsiReservation; + } + + public void setUsesScsiReservation(String usesScsiReservation) { + this.usesScsiReservation = usesScsiReservation; + } + + public Disk getDisk() { + return disk; + } + + public void setDisk(Disk disk) { + this.disk = disk; + } + + public Vm getVm() { + return vm; + } + + public void setVm(Vm vm) { + this.vm = vm; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/EmptyElement.java 
b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/EmptyElement.java new file mode 100644 index 000000000000..3c4111c55a31 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/EmptyElement.java @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.databind.annotation.JsonSerialize; + +@JsonSerialize(using = EmptyElementSerializer.class) +public final class EmptyElement { + public EmptyElement() { + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/EmptyElementSerializer.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/EmptyElementSerializer.java new file mode 100644 index 000000000000..9a877d5e4b25 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/EmptyElementSerializer.java @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; + +import java.io.IOException; + +/** + * Serializes as an "empty object". + * With Jackson XML this becomes an empty element: . + */ +public final class EmptyElementSerializer extends JsonSerializer { + @Override + public void serialize(EmptyElement value, JsonGenerator gen, SerializerProvider serializers) throws IOException { + gen.writeStartObject(); + gen.writeEndObject(); + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Fault.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Fault.java new file mode 100644 index 000000000000..20989d8cbd70 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Fault.java @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class Fault { + private String reason; // "Not Found", "Bad Request", "Unauthorized" + private String detail; // full message + + public Fault(final String reason, final String detail) { + this.reason = reason; + this.detail = detail; + } + + public String getReason() { + return reason; + } + + public String getDetail() { + return detail; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Host.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Host.java new file mode 100644 index 000000000000..73efba5eeb89 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Host.java @@ -0,0 +1,315 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public class Host extends BaseDto { + + private String address; + private String autoNumaStatus; + private Certificate certificate; + private Cpu cpu; + private String externalStatus; + private HardwareInformation hardwareInformation; + private String kdumpStatus; + private Version libvirtVersion; + private String maxSchedulingMemory; + private String memory; + private String numaSupported; + private Os os; + private String port; + private String protocol; + private String reinstallationRequired; + private String status; + private ApiSummary summary; + private String type; + private String updateAvailable; + private Version version; + private String vgpuPlacement; + private Ref cluster; + private NamedList actions; + private String name; + private String comment; + private List link; + + // getters/setters (generate via IDE) + public String getAddress() { + return address; + } + + public void setAddress(String address) { + this.address = address; + } + + public String getAutoNumaStatus() { + return autoNumaStatus; + } + + public void setAutoNumaStatus(String autoNumaStatus) { + this.autoNumaStatus = autoNumaStatus; + } + + public Certificate getCertificate() { + return certificate; + } + + public void setCertificate(Certificate certificate) { + this.certificate = certificate; + } + + public Cpu getCpu() { + return cpu; + } + + public void setCpu(Cpu cpu) { + this.cpu = cpu; 
+ } + + public String getExternalStatus() { + return externalStatus; + } + + public void setExternalStatus(String externalStatus) { + this.externalStatus = externalStatus; + } + + public HardwareInformation getHardwareInformation() { + return hardwareInformation; + } + + public void setHardwareInformation(HardwareInformation hardwareInformation) { + this.hardwareInformation = hardwareInformation; + } + + public String getKdumpStatus() { + return kdumpStatus; + } + + public void setKdumpStatus(String kdumpStatus) { + this.kdumpStatus = kdumpStatus; + } + + public Version getLibvirtVersion() { + return libvirtVersion; + } + + public void setLibvirtVersion(Version libvirtVersion) { + this.libvirtVersion = libvirtVersion; + } + + public String getMaxSchedulingMemory() { + return maxSchedulingMemory; + } + + public void setMaxSchedulingMemory(String maxSchedulingMemory) { + this.maxSchedulingMemory = maxSchedulingMemory; + } + + public String getMemory() { + return memory; + } + + public void setMemory(String memory) { + this.memory = memory; + } + + public String getNumaSupported() { + return numaSupported; + } + + public void setNumaSupported(String numaSupported) { + this.numaSupported = numaSupported; + } + + public Os getOs() { + return os; + } + + public void setOs(Os os) { + this.os = os; + } + + public String getPort() { + return port; + } + + public void setPort(String port) { + this.port = port; + } + + public String getProtocol() { + return protocol; + } + + public void setProtocol(String protocol) { + this.protocol = protocol; + } + + public String getReinstallationRequired() { + return reinstallationRequired; + } + + public void setReinstallationRequired(String reinstallationRequired) { + this.reinstallationRequired = reinstallationRequired; + } + + public String getStatus() { + return status; + } + + public void setStatus(String status) { + this.status = status; + } + + public ApiSummary getSummary() { + return summary; + } + + public void 
setSummary(ApiSummary summary) { + this.summary = summary; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public String getUpdateAvailable() { + return updateAvailable; + } + + public void setUpdateAvailable(String updateAvailable) { + this.updateAvailable = updateAvailable; + } + + public Version getVersion() { + return version; + } + + public void setVersion(Version version) { + this.version = version; + } + + public String getVgpuPlacement() { + return vgpuPlacement; + } + + public void setVgpuPlacement(String vgpuPlacement) { + this.vgpuPlacement = vgpuPlacement; + } + + public Ref getCluster() { + return cluster; + } + + public void setCluster(Ref cluster) { + this.cluster = cluster; + } + + public NamedList getActions() { + return actions; + } + + public void setActions(NamedList actions) { + this.actions = actions; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + public List getLink() { + return link; + } + + public void setLink(List link) { + this.link = link; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public static class HardwareInformation { + private String manufacturer; + private String productName; + private String serialNumber; + private String uuid; + private String version; + + public String getManufacturer() { + return manufacturer; + } + + public void setManufacturer(String manufacturer) { + this.manufacturer = manufacturer; + } + + public String getProductName() { + return productName; + } + + public void setProductName(String productName) { + this.productName = productName; + } + + public String getSerialNumber() { + return serialNumber; + } + + public void setSerialNumber(String serialNumber) { + this.serialNumber = serialNumber; + } + + public String getUuid() { + 
return uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } + } + + public static Host of(String href, String id) { + return withHrefAndId(new Host(), href, id); + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ImageTransfer.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ImageTransfer.java new file mode 100644 index 000000000000..b0a26daa1049 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ImageTransfer.java @@ -0,0 +1,167 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public class ImageTransfer extends BaseDto { + + private String active; + private String direction; + private String format; + private String inactivityTimeout; + private String phase; + private String proxyUrl; + private String shallow; + private String timeoutPolicy; + private String transferUrl; + private String transferred; + private Backup backup; + private Ref host; + private Ref image; + private Ref disk; + private NamedList actions; + + @JacksonXmlElementWrapper(useWrapping = false) + public List link; + + public String getActive() { + return active; + } + + public void setActive(String active) { + this.active = active; + } + + public String getDirection() { + return direction; + } + + public void setDirection(String direction) { + this.direction = direction; + } + + public String getFormat() { + return format; + } + + public void setFormat(String format) { + this.format = format; + } + + public String getInactivityTimeout() { + return inactivityTimeout; + } + + public void setInactivityTimeout(String inactivityTimeout) { + this.inactivityTimeout = inactivityTimeout; + } + + public String getPhase() { + return phase; + } + + public void setPhase(String phase) { + this.phase = phase; + } + + public String getProxyUrl() { + return proxyUrl; + } + + public void setProxyUrl(String proxyUrl) { + this.proxyUrl = proxyUrl; + } + + public String getShallow() { + return shallow; + } + + public void setShallow(String shallow) { + this.shallow = shallow; + } + + public String getTimeoutPolicy() { + return timeoutPolicy; + } + + public void setTimeoutPolicy(String timeoutPolicy) { + this.timeoutPolicy = timeoutPolicy; + } + + public String getTransferUrl() { + return transferUrl; + } + + public void 
setTransferUrl(String transferUrl) { + this.transferUrl = transferUrl; + } + + public String getTransferred() { + return transferred; + } + + public void setTransferred(String transferred) { + this.transferred = transferred; + } + + public Backup getBackup() { + return backup; + } + + public void setBackup(Backup backup) { + this.backup = backup; + } + + public Ref getHost() { + return host; + } + + public void setHost(Ref host) { + this.host = host; + } + + public Ref getImage() { + return image; + } + + public void setImage(Ref image) { + this.image = image; + } + + public Ref getDisk() { + return disk; + } + + public void setDisk(Ref disk) { + this.disk = disk; + } + + public NamedList getActions() { + return actions; + } + + public void setActions(NamedList actions) { + this.actions = actions; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Ip.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Ip.java new file mode 100644 index 000000000000..7afbc0710ffb --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Ip.java @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public class Ip { + + private String address; + private String gateway; + private String netmask; + private String version; + + public String getAddress() { + return address; + } + + public void setAddress(String address) { + this.address = address; + } + + public String getGateway() { + return gateway; + } + + public void setGateway(String gateway) { + this.gateway = gateway; + } + + public String getNetmask() { + return netmask; + } + + public void setNetmask(String netmask) { + this.netmask = netmask; + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Job.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Job.java new file mode 100644 index 000000000000..13b0e8a02fd1 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Job.java @@ -0,0 +1,117 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public class Job extends BaseDto { + private String autoCleared; + private String external; + private Long lastUpdated; + private Long startTime; + private Long endTime; + private String status; + private Ref owner; + private NamedList actions; + private String description; + private List link; + + // getters and setters + public String getAutoCleared() { + return autoCleared; + } + + public void setAutoCleared(String autoCleared) { + this.autoCleared = autoCleared; + } + + public String getExternal() { + return external; + } + + public void setExternal(String external) { + this.external = external; + } + + public Long getLastUpdated() { + return lastUpdated; + } + + public void setLastUpdated(Long lastUpdated) { + this.lastUpdated = lastUpdated; + } + + public Long getStartTime() { + return startTime; + } + + public void setStartTime(Long startTime) { + this.startTime = startTime; + } + + public Long getEndTime() { + return endTime; + } + + public void setEndTime(Long endTime) { + this.endTime = endTime; + } + + public String getStatus() { + return status; + } + + public void setStatus(String status) { + this.status = status; + } + + public Ref getOwner() { + return owner; + } + + public void setOwner(Ref owner) { + this.owner = owner; + } + + public NamedList getActions() { + return actions; + } + + public void setActions(NamedList actions) { + this.actions = actions; + } 
+ + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public List getLink() { + return link; + } + + public void setLink(List link) { + this.link = link; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Link.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Link.java new file mode 100644 index 000000000000..7d67820360f9 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Link.java @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class Link { + private String rel; + private String href; + + public static Link of(final String rel, final String href) { + Link link = new Link(); + link.setRel(rel); + link.setHref(href); + return link; + } + + public String getRel() { + return rel; + } + + public void setRel(String rel) { + this.rel = rel; + } + + public String getHref() { + return href; + } + + public void setHref(String href) { + this.href = href; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Mac.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Mac.java new file mode 100644 index 000000000000..02d908054608 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Mac.java @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public class Mac { + + private String address; + + public String getAddress() { + return address; + } + + public void setAddress(String address) { + this.address = address; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/NamedList.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/NamedList.java new file mode 100644 index 000000000000..fb7c2aa664b3 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/NamedList.java @@ -0,0 +1,63 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import com.fasterxml.jackson.annotation.JsonAnyGetter; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; + +public class NamedList { + private final String name; + private final List items; + + private NamedList(String name, List items) { + if (name == null || name.isEmpty()) { + throw new IllegalArgumentException("name must be non-empty"); + } + this.name = name; + this.items = items == null ? Collections.emptyList() : items; + } + + public static NamedList of(String name, List items) { + return new NamedList<>(name, items); + } + + @JsonAnyGetter + public Map> asMap() { + return Collections.singletonMap(name, items); + } + + @JsonCreator + public static NamedList fromMap(Map> map) { + if (map == null || map.size() != 1) { + throw new IllegalArgumentException("Expected single-property object for NamedList"); + } + Entry> e = map.entrySet().iterator().next(); + return new NamedList<>(e.getKey(), e.getValue()); + } + + @JsonIgnore + public List getItems() { + return items; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Network.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Network.java new file mode 100644 index 000000000000..bb72a2ad323f --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Network.java @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public class Network extends BaseDto { + private String mtu; // oVirt prints as string + private String portIsolation; // "false" + private String stp; // "false" + private NamedList usages; // { usage: ["vm"] } + private String vdsmName; + + private Ref dataCenter; + + private String name; + private String description; + private String comment; + + @JsonProperty("link") + private List link; + + public Network() { + } + + // ---- getters / setters ---- + + public String getMtu() { + return mtu; + } + + public void setMtu(final String mtu) { + this.mtu = mtu; + } + + public String getPortIsolation() { + return portIsolation; + } + + public void setPortIsolation(final String portIsolation) { + this.portIsolation = portIsolation; + } + + public String getStp() { + return stp; + } + + public void setStp(final String stp) { + this.stp = stp; + } + + public NamedList getUsages() { + return usages; + } + + public void setUsages(final NamedList usages) { + this.usages = usages; + } + + public String getVdsmName() { + return vdsmName; + } + + public void setVdsmName(final String vdsmName) { + this.vdsmName = vdsmName; + } + + public Ref getDataCenter() { + return 
dataCenter; + } + + public void setDataCenter(final Ref dataCenter) { + this.dataCenter = dataCenter; + } + + public String getName() { + return name; + } + + public void setName(final String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(final String description) { + this.description = description; + } + + public String getComment() { + return comment; + } + + public void setComment(final String comment) { + this.comment = comment; + } + + public List getLink() { + return link; + } + + public void setLink(final List link) { + this.link = link; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Nic.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Nic.java new file mode 100644 index 000000000000..2f866abef7f4 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Nic.java @@ -0,0 +1,122 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public class Nic extends BaseDto { + + private String name; + private String description; + @JacksonXmlProperty(localName = "interface") + @JsonProperty("interface") + private String interfaceType; + private String linked; + private Mac mac; + private String plugged; + public String synced; + private Ref vnicProfile; + private Vm vm; + private NamedList reportedDevices; + + public Nic() { + } + + public String getName() { + return name; + } + + public void setName(final String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(final String description) { + this.description = description; + } + + public String getInterfaceType() { + return interfaceType; + } + + public void setInterfaceType(String interfaceType) { + this.interfaceType = interfaceType; + } + + public String getLinked() { + return linked; + } + + public void setLinked(String linked) { + this.linked = linked; + } + + public Mac getMac() { + return mac; + } + + public void setMac(Mac mac) { + this.mac = mac; + } + + public String getPlugged() { + return plugged; + } + + public void setPlugged(String plugged) { + this.plugged = plugged; + } + + public String getSynced() { + return synced; + } + + public void setSynced(String synced) { + this.synced = synced; + } + + public Ref getVnicProfile() { + return vnicProfile; + } + + public void setVnicProfile(Ref vnicProfile) { + this.vnicProfile = vnicProfile; + } + + public Vm getVm() { + return vm; + } + + public void setVm(Vm vm) { + this.vm = vm; + } + + public NamedList getReportedDevices() { + return reportedDevices; + } + + public void setReportedDevices(NamedList reportedDevices) { + 
this.reportedDevices = reportedDevices; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Os.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Os.java new file mode 100644 index 000000000000..af17151d4335 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Os.java @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class Os { + private String type; + private String version; + private Boot boot; + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public Boot getBoot() { + return boot; + } + + public void setBoot(Boot boot) { + this.boot = boot; + } + + public final static class Boot { + private NamedList devices; + + public NamedList getDevices() { + return devices; + } + + public void setDevices(NamedList devices) { + this.devices = devices; + } + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/OvfXmlUtil.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/OvfXmlUtil.java new file mode 100644 index 000000000000..d417ffde17de --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/OvfXmlUtil.java @@ -0,0 +1,792 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import java.io.ByteArrayInputStream; +import java.nio.charset.StandardCharsets; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.TimeZone; +import java.util.UUID; + +import javax.xml.XMLConstants; +import javax.xml.namespace.NamespaceContext; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.xpath.XPath; +import javax.xml.xpath.XPathConstants; +import javax.xml.xpath.XPathExpressionException; +import javax.xml.xpath.XPathFactory; + +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.StringUtils; +import org.w3c.dom.Document; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; + +import com.cloud.api.query.vo.UserVmJoinVO; + +public class OvfXmlUtil { + + private static final String NS_OVF = "http://schemas.dmtf.org/ovf/envelope/1/"; + private static final String NS_RASD = "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"; + private static final String NS_VSSD = "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"; + private static final String NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"; + + private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000"; + private static final TimeZone UTC = TimeZone.getTimeZone("Etc/GMT"); + + private static final ThreadLocal OVIRT_DTF = ThreadLocal.withInitial(() -> { + final SimpleDateFormat sdf = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss", Locale.ROOT); + sdf.setTimeZone(UTC); + return sdf; + }); + + public static String toXml(final Vm vm, final UserVmJoinVO vo) { + final String vmId = vm.getId(); + final String vmName = vm.getName(); + final String vmDesc = 
defaultString(vm.getDescription()); + + final long creationMillis = vm.getCreationTime(); + final String creationDate = formatDate(creationMillis); + final String exportDate = formatDate(System.currentTimeMillis()); + final String stopTime = vm.getStopTime() != null ? formatDate(vm.getStopTime()) : creationDate; + final String bootTime = vm.getStartTime() != null ? formatDate(vm.getStartTime()) : creationDate; + + // Memory: Vm.memory is bytes (string) + final long memBytes = parseLong(vm.getMemory(), 1024L * 1024L * 1024L); + final long memMb = Math.max(128, memBytes / (1024L * 1024L)); + + // CPU: topology cores/sockets/threads. We default sockets=1 threads=1. + final int vcpu = Math.max(1, Integer.parseInt(vm.getCpu().getTopology().getCores())); + final int sockets = Math.max(1, Integer.parseInt(vm.getCpu().getTopology().getSockets())); + final int threads = Math.max(1, Integer.parseInt(vm.getCpu().getTopology().getThreads())); + final int cpuPerSocket = Math.max(1, vcpu / sockets); + final int maxVcpu = vcpu; + + // Template + final Ref template = vm.getTemplate(); + final String templateId = template != null && StringUtils.isNotBlank(template.getId()) ? template.getId() : ZERO_UUID; + final String templateName = template != null ? 
defaultString(template.getId()) : "Blank"; + + // Snapshot id (stable per VM id) + final String snapshotId = UUID.nameUUIDFromBytes(("ovf-snap-" + vmId).getBytes(StandardCharsets.UTF_8)).toString(); + + final StringBuilder sb = new StringBuilder(16_384); + sb.append(""); + sb.append(""); + + // --- References (from disks) --- + sb.append(""); + for (DiskAttachment da : diskAttachments(vm)) { + if (da == null || da.getDisk() == null || StringUtils.isBlank(da.getDisk().getId())) { + continue; + } + final String diskId = da.getDisk().getId(); + final String storageDomainId = firstStorageDomainId(da.getDisk()); + final String href = storageDomainId + "/" + diskId; + sb.append(""); + } + sb.append(""); + + // --- NetworkSection --- + sb.append(""); + sb.append("List of networks"); + // oVirt often lists networks, but can also be empty. We'll include known names if we can. + for (Nic nic : nics(vm)) { + if (nic == null) { + continue; + } + final String netName = inferNetworkName(nic); + if (StringUtils.isBlank(netName)) { + continue; + } + sb.append(""); + sb.append("").append(escapeText(defaultString(nic.getDescription()))).append(""); + sb.append(""); + } + sb.append(""); + + // --- DiskSection --- + sb.append("
"); + sb.append("List of Virtual Disks"); + for (DiskAttachment da : diskAttachments(vm)) { + if (da == null || da.getDisk() == null || StringUtils.isBlank(da.getDisk().getId())) { + continue; + } + final org.apache.cloudstack.veeam.api.dto.Disk d = da.getDisk(); + final String diskId = d.getId(); + final String storageDomainId = firstStorageDomainId(d); + final String href = storageDomainId + "/" + diskId; + final long provBytes = parseLong(d.getProvisionedSize(), 0); + final long actualBytes = parseLong(d.getActualSize(), 0); + final long provGiB = bytesToGibCeil(provBytes); + final long actualGiB = bytesToGibCeil(actualBytes); + final String diskInterface = mapDiskInterface(da.getIface()); + + sb.append(" 0 ? provGiB : 1).append("\""); + sb.append(" ovf:actual_size=\"").append(actualGiB > 0 ? actualGiB : 1).append("\""); + sb.append(" ovf:vm_snapshot_id=\"").append(escapeAttr(snapshotId)).append("\""); + sb.append(" ovf:parentRef=\"\""); + sb.append(" ovf:fileRef=\"").append(escapeAttr(href)).append("\""); + sb.append(" ovf:format=\"").append(escapeAttr(mapOvfDiskFormat(d.getFormat(), d.getSparse()))).append("\""); + sb.append(" ovf:volume-format=\"").append(escapeAttr(mapVolumeFormat(d.getFormat()))).append("\""); + sb.append(" ovf:volume-type=\"").append(escapeAttr(mapVolumeType(d.getSparse()))).append("\""); + sb.append(" ovf:disk-interface=\"").append(escapeAttr(diskInterface)).append("\""); + sb.append(" ovf:read-only=\"").append(escapeAttr(booleanString(da.getReadOnly(), "false"))).append("\""); + sb.append(" ovf:shareable=\"").append(escapeAttr(booleanString(d.getShareable(), "false"))).append("\""); + sb.append(" ovf:boot=\"").append(escapeAttr(booleanString(da.getBootable(), "false"))).append("\""); + sb.append(" ovf:pass-discard=\"").append(escapeAttr(booleanString(da.getPassDiscard(), "false"))).append("\""); + sb.append(" ovf:incremental-backup=\"false\""); + sb.append(" 
ovf:disk-alias=\"").append(escapeAttr(defaultString(d.getAlias()))).append("\""); + sb.append(" ovf:disk-description=\"").append(escapeAttr(defaultString(d.getDescription()))).append("\""); + sb.append(" ovf:wipe-after-delete=\"").append(escapeAttr(booleanString(d.getWipeAfterDelete(), "false"))).append("\""); + sb.append(">"); + } + sb.append("
"); + + if (vo != null) { + // -- Add a section for CloudStack-specific metadata that some consumers might look for (e.g. for import back into CloudStack) --- + // Add CloudStack-specific metadata section + sb.append("
"); + sb.append("CloudStack specific metadata"); + sb.append(""); + sb.append("").append(vo.getAccountUuid()).append(""); + sb.append("").append(vo.getDomainUuid()).append(""); + sb.append("").append(escapeText(vo.getProjectUuid())).append(""); + if (vm.getCpuProfile() != null && StringUtils.isNotBlank(vm.getCpuProfile().getId())) { + sb.append("").append(vm.getCpuProfile().getId()).append(""); + } + sb.append(""); + for (DiskAttachment da : diskAttachments(vm)) { + if (da == null || da.getDisk() == null || StringUtils.isBlank(da.getDisk().getId())) { + continue; + } + final Disk d = da.getDisk(); + sb.append(""); + sb.append("").append(escapeText(d.getId())).append(""); + sb.append("").append(d.getDiskProfile().getId()).append(""); + sb.append(""); + } + sb.append(""); + if (MapUtils.isNotEmpty(vm.getDetails())) { + sb.append("
"); + for (Map.Entry entry : vm.getDetails().entrySet()) { + sb.append(""); + sb.append("").append(escapeText(entry.getKey())).append(""); + sb.append("").append(escapeText(entry.getValue())).append(""); + sb.append(""); + } + sb.append("
"); + } + if (vo.getUserDataId() != null) { + sb.append("").append(escapeText(vo.getUserDataUuid())).append(""); + } + if (vo.getAffinityGroupId() != null) { + sb.append("").append(escapeText(vo.getAffinityGroupUuid())).append(""); + } + sb.append("
"); + sb.append("
"); + } + + // --- Content / VirtualSystem --- + sb.append(""); + sb.append("").append(escapeText(vmName)).append(""); + sb.append("").append(escapeText(vmDesc)).append(""); + sb.append(""); + sb.append("").append(creationDate).append(""); + sb.append("").append(exportDate).append(""); + sb.append("false"); + sb.append("guest_agent"); + sb.append("false"); + sb.append("1"); + sb.append("Etc/GMT"); + sb.append("0"); + sb.append("11"); + sb.append("4.8"); + sb.append("1"); + sb.append("AUTO_RESUME"); + sb.append("").append(memMb).append(""); + sb.append("").append(escapeText(booleanString(vm.getStateless(), "false"))).append(""); + sb.append("false"); + sb.append("false"); + sb.append("0"); + sb.append("").append(vo.getAccountUuid()).append(""); + sb.append("0"); + sb.append("").append(escapeText(booleanString(vm.getBios() != null && vm.getBios().getBootMenu() != null ? vm.getBios().getBootMenu().getEnabled() : null, "false"))).append(""); + sb.append("true"); + sb.append("true"); + sb.append("false"); + sb.append("LOCK_SCREEN"); + sb.append("0"); + sb.append(""); + sb.append("").append(vm.getBios() != null ? vm.getBios().getTypeOrdinal() : 1).append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append("").append(memMb).append(""); + sb.append("true"); + sb.append("false"); + sb.append("false"); + sb.append("").append(mapBalloonEnabled(vm)).append(""); + sb.append("0"); + sb.append(""); + sb.append("").append(escapeText(templateId)).append(""); + sb.append("").append(escapeText(templateName)).append(""); + sb.append("true"); + sb.append("3"); + sb.append("").append(ZERO_UUID).append(""); + sb.append("2"); + sb.append("false"); + sb.append("").append(escapeText(templateId)).append(""); + sb.append("").append(escapeText(templateName)).append(""); + sb.append("false"); + sb.append("").append(stopTime).append(""); + sb.append("").append(bootTime).append(""); + sb.append("0"); + + // --- Operating system section --- + sb.append("
"); + sb.append("Guest Operating System"); + sb.append("").append(escapeText(inferOsDescription(vm))).append(""); + sb.append("
"); + + // --- Virtual hardware section --- + sb.append("
"); + sb.append("").append(vcpu).append(" CPU, ").append(memMb).append(" Memory"); + sb.append(""); + sb.append("ENGINE 4.4.0.0"); + sb.append(""); + + // CPU + sb.append(""); + sb.append("").append(vcpu).append(" virtual cpu"); + sb.append("Number of virtual CPU"); + sb.append("1"); + sb.append("3"); + sb.append("").append(sockets).append(""); + sb.append("").append(cpuPerSocket).append(""); + sb.append("").append(threads).append(""); + sb.append("").append(maxVcpu).append(""); + sb.append("").append(vcpu).append(""); + sb.append(""); + + // Memory + sb.append(""); + sb.append("").append(memMb).append(" MB of memory"); + sb.append("Memory Size"); + sb.append("2"); + sb.append("4"); + sb.append("MegaBytes"); + sb.append("").append(memMb).append(""); + sb.append(""); + + // Disks as Items + int diskUnit = 0; + for (DiskAttachment da : diskAttachments(vm)) { + if (da == null || da.getDisk() == null || StringUtils.isBlank(da.getDisk().getId())) { + continue; + } + final org.apache.cloudstack.veeam.api.dto.Disk d = da.getDisk(); + final String diskId = d.getId(); + final String storageDomainId = firstStorageDomainId(d); + final String href = storageDomainId + "/" + diskId; + + sb.append(""); + sb.append("").append(escapeText(defaultString(d.getAlias()))).append(""); + sb.append("").append(escapeText(diskId)).append(""); + sb.append("17"); + sb.append("").append(escapeText(href)).append(""); + sb.append("").append(ZERO_UUID).append(""); + sb.append("").append(escapeText(templateId)).append(""); + sb.append(""); + sb.append("").append(escapeText(storageDomainId)).append(""); + sb.append("").append(ZERO_UUID).append(""); + sb.append("").append(creationDate).append(""); + sb.append("").append(exportDate).append(""); + sb.append("").append(exportDate).append(""); + sb.append("disk"); + sb.append("disk"); + sb.append("").append(escapeText("{type=drive, bus=0, controller=0, target=0, unit=" + diskUnit + "}")).append(""); + 
sb.append("").append("true".equalsIgnoreCase(da.getBootable()) ? 1 : 0).append(""); + sb.append("true"); + sb.append("").append("true".equalsIgnoreCase(da.getReadOnly())).append(""); + sb.append("").append(escapeText("ua-" + href)).append(""); + sb.append(""); + diskUnit++; + } + + // NICs as Items + int nicSlot = 0; + for (Nic nic : nics(vm)) { + if (nic == null) { + continue; + } + final String nicId = firstNonBlank(nic.getId(), UUID.nameUUIDFromBytes(("nic-" + vmId + "-" + nicSlot).getBytes(StandardCharsets.UTF_8)).toString()); + final String nicName = firstNonBlank(nic.getName(), "nic" + (nicSlot + 1)); + final String mac = nic.getMac() != null ? defaultString(nic.getMac().getAddress()) : ""; + + sb.append(""); + sb.append("Ethernet adapter on [No Network]"); + sb.append("").append(escapeText(nicId)).append(""); + sb.append("10"); + sb.append(""); + sb.append("").append(mapNicResourceSubType(nic.getInterfaceType())).append(""); + sb.append("").append(escapeText(defaultString(inferNetworkName(nic)))).append(""); + sb.append("").append(escapeText(booleanString(nic.getLinked(), "true"))).append(""); + sb.append("").append(escapeText(nicName)).append(""); + sb.append("").append(escapeText(nicName)).append(""); + sb.append("").append(escapeText(mac)).append(""); + sb.append("10000"); + sb.append("interface"); + sb.append("bridge"); + sb.append("").append(escapeText("{type=pci, slot=0x" + String.format("%02x", nicSlot) + ", bus=0x01, domain=0x0000, function=0x0}")).append(""); + sb.append("0"); + sb.append("").append(escapeText(booleanString(nic.getPlugged(), "true"))).append(""); + sb.append("false"); + sb.append("").append(escapeText("ua-" + nicId)).append(""); + sb.append(""); + nicSlot++; + } + + // A few common devices that some consumers expect to exist (kept minimal) + // USB controller + sb.append(""); + sb.append("USB Controller"); + sb.append("3"); + sb.append("23"); + sb.append("DISABLED"); + sb.append(""); + + // RNG device + sb.append(""); + 
sb.append("0"); + sb.append("").append(UUID.nameUUIDFromBytes(("rng-" + vmId).getBytes(StandardCharsets.UTF_8))).append(""); + sb.append("rng"); + sb.append("virtio"); + sb.append("{type=pci, slot=0x00, bus=0x06, domain=0x0000, function=0x0}"); + sb.append("0"); + sb.append("true"); + sb.append("false"); + sb.append(""); + sb.append("urandom"); + sb.append(""); + + sb.append("
"); + sb.append("
"); + sb.append("
"); + + return sb.toString(); + } + + public static void updateFromConfiguration(Vm vm) { + Vm.Initialization initialization = vm.getInitialization(); + if (initialization == null) { + return; + } + Vm.Initialization.Configuration configuration = vm.getInitialization().getConfiguration(); + if (configuration == null) { + return; + } + OvfXmlUtil.updateFromXml(vm, configuration.getData()); + } + + protected static void updateFromXml(Vm vm, String ovfXml) { + if (vm == null || StringUtils.isBlank(ovfXml)) { + return; + } + try { + DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); + dbf.setNamespaceAware(true); + dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); + DocumentBuilder db = dbf.newDocumentBuilder(); + Document doc = db.parse(new ByteArrayInputStream(ovfXml.getBytes(StandardCharsets.UTF_8))); + + XPathFactory xpf = XPathFactory.newInstance(); + XPath xpath = xpf.newXPath(); + + // Register namespace context for XPath + xpath.setNamespaceContext(new OvfNamespaceContext()); + + + Node contentNode = (Node) xpath.evaluate( + "//*[local-name()='Content']", + doc, + XPathConstants.NODE + ); + updateFromXmlContentNode(vm, contentNode, xpath); + + Node hwSection = (Node) xpath.evaluate( + "//*[local-name()='Section' and @*[local-name()='type']='ovf:VirtualHardwareSection_Type']", + doc, + XPathConstants.NODE + ); + updateFromXmlHardwareSection(vm, hwSection, xpath); + + Node metadataSection = (Node) xpath.evaluate( + "//*[local-name()='Section' and @*[local-name()='type']='ovf:CloudStackMetadata_Type']", + doc, + XPathConstants.NODE + ); + updateFromXmlCloudStackMetadataSection(vm, metadataSection, xpath); + } catch (Exception e) { + // Ignore parsing errors and keep original VM configuration + } + } + + private static void updateFromXmlContentNode(Vm vm, Node contentNode, XPath xpath) { + if (contentNode == null) { + return; + } + String userId = xpathString(xpath, contentNode, "./*[local-name()='CreatedByUserId']/text()"); + if 
(StringUtils.isNotBlank(userId)) { + vm.setAccountId(userId); + } + String templateId = xpathString(xpath, contentNode, "./*[local-name()='TemplateId']/text()"); + if (StringUtils.isNotBlank(templateId)) { + vm.setTemplate(Ref.of("", templateId)); + } + String biosType = xpathString(xpath, contentNode, "./*[local-name()='BiosType']/text()"); + Vm.Bios bios = Vm.Bios.getBiosFromOrdinal(biosType); + vm.setBios(bios); + } + + private static void updateFromXmlHardwareSection(Vm vm, Node hwSection, XPath xpath) throws XPathExpressionException { + if (hwSection == null) { + return; + } + // Memory + NodeList memItems = (NodeList) xpath.evaluate( + ".//*[local-name()='Item'][*[local-name()='ResourceType' and text()='4']]", + hwSection, + XPathConstants.NODESET + ); + if (memItems != null && memItems.getLength() > 0) { + Node memItem = memItems.item(0); + String memStr = childText(memItem, "VirtualQuantity"); + if (StringUtils.isNotBlank(memStr)) { + vm.setMemory(memStr); + } + } + + // CPU + NodeList cpuItems = (NodeList) xpath.evaluate( + ".//*[local-name()='Item'][*[local-name()='ResourceType' and text()='3']]", + hwSection, + XPathConstants.NODESET + ); + if (cpuItems != null && cpuItems.getLength() > 0) { + Node cpuItem = cpuItems.item(0); + String socketsStr = childText(cpuItem, "num_of_sockets"); + String coresStr = childText(cpuItem, "cpu_per_socket"); + String threadsStr = childText(cpuItem, "threads_per_cpu"); + + if (vm.getCpu() == null) { + vm.setCpu(new Cpu()); + } + if (vm.getCpu().getTopology() == null) { + vm.getCpu().setTopology(new Topology()); + } + + if (StringUtils.isNotBlank(socketsStr)) { + vm.getCpu().getTopology().setSockets(socketsStr); + } + if (StringUtils.isNotBlank(coresStr)) { + vm.getCpu().getTopology().setCores(coresStr); + } + if (StringUtils.isNotBlank(threadsStr)) { + vm.getCpu().getTopology().setThreads(threadsStr); + } + } + } + + private static void updateFromXmlCloudStackMetadataSection(Vm vm, Node metadataSection, XPath xpath) { + 
if (metadataSection == null) { + return; + } + String serviceOfferingId = xpathString(xpath, metadataSection, ".//*[local-name()='ServiceOfferingId']/text()"); + if (StringUtils.isNotBlank(serviceOfferingId)) { + vm.setCpuProfile(Ref.of("", serviceOfferingId)); + } + String affinityGroupId = xpathString(xpath, metadataSection, ".//*[local-name()='AffinityGroupId']/text()"); + if (StringUtils.isNotBlank(affinityGroupId)) { + vm.setAffinityGroupId(affinityGroupId); + } + String userDataId = xpathString(xpath, metadataSection, ".//*[local-name()='UserDataId']/text()"); + if (StringUtils.isNotBlank(userDataId)) { + vm.setUserDataId(userDataId); + } + final Map details = new HashMap<>(); + try { + NodeList detailNodes = (NodeList) xpath.evaluate( + ".//*[local-name()='Details']/*[local-name()='Detail']", + metadataSection, + XPathConstants.NODESET + ); + + for (int i = 0; i < detailNodes.getLength(); i++) { + Node detailNode = detailNodes.item(i); + String key = xpathString(xpath, detailNode, "./*[local-name()='Key']/text()"); + if (StringUtils.isBlank(key)) { + continue; + } + String value = xpathString(xpath, detailNode, "./*[local-name()='Value']/text()"); + details.put(key, defaultString(value)); + } + } catch (XPathExpressionException ignored) { + } + if (!details.isEmpty()) { + vm.setDetails(details); + } + } + + private static String xpathString(XPath xpath, Node node, String expression) { + if (node == null) { + return null; + } + try { + String value = (String) xpath.evaluate(expression, node, XPathConstants.STRING); + return StringUtils.isBlank(value) ? 
null : value.trim(); + } catch (XPathExpressionException e) { + return null; + } + } + + private static String childText(Node parent, String localName) { + if (parent == null || StringUtils.isBlank(localName)) { + return null; + } + NodeList children = parent.getChildNodes(); + for (int i = 0; i < children.getLength(); i++) { + Node child = children.item(i); + if (child.getNodeType() != Node.ELEMENT_NODE) { + continue; + } + String ln = child.getLocalName(); + if (StringUtils.isBlank(ln)) { + ln = child.getNodeName(); + } + if (localName.equalsIgnoreCase(ln)) { + return StringUtils.trim(child.getTextContent()); + } + } + return null; + } + + private static List diskAttachments(Vm vm) { + if (vm.getDiskAttachments() == null) { + return List.of(); + } + return vm.getDiskAttachments().getItems(); + } + + private static List nics(Vm vm) { + if (vm.getNics() == null) { + return List.of(); + } + return vm.getNics().getItems(); + } + + private static String inferOsDescription(Vm vm) { + if (vm.getOs() == null) { + return "other"; + } + String t = vm.getOs().getType(); + if (StringUtils.isBlank(t)) { + return "other"; + } + if (t.toLowerCase(Locale.ROOT).contains("win")) { + return "windows"; + } + if (t.toLowerCase(Locale.ROOT).contains("linux")) { + return "linux"; + } + return t; + } + + private static String inferNetworkName(Nic nic) { + return "Network-" + nic.getId(); + } + + private static String firstStorageDomainId(Disk d) { + if (ObjectUtils.allNotNull(d, d.getStorageDomains()) && CollectionUtils.isNotEmpty(d.getStorageDomains().getItems())) { + return d.getStorageDomains().getItems().get(0).getId(); + } + return UUID.randomUUID().toString(); + } + + private static String mapDiskInterface(String iface) { + if (StringUtils.isBlank(iface)) { + return "VirtIO_SCSI"; + } + String v = iface.toLowerCase(Locale.ROOT); + if (v.contains("virtio") && v.contains("scsi")) { + return "VirtIO_SCSI"; + } + if (v.contains("virtio")) { + return "VirtIO"; + } + if 
(v.contains("ide")) { + return "IDE"; + } + if (v.contains("sata")) { + return "SATA"; + } + return iface; + } + + private static String mapOvfDiskFormat(String format, String sparse) { + if ("true".equalsIgnoreCase(sparse)) { + return "http://www.vmware.com/specifications/vmdk.html#sparse"; + } + return "http://www.vmware.com/specifications/vmdk.html#sparse"; + } + + private static String mapVolumeFormat(String format) { + if (StringUtils.isBlank(format)) { + return "RAW"; + } + String f = format.toLowerCase(Locale.ROOT); + if (f.contains("cow") || f.contains("qcow")) { + return "COW"; + } + if (f.contains("raw")) { + return "RAW"; + } + return format.toUpperCase(Locale.ROOT); + } + + private static String mapVolumeType(String sparse) { + return "true".equalsIgnoreCase(sparse) ? "Sparse" : "Preallocated"; + } + + private static String mapBalloonEnabled(Vm vm) { + if (vm.getMemoryPolicy() == null || vm.getMemoryPolicy().getBallooning() == null) { + return "true"; + } + return "true".equalsIgnoreCase(vm.getMemoryPolicy().getBallooning()) ? "true" : "false"; + } + + private static int mapNicResourceSubType(String iface) { + if (StringUtils.isBlank(iface)) { + return 3; + } + String v = iface.toLowerCase(Locale.ROOT); + if (v.contains("virtio")) { + return 3; + } + return 3; + } + + private static String booleanString(String v, String def) { + if (StringUtils.isBlank(v)) { + return def; + } + if ("true".equalsIgnoreCase(v)) { + return "true"; + } + if ("false".equalsIgnoreCase(v)) { + return "false"; + } + return def; + } + + private static String firstNonBlank(String... vals) { + for (String v : vals) { + if (StringUtils.isNotBlank(v)) { + return v; + } + } + return ""; + } + + private static String defaultString(String s) { + return s == null ? 
"" : s; + } + + private static long parseLong(String s, long def) { + if (StringUtils.isBlank(s)) { + return def; + } + try { + return Long.parseLong(s); + } catch (Exception ignored) { + return def; + } + } + + private static long bytesToGibCeil(long bytes) { + if (bytes <= 0) { + return 0; + } + final long gib = 1024L * 1024L * 1024L; + return (bytes + gib - 1) / gib; + } + + private static String formatDate(long epochMillis) { + return OVIRT_DTF.get().format(new Date(epochMillis)); + } + + private static String escapeText(String s) { + if (s == null) { + return ""; + } + return s.replace("&", "&") + .replace("<", "<") + .replace(">", ">") + .replace("\"", """) + .replace("'", "'"); + } + + private static String escapeAttr(String s) { + return escapeText(s); + } + + protected static class OvfNamespaceContext implements NamespaceContext { + @Override + public String getNamespaceURI(String prefix) { + if ("ovf".equals(prefix)) return NS_OVF; + if ("rasd".equals(prefix)) return NS_RASD; + if ("vssd".equals(prefix)) return NS_VSSD; + if ("xsi".equals(prefix)) return NS_XSI; + return XMLConstants.NULL_NS_URI; + } + @Override + public String getPrefix(String namespaceURI) { + return null; + } + @Override + public java.util.Iterator getPrefixes(String namespaceURI) { + return null; + } + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ProductInfo.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ProductInfo.java new file mode 100644 index 000000000000..7f696a309798 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ProductInfo.java @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class ProductInfo { + + private String instanceId; + public String name; + public Version version; + + public String getInstanceId() { + return instanceId; + } + + public void setInstanceId(String instanceId) { + this.instanceId = instanceId; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public Version getVersion() { + return version; + } + + public void setVersion(Version version) { + this.version = version; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Ref.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Ref.java new file mode 100644 index 000000000000..4eefbde8ebf6 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Ref.java @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class Ref extends BaseDto { + + public static Ref of(final String href, final String id) { + Ref ref = new Ref(); + ref.setHref(href); + ref.setId(id); + return ref; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ReportedDevice.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ReportedDevice.java new file mode 100644 index 000000000000..a925d6ec4450 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ReportedDevice.java @@ -0,0 +1,84 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +public class ReportedDevice extends BaseDto { + private String comment; + private String description; + private NamedList ips; + private Mac Mac; + private String name; + private String type; + private Vm vm; + + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public NamedList getIps() { + return ips; + } + + public void setIps(NamedList ips) { + this.ips = ips; + } + + public Mac getMac() { + return Mac; + } + + public void setMac(Mac mac) { + Mac = mac; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public Vm getVm() { + return vm; + } + + public void setVm(Vm vm) { + this.vm = vm; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ResourceAction.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ResourceAction.java new file mode 100644 index 000000000000..ed6c39240369 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/ResourceAction.java @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under 
one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +public class ResourceAction extends BaseDto { + private Ref job; + private String status; + + public Ref getJob() { + return job; + } + + public void setJob(Ref job) { + this.job = job; + } + + public String getStatus() { + return status; + } + + public void setStatus(String status) { + this.status = status; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Snapshot.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Snapshot.java new file mode 100644 index 000000000000..616e6317d90d --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Snapshot.java @@ -0,0 +1,105 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public class Snapshot extends BaseDto { + + // epoch millis + private Long date; + private String persistMemorystate; + private String snapshotStatus; + private String snapshotType; + private NamedList actions; + private String description; + @JacksonXmlElementWrapper(useWrapping = false) + private List link; + private Vm vm; + + public Snapshot() { + } + + public Long getDate() { + return date; + } + + public void setDate(final Long date) { + this.date = date; + } + + public String getPersistMemorystate() { + return persistMemorystate; + } + + public void setPersistMemorystate(final String persistMemorystate) { + this.persistMemorystate = persistMemorystate; + } + + public String getSnapshotStatus() { + return snapshotStatus; + } + + public void setSnapshotStatus(final String snapshotStatus) { + this.snapshotStatus = snapshotStatus; + } + + public String getSnapshotType() { + return snapshotType; + } + + public void setSnapshotType(final String snapshotType) { + this.snapshotType = snapshotType; + } + + public NamedList getActions() { + return actions; + } + + public void setActions(final NamedList actions) { + this.actions = actions; + } + + public String getDescription() { + return description; + } + + public void setDescription(final String description) { + this.description = description; + 
} + + public List getLink() { + return link; + } + + public void setLink(final List link) { + this.link = link; + } + + public Vm getVm() { + return vm; + } + + public void setVm(Vm vm) { + this.vm = vm; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/SpecialObjects.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/SpecialObjects.java new file mode 100644 index 000000000000..0ed2297eaad9 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/SpecialObjects.java @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class SpecialObjects { + + private Ref blankTemplate; + private Ref rootTag; + + public Ref getBlankTemplate() { + return blankTemplate; + } + + public void setBlankTemplate(Ref blankTemplate) { + this.blankTemplate = blankTemplate; + } + + public Ref getRootTag() { + return rootTag; + } + + public void setRootTag(Ref rootTag) { + this.rootTag = rootTag; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Storage.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Storage.java new file mode 100644 index 000000000000..4631df35ec67 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Storage.java @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class Storage { + + private String type; + private String address; + private String path; + private String mountOptions; + private String nfsVersion; + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public String getAddress() { + return address; + } + + public void setAddress(String address) { + this.address = address; + } + + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + + public String getMountOptions() { + return mountOptions; + } + + public void setMountOptions(String mountOptions) { + this.mountOptions = mountOptions; + } + + public String getNfsVersion() { + return nfsVersion; + } + + public void setNfsVersion(String nfsVersion) { + this.nfsVersion = nfsVersion; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/StorageDomain.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/StorageDomain.java new file mode 100644 index 000000000000..fff9d5f75ce8 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/StorageDomain.java @@ -0,0 +1,236 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class StorageDomain extends BaseDto { + + private String name; + private String description; + private String comment; + private String available; + private String used; + private String committed; + private String blockSize; + private String warningLowSpaceIndicator; + private String criticalSpaceActionBlocker; + private String status; // e.g. 
"unattached" (optional in your first object) + private String type; // data / image / iso / export + private String master; // "true"/"false" + private String backup; // "true"/"false" + private String externalStatus; // "ok" + private String storageFormat; // v5 / v1 + private String discardAfterDelete; + private String wipeAfterDelete; + private String supportsDiscard; + private String supportsDiscardZeroesData; + private Storage storage; + private NamedList dataCenters; + private NamedList actions; + @JacksonXmlElementWrapper(useWrapping = false) + private List link; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + public String getAvailable() { + return available; + } + + public void setAvailable(String available) { + this.available = available; + } + + public String getUsed() { + return used; + } + + public void setUsed(String used) { + this.used = used; + } + + public String getCommitted() { + return committed; + } + + public void setCommitted(String committed) { + this.committed = committed; + } + + public String getBlockSize() { + return blockSize; + } + + public void setBlockSize(String blockSize) { + this.blockSize = blockSize; + } + + public String getWarningLowSpaceIndicator() { + return warningLowSpaceIndicator; + } + + public void setWarningLowSpaceIndicator(String warningLowSpaceIndicator) { + this.warningLowSpaceIndicator = warningLowSpaceIndicator; + } + + public String getCriticalSpaceActionBlocker() { + return criticalSpaceActionBlocker; + } + + public void setCriticalSpaceActionBlocker(String criticalSpaceActionBlocker) { + this.criticalSpaceActionBlocker = criticalSpaceActionBlocker; + } + + public 
String getStatus() { + return status; + } + + public void setStatus(String status) { + this.status = status; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public String getMaster() { + return master; + } + + public void setMaster(String master) { + this.master = master; + } + + public String getBackup() { + return backup; + } + + public void setBackup(String backup) { + this.backup = backup; + } + + public String getExternalStatus() { + return externalStatus; + } + + public void setExternalStatus(String externalStatus) { + this.externalStatus = externalStatus; + } + + public String getStorageFormat() { + return storageFormat; + } + + public void setStorageFormat(String storageFormat) { + this.storageFormat = storageFormat; + } + + public String getDiscardAfterDelete() { + return discardAfterDelete; + } + + public void setDiscardAfterDelete(String discardAfterDelete) { + this.discardAfterDelete = discardAfterDelete; + } + + public String getWipeAfterDelete() { + return wipeAfterDelete; + } + + public void setWipeAfterDelete(String wipeAfterDelete) { + this.wipeAfterDelete = wipeAfterDelete; + } + + public String getSupportsDiscard() { + return supportsDiscard; + } + + public void setSupportsDiscard(String supportsDiscard) { + this.supportsDiscard = supportsDiscard; + } + + public String getSupportsDiscardZeroesData() { + return supportsDiscardZeroesData; + } + + public void setSupportsDiscardZeroesData(String supportsDiscardZeroesData) { + this.supportsDiscardZeroesData = supportsDiscardZeroesData; + } + + public Storage getStorage() { + return storage; + } + + public void setStorage(Storage storage) { + this.storage = storage; + } + + public NamedList getDataCenters() { + return dataCenters; + } + + public void setDataCenters(NamedList dataCenters) { + this.dataCenters = dataCenters; + } + + public NamedList getActions() { + return actions; + } + + public void setActions(NamedList actions) { 
+ this.actions = actions; + } + + public List getLink() { + return link; + } + + public void setLink(List link) { + this.link = link; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/SummaryCount.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/SummaryCount.java new file mode 100644 index 000000000000..ac26619ff026 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/SummaryCount.java @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class SummaryCount { + + private String active; + private String total; + + public SummaryCount() { + } + + public SummaryCount(Integer active, Integer total) { + this.active = String.valueOf(active); + this.total = String.valueOf(total); + } + + public String getActive() { + return active; + } + + public String getTotal() { + return total; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/SupportedVersions.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/SupportedVersions.java new file mode 100644 index 000000000000..26cfff65620e --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/SupportedVersions.java @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.cloudstack.veeam.api.dto;

import java.util.List;

import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * Wrapper element carrying the list of versions supported by this endpoint.
 * Immutable after construction; a null list is omitted from the output.
 */
@JsonInclude(JsonInclude.Include.NON_NULL)
public final class SupportedVersions {

    // Emitted as repeated sibling "version" elements in XML (no wrapping element).
    @JacksonXmlElementWrapper(useWrapping = false)
    private List version;

    public SupportedVersions(final List version) {
        this.version = version;
    }

    public List getVersion() {
        return version;
    }
}
+ +package org.apache.cloudstack.veeam.api.dto; + +public class Tag extends BaseDto { + private String name; + private String description; + private Ref parent; + private Ref vm; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public Ref getParent() { + return parent; + } + + public void setParent(Ref parent) { + this.parent = parent; + } + + public Ref getVm() { + return vm; + } + + public void setVm(Ref vm) { + this.vm = vm; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Topology.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Topology.java new file mode 100644 index 000000000000..fa20db9d658c --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Topology.java @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.api.dto; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class Topology { + public String sockets; + public String cores; + public String threads; + + public Topology() { + } + + public Topology(final Integer sockets, final Integer cores, final Integer threads) { + this.sockets = String.valueOf(sockets); + this.cores = String.valueOf(cores); + this.threads = String.valueOf(threads); + } + + public String getSockets() { + return sockets; + } + + public void setSockets(String sockets) { + this.sockets = sockets; + } + + public String getCores() { + return cores; + } + + public void setCores(String cores) { + this.cores = cores; + } + + public String getThreads() { + return threads; + } + + public void setThreads(String threads) { + this.threads = threads; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Version.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Version.java new file mode 100644 index 000000000000..7b7d80a0f16c --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Version.java @@ -0,0 +1,94 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import org.apache.cloudstack.utils.CloudStackVersion; +import org.apache.cloudstack.veeam.VeeamControlService; +import org.apache.commons.lang3.StringUtils; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public final class Version { + + private String build; + private String fullVersion; + private String major; + private String minor; + private String revision; + + public Version() { + } + + public String getBuild() { + return build; + } + + public void setBuild(String build) { + this.build = build; + } + + public String getFullVersion() { + return fullVersion; + } + + public void setFullVersion(String fullVersion) { + this.fullVersion = fullVersion; + } + + public String getMajor() { + return major; + } + + public void setMajor(String major) { + this.major = major; + } + + public String getMinor() { + return minor; + } + + public void setMinor(String minor) { + this.minor = minor; + } + + public String getRevision() { + return revision; + } + + public void setRevision(String revision) { + this.revision = revision; + } + + public static Version fromPackageAndCSVersion(boolean complete) { + Version version = new Version(); + String packageVersion = VeeamControlService.getPackageVersion(); + if (StringUtils.isNotBlank(packageVersion) && complete) { + version.setFullVersion(packageVersion); + } + CloudStackVersion csVersion = VeeamControlService.getCSVersion(); + if (csVersion == null) { + return version; + } + 
version.setMajor(String.valueOf(csVersion.getMajorRelease())); + version.setMinor(String.valueOf(csVersion.getMinorRelease())); + version.setBuild(String.valueOf(csVersion.getPatchRelease())); + version.setRevision(String.valueOf(csVersion.getSecurityRelease())); + return version; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Vm.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Vm.java new file mode 100644 index 000000000000..1d557d186f08 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/Vm.java @@ -0,0 +1,518 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.cloudstack.veeam.api.dto;

import java.util.List;
import java.util.Map;

import org.apache.cloudstack.api.ApiConstants;

import com.cloud.utils.EnumUtils;
import com.cloud.utils.Pair;
import com.cloud.utils.StringUtils;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement;

/**
 * VM DTO intentionally uses snake_case field names to match the required JSON.
 * Configure Jackson globally with SNAKE_CASE or keep as-is.
 */
@JsonInclude(JsonInclude.Include.NON_NULL)
@JacksonXmlRootElement(localName = "vm")
public final class Vm extends BaseDto {
    private String name;
    private String description;
    private String status; // "up", "down", ...
    private String stopReason; // empty string allowed
    private Long creationTime;
    private Long stopTime; // epoch millis
    private Long startTime; // epoch millis
    private Ref template;
    private Ref originalTemplate;
    private Ref cluster;
    private Ref host;
    private String memory; // bytes
    private MemoryPolicy memoryPolicy;
    private Cpu cpu;
    private Os os;
    private Bios bios;
    private String stateless; // true|false
    private String type; // "server"
    private String origin; // "ovirt"
    private NamedList actions; // actions.link[]
    @JacksonXmlElementWrapper(useWrapping = false)
    private List link;
    private NamedList tags;
    private NamedList diskAttachments;
    private NamedList nics;
    private Initialization initialization;

    private Ref cpuProfile;

    // Always-serialized empty elements expected by oVirt-style consumers.
    public EmptyElement io = new EmptyElement();
    public EmptyElement migration = new EmptyElement();
    public EmptyElement sso = new EmptyElement();
    public EmptyElement usb = new EmptyElement();
    public EmptyElement quota = new EmptyElement();
    public EmptyElement highAvailability = new EmptyElement();
    public EmptyElement largeIcon = new EmptyElement();
    public EmptyElement smallIcon = new EmptyElement();
    public EmptyElement placementPolicy = new EmptyElement();
    public EmptyElement timeZone = new EmptyElement();
    public EmptyElement display = new EmptyElement();

    // CloudStack-specific fields; excluded from serialization via @JsonIgnore getters.
    private String accountId;
    private String affinityGroupId;
    private String userDataId;
    private Map details;

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    public String getStatus() {
        return status;
    }

    public void setStatus(String status) {
        this.status = status;
    }

    public String getStopReason() {
        return stopReason;
    }

    public void setStopReason(String stopReason) {
        this.stopReason = stopReason;
    }

    public Long getCreationTime() {
        return creationTime;
    }

    public void setCreationTime(Long creationTime) {
        this.creationTime = creationTime;
    }

    public Long getStopTime() {
        return stopTime;
    }

    public void setStopTime(Long stopTime) {
        this.stopTime = stopTime;
    }

    public Long getStartTime() {
        return startTime;
    }

    public void setStartTime(Long startTime) {
        this.startTime = startTime;
    }

    public Ref getTemplate() {
        return template;
    }

    public void setTemplate(Ref template) {
        this.template = template;
    }

    public Ref getOriginalTemplate() {
        return originalTemplate;
    }

    public void setOriginalTemplate(Ref originalTemplate) {
        this.originalTemplate = originalTemplate;
    }

    public Ref getCluster() {
        return cluster;
    }

    public void setCluster(Ref cluster) {
        this.cluster = cluster;
    }

    public Ref getHost() {
        return host;
    }

    public void setHost(Ref host) {
        this.host = host;
    }

    public String getMemory() {
        return memory;
    }

    public void setMemory(String memory) {
        this.memory = memory;
    }

    public MemoryPolicy getMemoryPolicy() {
        return memoryPolicy;
    }

    public void setMemoryPolicy(MemoryPolicy memoryPolicy) {
        this.memoryPolicy = memoryPolicy;
    }

    public Cpu getCpu() {
        return cpu;
    }

    public void setCpu(Cpu cpu) {
        this.cpu = cpu;
    }

    public Os getOs() {
        return os;
    }

    public void setOs(Os os) {
        this.os = os;
    }

    public Bios getBios() {
        return bios;
    }

    public void setBios(Bios bios) {
        this.bios = bios;
    }

    public String getStateless() {
        return stateless;
    }

    public void setStateless(String stateless) {
        this.stateless = stateless;
    }

    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getOrigin() {
        return origin;
    }

    public void setOrigin(String origin) {
        this.origin = origin;
    }

    public NamedList getActions() {
        return actions;
    }

    public void setActions(NamedList actions) {
        this.actions = actions;
    }

    public List getLink() {
        return link;
    }

    public void setLink(List link) {
        this.link = link;
    }

    public NamedList getTags() {
        return tags;
    }

    public void setTags(NamedList tags) {
        this.tags = tags;
    }

    public NamedList getDiskAttachments() {
        return diskAttachments;
    }

    public void setDiskAttachments(NamedList diskAttachments) {
        this.diskAttachments = diskAttachments;
    }

    public NamedList getNics() {
        return nics;
    }

    public void setNics(NamedList nics) {
        this.nics = nics;
    }

    public Initialization getInitialization() {
        return initialization;
    }

    public void setInitialization(Initialization initialization) {
        this.initialization = initialization;
    }

    public Ref getCpuProfile() {
        return cpuProfile;
    }

    public void setCpuProfile(Ref cpuProfile) {
        this.cpuProfile = cpuProfile;
    }

    @JsonIgnore
    public String getAccountId() {
        return accountId;
    }

    public void setAccountId(String accountId) {
        this.accountId = accountId;
    }

    @JsonIgnore
    public String getAffinityGroupId() {
        return affinityGroupId;
    }

    public void setAffinityGroupId(String affinityGroupId) {
        this.affinityGroupId = affinityGroupId;
    }

    @JsonIgnore
    public String getUserDataId() {
        return userDataId;
    }

    public void setUserDataId(String userDataId) {
        this.userDataId = userDataId;
    }

    @JsonIgnore
    public Map getDetails() {
        return details;
    }

    public void setDetails(Map details) {
        this.details = details;
    }

    /**
     * BIOS / firmware description. The {@link Type} ordinal is used as the persisted
     * representation (see {@link #getTypeOrdinal()} / {@link #getBiosFromOrdinal(String)}).
     * NOTE(review): persisting ordinals means the enum constant order must never change.
     */
    @JsonInclude(JsonInclude.Include.NON_NULL)
    public static final class Bios {

        public enum Type {
            cluster_default,
            i440fx_sea_bios,
            q35_ovmf,
            q35_sea_bios,
            q35_secure_boot
        }

        private String type; // "uefi" or "bios" or whatever mapping you choose
        private BootMenu bootMenu = new BootMenu();

        public String getType() {
            return type;
        }

        /**
         * Ordinal of the current type; unknown/unset values fall back to
         * {@code q35_sea_bios} (legacy SeaBIOS).
         */
        @JsonIgnore
        public int getTypeOrdinal() {
            Type enumType = EnumUtils.getEnum(Type.class, type, Type.q35_sea_bios);
            return enumType.ordinal();
        }

        public void setType(String type) {
            this.type = type;
        }

        public BootMenu getBootMenu() {
            return bootMenu;
        }

        public void setBootMenu(BootMenu bootMenu) {
            this.bootMenu = bootMenu;
        }

        @JsonInclude(JsonInclude.Include.NON_NULL)
        public static class BootMenu {

            private String enabled;

            public String getEnabled() {
                return enabled;
            }

            public void setEnabled(String enabled) {
                this.enabled = enabled;
            }
        }

        /** Default BIOS: legacy SeaBIOS with the boot menu disabled. */
        public static Bios getDefault() {
            Bios bios = new Bios();
            bios.setType(Type.q35_sea_bios.name());
            BootMenu bootMenu = new BootMenu();
            bootMenu.setEnabled("false");
            bios.setBootMenu(bootMenu);
            return bios;
        }

        /**
         * Maps a CloudStack boot mode onto the BIOS type: SECURE becomes secure boot,
         * any other non-empty mode becomes OVMF (UEFI).
         * NOTE(review): presumably only invoked for UEFI boot types — confirm at callers.
         */
        public static void updateBios(Bios bios, String bootMode) {
            if (StringUtils.isEmpty(bootMode)) {
                return;
            }
            if (ApiConstants.BootMode.SECURE.toString().equals(bootMode)) {
                bios.setType(Type.q35_secure_boot.name());
                return;
            }
            bios.setType(Type.q35_ovmf.name());
        }

        /**
         * Reconstructs a Bios from a persisted {@link Type} ordinal string.
         * Blank or unparseable input, and any ordinal other than the two UEFI
         * variants, yields {@link #getDefault()}.
         */
        public static Bios getBiosFromOrdinal(String bootTypeStr) {
            Bios bios = getDefault();
            if (StringUtils.isEmpty(bootTypeStr)) {
                return bios;
            }
            final int type;
            try {
                type = Integer.parseInt(bootTypeStr);
            } catch (NumberFormatException e) {
                return bios;
            }

            if (type == Type.q35_ovmf.ordinal()) {
                bios.setType(Type.q35_ovmf.name());
            } else if (type == Type.q35_secure_boot.ordinal()) {
                bios.setType(Type.q35_secure_boot.name());
            }
            return bios;
        }

        /**
         * Translates a Bios back into CloudStack (boot type, boot mode):
         * secure boot = (UEFI, SECURE), OVMF = (UEFI, LEGACY), anything else = (BIOS, LEGACY).
         */
        public static Pair<ApiConstants.BootType, ApiConstants.BootMode> retrieveBootOptions(Bios bios) {
            Pair<ApiConstants.BootType, ApiConstants.BootMode> defaultValue =
                new Pair<>(ApiConstants.BootType.BIOS, ApiConstants.BootMode.LEGACY);
            if (bios == null || StringUtils.isEmpty(bios.getType())) {
                return defaultValue;
            }
            if (Type.q35_secure_boot.name().equals(bios.getType())) {
                return new Pair<>(ApiConstants.BootType.UEFI, ApiConstants.BootMode.SECURE);
            }
            if (Type.q35_ovmf.name().equals(bios.getType())) {
                return new Pair<>(ApiConstants.BootType.UEFI, ApiConstants.BootMode.LEGACY);
            }
            return defaultValue;
        }
    }

    /** Memory guarantees/limits and ballooning flag, all rendered as strings. */
    @JsonInclude(JsonInclude.Include.NON_NULL)
    public static final class MemoryPolicy {

        private String guaranteed;
        private String max;
        private String ballooning;

        public String getGuaranteed() {
            return guaranteed;
        }

        public void setGuaranteed(String guaranteed) {
            this.guaranteed = guaranteed;
        }

        public String getMax() {
            return max;
        }

        public void setMax(String max) {
            this.max = max;
        }

        public String getBallooning() {
            return ballooning;
        }

        public void setBallooning(String ballooning) {
            this.ballooning = ballooning;
        }
    }

    /** Guest initialization payload: custom script plus a typed configuration blob. */
    @JsonInclude(JsonInclude.Include.NON_NULL)
    public static class Initialization {

        private String customScript;
        private Configuration configuration;

        public String getCustomScript() {
            return customScript;
        }

        public void setCustomScript(String customScript) {
            this.customScript = customScript;
        }

        public Configuration getConfiguration() {
            return configuration;
        }

        public void setConfiguration(Configuration configuration) {
            this.configuration = configuration;
        }

        @JsonInclude(JsonInclude.Include.NON_NULL)
        public static class Configuration {

            private String data;
            private String type;

            public String getData() {
                return data;
            }

            public void setData(String data) {
                this.data = data;
            }

            public String getType() {
                return type;
            }

            public void setType(String type) {
                this.type = type;
            }
        }
    }

    /** Static factory: a Vm reference carrying only href and id (see BaseDto). */
    public static Vm of(String href, String id) {
        return withHrefAndId(new Vm(), href, id);
    }
}
+ +package org.apache.cloudstack.veeam.api.dto; + +public class VmAction extends ResourceAction { + private Vm vm; + + public Vm getVm() { + return vm; + } + + public void setVm(Vm vm) { + this.vm = vm; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/VnicProfile.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/VnicProfile.java new file mode 100644 index 000000000000..efc42ed1c88a --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/api/dto/VnicProfile.java @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonInclude; + +/** + * oVirt-like vNIC profile element. + * Every vNIC profile MUST reference exactly one network. 
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.cloudstack.veeam.api.dto;

import java.util.List;

import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * oVirt-like vNIC profile element.
 * Every vNIC profile MUST reference exactly one network.
 */
@JsonInclude(JsonInclude.Include.NON_NULL)
public class VnicProfile extends BaseDto {

    private String name;
    private String description;

    // Mandatory reference to the backing network.
    private Ref network;
    // Optional reference to the owning data center.
    private Ref dataCenter;

    private List link;

    public VnicProfile() {
    }

    public Ref getNetwork() {
        return network;
    }

    public void setNetwork(final Ref network) {
        this.network = network;
    }

    public Ref getDataCenter() {
        return dataCenter;
    }

    public void setDataCenter(final Ref dataCenter) {
        this.dataCenter = dataCenter;
    }

    public String getName() {
        return name;
    }

    public void setName(final String name) {
        this.name = name;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(final String description) {
        this.description = description;
    }

    public List getLink() {
        return link;
    }

    public void setLink(final List link) {
        this.link = link;
    }
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.cloudstack.veeam.api.request;

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import javax.servlet.http.HttpServletRequest;

import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.StringUtils;
import org.jetbrains.annotations.NotNull;

/**
 * Pagination and filtering options parsed from an oVirt-style list request
 * (query parameters: all_content, max, follow, search).
 */
public class ListQuery {
    boolean allContent;
    // Page size, from the "max" query parameter; null when absent or unparseable.
    Long max;
    // 1-based page number, from the "page" search term; null when absent or unparseable.
    Long page;
    Map<String, String> search;
    List<String> follow;

    public boolean isAllContent() {
        return allContent;
    }

    public void setAllContent(boolean allContent) {
        this.allContent = allContent;
    }

    public Long getMax() {
        return max;
    }

    public void setMax(Long max) {
        this.max = max;
    }

    public Long getPage() {
        return page;
    }

    public void setPage(Long page) {
        this.page = page;
    }

    public void setSearch(Map<String, String> search) {
        this.search = search;
    }

    /** Splits a comma-separated follow clause into trimmed, non-empty parts (null when blank). */
    public void setFollow(String followStr) {
        if (StringUtils.isBlank(followStr)) {
            this.follow = null;
            return;
        }
        this.follow = Arrays.stream(followStr.split(","))
            .map(String::trim)
            .filter(s -> !s.isEmpty())
            .collect(Collectors.toList());
    }

    /** @return zero-based offset derived from page and max, or null when either is unset. */
    public Long getOffset() {
        if (page == null || max == null) {
            return null;
        }
        return Math.max(0, (page - 1)) * max;
    }

    public Long getLimit() {
        return max;
    }

    public boolean followContains(String part) {
        if (CollectionUtils.isEmpty(follow)) {
            return false;
        }
        return follow.contains(part);
    }

    /** Parses all_content, max, follow and search from the request; invalid numbers are ignored. */
    public static ListQuery fromRequest(HttpServletRequest request) {
        ListQuery query = new ListQuery();
        if (MapUtils.isEmpty(request.getParameterMap())) {
            return query;
        }

        String allContent = request.getParameter("all_content");
        if (StringUtils.isNotBlank(allContent)) {
            query.setAllContent(Boolean.parseBoolean(allContent));
        }
        String max = request.getParameter("max");
        if (StringUtils.isNotBlank(max)) {
            try {
                query.setMax(Long.parseLong(max));
            } catch (NumberFormatException e) {
                // Ignore invalid max and keep default null value.
            }
        }
        String follow = request.getParameter("follow");
        query.setFollow(follow);
        Map<String, String> searchItems = getSearchMap(request.getParameter("search"));
        if (!searchItems.isEmpty()) {
            try {
                // Bug fix: this previously called setMax(...), clobbering the page size and
                // leaving "page" null, so getOffset() always returned null.
                query.setPage(Long.parseLong(searchItems.get("page")));
            } catch (NumberFormatException e) {
                // Ignore invalid or missing page and keep default null value.
            }
            query.setSearch(searchItems);
        }

        return query;
    }

    // Parse search clause. Only keep items which use simple '=' operator, and ignore others. For example:
    // name=myvm and status=up --> {name=myvm, status=up}
    // name=myvm and status!=down --> {name=myvm} (ignore status!=down because it uses '!=' operator)
    @NotNull
    private static Map<String, String> getSearchMap(String searchClause) {
        Map<String, String> searchItems = new LinkedHashMap<>();
        if (StringUtils.isBlank(searchClause)) {
            return searchItems;
        }
        String[] terms = searchClause.trim().split("(?i)\\s+and\\s+");
        for (String term : terms) {
            if (term == null) {
                continue;
            }
            String trimmedTerm = term.trim();
            if (trimmedTerm.isEmpty()) {
                continue;
            }

            // Require exactly one '=' that is not part of '!=', '<=' or '>='.
            int eqIdx = trimmedTerm.indexOf('=');
            if (eqIdx <= 0 || eqIdx != trimmedTerm.lastIndexOf('=')) {
                continue;
            }
            char prev = trimmedTerm.charAt(eqIdx - 1);
            if (prev == '!' || prev == '<' || prev == '>') {
                continue;
            }

            String key = trimmedTerm.substring(0, eqIdx).trim();
            String value = trimmedTerm.substring(eqIdx + 1).trim();
            if (!key.isEmpty() && !value.isEmpty()) {
                searchItems.put(key, value);
            }
        }
        return searchItems;
    }
}
+ +package org.apache.cloudstack.veeam.filter; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.List; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.cloudstack.veeam.VeeamControlService; +import org.apache.commons.collections.CollectionUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import com.cloud.utils.net.NetUtils; + +public class AllowedClientCidrsFilter implements Filter { + + private static final Logger LOGGER = LogManager.getLogger(AllowedClientCidrsFilter.class); + + private final VeeamControlService veeamControlService; + + public AllowedClientCidrsFilter(VeeamControlService veeamControlService) { + this.veeamControlService = veeamControlService; + } + + @Override + public void init(FilterConfig filterConfig) { + // no-op + } + + @Override + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { + if (!(request instanceof HttpServletRequest) || !(response instanceof HttpServletResponse)) { + chain.doFilter(request, response); + return; + } + + final HttpServletRequest req = (HttpServletRequest) request; + final HttpServletResponse resp = (HttpServletResponse) response; + + if (veeamControlService == null) { + LOGGER.warn("Failed to inject VeeamControlService, allowing request by default"); + chain.doFilter(request, response); + return; + } + + final List cidrList = veeamControlService.getAllowedClientCidrs(); + if (CollectionUtils.isEmpty(cidrList)) { + chain.doFilter(request, response); + return; + } + + final String remoteAddr = req.getRemoteAddr(); + try { + final InetAddress clientIp = 
InetAddress.getByName(remoteAddr); + final boolean allowed = NetUtils.isIpInCidrList(clientIp, cidrList.toArray(new String[0])); + if (!allowed) { + LOGGER.warn("Rejected request from client IP {} not in allowed CIDRs {}", remoteAddr, cidrList); + resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Forbidden"); + return; + } + } catch (Exception e) { + LOGGER.warn("Rejected request failed to parse client IP {}: {}", remoteAddr, e.getMessage()); + resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Forbidden"); + return; + } + + chain.doFilter(request, response); + } + + @Override + public void destroy() { + // no-op + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/filter/BasicAuthFilter.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/filter/BasicAuthFilter.java new file mode 100644 index 000000000000..22f76b8058e1 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/filter/BasicAuthFilter.java @@ -0,0 +1,110 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.filter; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.cloudstack.veeam.VeeamControlService; +import org.apache.cloudstack.veeam.VeeamControlServlet; + +public class BasicAuthFilter implements Filter { + + @Override + public void init(FilterConfig filterConfig) throws ServletException { + // no-op + } + + @Override + public void destroy() { + // no-op + } + + @Override + public void doFilter( + ServletRequest request, + ServletResponse response, + FilterChain chain + ) throws IOException, ServletException { + + HttpServletRequest req = (HttpServletRequest) request; + HttpServletResponse resp = (HttpServletResponse) response; + + String expectedUser = VeeamControlService.Username.value(); + String expectedPass = VeeamControlService.Password.value(); + + String auth = req.getHeader("Authorization"); + if (auth == null || !auth.regionMatches(true, 0, "Basic ", 0, 6)) { + unauthorized(resp); + return; + } + + String decoded; + try { + decoded = new String( + Base64.getDecoder().decode(auth.substring(6)), + StandardCharsets.UTF_8 + ); + } catch (IllegalArgumentException e) { + unauthorized(resp); + return; + } + + int idx = decoded.indexOf(':'); + if (idx <= 0) { + unauthorized(resp); + return; + } + + String user = decoded.substring(0, idx); + String pass = decoded.substring(idx + 1); + + if (!constantTimeEquals(user, expectedUser) + || !constantTimeEquals(pass, expectedPass)) { + unauthorized(resp); + return; + } + + chain.doFilter(request, response); + } + + private void unauthorized(HttpServletResponse resp) { + throw 
VeeamControlServlet.Error.unauthorized("Unauthorized"); + } + + private boolean constantTimeEquals(String a, String b) { + byte[] x = a.getBytes(StandardCharsets.UTF_8); + byte[] y = b.getBytes(StandardCharsets.UTF_8); + if (x.length != y.length) return false; + int r = 0; + for (int i = 0; i < x.length; i++) { + r |= x[i] ^ y[i]; + } + return r == 0; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/filter/BearerOrBasicAuthFilter.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/filter/BearerOrBasicAuthFilter.java new file mode 100644 index 000000000000..e86bd6a2a3ef --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/filter/BearerOrBasicAuthFilter.java @@ -0,0 +1,220 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.filter; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.util.Base64; +import java.util.Map; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.cloudstack.veeam.VeeamControlService; +import org.apache.cloudstack.veeam.sso.SsoService; +import org.apache.cloudstack.veeam.utils.DataUtil; +import org.apache.cloudstack.veeam.utils.JwtUtil; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; + +public class BearerOrBasicAuthFilter implements Filter { + private static final ObjectMapper JSON_MAPPER = new ObjectMapper(); + + private final VeeamControlService veeamControlService; + + public BearerOrBasicAuthFilter(VeeamControlService veeamControlService) { + this.veeamControlService = veeamControlService; + } + + @Override + public void init(FilterConfig filterConfig) { + } + + @Override + public void destroy() { + } + + @Override + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { + + final HttpServletRequest req = (HttpServletRequest) request; + final HttpServletResponse resp = (HttpServletResponse) response; + + final String auth = req.getHeader("Authorization"); + if (auth != null && auth.regionMatches(true, 0, "Bearer ", 0, 7)) { + final String token = auth.substring(7).trim(); + if (token.isEmpty()) { + unauthorized(req, resp, "invalid_token", "Missing Bearer token"); + return; + } + if (!verifyJwtHs256(token)) { + unauthorized(req, resp, "invalid_token", "Invalid or expired token"); + 
return; + } + chain.doFilter(request, response); + return; + } + + // Optional fallback: Basic (handy for manual testing). + if (auth != null && auth.regionMatches(true, 0, "Basic ", 0, 6)) { + if (!verifyBasic(auth.substring(6))) { + unauthorized(req, resp, "invalid_client", "Invalid Basic credentials"); + return; + } + chain.doFilter(request, response); + return; + } + + unauthorized(req, resp, "invalid_token", "Missing Authorization"); + } + + private boolean verifyBasic(String b64) { + final String decoded; + try { + decoded = new String(Base64.getDecoder().decode(b64), StandardCharsets.UTF_8); + } catch (IllegalArgumentException e) { + return false; + } + + final int idx = decoded.indexOf(':'); + if (idx <= 0) return false; + + final String user = decoded.substring(0, idx); + final String pass = decoded.substring(idx + 1); + + return veeamControlService != null && veeamControlService.validateCredentials(user, pass); + } + + /** + * Minimal JWT verification: + * - HS256 signature + * - "iss" matches + * - "exp" not expired + * - "scope" contains REQUIRED_SCOPES (space-separated) + */ + private boolean verifyJwtHs256(String token) { + final String[] parts = token.split("\\."); + if (parts.length != 3) return false; + + final String headerB64 = parts[0]; + final String payloadB64 = parts[1]; + final String sigB64 = parts[2]; + + final byte[] expectedSig; + try { + expectedSig = JwtUtil.hmacSha256((headerB64 + "." 
+ payloadB64).getBytes(StandardCharsets.UTF_8), + SsoService.HMAC_SECRET.getBytes(StandardCharsets.UTF_8)); + } catch (Exception e) { + return false; + } + + final byte[] providedSig; + try { + providedSig = Base64.getUrlDecoder().decode(sigB64); + } catch (IllegalArgumentException e) { + return false; + } + + if (!DataUtil.constantTimeEquals(expectedSig, providedSig)) return false; + + Map payloadMap; + try { + String payloadJson = new String(Base64.getUrlDecoder().decode(payloadB64), StandardCharsets.UTF_8); + payloadMap = JSON_MAPPER.readValue( + payloadJson, + new TypeReference<>() { + } + ); + } catch (IllegalArgumentException | JsonProcessingException e) { + return false; + } + + final String iss = (String) payloadMap.get("iss"); + final String scope = (String) payloadMap.get("scope"); + final Object expObj = payloadMap.get("exp"); + Long exp = null; + if (expObj instanceof Number) { + exp = ((Number) expObj).longValue(); + } else if (expObj instanceof String) { + try { + exp = Long.parseLong((String) expObj); + } catch (NumberFormatException ignored) { + } + } + + if (!JwtUtil.ISSUER.equals(iss)) { + return false; + } + if (exp == null || Instant.now().getEpochSecond() >= exp) { + return false; + } + return scope != null && hasRequiredScopes(scope); + } + + private static boolean hasRequiredScopes(String scope) { + String[] scopes = scope.split("\\s+"); + for (String required : SsoService.REQUIRED_SCOPES) { + if (!hasScope(scopes, required)) return false; + } + return true; + } + + private static boolean hasScope(String[] scopes, String required) { + for (String scope : scopes) { + if (scope.equals(required)) { + return true; + } + } + return false; + } + + private static void unauthorized(HttpServletRequest req, HttpServletResponse resp, + String error, String desc) throws IOException { + resp.resetBuffer(); + resp.setStatus(HttpServletResponse.SC_UNAUTHORIZED); + + // Helpful for OAuth clients: + resp.setHeader("WWW-Authenticate", + "Bearer realm=\"Veeam 
Integration\", error=\"" + DataUtil.jsonEscape(error) + + "\", error_description=\"" + DataUtil.jsonEscape(desc) + "\""); + + final String accept = req.getHeader("Accept"); + final boolean wantsJson = accept != null && accept.toLowerCase().contains("application/json"); + + resp.setCharacterEncoding("UTF-8"); + if (wantsJson) { + resp.setContentType("application/json; charset=UTF-8"); + resp.getWriter().write("{\"error\":\"" + DataUtil.jsonEscape(error) + + "\",\"error_description\":\"" + DataUtil.jsonEscape(desc) + "\"}"); + } else { + resp.setContentType("text/html; charset=UTF-8"); + resp.getWriter().write("ErrorUnauthorized"); + } + resp.getWriter().flush(); + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/services/PkiResourceRouteHandler.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/services/PkiResourceRouteHandler.java new file mode 100644 index 000000000000..e3373d5edf58 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/services/PkiResourceRouteHandler.java @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.veeam.services; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.cloudstack.ca.CAManager; +import org.apache.cloudstack.veeam.RouteHandler; +import org.apache.cloudstack.veeam.VeeamControlServlet; +import org.apache.cloudstack.veeam.utils.Negotiation; +import org.apache.commons.lang3.StringUtils; + +import com.cloud.utils.component.ManagerBase; + +public class PkiResourceRouteHandler extends ManagerBase implements RouteHandler { + private static final String BASE_ROUTE = "/services/pki-resource"; + private static final String RESOURCE_KEY = "resource"; + private static final String RESOURCE_VALUE = "ca-certificate"; + private static final String FORMAT_KEY = "format"; + private static final String FORMAT_VALUE = "X509-PEM-CA"; + private static final Charset OUTPUT_CHARSET = StandardCharsets.ISO_8859_1; + + @Inject + CAManager caManager; + + @Override + public boolean canHandle(String method, String path) { + return getSanitizedPath(path).startsWith(BASE_ROUTE); + } + + @Override + public void handle(HttpServletRequest req, HttpServletResponse resp, String path, Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + final String sanitizedPath = getSanitizedPath(path); + if (sanitizedPath.equals(BASE_ROUTE) && "GET".equalsIgnoreCase(req.getMethod())) { + handleGet(req, resp, outFormat, io); + return; + } + + io.notFound(resp, null, outFormat); + } + + protected void handleGet(HttpServletRequest req, HttpServletResponse resp, + Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + try { + final String resource = req.getParameter(RESOURCE_KEY); + final String format = req.getParameter(FORMAT_KEY); + + if (StringUtils.isNotBlank(resource) && 
!RESOURCE_VALUE.equals(resource)) { + resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Unsupported resource"); + return; + } + + if (StringUtils.isNotBlank(format) && !FORMAT_VALUE.equals(format)) { + resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Unsupported format"); + return; + } + + byte[] pemBytes = returnCACertificate(); + if (pemBytes.length == 0) { + resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "No certificate data available"); + return; + } + resp.setStatus(HttpServletResponse.SC_OK); + resp.setHeader("Cache-Control", "no-store"); + resp.setContentType("application/x-x509-ca-cert; charset=" + OUTPUT_CHARSET.name()); + resp.setHeader("Content-Disposition", + "attachment; filename=\"pki-resource.cer\""); + resp.setContentLength(pemBytes.length); + + try (OutputStream os = resp.getOutputStream()) { + os.write(pemBytes); + } + } catch (IOException e) { + String msg = "Failed to retrieve server CA certificate"; + logger.error(msg, e); + resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, msg); + } + } + + private byte[] returnCACertificate() throws IOException { + String tlsCaCert = caManager.getCaCertificate(null); + return tlsCaCert.getBytes(OUTPUT_CHARSET); + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/sso/SsoService.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/sso/SsoService.java new file mode 100644 index 000000000000..3f1735952012 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/sso/SsoService.java @@ -0,0 +1,137 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.sso; + +import java.io.IOException; +import java.time.Instant; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.cloudstack.veeam.RouteHandler; +import org.apache.cloudstack.veeam.VeeamControlService; +import org.apache.cloudstack.veeam.VeeamControlServlet; +import org.apache.cloudstack.veeam.utils.JwtUtil; +import org.apache.cloudstack.veeam.utils.Negotiation; +import org.apache.commons.lang3.StringUtils; + +import com.cloud.utils.component.ManagerBase; + +public class SsoService extends ManagerBase implements RouteHandler { + private static final String BASE_ROUTE = "/sso"; + private static final long DEFAULT_TTL_SECONDS = 3600; + public static final List REQUIRED_SCOPES = List.of("ovirt-app-admin", "ovirt-app-portal"); + public static final String HMAC_SECRET = "change-this-super-secret-key-change-this"; + + @Inject + VeeamControlService veeamControlService; + + @Override + public boolean canHandle(String method, String path) { + return getSanitizedPath(path).startsWith(BASE_ROUTE); + } + + @Override + public void handle(HttpServletRequest req, HttpServletResponse resp, String path, Negotiation.OutFormat outFormat, + VeeamControlServlet io) throws IOException { + final String 
sanitizedPath = getSanitizedPath(path); + if (sanitizedPath.equals(BASE_ROUTE + "/oauth/token")) { + handleToken(req, resp, outFormat, io); + return; + } + + io.notFound(resp, null, outFormat); + } + + protected void handleToken(HttpServletRequest req, HttpServletResponse resp, + Negotiation.OutFormat outFormat, VeeamControlServlet io) throws IOException { + + if (!"POST".equalsIgnoreCase(req.getMethod())) { + resp.setHeader("Allow", "POST"); + io.getWriter().write(resp, HttpServletResponse.SC_METHOD_NOT_ALLOWED, + Map.of("error", "method_not_allowed", + "message", "token endpoint requires POST"), outFormat); + return; + } + + final String grantType = trimToNull(req.getParameter("grant_type")); + final String scope = trimToNull(req.getParameter("scope")); + final String username = trimToNull(req.getParameter("username")); + final String password = trimToNull(req.getParameter("password")); + + if (grantType == null) { + io.getWriter().write(resp, HttpServletResponse.SC_BAD_REQUEST, + Map.of("error", "invalid_request", + "error_description", "Missing parameter: grant_type"), outFormat); + return; + } + if (!"password".equals(grantType)) { + io.getWriter().write(resp, HttpServletResponse.SC_BAD_REQUEST, + Map.of("error", "unsupported_grant_type", + "error_description", "Only grant_type=password is supported"), outFormat); + return; + } + if (username == null || password == null) { + io.getWriter().write(resp, HttpServletResponse.SC_BAD_REQUEST, + Map.of("error", "invalid_request", + "error_description", "Missing username/password"), outFormat); + return; + } + + if (!veeamControlService.validateCredentials(username, password)) { + io.getWriter().write(resp, HttpServletResponse.SC_UNAUTHORIZED, + Map.of("error", "invalid_grant", + "error_description", "Invalid credentials"), outFormat); + return; + } + + final String effectiveScope = (scope == null) ? 
StringUtils.join(REQUIRED_SCOPES, " ") : scope; + + final long ttl = DEFAULT_TTL_SECONDS; + long nowMillis = Instant.now().toEpochMilli(); + long expMillis = nowMillis + ttl * 1000L; + final String token; + try { + token = JwtUtil.issueHs256Jwt(username, effectiveScope, ttl, HMAC_SECRET); + } catch (Exception e) { + io.getWriter().write(resp, HttpServletResponse.SC_INTERNAL_SERVER_ERROR, + Map.of("error", "server_error", + "error_description", "Failed to issue token"), outFormat); + return; + } + + final Map payload = new HashMap<>(); + payload.put("access_token", token); + payload.put("token_type", "bearer"); + payload.put("expires_in", ttl); + payload.put("exp", expMillis); + payload.put("scope", effectiveScope); + + io.getWriter().write(resp, HttpServletResponse.SC_OK, payload, outFormat); + } + + private static String trimToNull(String s) { + if (s == null) return null; + s = s.trim(); + return s.isEmpty() ? null : s; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/utils/DataUtil.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/utils/DataUtil.java new file mode 100644 index 000000000000..9e0eef768d03 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/utils/DataUtil.java @@ -0,0 +1,44 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.utils; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +public class DataUtil { + + public static String b64Url(byte[] in) { + return Base64.getUrlEncoder().withoutPadding().encodeToString(in); + } + + public static String jsonEscape(String s) { + return s == null ? "" : s.replace("\\", "\\\\").replace("\"", "\\\""); + } + + public static boolean constantTimeEquals(String a, String b) { + if (a == null || b == null) return false; + return constantTimeEquals(a.getBytes(StandardCharsets.UTF_8), b.getBytes(StandardCharsets.UTF_8)); + } + + public static boolean constantTimeEquals(byte[] x, byte[] y) { + if (x.length != y.length) return false; + int r = 0; + for (int i = 0; i < x.length; i++) r |= x[i] ^ y[i]; + return r == 0; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/utils/JwtUtil.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/utils/JwtUtil.java new file mode 100644 index 000000000000..a862c706b694 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/utils/JwtUtil.java @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.time.Instant;

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

/**
 * Minimal HS256 JWT issuer: builds the compact header.payload.signature form
 * with an HMAC-SHA256 signature. Self-contained (stdlib only).
 */
public class JwtUtil {
    public static final String ALGORITHM = "HmacSHA256";
    public static final String ISSUER = "veeam-control";

    /**
     * Issues a compact HS256-signed JWT with iss/sub/scope/iat/exp claims.
     *
     * @param subject    the "sub" claim (typically the authenticated username)
     * @param scope      space-separated scopes for the "scope" claim
     * @param ttlSeconds token lifetime; "exp" = now + ttlSeconds
     * @param secret     HMAC key material (UTF-8 bytes of this string)
     * @throws GeneralSecurityException if the HMAC cannot be computed
     *         (narrowed from the previous raw {@code throws Exception})
     */
    public static String issueHs256Jwt(String subject, String scope, long ttlSeconds, String secret)
            throws GeneralSecurityException {
        final long issuedAt = Instant.now().getEpochSecond();
        final long expiresAt = issuedAt + ttlSeconds;

        final String headerJson = "{\"alg\":\"HS256\",\"typ\":\"JWT\"}";
        final String payloadJson =
                "{" +
                "\"iss\":\"" + escapeJson(ISSUER) + "\"," +
                "\"sub\":\"" + escapeJson(subject) + "\"," +
                "\"scope\":\"" + escapeJson(scope) + "\"," +
                "\"iat\":" + issuedAt + "," +
                "\"exp\":" + expiresAt +
                "}";

        final String signingInput = base64Url(headerJson.getBytes(StandardCharsets.UTF_8))
                + "." + base64Url(payloadJson.getBytes(StandardCharsets.UTF_8));
        final byte[] signature = hmacSha256(signingInput.getBytes(StandardCharsets.UTF_8),
                secret.getBytes(StandardCharsets.UTF_8));
        return signingInput + "." + base64Url(signature);
    }

    /**
     * Computes HMAC-SHA256 of {@code data} with {@code key}.
     *
     * @throws GeneralSecurityException on an unavailable algorithm or invalid key
     */
    public static byte[] hmacSha256(byte[] data, byte[] key) throws GeneralSecurityException {
        final Mac mac = Mac.getInstance(ALGORITHM);
        mac.init(new SecretKeySpec(key, ALGORITHM));
        return mac.doFinal(data);
    }

    // Unpadded URL-safe Base64 (the JWT "base64url" encoding).
    private static String base64Url(byte[] bytes) {
        return java.util.Base64.getUrlEncoder().withoutPadding().encodeToString(bytes);
    }

    // Escapes backslashes and quotes for embedding in a JSON string; null maps to "".
    private static String escapeJson(String s) {
        return s == null ? "" : s.replace("\\", "\\\\").replace("\"", "\\\"");
    }
}
+ +package org.apache.cloudstack.veeam.utils; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.PropertyNamingStrategies; +import com.fasterxml.jackson.dataformat.xml.XmlMapper; + +public class Mapper { + private final ObjectMapper json; + private final XmlMapper xml; + + public Mapper() { + this.json = new ObjectMapper(); + this.xml = new XmlMapper(); + + configure(json); + configure(xml); + } + + private static void configure(final ObjectMapper mapper) { + mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL); + mapper.setPropertyNamingStrategy(PropertyNamingStrategies.SNAKE_CASE); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + // If you ever add enums etc: + // mapper.enable(SerializationFeature.WRITE_ENUMS_USING_TO_STRING); + // mapper.enable(DeserializationFeature.READ_ENUMS_USING_TO_STRING); + } + + public String toJson(final Object value) throws JsonProcessingException { + return json.writeValueAsString(value); + } + + public String toXml(final Object value) throws JsonProcessingException { + return xml.writeValueAsString(value); + } + + public ObjectMapper jsonMapper() { + return json; + } + + public XmlMapper xmlMapper() { + return xml; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/utils/Negotiation.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/utils/Negotiation.java new file mode 100644 index 000000000000..1c82216f113b --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/utils/Negotiation.java @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.utils; + + +import javax.servlet.http.HttpServletRequest; + +public final class Negotiation { + + public enum OutFormat { XML, JSON } + + public static OutFormat responseFormat(HttpServletRequest req) { + String accept = req.getHeader("Accept"); + if (accept == null || accept.isBlank() || accept.contains("*/*")) { + return OutFormat.XML; + } + accept = accept.toLowerCase(); + if (accept.contains("application/json")) return OutFormat.JSON; + if (accept.contains("application/xml") || accept.contains("text/xml")) { + return OutFormat.XML; + } + return OutFormat.XML; + } + + public static String contentType(OutFormat fmt) { + return fmt == OutFormat.JSON + ? "application/json" + : "application/xml"; + } +} diff --git a/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/utils/PathUtil.java b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/utils/PathUtil.java new file mode 100644 index 000000000000..8fe2a48c7029 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/java/org/apache/cloudstack/veeam/utils/PathUtil.java @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
package org.apache.cloudstack.veeam.utils;

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.lang3.StringUtils;

import com.cloud.utils.UuidUtils;

/**
 * Helpers for parsing REST-style request paths of the form
 * "{baseRoute}/{id}[/subPath...]".
 */
public class PathUtil {

    // When true, the first path segment must be a UUID to be accepted as an
    // id; currently disabled so opaque (non-UUID) ids are tolerated too.
    private static final boolean CONSIDER_ONLY_UUID_AS_ID = false;

    /**
     * Splits the part of {@code path} that follows {@code baseRoute} into its
     * non-blank segments: the id first, then any sub-path components.
     *
     * @param path      the full request path; may be null or blank
     * @param baseRoute the route prefix to strip; a trailing slash is tolerated
     * @return the typed segment list, or {@code null} when the path is blank,
     *         has no "/{id}" part after the base route, or (when UUID
     *         validation is enabled) the first segment is not a UUID
     */
    public static List<String> extractIdAndSubPath(final String path, final String baseRoute) {

        if (StringUtils.isBlank(path)) {
            return null;
        }

        // Remove base route (be tolerant of trailing slash in baseRoute)
        String rest = path;
        if (StringUtils.isNotBlank(baseRoute)) {
            final String normalizedBase = baseRoute.endsWith("/") && baseRoute.length() > 1
                    ? baseRoute.substring(0, baseRoute.length() - 1)
                    : baseRoute;
            if (rest.startsWith(normalizedBase)) {
                rest = rest.substring(normalizedBase.length());
            }
        }

        // Expect "/{id}" or "/{id}/..." (no empty segments)
        if (StringUtils.isBlank(rest) || !rest.startsWith("/")) {
            return null; // /api/datacenters (no id) or invalid format
        }

        rest = rest.substring(1); // remove leading '/'

        if (StringUtils.isBlank(rest)) {
            return null;
        }

        final String[] parts = rest.split("/", -1);

        // Collect non-blank segments (this also collapses duplicate slashes)
        final List<String> validParts = new ArrayList<>();
        for (final String part : parts) {
            if (StringUtils.isNotBlank(part)) {
                validParts.add(part);
            }
        }

        // Validate first segment is a UUID (only when the flag is enabled)
        if (validParts.isEmpty() || (CONSIDER_ONLY_UUID_AS_ID && !UuidUtils.isUuid(validParts.get(0)))) {
            return null;
        }

        return validParts;
    }
}
package org.apache.cloudstack.veeam.utils;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import javax.servlet.http.HttpServletResponse;

import org.apache.cloudstack.veeam.api.dto.Fault;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

/**
 * Serializes response bodies in the negotiated format (XML or JSON) and
 * writes them to the servlet response.
 */
public final class ResponseWriter {
    private static final Logger LOGGER = LogManager.getLogger(ResponseWriter.class);

    private final Mapper mapper;

    public ResponseWriter(final Mapper mapper) {
        this.mapper = mapper;
    }

    /**
     * Writes {@code body} serialized in {@code fmt} with the given HTTP
     * status. A null body produces an empty (zero-length) response.
     * Serialization failures are logged and answered with a plain-text 500.
     */
    public void write(final HttpServletResponse resp, final int status, final Object body, final Negotiation.OutFormat fmt)
            throws IOException {

        resp.setStatus(status);

        if (body == null) {
            resp.setContentLength(0);
            return;
        }

        final String payload;
        final String contentType;

        try {
            if (fmt == Negotiation.OutFormat.XML) {
                contentType = "application/xml";
                payload = mapper.toXml(body);
            } else {
                contentType = "application/json";
                payload = mapper.toJson(body);
            }
        } catch (Exception e) {
            // Last-resort fallback: log the cause (previously swallowed
            // silently) before answering with a generic 500.
            LOGGER.error("Failed to serialize {} response body of type {}", fmt, body.getClass().getName(), e);
            resp.setStatus(500);
            resp.setHeader("Content-Type", "text/plain");
            resp.getWriter().write("Internal Server Error");
            return;
        }

        // Full payloads can be large and may carry sensitive data; keep them
        // out of the default log level.
        LOGGER.debug("Writing response: {}\n{}", status, payload);

        resp.setCharacterEncoding(StandardCharsets.UTF_8.name());
        resp.setHeader("Content-Type", contentType);
        resp.getWriter().write(payload);
    }

    /** Writes a {@link Fault} body with the given status and format. */
    public void writeFault(final HttpServletResponse resp, final int status, final String reason, final String detail, final Negotiation.OutFormat fmt)
            throws IOException {
        // The previous if/else had two byte-identical branches; a single
        // call is equivalent for either format.
        write(resp, status, new Fault(reason, detail), fmt);
    }
}
b/plugins/integrations/veeam-control-service/src/main/resources/META-INF/cloudstack/veeam-control-service/module.properties new file mode 100644 index 000000000000..453e40dee69d --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/resources/META-INF/cloudstack/veeam-control-service/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=veeam-control-service +parent=backup diff --git a/plugins/integrations/veeam-control-service/src/main/resources/META-INF/cloudstack/veeam-control-service/spring-veeam-control-service-context.xml b/plugins/integrations/veeam-control-service/src/main/resources/META-INF/cloudstack/veeam-control-service/spring-veeam-control-service-context.xml new file mode 100644 index 000000000000..83d100ec76e8 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/resources/META-INF/cloudstack/veeam-control-service/spring-veeam-control-service-context.xml @@ -0,0 +1,66 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/plugins/integrations/veeam-control-service/src/main/resources/test-ovf.xml b/plugins/integrations/veeam-control-service/src/main/resources/test-ovf.xml new file mode 100644 index 000000000000..53688f0b82ef --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/main/resources/test-ovf.xml @@ -0,0 +1,198 @@ + + + + + + + + List of networks + + + + +
+ List of Virtual Disks + +
+
+ CloudStack specific metadata + + 644c6f0d-f6f9-11f0-9061-5254002b5a70 + 425cf134-f6f9-11f0-9061-5254002b5a70 + + 731da585-5259-46f3-bf2d-a71f62178acf + + + 5b08702c-3e4b-45fc-ba1c-425c54e69498 + 9468baee-f467-4806-9520-d313d7362694 + + + +
+ + adm-v10 + adm-v10 + + 2026/02/26 05:36:58 + 2026/03/11 07:25:03 + false + guest_agent + false + 1 + Etc/GMT + 0 + 11 + 4.8 + 1 + AUTO_RESUME + 512 + false + false + false + 0 + 644c6f0d-f6f9-11f0-9061-5254002b5a70 + 0 + false + true + true + false + LOCK_SCREEN + 0 + + 2 + + + + 512 + true + false + false + false + 0 + + e1a8db34-6eb4-41e0-97b8-898420437df8 + e1a8db34-6eb4-41e0-97b8-898420437df8 + true + 3 + 00000000-0000-0000-0000-000000000000 + 2 + false + e1a8db34-6eb4-41e0-97b8-898420437df8 + e1a8db34-6eb4-41e0-97b8-898420437df8 + false + 2026/03/10 05:05:50 + 2026/02/26 05:36:58 + 0 +
+ Guest Operating System + linux +
+
+ 1 CPU, 512 Memory + + ENGINE 4.4.0.0 + + + 1 virtual cpu + Number of virtual CPU + 1 + 3 + 1 + 1 + 1 + 1 + 1 + + + 512 MB of memory + Memory Size + 2 + 4 + MegaBytes + 512 + + + ROOT-139 + 5b08702c-3e4b-45fc-ba1c-425c54e69498 + 17 + 22e65515-04e6-374e-95e0-981dab9e7fe2/5b08702c-3e4b-45fc-ba1c-425c54e69498 + 00000000-0000-0000-0000-000000000000 + e1a8db34-6eb4-41e0-97b8-898420437df8 + + 22e65515-04e6-374e-95e0-981dab9e7fe2 + 00000000-0000-0000-0000-000000000000 + 2026/02/26 05:36:58 + 2026/03/11 07:25:03 + 2026/03/11 07:25:03 + disk + disk + {type=drive, bus=0, controller=0, target=0, unit=0} + 1 + true + false + ua-22e65515-04e6-374e-95e0-981dab9e7fe2/5b08702c-3e4b-45fc-ba1c-425c54e69498 + + + Ethernet adapter on [No Network] + 07e8e63c-13b5-4a01-9b41-6f97847d2534 + 10 + + 3 + Network-07e8e63c-13b5-4a01-9b41-6f97847d2534 + true + ExternalGuestNetworkGuru + ExternalGuestNetworkGuru + 02:01:00:dd:00:0c + 10000 + interface + bridge + {type=pci, slot=0x00, bus=0x01, domain=0x0000, function=0x0} + 0 + true + false + ua-07e8e63c-13b5-4a01-9b41-6f97847d2534 + + + USB Controller + 3 + 23 + DISABLED + + + 0 + a41e097e-329a-3be5-a9e8-9bc112fe5fac + rng + virtio + {type=pci, slot=0x00, bus=0x06, domain=0x0000, function=0x0} + 0 + true + false + + + urandom + + +
+
+
diff --git a/plugins/integrations/veeam-control-service/src/test/java/org/apache/cloudstack/veeam/VeeamControlServiceImplTest.java b/plugins/integrations/veeam-control-service/src/test/java/org/apache/cloudstack/veeam/VeeamControlServiceImplTest.java new file mode 100644 index 000000000000..4ae0808238b9 --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/test/java/org/apache/cloudstack/veeam/VeeamControlServiceImplTest.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.veeam; + +import org.apache.cloudstack.veeam.api.dto.ImageTransfer; +import org.apache.cloudstack.veeam.utils.Mapper; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +import com.fasterxml.jackson.core.JsonProcessingException; + +@RunWith(MockitoJUnitRunner.class) +public class VeeamControlServiceImplTest { + + @Test + public void test_parseImageTransfer() { + String data = "{\"active\":false,\"direction\":\"upload\",\"format\":\"cow\",\"inactivity_timeout\":3600,\"phase\":\"cancelled\",\"shallow\":false,\"transferred\":0,\"link\":[],\"disk\":{\"id\":\"dba4d72d-01de-4267-aa8e-305996b53599\"},\"image\":{},\"backup\":{\"creation_date\":0}}"; + Mapper mapper = new Mapper(); + try { + ImageTransfer request = mapper.jsonMapper().readValue(data, ImageTransfer.class); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + } +} diff --git a/plugins/integrations/veeam-control-service/src/test/java/org/apache/cloudstack/veeam/api/dto/OvfXmlUtilTest.java b/plugins/integrations/veeam-control-service/src/test/java/org/apache/cloudstack/veeam/api/dto/OvfXmlUtilTest.java new file mode 100644 index 000000000000..bf92cc4d57fb --- /dev/null +++ b/plugins/integrations/veeam-control-service/src/test/java/org/apache/cloudstack/veeam/api/dto/OvfXmlUtilTest.java @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.veeam.api.dto; + +import static org.junit.Assert.assertEquals; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class OvfXmlUtilTest { + + String configuration = "" + + "adm-v9adm-v9"+ + "
1 CPU, 512 MemoryENGINE 4.4.0.01 virtual cpuNumber of virtual CPU1311111" + + "512 MB of memoryMemory Size24MegaBytes512" + + "
"; + + @Test + public void updateFromXml_parsesDetails() { + Vm vm = new Vm(); + OvfXmlUtil.updateFromXml(vm, configuration); + + assertEquals(String.valueOf(512L), vm.getMemory()); + assertEquals("1", vm.getCpu().getTopology().getSockets()); + assertEquals("1", vm.getCpu().getTopology().getCores()); + assertEquals("1", vm.getCpu().getTopology().getThreads()); + } +} diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java index d9f4963165ec..4ec966362355 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java @@ -17,18 +17,22 @@ package org.apache.cloudstack.network.contrail.management; +import java.net.InetAddress; import java.util.List; import java.util.Map; -import java.net.InetAddress; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.api.auth.SetupUserTwoFactorAuthenticationCmd; +import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.RolePermissionEntity; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.acl.SecurityChecker.AccessType; +import org.apache.cloudstack.acl.apikeypair.ApiKeyPair; import org.apache.cloudstack.acl.apikeypair.ApiKeyPairPermission; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.command.admin.account.CreateAccountCmd; +import org.apache.cloudstack.api.command.admin.account.UpdateAccountCmd; import org.apache.cloudstack.api.command.admin.user.DeleteUserCmd; import org.apache.cloudstack.api.command.admin.user.DeleteUserKeysCmd; import 
org.apache.cloudstack.api.command.admin.user.GetUserKeysCmd; @@ -37,20 +41,15 @@ import org.apache.cloudstack.api.command.admin.user.MoveUserCmd; import org.apache.cloudstack.api.command.admin.user.RegisterUserKeysCmd; import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; +import org.apache.cloudstack.api.response.ApiKeyPairResponse; +import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.UserTwoFactorAuthenticationSetupResponse; import org.apache.cloudstack.auth.UserTwoFactorAuthenticator; import org.apache.cloudstack.backup.BackupOffering; -import org.apache.cloudstack.framework.config.ConfigKey; - -import org.apache.cloudstack.acl.apikeypair.ApiKeyPair; -import org.apache.cloudstack.acl.ControlledEntity; -import org.apache.cloudstack.acl.RoleType; -import org.apache.cloudstack.api.response.ApiKeyPairResponse; -import org.apache.cloudstack.api.response.ListResponse; -import org.apache.cloudstack.acl.SecurityChecker.AccessType; -import org.apache.cloudstack.api.command.admin.account.UpdateAccountCmd; import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.framework.config.ConfigKey; +import com.cloud.api.auth.SetupUserTwoFactorAuthenticationCmd; import com.cloud.api.query.vo.ControlledViewEntity; import com.cloud.configuration.ResourceLimit; import com.cloud.configuration.dao.ResourceCountDao; @@ -598,6 +597,10 @@ public void validateUserPasswordAndUpdateIfNeeded(String newPassword, UserVO use public void checkApiAccess(Account account, String command, String apiKey) throws PermissionDeniedException { } + @Override + public void checkApiAccess(Account caller, String command) throws PermissionDeniedException { + } + @Override public UserAccount clearUserTwoFactorAuthenticationInSetupStateOnLogin(UserAccount user) { return null; @@ -614,4 +617,14 @@ public void verifyCallerPrivilegeForUserOrAccountOperations(User user) { @Override public void 
checkCallerRoleTypeAllowedForUserOrAccountOperations(Account userAccount, User user) { } + + @Override + public Account getActiveAccountByUuid(String accountUuid) { + return null; + } + + @Override + public User getOneActiveUserForAccount(Account account) { + return null; + } } diff --git a/plugins/pom.xml b/plugins/pom.xml index e7d13871285e..b044beaa2c72 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -96,6 +96,7 @@ integrations/cloudian integrations/prometheus integrations/kubernetes-service + integrations/veeam-control-service metrics diff --git a/scripts/vm/hypervisor/kvm/imageserver/__init__.py b/scripts/vm/hypervisor/kvm/imageserver/__init__.py new file mode 100644 index 000000000000..dc9505310395 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/__init__.py @@ -0,0 +1,37 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +CloudStack image server — HTTP server backed by NBD over Unix socket or a local file. + +Transfer configs are registered/unregistered by the cloudstack-agent via a +Unix domain control socket (default: /var/run/cloudstack/image-server.sock) +and stored in-memory for the lifetime of the server process. 
+ +Supports two backends (configured per-transfer at registration time): +- nbd: proxy to an NBD server via Unix socket; supports range reads/writes + (GET/PUT/PATCH), extents, zero, flush. +- file: read/write a local qcow2/raw file; full PUT only, GET with optional + ranges, flush. + +Usage:: + + # As a module + python -m imageserver --listen 127.0.0.1 --port 54322 + + # Or via the systemd service started by createImageTransfer +""" diff --git a/scripts/vm/hypervisor/kvm/imageserver/__main__.py b/scripts/vm/hypervisor/kvm/imageserver/__main__.py new file mode 100644 index 000000000000..e64bd5f65205 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/__main__.py @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from .server import main + +main() diff --git a/scripts/vm/hypervisor/kvm/imageserver/backends/__init__.py b/scripts/vm/hypervisor/kvm/imageserver/backends/__init__.py new file mode 100644 index 000000000000..36080b4cbe73 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/backends/__init__.py @@ -0,0 +1,36 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
from typing import Any, Dict

from .base import BackendSession, ImageBackend
from .file import FileBackend
from .nbd import NbdBackend

__all__ = ["BackendSession", "ImageBackend", "FileBackend", "NbdBackend", "create_backend"]


def create_backend(cfg: Dict[str, Any]) -> ImageBackend:
    """Factory: build the correct ImageBackend from a transfer config dict.

    A config whose "backend" key is "file" yields a FileBackend over
    cfg["file"]; any other value (including the implicit default, "nbd")
    yields an NbdBackend over cfg["socket"].
    """
    if cfg.get("backend", "nbd") == "file":
        return FileBackend(cfg["file"])
    return NbdBackend(
        cfg["socket"],
        export=cfg.get("export"),
        export_bitmap=cfg.get("export_bitmap"),
    )
from abc import ABC, abstractmethod
from typing import Any, Dict, List


class BackendSession(ABC):
    """A session that keeps one connection/file handle open for the duration
    of an operation (e.g. a full streaming GET read).

    Usable as a context manager; leaving the context closes the session.
    """

    @abstractmethod
    def size(self) -> int:
        """Return the image size in bytes."""

    @abstractmethod
    def read(self, offset: int, length: int) -> bytes:
        """Read *length* bytes starting at *offset*.

        NBD-backed sessions raise RuntimeError if the server returns empty
        data; file-backed sessions return empty bytes at EOF.
        """

    @abstractmethod
    def close(self) -> None:
        """Release the underlying connection or file handle."""

    def __enter__(self) -> "BackendSession":
        return self

    def __exit__(self, exc_type: Any, exc: Any, tb: Any) -> None:
        self.close()


class ImageBackend(ABC):
    """Abstract base class for image storage backends.

    Each backend (NBD, file, ...) implements this interface so the HTTP
    handler can operate uniformly without backend-specific branching.
    Usable as a context manager; leaving the context calls close().
    """

    @property
    @abstractmethod
    def supports_extents(self) -> bool:
        """Whether this backend can report allocation/dirty extents."""

    @property
    @abstractmethod
    def supports_range_write(self) -> bool:
        """Whether this backend can write at arbitrary byte offsets."""

    @abstractmethod
    def size(self) -> int:
        """Return the image size in bytes."""

    @abstractmethod
    def read(self, offset: int, length: int) -> bytes:
        """Read *length* bytes starting at *offset*."""

    @abstractmethod
    def write(self, data: bytes, offset: int) -> None:
        """Write *data* at *offset*."""

    @abstractmethod
    def write_full(self, stream, content_length: int, flush: bool) -> int:
        """Consume *content_length* bytes from *stream*, writing the whole
        image. Returns the byte count written; raises on a short read."""

    @abstractmethod
    def flush(self) -> None:
        """Flush pending data to stable storage."""

    @abstractmethod
    def zero(self, offset: int, length: int) -> None:
        """Zero *length* bytes starting at *offset*."""

    @abstractmethod
    def get_capabilities(self) -> Dict[str, bool]:
        """Return a capabilities dict with keys: read_only, can_flush,
        can_zero."""

    @abstractmethod
    def get_allocation_extents(self) -> List[Dict[str, Any]]:
        """Return allocation extents as
        [{"start": int, "length": int, "zero": bool}, ...]."""

    @abstractmethod
    def get_dirty_extents(self, dirty_bitmap_context: str) -> List[Dict[str, Any]]:
        """Return merged dirty+zero extents as
        [{"start": int, "length": int, "dirty": bool, "zero": bool}, ...]."""

    @abstractmethod
    def open_session(self) -> BackendSession:
        """Open a session holding a single connection/file handle for the
        duration of a streaming operation (e.g. GET)."""

    @abstractmethod
    def close(self) -> None:
        """Release any resources held by this backend."""

    def __enter__(self) -> "ImageBackend":
        return self

    def __exit__(self, exc_type: Any, exc: Any, tb: Any) -> None:
        self.close()
class FileSession(BackendSession):
    """Streaming-read session over a plain local file.

    Keeps one file handle open for the lifetime of the session. Reads past
    EOF return empty bytes (normal file semantics), unlike the NBD session
    which treats an empty read as an error.
    """

    def __init__(self, path: str):
        self._path = path
        self._fh: Optional[BufferedReader] = open(path, "rb")
        self._size = os.path.getsize(path)

    def size(self) -> int:
        return self._size

    def read(self, offset: int, length: int) -> bytes:
        fh = self._fh
        if fh is None:
            raise RuntimeError("session is closed")
        fh.seek(offset)
        return fh.read(length)

    def close(self) -> None:
        if self._fh is not None:
            self._fh.close()
            self._fh = None


class FileBackend(ImageBackend):
    """ImageBackend backed by a local file (qcow2 or raw).

    Supports ranged reads, full-image writes and flush. Range writes,
    zeroing and extent queries are not supported (see get_capabilities and
    the supports_* properties).
    """

    def __init__(self, file_path: str):
        self._path = file_path

    @property
    def supports_extents(self) -> bool:
        return False

    @property
    def supports_range_write(self) -> bool:
        return False

    def size(self) -> int:
        return os.path.getsize(self._path)

    def read(self, offset: int, length: int) -> bytes:
        # One short-lived handle per read keeps the backend stateless.
        with open(self._path, "rb") as fh:
            fh.seek(offset)
            return fh.read(length)

    def write(self, data: bytes, offset: int) -> None:
        raise NotImplementedError("file backend does not support range writes")

    def write_full(self, stream: Any, content_length: int, flush: bool) -> int:
        """Stream exactly *content_length* bytes into the file, truncating any
        previous content; raises IOError if the stream ends early."""
        written = 0
        with open(self._path, "wb") as fh:
            while written < content_length:
                chunk = stream.read(min(CHUNK_SIZE, content_length - written))
                if not chunk:
                    raise IOError(
                        f"request body ended early at {written} bytes"
                    )
                fh.write(chunk)
                written += len(chunk)
            if flush:
                fh.flush()
                os.fsync(fh.fileno())
        return written

    def flush(self) -> None:
        with open(self._path, "rb") as fh:
            fh.flush()
            os.fsync(fh.fileno())

    def zero(self, offset: int, length: int) -> None:
        raise NotImplementedError("file backend does not support zero")

    def get_capabilities(self) -> Dict[str, bool]:
        return {"read_only": False, "can_flush": True, "can_zero": False}

    def get_allocation_extents(self) -> List[Dict[str, Any]]:
        raise NotImplementedError("file backend does not support extents")

    def get_dirty_extents(self, dirty_bitmap_context: str) -> List[Dict[str, Any]]:
        raise NotImplementedError("file backend does not support extents")

    def open_session(self) -> FileSession:
        return FileSession(self._path)

    def close(self) -> None:
        # No persistent resources are held between calls.
        pass
+ """ + + def __init__( + self, + socket_path: str, + export: Optional[str], + need_block_status: bool = False, + extra_meta_contexts: Optional[List[str]] = None, + ): + self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self._sock.connect(socket_path) + self._nbd = nbd.NBD() + + if export and hasattr(self._nbd, "set_export_name"): + self._nbd.set_export_name(export) + + if need_block_status and hasattr(self._nbd, "add_meta_context"): + for ctx in ["base:allocation"] + (extra_meta_contexts or []): + try: + self._nbd.add_meta_context(ctx) + except Exception as e: + logging.warning("add_meta_context %r failed: %r", ctx, e) + + self._connect_existing_socket(self._sock) + + def _connect_existing_socket(self, sock: socket.socket) -> None: + last_err: Optional[BaseException] = None + if hasattr(self._nbd, "connect_socket"): + try: + self._nbd.connect_socket(sock) + return + except Exception as e: + last_err = e + try: + self._nbd.connect_socket(sock.fileno()) + return + except Exception as e2: + last_err = e2 + if hasattr(self._nbd, "connect_fd"): + try: + self._nbd.connect_fd(sock.fileno()) + return + except Exception as e: + last_err = e + raise RuntimeError( + "Unable to connect libnbd using existing socket/fd; " + f"binding missing connect_socket/connect_fd or call failed: {last_err!r}" + ) + + def size(self) -> int: + return int(self._nbd.get_size()) + + def get_capabilities(self) -> Dict[str, bool]: + """ + Query NBD export capabilities (read_only, can_flush, can_zero) from the + server handshake. Uses getattr for binding name variations. 
+ """ + out: Dict[str, bool] = { + "read_only": True, + "can_flush": False, + "can_zero": False, + } + for name, keys in [ + ("read_only", ("is_read_only", "get_read_only")), + ("can_flush", ("can_flush", "get_can_flush")), + ("can_zero", ("can_zero", "get_can_zero")), + ]: + for attr in keys: + if hasattr(self._nbd, attr): + try: + val = getattr(self._nbd, attr)() + out[name] = bool(val) + except Exception: + pass + break + return out + + def pread(self, length: int, offset: int) -> bytes: + try: + return self._nbd.pread(length, offset) + except TypeError: + return self._nbd.pread(offset, length) + + def pwrite(self, buf: bytes, offset: int) -> None: + try: + self._nbd.pwrite(buf, offset) + except TypeError: + self._nbd.pwrite(offset, buf) + + def pzero(self, offset: int, size: int) -> None: + """ + Zero a byte range. Uses NBD WRITE_ZEROES when available, + otherwise falls back to writing zero bytes via pwrite. + """ + if size <= 0: + return + for fn_name in ("pwrite_zeros", "zero"): + if not hasattr(self._nbd, fn_name): + continue + fn = getattr(self._nbd, fn_name) + try: + fn(size, offset) + return + except TypeError: + try: + fn(offset, size) + return + except TypeError: + pass + remaining = size + pos = offset + zero_buf = b"\x00" * min(CHUNK_SIZE, size) + while remaining > 0: + chunk = min(len(zero_buf), remaining) + self.pwrite(zero_buf[:chunk], pos) + pos += chunk + remaining -= chunk + + def flush(self) -> None: + if hasattr(self._nbd, "flush"): + self._nbd.flush() + return + if hasattr(self._nbd, "fsync"): + self._nbd.fsync() + return + raise RuntimeError("libnbd binding has no flush/fsync method") + + def get_allocation_extents(self) -> List[Dict[str, Any]]: + """ + Query base:allocation and return all extents as + [{"start": ..., "length": ..., "zero": bool}, ...]. 
+ """ + size = self.size() + if size == 0: + return [] + if not hasattr(self._nbd, "block_status") and not hasattr( + self._nbd, "block_status_64" + ): + return [{"start": 0, "length": size, "zero": False}] + if hasattr(self._nbd, "can_meta_context") and not self._nbd.can_meta_context( + "base:allocation" + ): + return [{"start": 0, "length": size, "zero": False}] + + allocation_extents: List[Dict[str, Any]] = [] + chunk = min(size, NBD_BLOCK_STATUS_CHUNK) + offset = 0 + + def extent_cb(*args: Any, **kwargs: Any) -> int: + if len(args) < 3: + return 0 + metacontext, off, entries = args[0], args[1], args[2] + if metacontext != "base:allocation" or entries is None: + return 0 + current = off + try: + flat = list(entries) + for i in range(0, len(flat), 2): + if i + 1 >= len(flat): + break + length = int(flat[i]) + flags = int(flat[i + 1]) + zero = (flags & (NBD_STATE_HOLE | NBD_STATE_ZERO)) != 0 + allocation_extents.append( + {"start": current, "length": length, "zero": zero} + ) + current += length + except (TypeError, ValueError, IndexError): + pass + return 0 + + block_status_fn = getattr( + self._nbd, "block_status_64", getattr(self._nbd, "block_status", None) + ) + if block_status_fn is None: + return [{"start": 0, "length": size, "zero": False}] + try: + while offset < size: + count = min(chunk, size - offset) + try: + block_status_fn(count, offset, extent_cb) + except TypeError: + block_status_fn(offset, count, extent_cb) + offset += count + except Exception as e: + logging.warning("get_allocation_extents block_status failed: %r", e) + return [{"start": 0, "length": size, "zero": False}] + if not allocation_extents: + return [{"start": 0, "length": size, "zero": False}] + return coalesce_allocation_extents(allocation_extents) + + def get_extents_dirty_and_zero( + self, dirty_bitmap_context: str + ) -> List[Dict[str, Any]]: + """ + Query block status for base:allocation and a dirty bitmap context, + merge boundaries, and return extents with dirty and zero flags. 
+ """ + size = self.size() + if size == 0: + return [] + if not hasattr(self._nbd, "block_status") and not hasattr( + self._nbd, "block_status_64" + ): + return self._fallback_dirty_zero_extents(size) + if hasattr(self._nbd, "can_meta_context"): + if not self._nbd.can_meta_context("base:allocation"): + return self._fallback_dirty_zero_extents(size) + if not self._nbd.can_meta_context(dirty_bitmap_context): + logging.warning( + "dirty bitmap context %r not negotiated", dirty_bitmap_context + ) + return self._fallback_dirty_zero_extents(size) + + allocation_extents: List[Tuple[int, int, bool]] = [] + dirty_extents: List[Tuple[int, int, bool]] = [] + chunk = min(size, NBD_BLOCK_STATUS_CHUNK) + offset = 0 + + def extent_cb(*args: Any, **kwargs: Any) -> int: + if len(args) < 3: + return 0 + metacontext, off, entries = args[0], args[1], args[2] + if entries is None or not hasattr(entries, "__iter__"): + return 0 + current = off + try: + flat = list(entries) + for i in range(0, len(flat), 2): + if i + 1 >= len(flat): + break + length = int(flat[i]) + flags = int(flat[i + 1]) + if metacontext == "base:allocation": + zero = (flags & (NBD_STATE_HOLE | NBD_STATE_ZERO)) != 0 + allocation_extents.append((current, length, zero)) + elif metacontext == dirty_bitmap_context: + dirty = (flags & NBD_STATE_DIRTY) != 0 + dirty_extents.append((current, length, dirty)) + current += length + except (TypeError, ValueError, IndexError): + pass + return 0 + + block_status_fn = getattr( + self._nbd, "block_status_64", getattr(self._nbd, "block_status", None) + ) + if block_status_fn is None: + return self._fallback_dirty_zero_extents(size) + try: + while offset < size: + count = min(chunk, size - offset) + try: + block_status_fn(count, offset, extent_cb) + except TypeError: + block_status_fn(offset, count, extent_cb) + offset += count + except Exception as e: + logging.warning("get_extents_dirty_and_zero block_status failed: %r", e) + return self._fallback_dirty_zero_extents(size) + return 
merge_dirty_zero_extents(allocation_extents, dirty_extents, size) + + @staticmethod + def _fallback_dirty_zero_extents(size: int) -> List[Dict[str, Any]]: + return [{"start": 0, "length": size, "dirty": False, "zero": False}] + + def close(self) -> None: + try: + if hasattr(self._nbd, "shutdown"): + self._nbd.shutdown() + except Exception: + pass + try: + if hasattr(self._nbd, "close"): + self._nbd.close() + except Exception: + pass + try: + self._sock.close() + except Exception: + pass + + def __enter__(self) -> "NbdConnection": + return self + + def __exit__(self, exc_type: Any, exc: Any, tb: Any) -> None: + self.close() + + +class NbdSession(BackendSession): + """ + Holds a single NbdConnection open for the duration of a streaming operation. + Raises RuntimeError if pread returns empty data (NBD should never do this). + """ + + def __init__(self, conn: NbdConnection): + self._conn = conn + + def size(self) -> int: + return self._conn.size() + + def read(self, offset: int, length: int) -> bytes: + data = self._conn.pread(length, offset) + if not data: + raise RuntimeError("backend returned empty read") + return data + + def close(self) -> None: + self._conn.close() + + +class NbdBackend(ImageBackend): + """ + ImageBackend implementation that proxies to an NBD server via Unix socket. + Each public method opens a fresh NbdConnection (per the original design). 
+ """ + + def __init__( + self, + socket_path: str, + export: Optional[str] = None, + export_bitmap: Optional[str] = None, + ): + self._socket_path = socket_path + self._export = export + self._export_bitmap = export_bitmap + + @property + def supports_extents(self) -> bool: + return True + + @property + def supports_range_write(self) -> bool: + return True + + @property + def export_bitmap(self) -> Optional[str]: + return self._export_bitmap + + def _connect( + self, + need_block_status: bool = False, + extra_meta_contexts: Optional[List[str]] = None, + ) -> NbdConnection: + return NbdConnection( + self._socket_path, + self._export, + need_block_status=need_block_status, + extra_meta_contexts=extra_meta_contexts, + ) + + def size(self) -> int: + with self._connect() as conn: + return conn.size() + + def read(self, offset: int, length: int) -> bytes: + with self._connect() as conn: + return conn.pread(length, offset) + + def write(self, data: bytes, offset: int) -> None: + with self._connect() as conn: + conn.pwrite(data, offset) + + def write_full(self, stream: Any, content_length: int, flush: bool) -> int: + bytes_written = 0 + with self._connect() as conn: + offset = 0 + remaining = content_length + while remaining > 0: + chunk = stream.read(min(CHUNK_SIZE, remaining)) + if not chunk: + raise IOError( + f"request body ended early at {offset} bytes" + ) + conn.pwrite(chunk, offset) + offset += len(chunk) + remaining -= len(chunk) + bytes_written += len(chunk) + if flush: + conn.flush() + return bytes_written + + def write_range(self, stream: Any, start_off: int, content_length: int) -> int: + """ + Write *content_length* bytes from *stream* to the image starting at *start_off*. + Returns bytes written. Raises ValueError if offset/length is out of bounds. 
+        """
+        bytes_written = 0
+        with self._connect() as conn:
+            image_size = conn.size()
+            if start_off >= image_size:
+                raise ValueError(f"offset {start_off} >= image size {image_size}")
+            max_len = image_size - start_off
+            if content_length > max_len:
+                raise ValueError(
+                    f"content_length {content_length} exceeds available space {max_len}"
+                )
+            offset = start_off
+            remaining = content_length
+            while remaining > 0:
+                chunk = stream.read(min(CHUNK_SIZE, remaining))
+                if not chunk:
+                    raise IOError(
+                        f"request body ended early at {bytes_written} bytes"
+                    )
+                conn.pwrite(chunk, offset)
+                n = len(chunk)
+                offset += n
+                remaining -= n
+                bytes_written += n
+        return bytes_written
+
+    def flush(self) -> None:
+        with self._connect() as conn:
+            conn.flush()
+
+    def zero(self, offset: int, length: int) -> None:
+        with self._connect() as conn:
+            image_size = conn.size()
+            if offset >= image_size:
+                raise ValueError("offset must be less than image size")
+            zero_size = min(length, image_size - offset)
+            conn.pzero(offset, zero_size)
+
+    def get_capabilities(self) -> Dict[str, bool]:
+        with self._connect() as conn:
+            return conn.get_capabilities()
+
+    def get_allocation_extents(self) -> List[Dict[str, Any]]:
+        with self._connect(need_block_status=True) as conn:
+            return conn.get_allocation_extents()
+
+    def get_dirty_extents(self, dirty_bitmap_context: str) -> List[Dict[str, Any]]:
+        extra_contexts: List[str] = [dirty_bitmap_context]
+        with self._connect(
+            need_block_status=True, extra_meta_contexts=extra_contexts
+        ) as conn:
+            return conn.get_extents_dirty_and_zero(dirty_bitmap_context)
+
+    def open_session(self) -> NbdSession:
+        return NbdSession(self._connect())
+
+    def close(self) -> None:
+        pass
diff --git a/scripts/vm/hypervisor/kvm/imageserver/concurrency.py b/scripts/vm/hypervisor/kvm/imageserver/concurrency.py
new file mode 100644
index 000000000000..7d91aea60131
--- /dev/null
+++ b/scripts/vm/hypervisor/kvm/imageserver/concurrency.py
@@ -0,0 +1,71 @@
+# Licensed to the
Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import threading +from typing import Dict, NamedTuple + + +class _ImageState(NamedTuple): + read_sem: threading.Semaphore + write_sem: threading.Semaphore + lock: threading.Lock + + +class ConcurrencyManager: + """ + Manages per-image read/write semaphores and per-image mutual-exclusion locks. + + Each image_id gets its own independent pool of read slots (default MAX_PARALLEL_READS) + and write slots (default MAX_PARALLEL_WRITES), so concurrent transfers to different images + do not contend with each other. + + The per-image lock serialises operations that must not overlap on the + same image (e.g. flush while writing, extents while writing). 
+ """ + + def __init__(self, max_reads: int, max_writes: int): + self._max_reads = max_reads + self._max_writes = max_writes + self._images: Dict[str, _ImageState] = {} + self._guard = threading.Lock() + + def _state_for(self, image_id: str) -> _ImageState: + with self._guard: + state = self._images.get(image_id) + if state is None: + state = _ImageState( + read_sem=threading.Semaphore(self._max_reads), + write_sem=threading.Semaphore(self._max_writes), + lock=threading.Lock(), + ) + self._images[image_id] = state + return state + + def acquire_read(self, image_id: str, blocking: bool = False) -> bool: + return self._state_for(image_id).read_sem.acquire(blocking=blocking) + + def release_read(self, image_id: str) -> None: + self._state_for(image_id).read_sem.release() + + def acquire_write(self, image_id: str, blocking: bool = False) -> bool: + return self._state_for(image_id).write_sem.acquire(blocking=blocking) + + def release_write(self, image_id: str) -> None: + self._state_for(image_id).write_sem.release() + + def get_image_lock(self, image_id: str) -> threading.Lock: + return self._state_for(image_id).lock diff --git a/scripts/vm/hypervisor/kvm/imageserver/config.py b/scripts/vm/hypervisor/kvm/imageserver/config.py new file mode 100644 index 000000000000..1c92fd129379 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/config.py @@ -0,0 +1,200 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import logging +import os +import threading +import time +from contextlib import contextmanager +from typing import Any, Dict, Iterator, List, Optional + +from .constants import DEFAULT_IDLE_TIMEOUT_SECONDS + + +def parse_idle_timeout_seconds(obj: dict) -> int: + """Seconds of idle time (no completed HTTP requests) before unregister.""" + v = obj.get("idle_timeout_seconds", DEFAULT_IDLE_TIMEOUT_SECONDS) + if not isinstance(v, int): + raise ValueError("idle_timeout_seconds must be an integer") + v = int(v) + if v < 1: + v = 86400 + return v + + +def validate_transfer_config(obj: dict) -> dict: + """ + Validate and normalize a transfer config dict received over the control + socket. Returns the cleaned config or raises ValueError. 
+ """ + idle_sec = parse_idle_timeout_seconds(obj) + + backend = obj.get("backend") + if backend is None: + backend = "nbd" + if not isinstance(backend, str): + raise ValueError("invalid backend type") + backend = backend.lower() + if backend not in ("nbd", "file"): + raise ValueError(f"unsupported backend: {backend}") + + if backend == "file": + file_path = obj.get("file") + if not isinstance(file_path, str) or not file_path.strip(): + raise ValueError("missing/invalid file path for file backend") + return {"backend": "file", "file": file_path.strip(), "idle_timeout_seconds": idle_sec} + + socket_path = obj.get("socket") + export = obj.get("export") + export_bitmap = obj.get("export_bitmap") + if not isinstance(socket_path, str) or not socket_path.strip(): + raise ValueError("missing/invalid socket path for nbd backend") + if export is not None and (not isinstance(export, str) or not export): + raise ValueError("invalid export name") + return { + "backend": "nbd", + "socket": socket_path.strip(), + "export": export, + "export_bitmap": export_bitmap, + "idle_timeout_seconds": idle_sec, + } + + +def safe_transfer_id(image_id: str) -> Optional[str]: + """ + Only allow a single filename component to avoid path traversal. + Rejects anything containing '/' or '\\'. + """ + if not image_id: + return None + if image_id != os.path.basename(image_id): + return None + if "/" in image_id or "\\" in image_id: + return None + if image_id in (".", ".."): + return None + return image_id + + +class TransferRegistry: + """ + Thread-safe in-memory registry for active image transfer configurations. + + The cloudstack-agent registers/unregisters transfers via the Unix domain + control socket. The HTTP handler looks up configs through get(). + + Each transfer may specify idle_timeout_seconds (default DEFAULT_IDLE_TIMEOUT_SECONDS). + After no in-flight HTTP requests have completed for that idle period, the transfer + is removed (same effect as unregister). 
+ """ + + def __init__(self) -> None: + self._lock = threading.Lock() + self._transfers: Dict[str, Dict[str, Any]] = {} + self._last_activity: Dict[str, float] = {} + self._inflight: Dict[str, int] = {} + + def register(self, transfer_id: str, config: Dict[str, Any]) -> bool: + safe_id = safe_transfer_id(transfer_id) + if safe_id is None: + logging.error("register rejected invalid transfer_id=%r", transfer_id) + return False + with self._lock: + self._transfers[safe_id] = config + self._last_activity[safe_id] = time.monotonic() + self._inflight.pop(safe_id, None) + logging.info("registered transfer_id=%s active=%d", safe_id, len(self._transfers)) + return True + + def unregister(self, transfer_id: str) -> int: + """Remove a transfer and return the number of remaining active transfers.""" + safe_id = safe_transfer_id(transfer_id) + if safe_id is None: + logging.error("unregister rejected invalid transfer_id=%r", transfer_id) + with self._lock: + return len(self._transfers) + with self._lock: + self._transfers.pop(safe_id, None) + self._last_activity.pop(safe_id, None) + self._inflight.pop(safe_id, None) + remaining = len(self._transfers) + logging.info("unregistered transfer_id=%s active=%d", safe_id, remaining) + return remaining + + def get(self, transfer_id: str) -> Optional[Dict[str, Any]]: + safe_id = safe_transfer_id(transfer_id) + if safe_id is None: + return None + with self._lock: + return self._transfers.get(safe_id) + + def active_count(self) -> int: + with self._lock: + return len(self._transfers) + + @contextmanager + def request_lifecycle(self, transfer_id: str) -> Iterator[None]: + """ + Track an HTTP request for idle-timeout purposes. + + Expiry is based on time since the last request *completed* (all in-flight + work for this transfer_id finished). Transfers with active requests are + never expired. 
+ """ + safe_id = safe_transfer_id(transfer_id) + if safe_id is None: + yield + return + with self._lock: + if safe_id not in self._transfers: + yield + return + self._inflight[safe_id] = self._inflight.get(safe_id, 0) + 1 + try: + yield + finally: + now = time.monotonic() + with self._lock: + count = self._inflight.get(safe_id, 1) - 1 + if count <= 0: + self._inflight.pop(safe_id, None) + if safe_id in self._transfers: + self._last_activity[safe_id] = now + else: + self._inflight[safe_id] = count + + def sweep_expired_transfers(self) -> None: + """Remove transfers that exceeded idle_timeout_seconds with no in-flight HTTP work.""" + now = time.monotonic() + with self._lock: + expired: List[str] = [] + for tid, cfg in list(self._transfers.items()): + if self._inflight.get(tid, 0) > 0: + continue + timeout = int(cfg.get("idle_timeout_seconds", DEFAULT_IDLE_TIMEOUT_SECONDS)) + last = self._last_activity.get(tid, now) + if now - last >= timeout: + expired.append(tid) + for tid in expired: + self._transfers.pop(tid, None) + self._last_activity.pop(tid, None) + self._inflight.pop(tid, None) + logging.info( + "idle expiry: unregistered transfer_id=%s active=%d", + tid, + len(self._transfers), + ) diff --git a/scripts/vm/hypervisor/kvm/imageserver/constants.py b/scripts/vm/hypervisor/kvm/imageserver/constants.py new file mode 100644 index 000000000000..0b6465527f4b --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/constants.py @@ -0,0 +1,49 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +CHUNK_SIZE = 256 * 1024 # 256 KiB + +# NBD base:allocation flags (hole=1, zero=2; hole|zero=3) +NBD_STATE_HOLE = 1 +NBD_STATE_ZERO = 2 +# NBD qemu:dirty-bitmap flags (dirty=1) +NBD_STATE_DIRTY = 1 + +MAX_PARALLEL_READS = 8 +MAX_PARALLEL_WRITES = 1 + +# HTTP server defaults +DEFAULT_LISTEN_ADDRESS = "127.0.0.1" +DEFAULT_HTTP_PORT = 54322 + +# Control socket +CONTROL_SOCKET = "/var/run/cloudstack/image-server.sock" +CONTROL_SOCKET_BACKLOG = 32 +CONTROL_SOCKET_PERMISSIONS = 0o660 +CONTROL_RECV_BUFFER = 4096 + +# Transfer idle timeout (seconds). A transfer is expired when no in-flight HTTP +# requests have completed for this duration. +DEFAULT_IDLE_TIMEOUT_SECONDS = 600 + +# Maximum size of a JSON body in a PATCH request (zero / flush ops) +MAX_PATCH_JSON_SIZE = 64 * 1024 # 64 KiB + +# Byte range requested per block_status call for NBD extent queries +NBD_BLOCK_STATUS_CHUNK = 64 * 1024 * 1024 # 64 MiB + +CFG_DIR = "/tmp/imagetransfer" diff --git a/scripts/vm/hypervisor/kvm/imageserver/handler.py b/scripts/vm/hypervisor/kvm/imageserver/handler.py new file mode 100644 index 000000000000..c28a06575814 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/handler.py @@ -0,0 +1,848 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import json +import logging +import re +from http import HTTPStatus +from http.server import BaseHTTPRequestHandler +from typing import Any, Dict, List, Optional, Tuple +from urllib.parse import parse_qs + +from .backends import NbdBackend, create_backend +from .concurrency import ConcurrencyManager +from .config import TransferRegistry +from .constants import CHUNK_SIZE, MAX_PARALLEL_READS, MAX_PARALLEL_WRITES, MAX_PATCH_JSON_SIZE +from .util import is_fallback_dirty_response, json_bytes, now_s + + +class Handler(BaseHTTPRequestHandler): + """ + HTTP request handler for the image server. + + Routing, HTTP parsing, and response formatting live here. + All backend I/O is delegated to ImageBackend implementations via the + create_backend() factory. + + Class-level attributes _concurrency and _registry are injected + by the server at startup (see server.py / make_handler()). 
+ """ + + server_version = "cloudstack-image-server/1.0" + server_protocol = "HTTP/1.1" + + _concurrency: ConcurrencyManager + _registry: TransferRegistry + + _CONTENT_RANGE_RE = re.compile(r"^bytes\s+(\d+)-(\d+)/(?:\*|\d+)$") + + def log_message(self, fmt: str, *args: Any) -> None: + logging.info("%s - - %s", self.address_string(), fmt % args) + + # ------------------------------------------------------------------ + # Response helpers + # ------------------------------------------------------------------ + + def _send_imageio_headers( + self, allowed_methods: Optional[str] = None + ) -> None: + if allowed_methods is None: + allowed_methods = "GET, PUT, OPTIONS" + self.send_header("Access-Control-Allow-Methods", allowed_methods) + self.send_header("Accept-Ranges", "bytes") + + def _send_json( + self, + status: int, + obj: Any, + allowed_methods: Optional[str] = None, + ) -> None: + body = json_bytes(obj) + self.send_response(status) + self._send_imageio_headers(allowed_methods) + self.send_header("Content-Type", "application/json") + self.send_header("Content-Length", str(len(body))) + self.end_headers() + try: + self.wfile.write(body) + except BrokenPipeError: + pass + + def _send_error_json(self, status: int, message: str) -> None: + self._send_json(status, {"error": message}) + + def _send_range_not_satisfiable(self, size: int) -> None: + self.send_response(HTTPStatus.REQUESTED_RANGE_NOT_SATISFIABLE) + self._send_imageio_headers() + self.send_header("Content-Type", "application/json") + self.send_header("Content-Range", f"bytes */{size}") + body = json_bytes({"error": "range not satisfiable"}) + self.send_header("Content-Length", str(len(body))) + self.end_headers() + try: + self.wfile.write(body) + except BrokenPipeError: + pass + + # ------------------------------------------------------------------ + # Parsing helpers + # ------------------------------------------------------------------ + + def _parse_single_range(self, range_header: str, size: int) -> 
Tuple[int, int]: + """ + Parse a single HTTP byte range (RFC 7233) and return (start, end_inclusive). + Raises ValueError for invalid headers. + """ + if size < 0: + raise ValueError("invalid size") + if not range_header: + raise ValueError("empty Range") + if "," in range_header: + raise ValueError("multiple ranges not supported") + + prefix = "bytes=" + if not range_header.startswith(prefix): + raise ValueError("only bytes ranges supported") + spec = range_header[len(prefix):].strip() + if "-" not in spec: + raise ValueError("invalid bytes range") + + left, right = spec.split("-", 1) + left = left.strip() + right = right.strip() + + if left == "": + if right == "": + raise ValueError("invalid suffix range") + try: + suffix_len = int(right, 10) + except ValueError as e: + raise ValueError("invalid suffix length") from e + if suffix_len <= 0: + raise ValueError("invalid suffix length") + if size == 0: + raise ValueError("unsatisfiable") + if suffix_len >= size: + return 0, size - 1 + return size - suffix_len, size - 1 + + try: + start = int(left, 10) + except ValueError as e: + raise ValueError("invalid range start") from e + if start < 0: + raise ValueError("invalid range start") + if start >= size: + raise ValueError("unsatisfiable") + + if right == "": + return start, size - 1 + + try: + end = int(right, 10) + except ValueError as e: + raise ValueError("invalid range end") from e + if end < start: + raise ValueError("unsatisfiable") + if end >= size: + end = size - 1 + return start, end + + def _parse_route(self) -> Tuple[Optional[str], Optional[str]]: + path = self.path.split("?", 1)[0] + parts = [p for p in path.split("/") if p] + if len(parts) < 2 or parts[0] != "images": + return None, None + image_id = parts[1] + tail = parts[2] if len(parts) >= 3 else None + if len(parts) > 3: + return None, None + return image_id, tail + + def _parse_content_range(self, header: str) -> Tuple[int, int]: + """ + Parse Content-Range header "bytes start-end/*" or "bytes 
start-end/size". + Returns (start, end_inclusive). + """ + if not header: + raise ValueError("empty Content-Range") + m = self._CONTENT_RANGE_RE.match(header.strip()) + if not m: + raise ValueError("invalid Content-Range") + start_s, end_s = m.groups() + start = int(start_s, 10) + end = int(end_s, 10) + if start < 0 or end < start: + raise ValueError("invalid Content-Range range") + return start, end + + def _parse_query(self) -> Dict[str, List[str]]: + if "?" not in self.path: + return {} + query = self.path.split("?", 1)[1] + return parse_qs(query, keep_blank_values=True) + + def _image_cfg(self, image_id: str) -> Optional[Dict[str, Any]]: + return self._registry.get(image_id) + + # ------------------------------------------------------------------ + # HTTP verb dispatchers + # ------------------------------------------------------------------ + + def do_OPTIONS(self) -> None: + image_id, tail = self._parse_route() + if image_id is None or tail is not None: + self._send_error_json(HTTPStatus.NOT_FOUND, "not found") + return + cfg = self._image_cfg(image_id) + if cfg is None: + self._send_error_json(HTTPStatus.NOT_FOUND, "unknown image_id") + return + + with self._registry.request_lifecycle(image_id): + backend = create_backend(cfg) + try: + if not backend.supports_extents: + allowed_methods = "GET, PUT, POST, OPTIONS" + features = ["flush"] + response = { + "unix_socket": None, + "features": features, + "max_readers": MAX_PARALLEL_READS, + "max_writers": MAX_PARALLEL_WRITES, + } + self._send_json(HTTPStatus.OK, response, allowed_methods=allowed_methods) + return + + read_only = True + can_flush = False + can_zero = False + try: + caps = backend.get_capabilities() + read_only = caps["read_only"] + can_flush = caps["can_flush"] + can_zero = caps["can_zero"] + except Exception as e: + logging.warning("OPTIONS: could not query backend capabilities: %r", e) + read_only = bool(cfg.get("read_only")) + if not read_only: + can_flush = True + can_zero = True + + if 
read_only: + allowed_methods = "GET, OPTIONS" + features = ["extents"] + max_writers = 0 + else: + allowed_methods = "GET, PUT, PATCH, OPTIONS" + features = ["extents"] + if can_zero: + features.append("zero") + if can_flush: + features.append("flush") + max_writers = MAX_PARALLEL_WRITES + + response = { + "unix_socket": None, + "features": features, + "max_readers": MAX_PARALLEL_READS, + "max_writers": max_writers, + } + self._send_json(HTTPStatus.OK, response, allowed_methods=allowed_methods) + finally: + backend.close() + + def do_GET(self) -> None: + image_id, tail = self._parse_route() + if image_id is None: + self._send_error_json(HTTPStatus.NOT_FOUND, "not found") + return + + cfg = self._image_cfg(image_id) + if cfg is None: + self._send_error_json(HTTPStatus.NOT_FOUND, "unknown image_id") + return + + if tail == "extents": + with self._registry.request_lifecycle(image_id): + backend = create_backend(cfg) + try: + if not backend.supports_extents: + self._send_error_json( + HTTPStatus.BAD_REQUEST, "extents not supported for file backend" + ) + return + finally: + backend.close() + query = self._parse_query() + context = (query.get("context") or [None])[0] + self._handle_get_extents(image_id, cfg, context=context) + return + if tail is not None: + self._send_error_json(HTTPStatus.NOT_FOUND, "not found") + return + + range_header = self.headers.get("Range") + with self._registry.request_lifecycle(image_id): + self._handle_get_image(image_id, cfg, range_header) + + def do_PUT(self) -> None: + image_id, tail = self._parse_route() + if image_id is None or tail is not None: + self._send_error_json(HTTPStatus.NOT_FOUND, "not found") + return + + cfg = self._image_cfg(image_id) + if cfg is None: + self._send_error_json(HTTPStatus.NOT_FOUND, "unknown image_id") + return + + with self._registry.request_lifecycle(image_id): + if self.headers.get("Range") is not None: + self._send_error_json( + HTTPStatus.BAD_REQUEST, + "Range header not supported for PUT; use 
Content-Range or PATCH", + ) + return + + content_length_hdr = self.headers.get("Content-Length") + if content_length_hdr is None: + self._send_error_json(HTTPStatus.BAD_REQUEST, "Content-Length required") + return + try: + content_length = int(content_length_hdr) + except ValueError: + self._send_error_json(HTTPStatus.BAD_REQUEST, "Invalid Content-Length") + return + if content_length < 0: + self._send_error_json(HTTPStatus.BAD_REQUEST, "Invalid Content-Length") + return + + query = self._parse_query() + flush_param = (query.get("flush") or ["n"])[0].lower() + flush = flush_param in ("y", "yes", "true", "1") + + content_range_hdr = self.headers.get("Content-Range") + if content_range_hdr is not None: + backend = create_backend(cfg) + try: + if not backend.supports_range_write: + self._send_error_json( + HTTPStatus.BAD_REQUEST, + "Content-Range PUT not supported for file backend; use full PUT", + ) + return + finally: + backend.close() + self._handle_put_range(image_id, cfg, content_range_hdr, content_length, flush) + return + + self._handle_put_image(image_id, cfg, content_length, flush) + + def do_POST(self) -> None: + image_id, tail = self._parse_route() + if image_id is None: + self._send_error_json(HTTPStatus.NOT_FOUND, "not found") + return + + cfg = self._image_cfg(image_id) + if cfg is None: + self._send_error_json(HTTPStatus.NOT_FOUND, "unknown image_id") + return + + if tail == "flush": + with self._registry.request_lifecycle(image_id): + self._handle_post_flush(image_id, cfg) + return + self._send_error_json(HTTPStatus.NOT_FOUND, "not found") + + def do_PATCH(self) -> None: + image_id, tail = self._parse_route() + if image_id is None or tail is not None: + self._send_error_json(HTTPStatus.NOT_FOUND, "not found") + return + + cfg = self._image_cfg(image_id) + if cfg is None: + self._send_error_json(HTTPStatus.NOT_FOUND, "unknown image_id") + return + + with self._registry.request_lifecycle(image_id): + backend = create_backend(cfg) + try: + if not 
backend.supports_range_write: + self._send_error_json( + HTTPStatus.BAD_REQUEST, + "range writes and PATCH not supported for file backend; use PUT for full upload", + ) + return + finally: + backend.close() + + content_type = self.headers.get("Content-Type", "").split(";")[0].strip().lower() + range_header = self.headers.get("Range") + + if range_header is not None and content_type != "application/json": + content_length_hdr = self.headers.get("Content-Length") + if content_length_hdr is None: + self._send_error_json(HTTPStatus.BAD_REQUEST, "Content-Length required") + return + try: + content_length = int(content_length_hdr) + except ValueError: + self._send_error_json(HTTPStatus.BAD_REQUEST, "Invalid Content-Length") + return + if content_length <= 0: + self._send_error_json(HTTPStatus.BAD_REQUEST, "Content-Length must be positive") + return + self._handle_patch_range(image_id, cfg, range_header, content_length) + return + + if content_type != "application/json": + self._send_error_json( + HTTPStatus.UNSUPPORTED_MEDIA_TYPE, + "PATCH requires Content-Type: application/json (for zero/flush) or Range with binary body", + ) + return + + content_length_hdr = self.headers.get("Content-Length") + if content_length_hdr is None: + self._send_error_json(HTTPStatus.BAD_REQUEST, "Content-Length required") + return + try: + content_length = int(content_length_hdr) + except ValueError: + self._send_error_json(HTTPStatus.BAD_REQUEST, "Invalid Content-Length") + return + if content_length <= 0 or content_length > MAX_PATCH_JSON_SIZE: + self._send_error_json(HTTPStatus.BAD_REQUEST, "Invalid Content-Length") + return + + body = self.rfile.read(content_length) + if len(body) != content_length: + self._send_error_json(HTTPStatus.BAD_REQUEST, "request body truncated") + return + + try: + payload = json.loads(body.decode("utf-8")) + except (json.JSONDecodeError, UnicodeDecodeError) as e: + self._send_error_json(HTTPStatus.BAD_REQUEST, f"invalid JSON: {e}") + return + + if not 
isinstance(payload, dict): + self._send_error_json(HTTPStatus.BAD_REQUEST, "body must be a JSON object") + return + + op = payload.get("op") + if op == "flush": + self._handle_post_flush(image_id, cfg) + return + if op != "zero": + self._send_error_json( + HTTPStatus.BAD_REQUEST, + "unsupported op; only \"zero\" and \"flush\" are supported", + ) + return + + try: + size = int(payload.get("size")) + except (TypeError, ValueError): + self._send_error_json(HTTPStatus.BAD_REQUEST, "missing or invalid \"size\"") + return + if size <= 0: + self._send_error_json(HTTPStatus.BAD_REQUEST, "\"size\" must be positive") + return + + offset = payload.get("offset") + if offset is None: + offset = 0 + else: + try: + offset = int(offset) + except (TypeError, ValueError): + self._send_error_json(HTTPStatus.BAD_REQUEST, "invalid \"offset\"") + return + if offset < 0: + self._send_error_json(HTTPStatus.BAD_REQUEST, "\"offset\" must be non-negative") + return + + flush = bool(payload.get("flush", False)) + self._handle_patch_zero(image_id, cfg, offset=offset, size=size, flush=flush) + + # ------------------------------------------------------------------ + # Operation handlers + # ------------------------------------------------------------------ + + def _handle_get_image( + self, image_id: str, cfg: Dict[str, Any], range_header: Optional[str] + ) -> None: + if not self._concurrency.acquire_read(image_id): + self._send_error_json(HTTPStatus.SERVICE_UNAVAILABLE, "too many parallel reads") + return + + start = now_s() + bytes_sent = 0 + try: + logging.info("GET start image_id=%s range=%s", image_id, range_header or "-") + backend = create_backend(cfg) + session = None + try: + session = backend.open_session() + size = session.size() + except OSError as e: + logging.error("GET size error image_id=%s err=%r", image_id, e) + self._send_error_json(HTTPStatus.INTERNAL_SERVER_ERROR, "failed to access image") + if session is not None: + session.close() + backend.close() + return + + try: + 
start_off = 0 + end_off_incl = size - 1 if size > 0 else -1 + status = HTTPStatus.OK + content_length = size + if range_header is not None: + try: + start_off, end_off_incl = self._parse_single_range(range_header, size) + except ValueError as e: + if "unsatisfiable" in str(e): + self._send_range_not_satisfiable(size) + return + self._send_error_json(HTTPStatus.BAD_REQUEST, "invalid Range header") + return + status = HTTPStatus.PARTIAL_CONTENT + content_length = (end_off_incl - start_off) + 1 + + self.send_response(status) + self._send_imageio_headers() + self.send_header("Content-Type", "application/octet-stream") + self.send_header("Content-Length", str(content_length)) + if status == HTTPStatus.PARTIAL_CONTENT: + self.send_header("Content-Range", f"bytes {start_off}-{end_off_incl}/{size}") + self.end_headers() + + offset = start_off + end_excl = end_off_incl + 1 + while offset < end_excl: + to_read = min(CHUNK_SIZE, end_excl - offset) + data = session.read(offset, to_read) + if not data: + break + try: + self.wfile.write(data) + except BrokenPipeError: + logging.info("GET client disconnected image_id=%s at=%d", image_id, offset) + break + offset += len(data) + bytes_sent += len(data) + finally: + session.close() + backend.close() + except Exception as e: + logging.error("GET error image_id=%s err=%r", image_id, e) + try: + if not self.wfile.closed: + self.close_connection = True + except Exception: + pass + finally: + self._concurrency.release_read(image_id) + dur = now_s() - start + logging.info( + "GET end image_id=%s bytes=%d duration_s=%.3f", image_id, bytes_sent, dur + ) + + def _handle_put_image( + self, image_id: str, cfg: Dict[str, Any], content_length: int, flush: bool + ) -> None: + lock = self._concurrency.get_image_lock(image_id) + lock.acquire() + + if not self._concurrency.acquire_write(image_id): + lock.release() + self._send_error_json(HTTPStatus.SERVICE_UNAVAILABLE, "too many parallel writes") + return + + start = now_s() + bytes_written = 0 + 
try: + logging.info("PUT start image_id=%s content_length=%d", image_id, content_length) + backend = create_backend(cfg) + try: + bytes_written = backend.write_full(self.rfile, content_length, flush) + self._send_json( + HTTPStatus.OK, + {"ok": True, "bytes_written": bytes_written, "flushed": flush}, + ) + except IOError as e: + self._send_error_json(HTTPStatus.BAD_REQUEST, str(e)) + finally: + backend.close() + except Exception as e: + logging.error("PUT error image_id=%s err=%r", image_id, e) + self._send_error_json(HTTPStatus.INTERNAL_SERVER_ERROR, "backend error") + finally: + self._concurrency.release_write(image_id) + lock.release() + dur = now_s() - start + logging.info( + "PUT end image_id=%s bytes=%d duration_s=%.3f", image_id, bytes_written, dur + ) + + def _handle_put_range( + self, + image_id: str, + cfg: Dict[str, Any], + content_range: str, + content_length: int, + flush: bool, + ) -> None: + lock = self._concurrency.get_image_lock(image_id) + lock.acquire() + + if not self._concurrency.acquire_write(image_id): + lock.release() + self._send_error_json(HTTPStatus.SERVICE_UNAVAILABLE, "too many parallel writes") + return + + start = now_s() + bytes_written = 0 + try: + logging.info( + "PUT range start image_id=%s Content-Range=%s content_length=%d flush=%s", + image_id, content_range, content_length, flush, + ) + try: + start_off, _end_inclusive = self._parse_content_range(content_range) + except ValueError as e: + self._send_error_json( + HTTPStatus.BAD_REQUEST, f"invalid Content-Range header: {e}" + ) + return + + backend = create_backend(cfg) + try: + nbd_backend: NbdBackend = backend # type: ignore[assignment] + bytes_written = nbd_backend.write_range(self.rfile, start_off, content_length) + if flush: + nbd_backend.flush() + self._send_json( + HTTPStatus.OK, + {"ok": True, "bytes_written": bytes_written, "flushed": flush}, + ) + except ValueError: + image_size = backend.size() + self._send_range_not_satisfiable(image_size) + except IOError as e: + 
self._send_error_json(HTTPStatus.BAD_REQUEST, str(e)) + finally: + backend.close() + except Exception as e: + logging.error("PUT range error image_id=%s err=%r", image_id, e) + self._send_error_json(HTTPStatus.INTERNAL_SERVER_ERROR, "backend error") + finally: + self._concurrency.release_write(image_id) + lock.release() + dur = now_s() - start + logging.info( + "PUT range end image_id=%s bytes=%d duration_s=%.3f flush=%s", + image_id, bytes_written, dur, flush, + ) + + def _handle_get_extents( + self, image_id: str, cfg: Dict[str, Any], context: Optional[str] = None + ) -> None: + lock = self._concurrency.get_image_lock(image_id) + if not lock.acquire(blocking=False): + self._send_error_json(HTTPStatus.CONFLICT, "image busy") + return + + start = now_s() + try: + logging.info("EXTENTS start image_id=%s context=%s", image_id, context) + backend = create_backend(cfg) + try: + if context == "dirty": + nbd_backend: NbdBackend = backend # type: ignore[assignment] + export_bitmap = nbd_backend.export_bitmap + if not export_bitmap: + allocation = nbd_backend.get_allocation_extents() + extents: List[Dict[str, Any]] = [ + {"start": e["start"], "length": e["length"], "dirty": True, "zero": e["zero"]} + for e in allocation + ] + else: + dirty_bitmap_ctx = f"qemu:dirty-bitmap:{export_bitmap}" + extents = nbd_backend.get_dirty_extents(dirty_bitmap_ctx) + if is_fallback_dirty_response(extents): + allocation = nbd_backend.get_allocation_extents() + extents = [ + { + "start": e["start"], + "length": e["length"], + "dirty": True, + "zero": e["zero"], + } + for e in allocation + ] + else: + extents = backend.get_allocation_extents() + self._send_json(HTTPStatus.OK, extents) + finally: + backend.close() + except Exception as e: + logging.error("EXTENTS error image_id=%s err=%r", image_id, e) + self._send_error_json(HTTPStatus.INTERNAL_SERVER_ERROR, "backend error") + finally: + lock.release() + dur = now_s() - start + logging.info("EXTENTS end image_id=%s duration_s=%.3f", image_id, 
dur) + + def _handle_post_flush(self, image_id: str, cfg: Dict[str, Any]) -> None: + lock = self._concurrency.get_image_lock(image_id) + if not lock.acquire(blocking=False): + self._send_error_json(HTTPStatus.CONFLICT, "image busy") + return + + start = now_s() + try: + logging.info("FLUSH start image_id=%s", image_id) + backend = create_backend(cfg) + try: + backend.flush() + self._send_json(HTTPStatus.OK, {"ok": True}) + finally: + backend.close() + except Exception as e: + logging.error("FLUSH error image_id=%s err=%r", image_id, e) + self._send_error_json(HTTPStatus.INTERNAL_SERVER_ERROR, "backend error") + finally: + lock.release() + dur = now_s() - start + logging.info("FLUSH end image_id=%s duration_s=%.3f", image_id, dur) + + def _handle_patch_zero( + self, + image_id: str, + cfg: Dict[str, Any], + offset: int, + size: int, + flush: bool, + ) -> None: + lock = self._concurrency.get_image_lock(image_id) + if not lock.acquire(blocking=False): + self._send_error_json(HTTPStatus.CONFLICT, "image busy") + return + + if not self._concurrency.acquire_write(image_id): + lock.release() + self._send_error_json(HTTPStatus.SERVICE_UNAVAILABLE, "too many parallel writes") + return + + start = now_s() + try: + logging.info( + "PATCH zero start image_id=%s offset=%d size=%d flush=%s", + image_id, offset, size, flush, + ) + backend = create_backend(cfg) + try: + backend.zero(offset, size) + if flush: + backend.flush() + self._send_json(HTTPStatus.OK, {"ok": True}) + except ValueError as e: + self._send_error_json(HTTPStatus.BAD_REQUEST, str(e)) + finally: + backend.close() + except Exception as e: + logging.error("PATCH zero error image_id=%s err=%r", image_id, e) + self._send_error_json(HTTPStatus.INTERNAL_SERVER_ERROR, "backend error") + finally: + self._concurrency.release_write(image_id) + lock.release() + dur = now_s() - start + logging.info("PATCH zero end image_id=%s duration_s=%.3f", image_id, dur) + + def _handle_patch_range( + self, + image_id: str, + cfg: 
Dict[str, Any], + range_header: str, + content_length: int, + ) -> None: + lock = self._concurrency.get_image_lock(image_id) + if not lock.acquire(blocking=False): + self._send_error_json(HTTPStatus.CONFLICT, "image busy") + return + + if not self._concurrency.acquire_write(image_id): + lock.release() + self._send_error_json(HTTPStatus.SERVICE_UNAVAILABLE, "too many parallel writes") + return + + start = now_s() + bytes_written = 0 + try: + logging.info( + "PATCH range start image_id=%s range=%s content_length=%d", + image_id, range_header, content_length, + ) + backend = create_backend(cfg) + try: + image_size = backend.size() + try: + start_off, end_inclusive = self._parse_single_range( + range_header, image_size + ) + except ValueError as e: + if "unsatisfiable" in str(e).lower(): + self._send_range_not_satisfiable(image_size) + else: + self._send_error_json( + HTTPStatus.BAD_REQUEST, f"invalid Range header: {e}" + ) + return + expected_len = end_inclusive - start_off + 1 + if content_length != expected_len: + self._send_error_json( + HTTPStatus.BAD_REQUEST, + f"Content-Length ({content_length}) must equal range length ({expected_len})", + ) + return + nbd_backend: NbdBackend = backend # type: ignore[assignment] + bytes_written = nbd_backend.write_range(self.rfile, start_off, content_length) + self._send_json(HTTPStatus.OK, {"ok": True, "bytes_written": bytes_written}) + except ValueError: + image_size = backend.size() + self._send_range_not_satisfiable(image_size) + except IOError as e: + self._send_error_json(HTTPStatus.BAD_REQUEST, str(e)) + finally: + backend.close() + except Exception as e: + logging.error("PATCH range error image_id=%s err=%r", image_id, e) + self._send_error_json(HTTPStatus.INTERNAL_SERVER_ERROR, "backend error") + finally: + self._concurrency.release_write(image_id) + lock.release() + dur = now_s() - start + logging.info( + "PATCH range end image_id=%s bytes=%d duration_s=%.3f", + image_id, bytes_written, dur, + ) diff --git 
a/scripts/vm/hypervisor/kvm/imageserver/server.py b/scripts/vm/hypervisor/kvm/imageserver/server.py new file mode 100644 index 000000000000..1bc42252d4f2 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/server.py @@ -0,0 +1,227 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import argparse +import json +import logging +import os +import socket +import ssl +import threading +import time +from http.server import HTTPServer +from socketserver import ThreadingMixIn +from typing import Type + +try: + from http.server import ThreadingHTTPServer +except ImportError: + class ThreadingHTTPServer(ThreadingMixIn, HTTPServer): # type: ignore[no-redef] + pass + +from .concurrency import ConcurrencyManager +from .config import TransferRegistry, validate_transfer_config +from .constants import ( + CONTROL_RECV_BUFFER, + CONTROL_SOCKET, + CONTROL_SOCKET_BACKLOG, + CONTROL_SOCKET_PERMISSIONS, + DEFAULT_HTTP_PORT, + DEFAULT_LISTEN_ADDRESS, + MAX_PARALLEL_READS, + MAX_PARALLEL_WRITES, +) +from .handler import Handler + + +def make_handler( + concurrency: ConcurrencyManager, + registry: TransferRegistry, +) -> Type[Handler]: + """ + Create a Handler subclass with injected dependencies. 
+ + BaseHTTPRequestHandler is instantiated per-request by the server, so we + cannot pass constructor args. Instead, we set class-level attributes. + """ + + class ConfiguredHandler(Handler): + _concurrency = concurrency + _registry = registry + + return ConfiguredHandler + + +def _handle_control_conn(conn: socket.socket, registry: TransferRegistry) -> None: + """Handle a single control-socket connection (one JSON request/response).""" + try: + data = b"" + while True: + chunk = conn.recv(CONTROL_RECV_BUFFER) + if not chunk: + break + data += chunk + if b"\n" in data: + break + + msg = json.loads(data.strip()) + action = msg.get("action") + + if action == "register": + transfer_id = msg.get("transfer_id") + raw_config = msg.get("config") + if not transfer_id or not isinstance(raw_config, dict): + resp = {"status": "error", "message": "missing transfer_id or config"} + else: + try: + config = validate_transfer_config(raw_config) + except ValueError as e: + resp = {"status": "error", "message": str(e)} + else: + if registry.register(transfer_id, config): + resp = {"status": "ok", "active_transfers": registry.active_count()} + else: + resp = {"status": "error", "message": "invalid transfer_id"} + elif action == "unregister": + transfer_id = msg.get("transfer_id") + if not transfer_id: + resp = {"status": "error", "message": "missing transfer_id"} + else: + remaining = registry.unregister(transfer_id) + resp = {"status": "ok", "active_transfers": remaining} + elif action == "status": + resp = {"status": "ok", "active_transfers": registry.active_count()} + else: + resp = {"status": "error", "message": f"unknown action: {action}"} + + conn.sendall((json.dumps(resp) + "\n").encode("utf-8")) + except Exception as e: + logging.error("control socket error: %r", e) + try: + conn.sendall((json.dumps({"status": "error", "message": str(e)}) + "\n").encode("utf-8")) + except Exception: + pass + finally: + conn.close() + + +def _idle_sweep_loop(registry: TransferRegistry, 
interval_s: float = 10.0) -> None: + while True: + time.sleep(interval_s) + try: + registry.sweep_expired_transfers() + except Exception: + logging.exception("idle sweep error") + + +def _control_listener(registry: TransferRegistry, sock_path: str) -> None: + """Accept loop for the Unix domain control socket (runs in a daemon thread).""" + if os.path.exists(sock_path): + os.unlink(sock_path) + os.makedirs(os.path.dirname(sock_path), exist_ok=True) + + srv = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + srv.bind(sock_path) + os.chmod(sock_path, CONTROL_SOCKET_PERMISSIONS) + srv.listen(CONTROL_SOCKET_BACKLOG) + logging.info("control socket listening on %s", sock_path) + + while True: + conn, _ = srv.accept() + threading.Thread( + target=_handle_control_conn, + args=(conn, registry), + daemon=True, + ).start() + + +def main() -> None: + parser = argparse.ArgumentParser( + description="CloudStack image server backed by NBD / local file" + ) + parser.add_argument("--listen", default=DEFAULT_LISTEN_ADDRESS, help="Address to bind") + parser.add_argument("--port", type=int, default=DEFAULT_HTTP_PORT, help="Port to listen on") + parser.add_argument( + "--control-socket", + default=CONTROL_SOCKET, + help="Path to the Unix domain control socket", + ) + parser.add_argument( + "--tls-enabled", + action="store_true", + help="Enable TLS for the HTTP transfer endpoint", + ) + parser.add_argument( + "--tls-cert-file", + default=None, + help="Path to PEM certificate file used when TLS is enabled", + ) + parser.add_argument( + "--tls-key-file", + default=None, + help="Path to PEM private key file used when TLS is enabled", + ) + args = parser.parse_args() + + if args.tls_enabled and (not args.tls_cert_file or not args.tls_key_file): + parser.error("--tls-enabled requires --tls-cert-file and --tls-key-file") + + logging.basicConfig( + level=logging.INFO, + format="%(asctime)s %(levelname)s %(message)s", + ) + + registry = TransferRegistry() + concurrency = 
ConcurrencyManager(MAX_PARALLEL_READS, MAX_PARALLEL_WRITES) + handler_cls = make_handler(concurrency, registry) + + ctrl_thread = threading.Thread( + target=_control_listener, + args=(registry, args.control_socket), + daemon=True, + ) + ctrl_thread.start() + + sweep_thread = threading.Thread( + target=_idle_sweep_loop, + args=(registry,), + daemon=True, + ) + sweep_thread.start() + + addr = (args.listen, args.port) + httpd = ThreadingHTTPServer(addr, handler_cls) + + scheme = "http" + if args.tls_enabled: + context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) + + if hasattr(ssl, "TLSVersion") and hasattr(context, "minimum_version"): + context.minimum_version = ssl.TLSVersion.TLSv1_2 + else: + if hasattr(ssl, "OP_NO_TLSv1"): + context.options |= ssl.OP_NO_TLSv1 + if hasattr(ssl, "OP_NO_TLSv1_1"): + context.options |= ssl.OP_NO_TLSv1_1 + + context.load_cert_chain(certfile=args.tls_cert_file, keyfile=args.tls_key_file) + + httpd.socket = context.wrap_socket(httpd.socket, server_side=True) + scheme = "https" + + logging.info("listening on %s://%s:%d", scheme, args.listen, args.port) + httpd.serve_forever() diff --git a/scripts/vm/hypervisor/kvm/imageserver/tests/__init__.py b/scripts/vm/hypervisor/kvm/imageserver/tests/__init__.py new file mode 100644 index 000000000000..0ccbeeeafb7c --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/tests/__init__.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/scripts/vm/hypervisor/kvm/imageserver/tests/test_base.py b/scripts/vm/hypervisor/kvm/imageserver/tests/test_base.py new file mode 100644 index 000000000000..c8703f8a1082 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/tests/test_base.py @@ -0,0 +1,568 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +Shared infrastructure for the image-server test suite (stdlib unittest only). + +Provides: +- A singleton image server process started once for the entire test run. +- Server stdout/stderr appended to ``/imageserver.log``. +- On shutdown: stop the child process, close the log handle, unlink the control socket; + the temp directory and ``imageserver.log`` are left on disk. +- Control-socket helpers using pure-Python AF_UNIX. +- qemu-nbd server management. 
+- Transfer registration / teardown helpers. +- HTTP helper functions. +""" + +import atexit +import functools +import json +import logging +import os +import random +import signal +import socket +import subprocess +import sys +import tempfile +import time +import unittest +import uuid +from pathlib import Path +from typing import Any, Dict, Optional, TextIO + +IMAGE_SIZE = 1 * 1024 * 1024 # 1 MiB +SERVER_STARTUP_TIMEOUT = 10 +QEMU_NBD_STARTUP_TIMEOUT = 5 +HTTP_TIMEOUT = 30 # seconds per HTTP request + +logging.basicConfig( + level=logging.INFO, + stream=sys.stderr, + format="%(asctime)s [TEST] %(message)s", +) +log = logging.getLogger(__name__) + + +def randbytes(seed, n): + """Generate n deterministic pseudo-random bytes (works on Python 3.6+).""" + rng = random.Random(seed) + return rng.getrandbits(8 * n).to_bytes(n, "big") + + +def test_timeout(seconds): + """Decorator that fails a test if it exceeds *seconds* (SIGALRM, Unix only).""" + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + def _alarm(signum, frame): + raise TimeoutError( + "{} timed out after {}s".format(func.__qualname__, seconds) + ) + prev = signal.signal(signal.SIGALRM, _alarm) + signal.alarm(seconds) + try: + return func(*args, **kwargs) + finally: + signal.alarm(0) + signal.signal(signal.SIGALRM, prev) + return wrapper + return decorator + +# ── Singleton state shared across all test modules ────────────────────── + +_tmp_dir: Optional[str] = None +_server_proc: Optional[subprocess.Popen] = None +_server_info: Optional[Dict[str, Any]] = None +_server_log_fp: Optional[TextIO] = None +_server_log_path: Optional[str] = None +_atexit_registered: bool = False + + +def _free_port() -> int: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("127.0.0.1", 0)) + return s.getsockname()[1] + + +def control_socket_send(sock_path: str, message: dict, retries: int = 5) -> dict: + """Send a JSON message to the control socket and return the parsed 
response.""" + payload = (json.dumps(message) + "\n").encode("utf-8") + last_err = None + for attempt in range(retries): + try: + with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s: + s.settimeout(5) + s.connect(sock_path) + s.sendall(payload) + s.shutdown(socket.SHUT_WR) + data = b"" + while True: + chunk = s.recv(4096) + if not chunk: + break + data += chunk + return json.loads(data.strip()) + except (BlockingIOError, ConnectionRefusedError, OSError) as e: + last_err = e + time.sleep(0.1 * (attempt + 1)) + raise last_err + + +def _wait_for_control_socket(sock_path: str, timeout: float = SERVER_STARTUP_TIMEOUT) -> None: + deadline = time.monotonic() + timeout + while time.monotonic() < deadline: + try: + resp = control_socket_send(sock_path, {"action": "status"}) + if resp.get("status") == "ok": + return + except (ConnectionRefusedError, FileNotFoundError, OSError): + pass + time.sleep(0.2) + raise RuntimeError( + f"Image server control socket at {sock_path} not ready within {timeout}s" + ) + + +def _wait_for_nbd_socket(sock_path: str, timeout: float = QEMU_NBD_STARTUP_TIMEOUT) -> None: + deadline = time.monotonic() + timeout + while time.monotonic() < deadline: + if os.path.exists(sock_path): + try: + with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s: + s.settimeout(1) + s.connect(sock_path) + return + except (ConnectionRefusedError, OSError): + pass + time.sleep(0.2) + raise RuntimeError( + f"qemu-nbd socket at {sock_path} not ready within {timeout}s" + ) + + +def get_tmp_dir() -> str: + global _tmp_dir + if _tmp_dir is None: + _tmp_dir = tempfile.mkdtemp(prefix="imageserver_test_") + return _tmp_dir + + +def _read_log_tail(path: str, max_bytes: int = 65536) -> str: + """Return up to *max_bytes* of UTF-8 text from the end of *path*.""" + try: + with open(path, "rb") as f: + f.seek(0, os.SEEK_END) + size = f.tell() + f.seek(max(0, size - max_bytes)) + return f.read().decode("utf-8", errors="replace") + except OSError as e: + return f"(could not 
read log: {e})" + + +def get_image_server() -> Dict[str, Any]: + """Return the singleton image-server info dict, starting it if needed.""" + global _server_proc, _server_info, _server_log_fp, _server_log_path, _atexit_registered + + if _server_info is not None: + return _server_info + + tmp = get_tmp_dir() + port = _free_port() + ctrl_sock = os.path.join(tmp, "ctrl.sock") + log_path = os.path.join(tmp, "imageserver.log") + _server_log_path = log_path + + imageserver_pkg = str(Path(__file__).resolve().parent.parent) + parent_dir = str(Path(imageserver_pkg).parent) + + env = os.environ.copy() + env["PYTHONPATH"] = parent_dir + os.pathsep + env.get("PYTHONPATH", "") + + _server_log_fp = open( + log_path, "a", encoding="utf-8", buffering=1, errors="replace" + ) + try: + _server_log_fp.write( + "\n========== imageserver test subprocess log ==========\n" + ) + _server_log_fp.flush() + except OSError: + pass + + proc = subprocess.Popen( + [ + sys.executable, "-m", "imageserver", + "--listen", "127.0.0.1", + "--port", str(port), + "--control-socket", ctrl_sock, + ], + cwd=parent_dir, + env=env, + stdout=_server_log_fp, + stderr=_server_log_fp, + ) + _server_proc = proc + + try: + _wait_for_control_socket(ctrl_sock) + except RuntimeError: + proc.kill() + try: + proc.wait(timeout=5) + except subprocess.TimeoutExpired: + proc.kill() + proc.wait(timeout=5) + try: + _server_log_fp.flush() + except OSError: + pass + tail = _read_log_tail(log_path) + try: + _server_log_fp.close() + except OSError: + pass + _server_log_fp = None + _server_proc = None + raise RuntimeError( + "Image server failed to start.\n" + f"Log file: {log_path}\n" + f"--- log tail ---\n{tail}" + ) + + def send(msg: dict) -> dict: + return control_socket_send(ctrl_sock, msg) + + _server_info = { + "base_url": f"http://127.0.0.1:{port}", + "port": port, + "ctrl_sock": ctrl_sock, + "send": send, + "imageserver_log": log_path, + } + if not _atexit_registered: + atexit.register(shutdown_image_server) + 
_atexit_registered = True + sys.stdout.write( + "\n[IMAGESERVER_TEST] child image server log file: %s\n\n" % log_path + ) + sys.stdout.flush() + return _server_info + + +def shutdown_image_server() -> None: + global _server_proc, _server_info, _tmp_dir, _server_log_fp, _server_log_path + ctrl_sock: Optional[str] = None + if _server_info is not None: + ctrl_sock = _server_info.get("ctrl_sock") + + if _server_proc is not None: + _server_proc.terminate() + try: + _server_proc.wait(timeout=5) + except subprocess.TimeoutExpired: + _server_proc.kill() + _server_proc.wait(timeout=5) + _server_proc = None + if _server_log_fp is not None: + try: + _server_log_fp.flush() + _server_log_fp.close() + except OSError: + pass + _server_log_fp = None + _server_info = None + _server_log_path = None + + if ctrl_sock: + try: + os.unlink(ctrl_sock) + except FileNotFoundError: + pass + + # Leave temp dir and imageserver.log on disk for debugging; clear pointer only. + _tmp_dir = None + + +# ── qemu-nbd server ──────────────────────────────────────────────────── + +class QemuNbdServer: + """Manages a qemu-nbd process exporting a disk image over a Unix socket.""" + + def __init__( + self, + image_path: str, + socket_path: str, + image_size: int = IMAGE_SIZE, + image_format: str = "raw", + ): + self.image_path = image_path + self.socket_path = socket_path + self.image_size = image_size + self.image_format = image_format + self._proc: Optional[subprocess.Popen] = None + + def start(self) -> None: + if not os.path.exists(self.image_path): + if self.image_format == "raw": + with open(self.image_path, "wb") as f: + f.truncate(self.image_size) + else: + raise FileNotFoundError( + f"disk image not found for format {self.image_format!r}: {self.image_path}" + ) + + self._proc = subprocess.Popen( + [ + "qemu-nbd", + "--socket", self.socket_path, + "--format", self.image_format, + "--persistent", + "--shared=0", + "--cache=none", + self.image_path, + ], + stdout=subprocess.PIPE, + 
stderr=subprocess.PIPE, + ) + _wait_for_nbd_socket(self.socket_path) + + def stop(self) -> None: + if self._proc is not None: + for pipe in (self._proc.stdout, self._proc.stderr): + if pipe: + try: + pipe.close() + except Exception: + pass + self._proc.terminate() + try: + self._proc.wait(timeout=5) + except subprocess.TimeoutExpired: + self._proc.kill() + self._proc.wait(timeout=5) + self._proc = None + + +# ── Factory helpers ───────────────────────────────────────────────────── + +def make_tmp_image(data=None, image_size=IMAGE_SIZE) -> str: + """Create a temp raw image file in the shared tmp dir; return path.""" + tmp = get_tmp_dir() + path = os.path.join(tmp, f"img_{uuid.uuid4().hex[:8]}.raw") + if data is not None: + with open(path, "wb") as f: + f.write(data) + else: + with open(path, "wb") as f: + f.write(randbytes(42, image_size)) + return path + + +def make_file_transfer(data=None, image_size=IMAGE_SIZE, idle_timeout_seconds=None): + """ + Create a temp file + register a file-backend transfer. + Returns (transfer_id, url, file_path, cleanup_callable). + + If *idle_timeout_seconds* is set, it is sent in the transfer config (for idle expiry tests). + """ + srv = get_image_server() + path = make_tmp_image(data=data, image_size=image_size) + transfer_id = f"file-{uuid.uuid4().hex[:8]}" + cfg = {"backend": "file", "file": path} + if idle_timeout_seconds is not None: + cfg["idle_timeout_seconds"] = idle_timeout_seconds + resp = srv["send"]({ + "action": "register", + "transfer_id": transfer_id, + "config": cfg, + }) + assert resp["status"] == "ok", f"register failed: {resp}" + url = f"{srv['base_url']}/images/{transfer_id}" + + def cleanup(): + srv["send"]({"action": "unregister", "transfer_id": transfer_id}) + try: + os.unlink(path) + except FileNotFoundError: + pass + + return transfer_id, url, path, cleanup + + +def make_nbd_transfer(image_size=IMAGE_SIZE): + """ + Create a qemu-nbd server + register an NBD-backend transfer. 
+ Returns (transfer_id, url, QemuNbdServer, cleanup_callable). + """ + srv = get_image_server() + tmp = get_tmp_dir() + img_path = os.path.join(tmp, f"nbd_{uuid.uuid4().hex[:8]}.raw") + sock_path = os.path.join(tmp, f"nbd_{uuid.uuid4().hex[:8]}.sock") + + server = QemuNbdServer(img_path, sock_path, image_size=image_size) + server.start() + + transfer_id = f"nbd-{uuid.uuid4().hex[:8]}" + resp = srv["send"]({ + "action": "register", + "transfer_id": transfer_id, + "config": {"backend": "nbd", "socket": sock_path}, + }) + assert resp["status"] == "ok", f"register failed: {resp}" + url = f"{srv['base_url']}/images/{transfer_id}" + + def cleanup(): + srv["send"]({"action": "unregister", "transfer_id": transfer_id}) + server.stop() + for p in (img_path, sock_path): + try: + os.unlink(p) + except FileNotFoundError: + pass + + return transfer_id, url, server, cleanup + + +def make_nbd_transfer_existing_disk(image_path: str, image_format: str = "qcow2"): + """ + Start qemu-nbd for an existing on-disk image (e.g. qcow2) and register a transfer. + + Does not delete *image_path* on cleanup (only the Unix socket under tmp). + + Returns (transfer_id, url, QemuNbdServer, cleanup_callable). 
+ """ + srv = get_image_server() + tmp = get_tmp_dir() + sock_path = os.path.join(tmp, f"nbd_{uuid.uuid4().hex[:8]}.sock") + + server = QemuNbdServer( + image_path, sock_path, image_format=image_format + ) + server.start() + + transfer_id = f"nbd-{uuid.uuid4().hex[:8]}" + resp = srv["send"]({ + "action": "register", + "transfer_id": transfer_id, + "config": {"backend": "nbd", "socket": sock_path}, + }) + assert resp["status"] == "ok", f"register failed: {resp}" + url = f"{srv['base_url']}/images/{transfer_id}" + + def cleanup(): + srv["send"]({"action": "unregister", "transfer_id": transfer_id}) + server.stop() + try: + os.unlink(sock_path) + except FileNotFoundError: + pass + + return transfer_id, url, server, cleanup + + +# ── HTTP helpers ──────────────────────────────────────────────────────── + +import urllib.request +import urllib.error + + +def http_get(url, headers=None, timeout=HTTP_TIMEOUT): + req = urllib.request.Request(url, headers=headers or {}) + return urllib.request.urlopen(req, timeout=timeout) + + +def http_put(url, data, headers=None, timeout=HTTP_TIMEOUT): + hdrs = {"Content-Length": str(len(data))} + if headers: + hdrs.update(headers) + req = urllib.request.Request(url, data=data, headers=hdrs, method="PUT") + return urllib.request.urlopen(req, timeout=timeout) + + +def http_post(url, data=b"", headers=None, timeout=HTTP_TIMEOUT): + hdrs = {} + if headers: + hdrs.update(headers) + req = urllib.request.Request(url, data=data, headers=hdrs, method="POST") + return urllib.request.urlopen(req, timeout=timeout) + + +def http_options(url, timeout=HTTP_TIMEOUT): + req = urllib.request.Request(url, method="OPTIONS") + return urllib.request.urlopen(req, timeout=timeout) + + +def http_patch(url, data, headers=None, timeout=HTTP_TIMEOUT): + hdrs = {} + if headers: + hdrs.update(headers) + req = urllib.request.Request(url, data=data, headers=hdrs, method="PATCH") + return urllib.request.urlopen(req, timeout=timeout) + + +# ── Base TestCase with shared 
setUp/tearDown ──────────────────────────── + +class ImageServerTestCase(unittest.TestCase): + """ + Base class for image-server tests. + + Ensures the image server is running before any test method. + Subclasses that need a file or NBD transfer should set them up + in setUp() and tear down in tearDown(). + """ + + @classmethod + def setUpClass(cls): + cls.server = get_image_server() + cls.base_url = cls.server["base_url"] + + def ctrl(self, msg): + """Send a control-socket message; wraps server['send'] to avoid descriptor issues.""" + return self.server["send"](msg) + + def _make_tmp_image(self, data=None): + return make_tmp_image(data=data) + + def _register_file_transfer(self, data=None): + return make_file_transfer(data=data) + + def _register_nbd_transfer(self): + return make_nbd_transfer() + + @staticmethod + def dump_server_logs(max_bytes: int = 256 * 1024): + """Print a tail of the image-server log file (shared by all tests in the run).""" + path = _server_log_path + if not path or not os.path.isfile(path): + return + try: + if _server_log_fp is not None: + _server_log_fp.flush() + except OSError: + pass + try: + data = _read_log_tail(path, max_bytes=max_bytes) + if data.strip(): + sys.stderr.write("\n=== IMAGE SERVER LOG (tail) ===\n") + sys.stderr.write(data) + if not data.endswith("\n"): + sys.stderr.write("\n") + sys.stderr.write("=== END IMAGE SERVER LOG ===\n") + except Exception: + pass diff --git a/scripts/vm/hypervisor/kvm/imageserver/tests/test_combinations.py b/scripts/vm/hypervisor/kvm/imageserver/tests/test_combinations.py new file mode 100644 index 000000000000..509f9fde05a5 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/tests/test_combinations.py @@ -0,0 +1,397 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +Multi-operation sequences, parallel reads across multiple transfer objects, +cross-backend scenarios, and edge cases. +""" + +import json +import logging +import unittest +import urllib.error +from concurrent.futures import ThreadPoolExecutor, as_completed + +from .test_base import ( + IMAGE_SIZE, + ImageServerTestCase, + http_get, + http_patch, + http_post, + http_put, + make_file_transfer, + make_nbd_transfer, + randbytes, + shutdown_image_server, + test_timeout, +) + +log = logging.getLogger(__name__) +FUTURES_TIMEOUT = 60 # seconds for as_completed to collect all results + + +def _fetch(url, headers=None): + """GET *url* and return the body bytes, properly closing the response.""" + resp = http_get(url, headers=headers) + try: + return resp.read() + finally: + resp.close() + + +class TestParallelReadsFileBackend(ImageServerTestCase): + """Multiple concurrent GET requests to multiple file-backed transfers.""" + + @test_timeout(120) + def test_parallel_reads_single_file_transfer(self): + data = randbytes(500, IMAGE_SIZE) + tid, url, path, cleanup = make_file_transfer(data=data) + try: + results = {} + with ThreadPoolExecutor(max_workers=8) as pool: + futures = {} + for i in range(8): + start = i * (IMAGE_SIZE // 8) + end = start + (IMAGE_SIZE // 8) - 1 + f = pool.submit( + _fetch, url, headers={"Range": f"bytes={start}-{end}"} + ) + futures[f] = (start, end) + + for f in 
as_completed(futures, timeout=FUTURES_TIMEOUT): + start, end = futures[f] + results[(start, end)] = f.result() + + for (start, end), chunk in sorted(results.items()): + self.assertEqual(chunk, data[start:end + 1], f"Mismatch at {start}-{end}") + finally: + cleanup() + + @test_timeout(120) + def test_parallel_reads_multiple_file_transfers(self): + """Parallel reads across 4 different file-backed transfer objects.""" + transfers = [] + try: + for i in range(4): + data = randbytes(600 + i, IMAGE_SIZE) + tid, url, path, cleanup = make_file_transfer(data=data) + transfers.append((tid, url, data, cleanup)) + + with ThreadPoolExecutor(max_workers=8) as pool: + futures = {} + for idx, (tid, url, data, _) in enumerate(transfers): + for j in range(2): + f = pool.submit(_fetch, url) + futures[f] = (idx, data) + + for f in as_completed(futures, timeout=FUTURES_TIMEOUT): + idx, expected_data = futures[f] + got = f.result() + self.assertEqual(got, expected_data, f"Transfer {idx} mismatch") + finally: + for _, _, _, cleanup in transfers: + cleanup() + + +class TestParallelReadsNbdBackend(ImageServerTestCase): + """Multiple concurrent GET requests to multiple NBD-backed transfers.""" + + @test_timeout(120) + def test_parallel_reads_single_nbd_transfer(self): + data = randbytes(700, IMAGE_SIZE) + tid, url, nbd_server, cleanup = make_nbd_transfer() + try: + log.info("Writing %d bytes to NBD transfer %s", IMAGE_SIZE, tid) + http_put(url, data) + log.info("NBD write done, starting 8 parallel range reads") + + results = {} + with ThreadPoolExecutor(max_workers=8) as pool: + futures = {} + for i in range(8): + start = i * (IMAGE_SIZE // 8) + end = start + (IMAGE_SIZE // 8) - 1 + f = pool.submit( + _fetch, url, headers={"Range": f"bytes={start}-{end}"} + ) + futures[f] = (start, end) + + completed = 0 + for f in as_completed(futures, timeout=FUTURES_TIMEOUT): + start, end = futures[f] + results[(start, end)] = f.result() + completed += 1 + log.info("NBD range read %d/8 done: 
bytes=%d-%d", completed, start, end) + + for (start, end), chunk in sorted(results.items()): + self.assertEqual(chunk, data[start:end + 1], f"Mismatch at {start}-{end}") + finally: + cleanup() + + @test_timeout(120) + def test_parallel_reads_multiple_nbd_transfers(self): + """Parallel reads across 4 different NBD-backed transfer objects.""" + transfers = [] + try: + for i in range(4): + data = randbytes(800 + i, IMAGE_SIZE) + log.info("Setting up NBD transfer %d", i) + tid, url, nbd_server, cleanup = make_nbd_transfer() + log.info("Writing data to NBD transfer %d (tid=%s)", i, tid) + http_put(url, data) + transfers.append((tid, url, data, cleanup)) + log.info("NBD transfer %d ready", i) + + log.info("Starting parallel reads across %d NBD transfers", len(transfers)) + with ThreadPoolExecutor(max_workers=8) as pool: + futures = {} + for idx, (tid, url, data, _) in enumerate(transfers): + for j in range(2): + f = pool.submit(_fetch, url) + futures[f] = (idx, data) + + completed = 0 + for f in as_completed(futures, timeout=FUTURES_TIMEOUT): + idx, expected_data = futures[f] + got = f.result() + completed += 1 + log.info("Read %d/%d done: NBD transfer idx=%d, %d bytes", + completed, len(futures), idx, len(got)) + self.assertEqual(got, expected_data, f"NBD transfer {idx} mismatch") + finally: + for _, _, _, cleanup in transfers: + cleanup() + + +class TestParallelReadsMixedBackends(ImageServerTestCase): + """Parallel reads across a mix of file and NBD transfers simultaneously.""" + + @test_timeout(120) + def test_parallel_reads_file_and_nbd_mixed(self): + transfers = [] + try: + for i in range(2): + log.info("Setting up file transfer %d", i) + data = randbytes(900 + i, IMAGE_SIZE) + tid, url, path, cleanup = make_file_transfer(data=data) + transfers.append(("file", tid, url, data, cleanup)) + log.info("File transfer %d ready: tid=%s", i, tid) + + for i in range(2): + log.info("Setting up NBD transfer %d", i) + data = randbytes(950 + i, IMAGE_SIZE) + tid, url, nbd_server, 
cleanup = make_nbd_transfer() + log.info("NBD transfer %d registered: tid=%s, writing data...", i, tid) + http_put(url, data) + transfers.append(("nbd", tid, url, data, cleanup)) + log.info("NBD transfer %d ready", i) + + log.info("Starting parallel reads across %d transfers (2 file + 2 nbd)", + len(transfers)) + with ThreadPoolExecutor(max_workers=8) as pool: + futures = {} + for idx, (backend_type, tid, url, data, _) in enumerate(transfers): + for j in range(2): + f = pool.submit(_fetch, url) + futures[f] = (idx, backend_type, data) + + completed = 0 + for f in as_completed(futures, timeout=FUTURES_TIMEOUT): + idx, backend_type, expected = futures[f] + got = f.result() + completed += 1 + log.info("Read %d/%d done: %s transfer idx=%d, %d bytes", + completed, len(futures), backend_type, idx, len(got)) + self.assertEqual(got, expected, f"{backend_type} transfer {idx} mismatch") + + log.info("All parallel mixed reads completed successfully") + except TimeoutError: + log.error("TIMEOUT in mixed parallel reads — dumping server logs") + self.dump_server_logs() + raise + finally: + for _, _, _, _, cleanup in transfers: + cleanup() + + +class TestWriteThenReadNbd(ImageServerTestCase): + """Multi-step write sequences on NBD backend.""" + + def setUp(self): + self._tid, self._url, self._nbd, self._cleanup = make_nbd_transfer() + + def tearDown(self): + self._cleanup() + + def test_partial_writes_then_full_read(self): + http_put(self._url, b"\x00" * IMAGE_SIZE) + + chunk_size = IMAGE_SIZE // 4 + for i in range(4): + offset = i * chunk_size + end = offset + chunk_size - 1 + data = bytes([i & 0xFF]) * chunk_size + http_patch(self._url, data, headers={ + "Range": f"bytes={offset}-{end}", + "Content-Type": "application/octet-stream", + "Content-Length": str(chunk_size), + }) + + resp = http_get(self._url) + full = resp.read() + for i in range(4): + offset = i * chunk_size + self.assertEqual(full[offset:offset + chunk_size], bytes([i & 0xFF]) * chunk_size) + + def 
test_zero_then_extents(self): + http_put(self._url, randbytes(1000, IMAGE_SIZE)) + + payload = json.dumps({"op": "zero", "size": IMAGE_SIZE // 2, "offset": 0}).encode() + http_patch(self._url, payload, headers={ + "Content-Type": "application/json", + "Content-Length": str(len(payload)), + }) + + resp = http_get(f"{self._url}/extents") + extents = json.loads(resp.read()) + total = sum(e["length"] for e in extents) + self.assertEqual(total, IMAGE_SIZE) + + def test_write_flush_read(self): + data = randbytes(1001, IMAGE_SIZE) + resp = http_put(f"{self._url}?flush=y", data) + body = json.loads(resp.read()) + self.assertTrue(body["flushed"]) + + resp2 = http_get(self._url) + self.assertEqual(resp2.read(), data) + + +class TestWriteThenReadFile(ImageServerTestCase): + def setUp(self): + self._tid, self._url, self._path, self._cleanup = make_file_transfer() + + def tearDown(self): + self._cleanup() + + def test_put_then_get_roundtrip(self): + data = randbytes(1100, IMAGE_SIZE) + http_put(self._url, data) + resp = http_get(self._url) + self.assertEqual(resp.read(), data) + + +class TestRegisterUseUnregisterUse(ImageServerTestCase): + def test_unregistered_transfer_returns_404(self): + data = randbytes(1200, IMAGE_SIZE) + tid, url, path, cleanup = make_file_transfer(data=data) + + resp = http_get(url) + self.assertEqual(resp.read(), data) + + cleanup() + + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_get(url) + self.assertEqual(ctx.exception.code, 404) + + +class TestMultipleTransfersSimultaneous(ImageServerTestCase): + @test_timeout(120) + def test_operate_on_file_and_nbd_concurrently(self): + file_data = randbytes(1300, IMAGE_SIZE) + nbd_data = randbytes(1301, IMAGE_SIZE) + + ftid, furl, fpath, fcleanup = make_file_transfer(data=file_data) + ntid, nurl, nbd_server, ncleanup = make_nbd_transfer() + + try: + log.info("Writing data to NBD transfer %s", ntid) + http_put(nurl, nbd_data) + + log.info("Starting concurrent file + NBD reads") + with 
ThreadPoolExecutor(max_workers=4) as pool: + f_file = pool.submit(_fetch, furl) + f_nbd = pool.submit(_fetch, nurl) + + self.assertEqual(f_file.result(timeout=FUTURES_TIMEOUT), file_data) + self.assertEqual(f_nbd.result(timeout=FUTURES_TIMEOUT), nbd_data) + log.info("Concurrent reads completed successfully") + finally: + fcleanup() + ncleanup() + + +class TestLargeChunkedTransfer(ImageServerTestCase): + def test_put_larger_than_chunk_size_file(self): + """Upload data that spans multiple CHUNK_SIZE boundaries.""" + tid, url, path, cleanup = make_file_transfer() + try: + data = randbytes(1400, IMAGE_SIZE) + http_put(url, data) + resp = http_get(url) + self.assertEqual(resp.read(), data) + finally: + cleanup() + + def test_nbd_put_larger_than_chunk_size(self): + tid, url, nbd_server, cleanup = make_nbd_transfer() + try: + data = randbytes(1401, IMAGE_SIZE) + http_put(url, data) + resp = http_get(url) + self.assertEqual(resp.read(), data) + finally: + cleanup() + + +class TestEdgeCases(ImageServerTestCase): + def test_get_not_found_path(self): + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_get(f"{self.base_url}/not/images/path") + self.assertEqual(ctx.exception.code, 404) + + def test_post_unknown_tail(self): + tid, url, path, cleanup = make_file_transfer() + try: + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_post(f"{url}/unknown") + self.assertEqual(ctx.exception.code, 404) + finally: + cleanup() + + def test_get_extents_then_flush_nbd(self): + tid, url, nbd_server, cleanup = make_nbd_transfer() + try: + http_put(url, randbytes(1500, IMAGE_SIZE)) + + resp = http_get(f"{url}/extents") + self.assertEqual(resp.status, 200) + resp.read() + + resp2 = http_post(f"{url}/flush") + body = json.loads(resp2.read()) + self.assertTrue(body["ok"]) + finally: + cleanup() + + +if __name__ == "__main__": + try: + unittest.main() + finally: + shutdown_image_server() diff --git a/scripts/vm/hypervisor/kvm/imageserver/tests/test_control_socket.py 
b/scripts/vm/hypervisor/kvm/imageserver/tests/test_control_socket.py new file mode 100644 index 000000000000..187592ff1070 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/tests/test_control_socket.py @@ -0,0 +1,258 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +"""Tests for the Unix domain control socket protocol (register / unregister / status).""" + +import json +import socket +import unittest +import uuid +from concurrent.futures import ThreadPoolExecutor, as_completed + +from .test_base import ImageServerTestCase, make_tmp_image, shutdown_image_server, test_timeout + + +class TestStatus(ImageServerTestCase): + def test_status_returns_ok(self): + resp = self.ctrl({"action": "status"}) + self.assertEqual(resp["status"], "ok") + self.assertIn("active_transfers", resp) + + def test_status_count_is_integer(self): + resp = self.ctrl({"action": "status"}) + self.assertIsInstance(resp["active_transfers"], int) + self.assertGreaterEqual(resp["active_transfers"], 0) + + +class TestRegister(ImageServerTestCase): + def test_register_file_backend(self): + img = make_tmp_image() + tid = f"test-{uuid.uuid4().hex[:8]}" + try: + resp = self.ctrl({ + "action": "register", + "transfer_id": tid, + "config": {"backend": "file", "file": img}, + }) + self.assertEqual(resp["status"], "ok") + self.assertGreaterEqual(resp["active_transfers"], 1) + finally: + self.ctrl({"action": "unregister", "transfer_id": tid}) + + def test_register_nbd_backend(self): + tid = f"test-{uuid.uuid4().hex[:8]}" + try: + resp = self.ctrl({ + "action": "register", + "transfer_id": tid, + "config": {"backend": "nbd", "socket": "/tmp/fake.sock"}, + }) + self.assertEqual(resp["status"], "ok") + finally: + self.ctrl({"action": "unregister", "transfer_id": tid}) + + def test_register_increments_active_count(self): + img = make_tmp_image() + before = self.ctrl({"action": "status"})["active_transfers"] + tid = f"test-{uuid.uuid4().hex[:8]}" + try: + self.ctrl({ + "action": "register", + "transfer_id": tid, + "config": {"backend": "file", "file": img}, + }) + after = self.ctrl({"action": "status"})["active_transfers"] + self.assertEqual(after, before + 1) + finally: + self.ctrl({"action": "unregister", "transfer_id": tid}) + + def 
test_register_missing_transfer_id(self): + img = make_tmp_image() + resp = self.ctrl({ + "action": "register", + "config": {"backend": "file", "file": img}, + }) + self.assertEqual(resp["status"], "error") + + def test_register_empty_transfer_id(self): + img = make_tmp_image() + resp = self.ctrl({ + "action": "register", + "transfer_id": "", + "config": {"backend": "file", "file": img}, + }) + self.assertEqual(resp["status"], "error") + + def test_register_missing_config(self): + resp = self.ctrl({ + "action": "register", + "transfer_id": f"test-{uuid.uuid4().hex[:8]}", + }) + self.assertEqual(resp["status"], "error") + + def test_register_invalid_backend(self): + resp = self.ctrl({ + "action": "register", + "transfer_id": f"test-{uuid.uuid4().hex[:8]}", + "config": {"backend": "invalid"}, + }) + self.assertEqual(resp["status"], "error") + + def test_register_file_missing_path(self): + resp = self.ctrl({ + "action": "register", + "transfer_id": f"test-{uuid.uuid4().hex[:8]}", + "config": {"backend": "file"}, + }) + self.assertEqual(resp["status"], "error") + + def test_register_nbd_missing_socket(self): + resp = self.ctrl({ + "action": "register", + "transfer_id": f"test-{uuid.uuid4().hex[:8]}", + "config": {"backend": "nbd"}, + }) + self.assertEqual(resp["status"], "error") + + def test_register_path_traversal_rejected(self): + img = make_tmp_image() + resp = self.ctrl({ + "action": "register", + "transfer_id": "../etc/passwd", + "config": {"backend": "file", "file": img}, + }) + self.assertEqual(resp["status"], "error") + + def test_register_dot_rejected(self): + img = make_tmp_image() + resp = self.ctrl({ + "action": "register", + "transfer_id": ".", + "config": {"backend": "file", "file": img}, + }) + self.assertEqual(resp["status"], "error") + + def test_register_slash_rejected(self): + img = make_tmp_image() + resp = self.ctrl({ + "action": "register", + "transfer_id": "a/b", + "config": {"backend": "file", "file": img}, + }) + 
self.assertEqual(resp["status"], "error") + + def test_register_duplicate_replaces(self): + img = make_tmp_image() + tid = f"test-{uuid.uuid4().hex[:8]}" + try: + self.ctrl({ + "action": "register", + "transfer_id": tid, + "config": {"backend": "file", "file": img}, + }) + count_before = self.ctrl({"action": "status"})["active_transfers"] + self.ctrl({ + "action": "register", + "transfer_id": tid, + "config": {"backend": "file", "file": img}, + }) + count_after = self.ctrl({"action": "status"})["active_transfers"] + self.assertEqual(count_after, count_before) + finally: + self.ctrl({"action": "unregister", "transfer_id": tid}) + + +class TestUnregister(ImageServerTestCase): + def test_unregister_existing(self): + img = make_tmp_image() + tid = f"test-{uuid.uuid4().hex[:8]}" + self.ctrl({ + "action": "register", + "transfer_id": tid, + "config": {"backend": "file", "file": img}, + }) + before = self.ctrl({"action": "status"})["active_transfers"] + resp = self.ctrl({"action": "unregister", "transfer_id": tid}) + self.assertEqual(resp["status"], "ok") + self.assertEqual(resp["active_transfers"], before - 1) + + def test_unregister_nonexistent(self): + resp = self.ctrl({"action": "unregister", "transfer_id": "does-not-exist"}) + self.assertEqual(resp["status"], "ok") + + def test_unregister_missing_id(self): + resp = self.ctrl({"action": "unregister"}) + self.assertEqual(resp["status"], "error") + + +class TestUnknownAction(ImageServerTestCase): + def test_unknown_action(self): + resp = self.ctrl({"action": "foobar"}) + self.assertEqual(resp["status"], "error") + self.assertIn("unknown", resp.get("message", "").lower()) + + +class TestMalformed(ImageServerTestCase): + def test_malformed_json(self): + sock_path = self.server["ctrl_sock"] + payload = b"not valid json\n" + with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s: + s.settimeout(5) + s.connect(sock_path) + s.sendall(payload) + s.shutdown(socket.SHUT_WR) + data = b"" + while True: + chunk = s.recv(4096) 
+ if not chunk: + break + data += chunk + resp = json.loads(data.strip()) + self.assertEqual(resp["status"], "error") + + +class TestConcurrentRegistrations(ImageServerTestCase): + @test_timeout(60) + def test_concurrent_registers(self): + img = make_tmp_image() + tids = [f"conc-{uuid.uuid4().hex[:8]}" for _ in range(20)] + results = [] + + def register_one(tid): + return self.ctrl({ + "action": "register", + "transfer_id": tid, + "config": {"backend": "file", "file": img}, + }) + + try: + with ThreadPoolExecutor(max_workers=10) as pool: + futures = {pool.submit(register_one, tid): tid for tid in tids} + for f in as_completed(futures, timeout=30): + results.append(f.result()) + + self.assertTrue(all(r["status"] == "ok" for r in results)) + finally: + for tid in tids: + self.ctrl({"action": "unregister", "transfer_id": tid}) + + +if __name__ == "__main__": + try: + unittest.main() + finally: + shutdown_image_server() diff --git a/scripts/vm/hypervisor/kvm/imageserver/tests/test_file_backend.py b/scripts/vm/hypervisor/kvm/imageserver/tests/test_file_backend.py new file mode 100644 index 000000000000..be6eb259cc38 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/tests/test_file_backend.py @@ -0,0 +1,230 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +"""Tests for HTTP operations against a file-backend transfer.""" + +import json +import os +import unittest +import urllib.error + +from .test_base import ( + IMAGE_SIZE, + ImageServerTestCase, + http_get, + http_options, + http_patch, + http_post, + http_put, + make_file_transfer, + randbytes, + shutdown_image_server, +) + + +class FileBackendTestCase(ImageServerTestCase): + """Base that creates a file-backend transfer per test.""" + + def setUp(self): + self._tid, self._url, self._path, self._cleanup = make_file_transfer() + + def tearDown(self): + self._cleanup() + + +class TestOptions(FileBackendTestCase): + def test_options_returns_features(self): + resp = http_options(self._url) + self.assertEqual(resp.status, 200) + body = json.loads(resp.read()) + self.assertIn("flush", body["features"]) + self.assertGreaterEqual(body["max_readers"], 1) + self.assertGreaterEqual(body["max_writers"], 1) + + def test_options_allowed_methods(self): + resp = http_options(self._url) + methods = resp.getheader("Access-Control-Allow-Methods") + for m in ("GET", "PUT", "POST", "OPTIONS"): + self.assertIn(m, methods) + + +class TestGetFull(FileBackendTestCase): + def test_get_full_returns_file_content(self): + with open(self._path, "rb") as f: + expected = f.read() + resp = http_get(self._url) + self.assertEqual(resp.status, 200) + data = resp.read() + self.assertEqual(len(data), len(expected)) + self.assertEqual(data, expected) + + def test_get_full_content_type(self): + resp = http_get(self._url) + resp.read() + self.assertIn("application/octet-stream", resp.getheader("Content-Type")) + + def test_get_full_content_length(self): + resp = http_get(self._url) + resp.read() + self.assertEqual(int(resp.getheader("Content-Length")), os.path.getsize(self._path)) + + +class TestGetRange(FileBackendTestCase): + def test_get_range_partial(self): + with open(self._path, "rb") as f: + 
f.seek(100) + expected = f.read(200) + resp = http_get(self._url, headers={"Range": "bytes=100-299"}) + self.assertEqual(resp.status, 206) + self.assertEqual(resp.read(), expected) + + def test_get_range_content_range_header(self): + size = os.path.getsize(self._path) + resp = http_get(self._url, headers={"Range": "bytes=0-99"}) + self.assertEqual(resp.status, 206) + resp.read() + self.assertEqual(resp.getheader("Content-Range"), f"bytes 0-99/{size}") + + def test_get_range_suffix(self): + with open(self._path, "rb") as f: + expected = f.read()[-100:] + resp = http_get(self._url, headers={"Range": "bytes=-100"}) + self.assertEqual(resp.status, 206) + self.assertEqual(resp.read(), expected) + + def test_get_range_open_ended(self): + with open(self._path, "rb") as f: + f.seek(IMAGE_SIZE - 50) + expected = f.read() + resp = http_get(self._url, headers={"Range": f"bytes={IMAGE_SIZE - 50}-"}) + self.assertEqual(resp.status, 206) + self.assertEqual(resp.read(), expected) + + def test_get_range_unsatisfiable(self): + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_get(self._url, headers={"Range": f"bytes={IMAGE_SIZE + 100}-{IMAGE_SIZE + 200}"}) + self.assertEqual(ctx.exception.code, 416) + + +class TestPut(FileBackendTestCase): + def test_put_full_upload(self): + new_data = randbytes(99, IMAGE_SIZE) + resp = http_put(self._url, new_data) + body = json.loads(resp.read()) + self.assertEqual(resp.status, 200) + self.assertTrue(body["ok"]) + self.assertEqual(body["bytes_written"], IMAGE_SIZE) + + with open(self._path, "rb") as f: + self.assertEqual(f.read(), new_data) + + def test_put_with_flush(self): + new_data = randbytes(100, IMAGE_SIZE) + resp = http_put(f"{self._url}?flush=y", new_data) + body = json.loads(resp.read()) + self.assertTrue(body["ok"]) + self.assertTrue(body["flushed"]) + + def test_put_verify_by_get(self): + new_data = randbytes(101, IMAGE_SIZE) + http_put(self._url, new_data) + resp = http_get(self._url) + self.assertEqual(resp.read(), 
new_data) + + def test_put_with_content_range_rejected(self): + data = b"x" * 100 + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_put(self._url, data, headers={"Content-Range": "bytes 0-99/*"}) + self.assertEqual(ctx.exception.code, 400) + + def test_put_with_range_header_rejected(self): + data = b"x" * 100 + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_put(self._url, data, headers={"Range": "bytes=0-99"}) + self.assertEqual(ctx.exception.code, 400) + + +class TestFlush(FileBackendTestCase): + def test_post_flush(self): + resp = http_post(f"{self._url}/flush") + body = json.loads(resp.read()) + self.assertEqual(resp.status, 200) + self.assertTrue(body["ok"]) + + +class TestPatchRejected(FileBackendTestCase): + def test_patch_rejected_for_file(self): + data = json.dumps({"op": "zero", "size": 100}).encode() + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_patch(self._url, data, headers={ + "Content-Type": "application/json", + "Content-Length": str(len(data)), + }) + self.assertEqual(ctx.exception.code, 400) + + +class TestExtentsRejected(FileBackendTestCase): + def test_extents_rejected_for_file(self): + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_get(f"{self._url}/extents") + self.assertEqual(ctx.exception.code, 400) + + +class TestUnknownImage(ImageServerTestCase): + def test_get_unknown_image(self): + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_get(f"{self.base_url}/images/nonexistent-id") + self.assertEqual(ctx.exception.code, 404) + + def test_put_unknown_image(self): + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_put(f"{self.base_url}/images/nonexistent-id", b"data") + self.assertEqual(ctx.exception.code, 404) + + def test_options_unknown_image(self): + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_options(f"{self.base_url}/images/nonexistent-id") + self.assertEqual(ctx.exception.code, 404) + + +class TestRoundTrip(FileBackendTestCase): + def 
test_put_then_get_roundtrip(self): + payload = randbytes(200, IMAGE_SIZE) + http_put(self._url, payload) + resp = http_get(self._url) + self.assertEqual(resp.read(), payload) + + def test_put_then_ranged_get_roundtrip(self): + payload = randbytes(201, IMAGE_SIZE) + http_put(self._url, payload) + resp = http_get(self._url, headers={"Range": "bytes=512-1023"}) + self.assertEqual(resp.read(), payload[512:1024]) + + def test_multiple_puts_last_wins(self): + first = randbytes(300, IMAGE_SIZE) + second = randbytes(301, IMAGE_SIZE) + http_put(self._url, first) + http_put(self._url, second) + resp = http_get(self._url) + self.assertEqual(resp.read(), second) + + +if __name__ == "__main__": + try: + unittest.main() + finally: + shutdown_image_server() diff --git a/scripts/vm/hypervisor/kvm/imageserver/tests/test_nbd_backend.py b/scripts/vm/hypervisor/kvm/imageserver/tests/test_nbd_backend.py new file mode 100644 index 000000000000..da120ae6bad5 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/tests/test_nbd_backend.py @@ -0,0 +1,706 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +"""Tests for HTTP operations against an NBD-backend transfer (real qemu-nbd).""" + +import json +import os +import subprocess +import unittest +import uuid +import urllib.error +import urllib.request +from concurrent.futures import ThreadPoolExecutor + +from imageserver.constants import MAX_PARALLEL_READS, MAX_PARALLEL_WRITES + +from .test_base import ( + IMAGE_SIZE, + ImageServerTestCase, + get_tmp_dir, + http_get, + http_options, + http_patch, + http_post, + http_put, + make_nbd_transfer, + make_nbd_transfer_existing_disk, + randbytes, + shutdown_image_server, +) + + +class NbdBackendTestCase(ImageServerTestCase): + """Base that creates an NBD-backend transfer per test.""" + + def setUp(self): + self._tid, self._url, self._nbd, self._cleanup = make_nbd_transfer() + + def tearDown(self): + self._cleanup() + + +class TestOptions(NbdBackendTestCase): + def test_options_returns_extents_feature(self): + resp = http_options(self._url) + self.assertEqual(resp.status, 200) + body = json.loads(resp.read()) + self.assertIn("extents", body["features"]) + + def test_options_includes_patch_method(self): + resp = http_options(self._url) + methods = resp.getheader("Access-Control-Allow-Methods") + self.assertIn("PATCH", methods) + + def test_options_has_capabilities(self): + resp = http_options(self._url) + body = json.loads(resp.read()) + self.assertGreaterEqual(body["max_readers"], 1) + self.assertGreaterEqual(body["max_writers"], 1) + + +class TestGetFull(NbdBackendTestCase): + def test_get_full_returns_image_data(self): + with open(self._nbd.image_path, "rb") as f: + expected = f.read() + resp = http_get(self._url) + data = resp.read() + self.assertEqual(resp.status, 200) + self.assertEqual(len(data), len(expected)) + self.assertEqual(data, expected) + + def test_get_full_content_length(self): + resp = http_get(self._url) + resp.read() + self.assertEqual(int(resp.getheader("Content-Length")), IMAGE_SIZE) + + +class TestGetRange(NbdBackendTestCase): + def 
test_get_range_partial(self): + test_data = randbytes(50, IMAGE_SIZE) + http_put(self._url, test_data) + + resp = http_get(self._url, headers={"Range": "bytes=100-299"}) + self.assertEqual(resp.status, 206) + self.assertEqual(resp.read(), test_data[100:300]) + + def test_get_range_content_range_header(self): + resp = http_get(self._url, headers={"Range": "bytes=0-99"}) + self.assertEqual(resp.status, 206) + resp.read() + self.assertEqual(resp.getheader("Content-Range"), f"bytes 0-99/{IMAGE_SIZE}") + + def test_get_range_suffix(self): + test_data = randbytes(51, IMAGE_SIZE) + http_put(self._url, test_data) + + resp = http_get(self._url, headers={"Range": "bytes=-100"}) + self.assertEqual(resp.status, 206) + self.assertEqual(resp.read(), test_data[-100:]) + + def test_get_range_unsatisfiable(self): + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_get(self._url, headers={"Range": f"bytes={IMAGE_SIZE + 100}-{IMAGE_SIZE + 200}"}) + self.assertEqual(ctx.exception.code, 416) + + +class TestPutFull(NbdBackendTestCase): + def test_put_full_upload(self): + new_data = randbytes(60, IMAGE_SIZE) + resp = http_put(self._url, new_data) + body = json.loads(resp.read()) + self.assertEqual(resp.status, 200) + self.assertTrue(body["ok"]) + self.assertEqual(body["bytes_written"], IMAGE_SIZE) + + resp2 = http_get(self._url) + self.assertEqual(resp2.read(), new_data) + + def test_put_with_flush(self): + new_data = randbytes(61, IMAGE_SIZE) + resp = http_put(f"{self._url}?flush=y", new_data) + body = json.loads(resp.read()) + self.assertTrue(body["ok"]) + self.assertTrue(body["flushed"]) + + +class TestPutRange(NbdBackendTestCase): + def test_put_content_range(self): + base_data = randbytes(70, IMAGE_SIZE) + http_put(self._url, base_data) + + patch_data = b"\xAB" * 512 + resp = http_put(self._url, patch_data, headers={ + "Content-Range": "bytes 0-511/*", + "Content-Length": str(len(patch_data)), + }) + body = json.loads(resp.read()) + self.assertEqual(resp.status, 200) + 
self.assertTrue(body["ok"]) + self.assertEqual(body["bytes_written"], 512) + + resp2 = http_get(self._url, headers={"Range": "bytes=0-511"}) + self.assertEqual(resp2.read(), patch_data) + + resp3 = http_get(self._url, headers={"Range": "bytes=512-1023"}) + self.assertEqual(resp3.read(), base_data[512:1024]) + + def test_put_content_range_with_flush(self): + base_data = b"\x00" * IMAGE_SIZE + http_put(self._url, base_data) + + patch_data = b"\xFF" * 256 + resp = http_put(f"{self._url}?flush=y", patch_data, headers={ + "Content-Range": "bytes 1024-1279/*", + "Content-Length": str(len(patch_data)), + }) + body = json.loads(resp.read()) + self.assertTrue(body["ok"]) + self.assertTrue(body["flushed"]) + + +class TestPatchRange(NbdBackendTestCase): + def test_patch_binary_range(self): + base_data = randbytes(80, IMAGE_SIZE) + http_put(self._url, base_data) + + patch_data = b"\xCD" * 1024 + resp = http_patch(self._url, patch_data, headers={ + "Range": "bytes=2048-3071", + "Content-Type": "application/octet-stream", + "Content-Length": str(len(patch_data)), + }) + body = json.loads(resp.read()) + self.assertEqual(resp.status, 200) + self.assertTrue(body["ok"]) + self.assertEqual(body["bytes_written"], 1024) + + resp2 = http_get(self._url, headers={"Range": "bytes=2048-3071"}) + self.assertEqual(resp2.read(), patch_data) + + def test_patch_multiple_ranges_preserves_unwritten(self): + base_data = randbytes(81, IMAGE_SIZE) + http_put(self._url, base_data) + + patch1 = b"\x11" * 256 + http_patch(self._url, patch1, headers={ + "Range": "bytes=0-255", + "Content-Type": "application/octet-stream", + "Content-Length": "256", + }) + + patch2 = b"\x22" * 256 + http_patch(self._url, patch2, headers={ + "Range": "bytes=512-767", + "Content-Type": "application/octet-stream", + "Content-Length": "256", + }) + + resp = http_get(self._url, headers={"Range": "bytes=0-767"}) + got = resp.read() + self.assertEqual(got[:256], patch1) + self.assertEqual(got[256:512], base_data[256:512]) + 
self.assertEqual(got[512:768], patch2) + + +class TestPatchZero(NbdBackendTestCase): + def test_patch_zero(self): + data = randbytes(90, IMAGE_SIZE) + http_put(self._url, data) + + payload = json.dumps({"op": "zero", "size": 4096, "offset": 0}).encode() + resp = http_patch(self._url, payload, headers={ + "Content-Type": "application/json", + "Content-Length": str(len(payload)), + }) + body = json.loads(resp.read()) + self.assertEqual(resp.status, 200) + self.assertTrue(body["ok"]) + + resp2 = http_get(self._url, headers={"Range": "bytes=0-4095"}) + self.assertEqual(resp2.read(), b"\x00" * 4096) + + def test_patch_zero_with_flush(self): + data = b"\xFF" * IMAGE_SIZE + http_put(self._url, data) + + payload = json.dumps({"op": "zero", "size": 512, "offset": 1024, "flush": True}).encode() + resp = http_patch(self._url, payload, headers={ + "Content-Type": "application/json", + "Content-Length": str(len(payload)), + }) + body = json.loads(resp.read()) + self.assertTrue(body["ok"]) + + resp2 = http_get(self._url, headers={"Range": "bytes=1024-1535"}) + self.assertEqual(resp2.read(), b"\x00" * 512) + + def test_patch_zero_preserves_neighbors(self): + data = randbytes(91, IMAGE_SIZE) + http_put(self._url, data) + + payload = json.dumps({"op": "zero", "size": 256, "offset": 512}).encode() + http_patch(self._url, payload, headers={ + "Content-Type": "application/json", + "Content-Length": str(len(payload)), + }) + + resp = http_get(self._url, headers={"Range": "bytes=0-1023"}) + got = resp.read() + self.assertEqual(got[:512], data[:512]) + self.assertEqual(got[512:768], b"\x00" * 256) + self.assertEqual(got[768:1024], data[768:1024]) + + +class TestPatchFlush(NbdBackendTestCase): + def test_patch_flush_op(self): + payload = json.dumps({"op": "flush"}).encode() + resp = http_patch(self._url, payload, headers={ + "Content-Type": "application/json", + "Content-Length": str(len(payload)), + }) + body = json.loads(resp.read()) + self.assertEqual(resp.status, 200) + 
self.assertTrue(body["ok"]) + + +class TestPostFlush(NbdBackendTestCase): + def test_post_flush(self): + resp = http_post(f"{self._url}/flush") + body = json.loads(resp.read()) + self.assertEqual(resp.status, 200) + self.assertTrue(body["ok"]) + + +class TestExtents(NbdBackendTestCase): + def test_get_allocation_extents(self): + resp = http_get(f"{self._url}/extents") + self.assertEqual(resp.status, 200) + extents = json.loads(resp.read()) + self.assertIsInstance(extents, list) + self.assertGreaterEqual(len(extents), 1) + for ext in extents: + self.assertIn("start", ext) + self.assertIn("length", ext) + self.assertIn("zero", ext) + + def test_extents_cover_full_image(self): + resp = http_get(f"{self._url}/extents") + extents = json.loads(resp.read()) + total = sum(e["length"] for e in extents) + self.assertEqual(total, IMAGE_SIZE) + + def test_extents_dirty_context_without_bitmap(self): + resp = http_get(f"{self._url}/extents?context=dirty") + self.assertEqual(resp.status, 200) + extents = json.loads(resp.read()) + self.assertIsInstance(extents, list) + self.assertGreaterEqual(len(extents), 1) + for ext in extents: + self.assertIn("dirty", ext) + self.assertTrue(ext["dirty"]) + + def test_extents_after_write_and_zero(self): + http_put(self._url, randbytes(95, IMAGE_SIZE)) + + payload = json.dumps({"op": "zero", "size": 4096, "offset": 0}).encode() + http_patch(self._url, payload, headers={ + "Content-Type": "application/json", + "Content-Length": str(len(payload)), + }) + + resp = http_get(f"{self._url}/extents") + extents = json.loads(resp.read()) + self.assertGreaterEqual(len(extents), 1) + total = sum(e["length"] for e in extents) + self.assertEqual(total, IMAGE_SIZE) + + +def _allocated_subranges(extents, granularity): + """Split each non-hole extent (zero=False) into [start, end] inclusive byte ranges.""" + out = [] + for ext in extents: + if ext.get("zero"): + continue + start = int(ext["start"]) + length = int(ext["length"]) + pos = start + end_abs = start + 
length + while pos < end_abs: + chunk_end = min(pos + granularity, end_abs) + out.append((pos, chunk_end - 1)) + pos = chunk_end + return out + + +def _qemu_img_virtual_size(path: str) -> int: + """Return virtual size in bytes (requires ``qemu-img`` on PATH).""" + # stdout=PIPE + universal_newlines: Python 3.6 compatible (no capture_output/text). + cp = subprocess.run( + ["qemu-img", "info", "--output=json", path], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + check=True, + ) + return int(json.loads(cp.stdout)["virtual-size"]) + + +def _http_error_detail(exc: urllib.error.HTTPError) -> str: + """Build a readable message from an ``HTTPError`` (status, url, JSON/text body).""" + parts = ["HTTP %s %r" % (exc.code, exc.reason), "url=%r" % getattr(exc, "url", "")] + try: + if exc.fp is not None: + raw = exc.fp.read() + if raw: + text = raw.decode("utf-8", errors="replace") + parts.append("response_body=%r" % (text,)) + except Exception as read_err: + parts.append("read_body_error=%r" % (read_err,)) + return "; ".join(parts) + + +def _http_get_checked( + url, + headers=None, + expected_status=200, + label="GET", +): + """ + Like ``http_get`` but raises ``AssertionError`` with ``_http_error_detail`` on failure. 
+ """ + try: + resp = http_get(url, headers=headers) + except urllib.error.HTTPError as e: + raise AssertionError( + "%s failed for %r: %s" % (label, url, _http_error_detail(e)) + ) from e + if resp.status != expected_status: + body = resp.read() + raise AssertionError( + "%s %r: expected HTTP %s, got %s; body=%r" + % (label, url, expected_status, resp.status, body) + ) + return resp + + +def _http_put_checked(url, data, headers, label="PUT"): + try: + resp = http_put(url, data, headers=headers) + except urllib.error.HTTPError as e: + raise AssertionError( + "%s failed for %r: %s" % (label, url, _http_error_detail(e)) + ) from e + body = resp.read() + if resp.status != 200: + raise AssertionError( + "%s %r: expected HTTP 200, got %s; body=%r" + % (label, url, resp.status, body) + ) + return resp, body + + +def _http_post_checked(url, data=b"", headers=None, label="POST"): + try: + resp = http_post(url, data=data, headers=headers) + except urllib.error.HTTPError as e: + raise AssertionError( + "%s failed for %r: %s" % (label, url, _http_error_detail(e)) + ) from e + body = resp.read() + if resp.status != 200: + raise AssertionError( + "%s %r: expected HTTP 200, got %s; body=%r" + % (label, url, resp.status, body) + ) + return resp, body + + +class TestQcow2ExtentsParallelReads(ImageServerTestCase): + """ + Optional integration tests: export a user-supplied qcow2 via qemu-nbd, fetch + allocation extents, parallel range GETs over allocated regions, and (second + test) per-range GET-then-PUT pipeline with ``min(MAX_PARALLEL_READS, + MAX_PARALLEL_WRITES)`` workers. + + Requires ``qemu-img`` and ``qemu-nbd`` on PATH. + + Set IMAGESERVER_TEST_QCOW2 to the absolute path of a qcow2 file. + Optional: IMAGESERVER_TEST_QCOW2_READ_GRANULARITY — byte step (default 4 MiB). 
+ """ + + def setUp(self): + super().setUp() + self._qcow2_path = os.environ.get("IMAGESERVER_TEST_QCOW2", "").strip() + if not self._qcow2_path or not os.path.isfile(self._qcow2_path): + self.skipTest( + "Set IMAGESERVER_TEST_QCOW2 to an existing qcow2 path to run this test" + ) + raw_g = os.environ.get("IMAGESERVER_TEST_QCOW2_READ_GRANULARITY", "").strip() + self._read_granularity = int(raw_g) if raw_g else 4 * 1024 * 1024 + if self._read_granularity <= 0: + self.skipTest("IMAGESERVER_TEST_QCOW2_READ_GRANULARITY must be positive") + + def test_parallel_range_reads_allocated_extents(self): + _, url, _, cleanup = make_nbd_transfer_existing_disk( + self._qcow2_path, "qcow2" + ) + try: + resp = _http_get_checked( + "%s/extents" % (url,), + expected_status=200, + label="GET /extents", + ) + extents = json.loads(resp.read()) + self.assertIsInstance(extents, list) + ranges = _allocated_subranges(extents, self._read_granularity) + if not ranges: + self.skipTest("no allocated extents (all holes/zero) in qcow2") + + def fetch(span): + start_b, end_b = span + range_hdr = "bytes=%s-%s" % (start_b, end_b) + r = _http_get_checked( + url, + headers={"Range": range_hdr}, + expected_status=206, + label="Range GET %s" % (range_hdr,), + ) + data = r.read() + expected_len = end_b - start_b + 1 + if len(data) != expected_len: + raise AssertionError( + "Range GET %s: got %d bytes, expected %d (url=%r)" + % (range_hdr, len(data), expected_len, url) + ) + + with ThreadPoolExecutor(max_workers=MAX_PARALLEL_READS) as pool: + pool.map(fetch, ranges) + finally: + cleanup() + + def test_parallel_reads_then_put_range_copy_matches_source(self): + """ + Create an empty qcow2 with the same virtual size as the source, copy every + allocated range using one worker pool: for each span, Range GET from src + then Content-Range PUT to dest. + Worker count is ``min(MAX_PARALLEL_READS, MAX_PARALLEL_WRITES)`` so each + worker holds at most one chunk. 
+ """ + src_path = self._qcow2_path + try: + vsize = _qemu_img_virtual_size(src_path) + except (FileNotFoundError, subprocess.CalledProcessError, KeyError, json.JSONDecodeError, TypeError, ValueError) as e: + self.skipTest(f"qemu-img info failed: {e}") + + tmp = get_tmp_dir() + dest_path = os.path.join(tmp, f"qcow2_copy_{uuid.uuid4().hex[:8]}.qcow2") + try: + subprocess.run( + ["qemu-img", "create", "-f", "qcow2", dest_path, str(vsize)], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + check=True, + ) + except (FileNotFoundError, subprocess.CalledProcessError) as e: + self.skipTest(f"qemu-img create failed: {e}") + + _, src_url, _, cleanup_src = make_nbd_transfer_existing_disk( + src_path, "qcow2" + ) + _, dest_url, _, cleanup_dest = make_nbd_transfer_existing_disk( + dest_path, "qcow2" + ) + try: + resp = _http_get_checked( + "%s/extents" % (src_url,), + expected_status=200, + label="GET src /extents", + ) + extents = json.loads(resp.read()) + ranges = _allocated_subranges(extents, self._read_granularity) + if not ranges: + self.skipTest("no allocated extents (all holes/zero) in qcow2") + + transfer_workers = max( + 1, min(MAX_PARALLEL_READS, MAX_PARALLEL_WRITES) + ) + + def transfer_span(span): + start_b, end_b = span + range_hdr = "bytes=%s-%s" % (start_b, end_b) + r = _http_get_checked( + src_url, + headers={"Range": range_hdr}, + expected_status=206, + label="Range GET src %s" % (range_hdr,), + ) + data = r.read() + expected_len = end_b - start_b + 1 + if len(data) != expected_len: + raise AssertionError( + "Range GET src %s: got %d bytes, expected %d (url=%r)" + % (range_hdr, len(data), expected_len, src_url) + ) + end_inclusive = start_b + len(data) - 1 + cr = "bytes %s-%s/*" % (start_b, end_inclusive) + _put_resp, put_body = _http_put_checked( + dest_url, + data, + headers={ + "Content-Range": cr, + "Content-Length": str(len(data)), + }, + label="PUT dest %s" % (cr,), + ) + try: + body = json.loads(put_body) + except 
ValueError: + raise AssertionError( + "PUT dest %s: invalid JSON body=%r (url=%r)" + % (cr, put_body, dest_url) + ) + if not body.get("ok"): + raise AssertionError( + "PUT dest %s: JSON ok=false, full=%r (url=%r)" + % (cr, body, dest_url) + ) + if body.get("bytes_written") != len(data): + raise AssertionError( + "PUT dest %s: bytes_written=%r expected %d (url=%r)" + % (cr, body.get("bytes_written"), len(data), dest_url) + ) + + with ThreadPoolExecutor(max_workers=transfer_workers) as pool: + pool.map(transfer_span, ranges) + + _flush, flush_body = _http_post_checked( + "%s/flush" % (dest_url,), + label="POST dest /flush", + ) + try: + flush_json = json.loads(flush_body) + except ValueError: + raise AssertionError( + "POST dest /flush: invalid JSON body=%r (url=%r)" + % (flush_body, dest_url) + ) + if not flush_json.get("ok"): + raise AssertionError( + "POST dest /flush: ok=false, full=%r (url=%r)" + % (flush_json, dest_url) + ) + finally: + cleanup_dest() + cleanup_src() + + try: + cmp = subprocess.run( + ["qemu-img", "compare", src_path, dest_path], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) + self.assertEqual( + cmp.returncode, + 0, + "qemu-img compare %r vs %r failed (rc=%s): stderr=%r stdout=%r" + % ( + src_path, + dest_path, + cmp.returncode, + cmp.stderr, + cmp.stdout, + ), + ) + finally: + try: + os.unlink(dest_path) + except FileNotFoundError: + pass + + +class TestErrorCases(NbdBackendTestCase): + def test_patch_unsupported_op(self): + payload = json.dumps({"op": "invalid"}).encode() + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_patch(self._url, payload, headers={ + "Content-Type": "application/json", + "Content-Length": str(len(payload)), + }) + self.assertEqual(ctx.exception.code, 400) + + def test_patch_zero_missing_size(self): + payload = json.dumps({"op": "zero", "offset": 0}).encode() + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_patch(self._url, payload, headers={ + 
"Content-Type": "application/json", + "Content-Length": str(len(payload)), + }) + self.assertEqual(ctx.exception.code, 400) + + def test_put_missing_content_length(self): + import http.client + from urllib.parse import urlparse + parsed = urlparse(self._url) + conn = http.client.HTTPConnection(parsed.hostname, parsed.port, timeout=30) + try: + conn.putrequest("PUT", parsed.path) + conn.endheaders() + resp = conn.getresponse() + self.assertEqual(resp.status, 400) + finally: + conn.close() + + +class TestRoundTrip(NbdBackendTestCase): + def test_write_read_full_roundtrip(self): + payload = randbytes(110, IMAGE_SIZE) + http_put(self._url, payload) + resp = http_get(self._url) + self.assertEqual(resp.read(), payload) + + def test_write_read_range_roundtrip(self): + payload = randbytes(111, IMAGE_SIZE) + http_put(self._url, payload) + + for start, end in [(0, 255), (1024, 2047), (IMAGE_SIZE - 512, IMAGE_SIZE - 1)]: + resp = http_get(self._url, headers={"Range": f"bytes={start}-{end}"}) + self.assertEqual(resp.read(), payload[start:end + 1]) + + def test_range_write_read_roundtrip(self): + http_put(self._url, b"\x00" * IMAGE_SIZE) + + chunk = randbytes(112, 4096) + http_put(self._url, chunk, headers={ + "Content-Range": "bytes 8192-12287/*", + "Content-Length": str(len(chunk)), + }) + + resp = http_get(self._url, headers={"Range": "bytes=8192-12287"}) + self.assertEqual(resp.read(), chunk) + + resp2 = http_get(self._url, headers={"Range": "bytes=0-4095"}) + self.assertEqual(resp2.read(), b"\x00" * 4096) + + +if __name__ == "__main__": + try: + unittest.main() + finally: + shutdown_image_server() diff --git a/scripts/vm/hypervisor/kvm/imageserver/tests/test_registry_idle.py b/scripts/vm/hypervisor/kvm/imageserver/tests/test_registry_idle.py new file mode 100644 index 000000000000..3fa592d8953a --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/tests/test_registry_idle.py @@ -0,0 +1,101 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more 
contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +"""Unit tests for transfer idle timeout (no image server / nbd dependency).""" + +import unittest +from unittest.mock import patch + +from imageserver.config import ( + TransferRegistry, + parse_idle_timeout_seconds, + validate_transfer_config, +) +from imageserver.constants import DEFAULT_IDLE_TIMEOUT_SECONDS + + +class TestParseIdleTimeout(unittest.TestCase): + def test_default_600(self): + self.assertEqual(parse_idle_timeout_seconds({}), DEFAULT_IDLE_TIMEOUT_SECONDS) + + def test_explicit(self): + self.assertEqual( + parse_idle_timeout_seconds({"idle_timeout_seconds": 30}), 30 + ) + + def test_zero_timeout(self): + self.assertEqual( + parse_idle_timeout_seconds({"idle_timeout_seconds": 0}), 86400 + ) + + +class TestValidateTransferConfig(unittest.TestCase): + def test_file_merges_idle(self): + c = validate_transfer_config( + {"backend": "file", "file": "/tmp/x", "idle_timeout_seconds": 3} + ) + self.assertEqual(c["idle_timeout_seconds"], 3) + self.assertEqual(c["backend"], "file") + + +class TestRegistryIdleSweep(unittest.TestCase): + def test_sweep_unregisters_after_idle(self): + clock = [0.0] + + def mono(): + return clock[0] + + with patch("imageserver.config.time.monotonic", mono): + r = TransferRegistry() + r.register( + "t1", + 
validate_transfer_config( + {"backend": "file", "file": "/x", "idle_timeout_seconds": 2} + ), + ) + clock[0] = 5.0 + r.sweep_expired_transfers() + self.assertIsNone(r.get("t1")) + + def test_inflight_prevents_sweep_until_request_ends(self): + clock = [0.0] + + def mono(): + return clock[0] + + with patch("imageserver.config.time.monotonic", mono): + r = TransferRegistry() + r.register( + "t1", + validate_transfer_config( + {"backend": "file", "file": "/x", "idle_timeout_seconds": 2} + ), + ) + clock[0] = 1.0 + ctx = r.request_lifecycle("t1") + ctx.__enter__() + clock[0] = 100.0 + r.sweep_expired_transfers() + self.assertIsNotNone(r.get("t1")) + ctx.__exit__(None, None, None) + clock[0] = 103.0 + r.sweep_expired_transfers() + self.assertIsNone(r.get("t1")) + + +if __name__ == "__main__": + unittest.main() diff --git a/scripts/vm/hypervisor/kvm/imageserver/tests/test_transfer_idle_expiry.py b/scripts/vm/hypervisor/kvm/imageserver/tests/test_transfer_idle_expiry.py new file mode 100644 index 000000000000..2730c8ed16ca --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/tests/test_transfer_idle_expiry.py @@ -0,0 +1,57 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +"""Integration tests for per-transfer HTTP idle timeout (requires image server deps e.g. nbd).""" + +import time +import urllib.error + +from .test_base import ( + ImageServerTestCase, + http_options, + make_file_transfer, +) + + +class TestTransferIdleExpiry(ImageServerTestCase): + def test_transfer_expires_after_idle(self): + """No HTTP activity after registration: transfer is unregistered after idle_timeout_seconds.""" + _tid, url, _path, cleanup = make_file_transfer(idle_timeout_seconds=15) + try: + time.sleep(30) + with self.assertRaises(urllib.error.HTTPError) as ctx: + http_options(url) + self.assertEqual(ctx.exception.code, 404) + st = self.ctrl({"action": "status"}) + self.assertEqual(st.get("status"), "ok") + finally: + cleanup() + + def test_http_activity_resets_idle_deadline(self): + """Completing a request resets the idle timer; transfer stays past a single interval.""" + _tid, url, _path, cleanup = make_file_transfer(idle_timeout_seconds=15) + try: + http_options(url) + time.sleep(10) + http_options(url) + time.sleep(10) + http_options(url) + time.sleep(10) + resp = http_options(url) + self.assertEqual(resp.status, 200) + finally: + cleanup() diff --git a/scripts/vm/hypervisor/kvm/imageserver/tests/test_util.py b/scripts/vm/hypervisor/kvm/imageserver/tests/test_util.py new file mode 100644 index 000000000000..159dff30a929 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/tests/test_util.py @@ -0,0 +1,122 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +"""Unit tests for imageserver.util extent coalescing helpers.""" + +import unittest + +from imageserver.util import ( + coalesce_allocation_extents, + coalesce_dirty_zero_extents, + merge_dirty_zero_extents, +) + + +class TestCoalesceAllocationExtents(unittest.TestCase): + def test_empty(self): + self.assertEqual(coalesce_allocation_extents([]), []) + + def test_single(self): + inp = [{"start": 0, "length": 4096, "zero": False}] + out = coalesce_allocation_extents(inp) + self.assertEqual(out, [{"start": 0, "length": 4096, "zero": False}]) + self.assertIsNot(out[0], inp[0]) + + def test_merges_contiguous_same_zero(self): + inp = [ + {"start": 0, "length": 10, "zero": False}, + {"start": 10, "length": 5, "zero": False}, + {"start": 15, "length": 100, "zero": False}, + ] + self.assertEqual( + coalesce_allocation_extents(inp), + [{"start": 0, "length": 115, "zero": False}], + ) + + def test_does_not_merge_different_zero(self): + inp = [ + {"start": 0, "length": 64, "zero": False}, + {"start": 64, "length": 64, "zero": True}, + {"start": 128, "length": 64, "zero": False}, + ] + self.assertEqual(coalesce_allocation_extents(inp), inp) + + def test_does_not_merge_gap(self): + inp = [ + {"start": 0, "length": 100, "zero": False}, + {"start": 200, "length": 50, "zero": False}, + ] + self.assertEqual(coalesce_allocation_extents(inp), inp) + + def test_does_not_merge_same_zero_with_gap(self): + inp = [ + {"start": 0, "length": 10, "zero": True}, + {"start": 20, "length": 10, "zero": True}, + ] + self.assertEqual(coalesce_allocation_extents(inp), inp) + + +class 
TestCoalesceDirtyZeroExtents(unittest.TestCase): + def test_empty(self): + self.assertEqual(coalesce_dirty_zero_extents([]), []) + + def test_single(self): + inp = [{"start": 0, "length": 8192, "dirty": True, "zero": False}] + out = coalesce_dirty_zero_extents(inp) + self.assertEqual( + out, [{"start": 0, "length": 8192, "dirty": True, "zero": False}] + ) + + def test_merges_contiguous_same_flags(self): + inp = [ + {"start": 0, "length": 50, "dirty": True, "zero": False}, + {"start": 50, "length": 50, "dirty": True, "zero": False}, + ] + self.assertEqual( + coalesce_dirty_zero_extents(inp), + [{"start": 0, "length": 100, "dirty": True, "zero": False}], + ) + + def test_does_not_merge_differing_dirty(self): + inp = [ + {"start": 0, "length": 32, "dirty": False, "zero": False}, + {"start": 32, "length": 32, "dirty": True, "zero": False}, + ] + self.assertEqual(coalesce_dirty_zero_extents(inp), inp) + + def test_does_not_merge_differing_zero(self): + inp = [ + {"start": 0, "length": 16, "dirty": False, "zero": False}, + {"start": 16, "length": 16, "dirty": False, "zero": True}, + ] + self.assertEqual(coalesce_dirty_zero_extents(inp), inp) + + +class TestMergeDirtyZeroExtentsCoalescing(unittest.TestCase): + def test_coalesces_adjacent_identical_flags_after_boundary_merge(self): + """Boundary grid can split one logical run; coalesce should reunite.""" + allocation = [(0, 200, False)] + dirty = [(0, 100, False), (100, 100, False)] + merged = merge_dirty_zero_extents(allocation, dirty, 200) + self.assertEqual( + merged, + [{"start": 0, "length": 200, "dirty": False, "zero": False}], + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/scripts/vm/hypervisor/kvm/imageserver/util.py b/scripts/vm/hypervisor/kvm/imageserver/util.py new file mode 100644 index 000000000000..473f58a50c07 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/imageserver/util.py @@ -0,0 +1,125 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license 
# agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""Extent helpers for the KVM image server.

Provides coalescing of allocation and dirty-bitmap extents, merging of the
two extent streams onto a unified boundary grid, and small serialization /
timing utilities.
"""

import json
import time
from typing import Any, Dict, List, Set, Tuple


def coalesce_allocation_extents(
    extents: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    """Collapse runs of contiguous extents that share the same ``zero`` flag.

    Returns a new list; input dicts are never mutated or aliased. The first
    extent is shallow-copied as-is (any extra keys survive), while extents
    appended later are rebuilt with only ``start``/``length``/``zero``.
    """
    if not extents:
        return []
    merged: List[Dict[str, Any]] = [dict(extents[0])]
    for ext in extents[1:]:
        last = merged[-1]
        contiguous = last["start"] + last["length"] == ext["start"]
        if contiguous and last["zero"] == ext["zero"]:
            last["length"] += ext["length"]
        else:
            merged.append(
                {
                    "start": ext["start"],
                    "length": ext["length"],
                    "zero": ext["zero"],
                }
            )
    return merged


def coalesce_dirty_zero_extents(
    extents: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    """Collapse runs of contiguous extents whose ``dirty`` AND ``zero`` match.

    Same copy semantics as :func:`coalesce_allocation_extents`: the result
    never aliases input dicts, and non-first extents are rebuilt with only
    the canonical ``start``/``length``/``dirty``/``zero`` keys.
    """
    if not extents:
        return []
    merged: List[Dict[str, Any]] = [dict(extents[0])]
    for ext in extents[1:]:
        last = merged[-1]
        contiguous = last["start"] + last["length"] == ext["start"]
        if contiguous and last["dirty"] == ext["dirty"] and last["zero"] == ext["zero"]:
            last["length"] += ext["length"]
        else:
            merged.append(
                {
                    "start": ext["start"],
                    "length": ext["length"],
                    "dirty": ext["dirty"],
                    "zero": ext["zero"],
                }
            )
    return merged


def json_bytes(obj: Any) -> bytes:
    """Serialize ``obj`` to compact UTF-8 JSON bytes (no spaces, non-ASCII kept)."""
    compact = json.dumps(obj, separators=(",", ":"), ensure_ascii=False)
    return compact.encode("utf-8")


def merge_dirty_zero_extents(
    allocation_extents: List[Tuple[int, int, bool]],
    dirty_extents: List[Tuple[int, int, bool]],
    size: int,
) -> List[Dict[str, Any]]:
    """Merge allocation (start, length, zero) and dirty (start, length, dirty)
    extents into one list of ``{start, length, dirty, zero}`` dicts.

    All extent edges (plus 0 and ``size``) form a unified boundary grid; each
    grid cell takes its flags from the first extent (in input order) covering
    its start offset, defaulting to ``False``. Cells split only by the grid
    are re-coalesced before returning.
    """
    edges: Set[int] = {0, size}
    for start, length, _flag in allocation_extents:
        edges.update((start, start + length))
    for start, length, _flag in dirty_extents:
        edges.update((start, start + length))
    ordered = sorted(edges)

    def flag_at(
        extents: List[Tuple[int, int, bool]], offset: int, default: bool
    ) -> bool:
        # First extent (in input order) covering ``offset`` wins.
        return next(
            (
                flag
                for start, length, flag in extents
                if start <= offset < start + length
            ),
            default,
        )

    pieces: List[Dict[str, Any]] = []
    for lo, hi in zip(ordered, ordered[1:]):
        if lo >= hi:
            # Defensive: a sorted set yields strictly increasing boundaries.
            continue
        pieces.append(
            {
                "start": lo,
                "length": hi - lo,
                "dirty": flag_at(dirty_extents, lo, False),
                "zero": flag_at(allocation_extents, lo, False),
            }
        )
    return coalesce_dirty_zero_extents(pieces)


def is_fallback_dirty_response(extents: List[Dict[str, Any]]) -> bool:
    """True if extents is the single-extent fallback (dirty=false, zero=false)."""
    if len(extents) != 1:
        return False
    only = extents[0]
    return only.get("dirty") is False and only.get("zero") is False


def now_s() -> float:
    """Monotonic timestamp in seconds (not wall-clock; safe for durations)."""
    return time.monotonic()
package com.cloud.api; +import static com.cloud.user.AccountManagerImpl.apiKeyAccess; +import static org.apache.cloudstack.api.ApiConstants.PASSWORD_CHANGE_REQUIRED; +import static org.apache.cloudstack.user.UserPasswordResetManager.UserPasswordResetEnabled; + import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InterruptedIOException; @@ -31,6 +35,7 @@ import java.security.Security; import java.text.ParseException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.EnumSet; @@ -39,7 +44,6 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; -import java.util.Arrays; import java.util.Map; import java.util.Set; import java.util.TimeZone; @@ -58,16 +62,6 @@ import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; -import com.cloud.cluster.ManagementServerHostVO; -import com.cloud.cluster.dao.ManagementServerHostDao; -import com.cloud.utils.Ternary; -import com.cloud.user.Account; -import com.cloud.user.AccountManager; -import com.cloud.user.AccountManagerImpl; -import com.cloud.user.DomainManager; -import com.cloud.user.User; -import com.cloud.user.UserAccount; -import com.cloud.user.UserVO; import org.apache.cloudstack.acl.APIChecker; import org.apache.cloudstack.acl.ApiKeyPairManagerImpl; import org.apache.cloudstack.acl.apikeypair.ApiKeyPair; @@ -161,6 +155,8 @@ import com.cloud.api.dispatch.DispatchChainFactory; import com.cloud.api.dispatch.DispatchTask; import com.cloud.api.response.ApiResponseSerializer; +import com.cloud.cluster.ManagementServerHostVO; +import com.cloud.cluster.dao.ManagementServerHostDao; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; @@ -179,14 +175,22 @@ import com.cloud.exception.UnavailableCommandException; import com.cloud.projects.dao.ProjectDao; import com.cloud.storage.VolumeApiService; +import com.cloud.user.Account; 
+import com.cloud.user.AccountManager; +import com.cloud.user.AccountManagerImpl; +import com.cloud.user.DomainManager; +import com.cloud.user.User; +import com.cloud.user.UserAccount; +import com.cloud.user.UserVO; import com.cloud.utils.ConstantTimeComparator; import com.cloud.utils.DateUtil; import com.cloud.utils.HttpUtils; -import com.cloud.utils.HttpUtils.ApiSessionKeySameSite; import com.cloud.utils.HttpUtils.ApiSessionKeyCheckOption; +import com.cloud.utils.HttpUtils.ApiSessionKeySameSite; import com.cloud.utils.Pair; import com.cloud.utils.ReflectUtil; import com.cloud.utils.StringUtils; +import com.cloud.utils.Ternary; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.component.PluggableService; @@ -199,10 +203,6 @@ import com.cloud.utils.net.NetUtils; import com.google.gson.reflect.TypeToken; -import static com.cloud.user.AccountManagerImpl.apiKeyAccess; -import static org.apache.cloudstack.api.ApiConstants.PASSWORD_CHANGE_REQUIRED; -import static org.apache.cloudstack.user.UserPasswordResetManager.UserPasswordResetEnabled; - @Component public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiServerService, Configurable { private static final Logger ACCESSLOGGER = LogManager.getLogger("apiserver." + ApiServer.class.getName()); @@ -792,85 +792,14 @@ private String queueCommand(final BaseCmd cmdObj, final Map para // BaseAsyncCreateCmd: cmd params are processed and create() is called, then same workflow as BaseAsyncCmd. // BaseAsyncCmd: cmd is processed and submitted as an AsyncJob, job related info is serialized and returned. if (cmdObj instanceof BaseAsyncCmd) { - if (!asyncMgr.isAsyncJobsEnabled()) { - String msg = "Maintenance or Shutdown has been initiated on this management server. 
Can not accept new jobs"; - logger.warn(msg); - throw new ServerApiException(ApiErrorCode.SERVICE_UNAVAILABLE, msg); - } - Long objectId = null; - String objectUuid; - if (cmdObj instanceof BaseAsyncCreateCmd) { - final BaseAsyncCreateCmd createCmd = (BaseAsyncCreateCmd)cmdObj; - dispatcher.dispatchCreateCmd(createCmd, params); - objectId = createCmd.getEntityId(); - objectUuid = createCmd.getEntityUuid(); - params.put("id", objectId.toString()); - Class entityClass = EventTypes.getEntityClassForEvent(createCmd.getEventType()); - if (entityClass != null) - ctx.putContextParameter(entityClass, objectUuid); - } else { - // Extract the uuid before params are processed and id reflects internal db id - objectUuid = params.get(ApiConstants.ID); - dispatchChainFactory.getStandardDispatchChain().dispatch(new DispatchTask(cmdObj, params)); - } - - final BaseAsyncCmd asyncCmd = (BaseAsyncCmd)cmdObj; - - if (callerUserId != null) { - params.put("ctxUserId", callerUserId.toString()); - } - if (caller != null) { - params.put("ctxAccountId", String.valueOf(caller.getId())); - } - if (objectUuid != null) { - params.put("uuid", objectUuid); - } - - long startEventId = ctx.getStartEventId(); - asyncCmd.setStartEventId(startEventId); - - // save the scheduled event - final Long eventId = - ActionEventUtils.onScheduledActionEvent((callerUserId == null) ? 
(Long)User.UID_SYSTEM : callerUserId, asyncCmd.getEntityOwnerId(), asyncCmd.getEventType(), - asyncCmd.getEventDescription(), asyncCmd.getApiResourceId(), asyncCmd.getApiResourceType().toString(), asyncCmd.isDisplay(), startEventId); - if (startEventId == 0) { - // There was no create event before, set current event id as start eventId - startEventId = eventId; - } - - params.put("ctxStartEventId", String.valueOf(startEventId)); - params.put("cmdEventType", asyncCmd.getEventType()); - params.put("ctxDetails", ApiGsonHelper.getBuilder().create().toJson(ctx.getContextParameters())); - if (asyncCmd.getHttpMethod() != null) { - params.put(ApiConstants.HTTPMETHOD, asyncCmd.getHttpMethod().toString()); - } - - Long instanceId = (objectId == null) ? asyncCmd.getApiResourceId() : objectId; - - // users can provide the job id they want to use, so log as it is a uuid and is unique - String injectedJobId = asyncCmd.getInjectedJobId(); - uuidMgr.checkUuidSimple(injectedJobId, AsyncJob.class); - - AsyncJobVO job = new AsyncJobVO("", callerUserId, caller.getId(), cmdObj.getClass().getName(), - ApiGsonHelper.getBuilder().create().toJson(params), instanceId, - asyncCmd.getApiResourceType() != null ? asyncCmd.getApiResourceType().toString() : null, - injectedJobId); - job.setDispatcher(asyncDispatcher.getName()); - - final long jobId = asyncMgr.submitAsyncJob(job); - - if (jobId == 0L) { - final String errorMsg = "Unable to schedule async job for command " + job.getCmd(); - logger.warn(errorMsg); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); - } + AsyncCmdResult result = processAsyncCmd((BaseAsyncCmd)cmdObj, params, ctx, callerUserId, caller); final String response; - if (objectId != null) { - final String objUuid = (objectUuid == null) ? objectId.toString() : objectUuid; - response = getBaseAsyncCreateResponse(jobId, (BaseAsyncCreateCmd)asyncCmd, objUuid); + if (result.objectId != null) { + final String objUuid = (result.objectUuid == null) ? 
result.objectId.toString() : result.objectUuid; + response = getBaseAsyncCreateResponse(result.jobId, (BaseAsyncCreateCmd) result.asyncCmd, objUuid); } else { SerializationContext.current().setUuidTranslation(true); - response = getBaseAsyncResponse(jobId, asyncCmd); + response = getBaseAsyncResponse(result.jobId, result.asyncCmd); } // Always log response for async for now, I don't think any sensitive data will be in here. // It might be nice to send this through scrubbing similar to how @@ -900,6 +829,81 @@ private String queueCommand(final BaseCmd cmdObj, final Map para } } + @Override + public AsyncCmdResult processAsyncCmd(BaseAsyncCmd asyncCmd, Map params, CallContext ctx, Long callerUserId, Account caller) throws Exception { + if (!asyncMgr.isAsyncJobsEnabled()) { + String msg = "Maintenance or Shutdown has been initiated on this management server. Can not accept new jobs"; + logger.warn(msg); + throw new ServerApiException(ApiErrorCode.SERVICE_UNAVAILABLE, msg); + } + Long objectId = null; + String objectUuid; + if (asyncCmd instanceof BaseAsyncCreateCmd) { + final BaseAsyncCreateCmd createCmd = (BaseAsyncCreateCmd) asyncCmd; + dispatcher.dispatchCreateCmd(createCmd, params); + objectId = createCmd.getEntityId(); + objectUuid = createCmd.getEntityUuid(); + params.put("id", objectId.toString()); + Class entityClass = EventTypes.getEntityClassForEvent(createCmd.getEventType()); + if (entityClass != null) + ctx.putContextParameter(entityClass, objectUuid); + } else { + // Extract the uuid before params are processed and id reflects internal db id + objectUuid = params.get(ApiConstants.ID); + dispatchChainFactory.getStandardDispatchChain().dispatch(new DispatchTask(asyncCmd, params)); + } + + if (callerUserId != null) { + params.put("ctxUserId", callerUserId.toString()); + } + if (caller != null) { + params.put("ctxAccountId", String.valueOf(caller.getId())); + } + if (objectUuid != null) { + params.put("uuid", objectUuid); + } + + long startEventId = 
ctx.getStartEventId(); + asyncCmd.setStartEventId(startEventId); + + // save the scheduled event + final Long eventId = + ActionEventUtils.onScheduledActionEvent((callerUserId == null) ? (Long)User.UID_SYSTEM : callerUserId, asyncCmd.getEntityOwnerId(), asyncCmd.getEventType(), + asyncCmd.getEventDescription(), asyncCmd.getApiResourceId(), asyncCmd.getApiResourceType().toString(), asyncCmd.isDisplay(), startEventId); + if (startEventId == 0) { + // There was no create event before, set current event id as start eventId + startEventId = eventId; + } + + params.put("ctxStartEventId", String.valueOf(startEventId)); + params.put("cmdEventType", asyncCmd.getEventType()); + params.put("ctxDetails", ApiGsonHelper.getBuilder().create().toJson(ctx.getContextParameters())); + if (asyncCmd.getHttpMethod() != null) { + params.put(ApiConstants.HTTPMETHOD, asyncCmd.getHttpMethod().toString()); + } + + Long instanceId = (objectId == null) ? asyncCmd.getApiResourceId() : objectId; + + // users can provide the job id they want to use, so log as it is a uuid and is unique + String injectedJobId = asyncCmd.getInjectedJobId(); + uuidMgr.checkUuidSimple(injectedJobId, AsyncJob.class); + + AsyncJobVO job = new AsyncJobVO("", callerUserId, caller.getId(), asyncCmd.getClass().getName(), + ApiGsonHelper.getBuilder().create().toJson(params), instanceId, + asyncCmd.getApiResourceType() != null ? 
asyncCmd.getApiResourceType().toString() : null, + injectedJobId); + job.setDispatcher(asyncDispatcher.getName()); + + final long jobId = asyncMgr.submitAsyncJob(job); + + if (jobId == 0L) { + final String errorMsg = "Unable to schedule async job for command " + job.getCmd(); + logger.warn(errorMsg); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); + } + return new AsyncCmdResult(objectId, objectUuid, asyncCmd, jobId); + } + @SuppressWarnings("unchecked") private void buildAsyncListResponse(final BaseListCmd command, final Account account) { final List responses = ((ListResponse)command.getResponseObject()).getResponses(); diff --git a/server/src/main/java/com/cloud/api/query/dao/AsyncJobJoinDao.java b/server/src/main/java/com/cloud/api/query/dao/AsyncJobJoinDao.java index 756425f5093e..43974bcf9cc6 100644 --- a/server/src/main/java/com/cloud/api/query/dao/AsyncJobJoinDao.java +++ b/server/src/main/java/com/cloud/api/query/dao/AsyncJobJoinDao.java @@ -16,6 +16,8 @@ // under the License. package com.cloud.api.query.dao; +import java.util.List; + import org.apache.cloudstack.api.response.AsyncJobResponse; import org.apache.cloudstack.framework.jobs.AsyncJob; @@ -28,4 +30,6 @@ public interface AsyncJobJoinDao extends GenericDao { AsyncJobJoinVO newAsyncJobView(AsyncJob vol); + List listByIds(List ids); + } diff --git a/server/src/main/java/com/cloud/api/query/dao/AsyncJobJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/AsyncJobJoinDaoImpl.java index 10ef67bbbea1..93af9a04e144 100644 --- a/server/src/main/java/com/cloud/api/query/dao/AsyncJobJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/AsyncJobJoinDaoImpl.java @@ -16,17 +16,17 @@ // under the License. 
package com.cloud.api.query.dao; +import java.util.Collections; import java.util.Date; import java.util.List; - import javax.inject.Inject; -import org.springframework.stereotype.Component; - import org.apache.cloudstack.api.ResponseObject; import org.apache.cloudstack.api.response.AsyncJobResponse; import org.apache.cloudstack.framework.jobs.AsyncJob; +import org.apache.commons.collections.CollectionUtils; +import org.springframework.stereotype.Component; import com.cloud.api.ApiResponseHelper; import com.cloud.api.ApiSerializerHelper; @@ -115,4 +115,16 @@ public AsyncJobJoinVO newAsyncJobView(AsyncJob job) { } + @Override + public List listByIds(List ids) { + if (CollectionUtils.isEmpty(ids)) { + return Collections.emptyList(); + } + SearchBuilder idsSearch = createSearchBuilder(); + idsSearch.and("ids", idsSearch.entity().getId(), SearchCriteria.Op.IN); + idsSearch.done(); + SearchCriteria sc = idsSearch.create(); + sc.setParameters("ids", ids.toArray()); + return listBy(sc); + } } diff --git a/server/src/main/java/com/cloud/api/query/dao/HostJoinDao.java b/server/src/main/java/com/cloud/api/query/dao/HostJoinDao.java index bc6ec7931366..acce4b7426ae 100644 --- a/server/src/main/java/com/cloud/api/query/dao/HostJoinDao.java +++ b/server/src/main/java/com/cloud/api/query/dao/HostJoinDao.java @@ -25,6 +25,8 @@ import com.cloud.api.query.vo.HostJoinVO; import com.cloud.host.Host; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDao; public interface HostJoinDao extends GenericDao { @@ -41,4 +43,6 @@ public interface HostJoinDao extends GenericDao { List findByClusterId(Long clusterId, Host.Type type); + List listRoutingHostsByHypervisor(Hypervisor.HypervisorType hypervisorType, Filter filter); + } diff --git a/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java index e7265a7e3b9a..6d3174d94325 100644 --- 
a/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java @@ -55,6 +55,7 @@ import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.StorageStats; import com.cloud.user.AccountManager; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -413,4 +414,16 @@ private String calculateResourceAllocatedPercentage(float resource, float resour return decimalFormat.format(((float)resource / resourceWithOverProvision * 100.0f)) + "%"; } + @Override + public List listRoutingHostsByHypervisor(Hypervisor.HypervisorType hypervisorType, Filter filter) { + SearchBuilder sb = createSearchBuilder(); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.EQ); + sb.and("hypervisorType", sb.entity().getHypervisorType(), SearchCriteria.Op.EQ); + sb.done(); + + SearchCriteria sc = sb.create(); + sc.setParameters("type", Host.Type.Routing); + sc.setParameters("hypervisorType", hypervisorType); + return listBy(sc, filter); + } } diff --git a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDao.java b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDao.java index bc19e0892057..54a98a225bcb 100644 --- a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDao.java +++ b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDao.java @@ -22,7 +22,9 @@ import org.apache.cloudstack.api.response.StoragePoolResponse; import com.cloud.api.query.vo.StoragePoolJoinVO; +import com.cloud.storage.Storage; import com.cloud.storage.StoragePool; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; @@ -44,4 +46,6 @@ public interface StoragePoolJoinDao extends GenericDao List findStoragePoolByScopeAndRuleTags(Long datacenterId, Long podId, Long clusterId, ScopeType scopeType, List tags); + 
List listByZoneAndType(long zoneId, List types, Filter filter); + } diff --git a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java index 8bfce47b1204..fe040f8011e0 100644 --- a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java @@ -35,6 +35,7 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.jsinterpreter.TagAsRuleHelper; +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; @@ -49,6 +50,7 @@ import com.cloud.storage.VolumeApiServiceImpl; import com.cloud.user.AccountManager; import com.cloud.utils.StringUtils; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -410,4 +412,17 @@ public List findStoragePoolByScopeAndRuleTags(Long datacenterId, return filteredPools; } + @Override + public List listByZoneAndType(long zoneId, List types, Filter filter) { + SearchBuilder sb = createSearchBuilder(); + sb.and("zoneId", sb.entity().getZoneId(), SearchCriteria.Op.EQ); + sb.and("types", sb.entity().getPoolType(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("zoneId", zoneId); + if (CollectionUtils.isNotEmpty(types)) { + sc.setParameters("types", types.toArray()); + } + return listBy(sc, filter); + } } diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDao.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDao.java index 79312460d2c0..0612e9066665 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDao.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDao.java 
@@ -17,8 +17,10 @@ package com.cloud.api.query.dao; import com.cloud.api.query.vo.UserVmJoinVO; +import com.cloud.hypervisor.Hypervisor; import com.cloud.user.Account; import com.cloud.uservm.UserVm; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDao; import com.cloud.vm.VirtualMachine; import org.apache.cloudstack.api.ApiConstants.VMDetails; @@ -49,4 +51,7 @@ List listByAccountServiceOfferingTemplateAndNotInState(long accoun List listEligibleInstancesWithExpiredLease(); List listLeaseInstancesExpiringInDays(int days); + + List listByHypervisorTypeAndOwners(Hypervisor.HypervisorType hypervisorType, List accountIds, + String domainPath, Filter filter); } diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index 687fea1c4e33..a0a5c1a43dda 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -17,14 +17,13 @@ package com.cloud.api.query.dao; import java.text.DecimalFormat; -import java.util.ArrayList; -import java.util.Collections; import java.time.LocalDate; import java.time.ZoneId; import java.time.temporal.ChronoUnit; +import java.util.ArrayList; import java.util.Calendar; +import java.util.Collections; import java.util.Date; - import java.util.HashMap; import java.util.Hashtable; import java.util.List; @@ -34,8 +33,6 @@ import javax.inject.Inject; -import com.cloud.gpu.dao.VgpuProfileDao; -import com.cloud.service.dao.ServiceOfferingDao; import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; @@ -61,11 +58,14 @@ import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.UserVmJoinVO; import com.cloud.gpu.GPU; +import com.cloud.gpu.dao.VgpuProfileDao; import com.cloud.host.ControlState; +import 
com.cloud.hypervisor.Hypervisor; import com.cloud.network.IpAddress; import com.cloud.network.vpc.VpcVO; import com.cloud.network.vpc.dao.VpcDao; import com.cloud.service.ServiceOfferingDetailsVO; +import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.GuestOS; import com.cloud.storage.Storage.TemplateType; @@ -83,6 +83,7 @@ import com.cloud.user.dao.UserDao; import com.cloud.user.dao.UserStatisticsDao; import com.cloud.uservm.UserVm; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @@ -94,7 +95,6 @@ import com.cloud.vm.VmStats; import com.cloud.vm.dao.NicExtraDhcpOptionDao; import com.cloud.vm.dao.NicSecondaryIpVO; - import com.cloud.vm.dao.VMInstanceDetailsDao; @Component @@ -497,7 +497,7 @@ public UserVmResponse newUserVmResponse(ResponseView view, String objectName, Us } if (userVm.getUserDataId() != null) { - userVmResponse.setUserDataId(userVm.getUserDataUUid()); + userVmResponse.setUserDataId(userVm.getUserDataUuid()); userVmResponse.setUserDataName(userVm.getUserDataName()); userVmResponse.setUserDataDetails(userVm.getUserDataDetails()); userVmResponse.setUserDataPolicy(userVm.getUserDataPolicy()); @@ -832,4 +832,28 @@ public List listLeaseInstancesExpiringInDays(int days) { } return listBy(sc); } + + @Override + public List listByHypervisorTypeAndOwners(Hypervisor.HypervisorType hypervisorType, + List accountIds, String domainPath, Filter filter) { + SearchBuilder sb = createSearchBuilder(); + sb.and("hypervisorType", sb.entity().getHypervisorType(), Op.EQ); + boolean accountIdsNotEmpty = CollectionUtils.isNotEmpty(accountIds); + boolean domainPathNotBlank = StringUtils.isNotBlank(domainPath); + if (accountIdsNotEmpty || domainPathNotBlank) { + sb.and().op("account", sb.entity().getAccountId(), Op.IN); + sb.or("domainPath", sb.entity().getDomainPath(), Op.LIKE); + sb.cp(); + } + 
sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("hypervisorType", hypervisorType); + if (accountIdsNotEmpty) { + sc.setParameters("account", accountIds.toArray()); + } + if (domainPathNotBlank) { + sc.setParameters("domainPath", domainPath + "%"); + } + return listBy(sc, filter); + } } diff --git a/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDao.java b/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDao.java index ebcf0bca391e..7cfdfbe78e68 100644 --- a/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDao.java +++ b/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDao.java @@ -22,7 +22,9 @@ import org.apache.cloudstack.api.response.VolumeResponse; import com.cloud.api.query.vo.VolumeJoinVO; +import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.Volume; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDao; public interface VolumeJoinDao extends GenericDao { @@ -34,4 +36,9 @@ public interface VolumeJoinDao extends GenericDao { List newVolumeView(Volume vol); List searchByIds(Long... 
ids); + + List listByInstanceId(long instanceId); + + List listByHypervisorTypeAndOwners(Hypervisor.HypervisorType hypervisorType, List accountIds, + String domainPath, Filter filter); } diff --git a/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java index 4f5d984c969a..8e79dfe4b742 100644 --- a/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java @@ -21,8 +21,6 @@ import javax.inject.Inject; -import com.cloud.hypervisor.Hypervisor; -import com.cloud.offering.DiskOffering; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ResponseObject.ResponseView; @@ -31,11 +29,15 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.VolumeJoinVO; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.offering.DiskOffering; import com.cloud.offering.ServiceOffering; import com.cloud.storage.Storage; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; @@ -43,8 +45,10 @@ import com.cloud.user.AccountManager; import com.cloud.user.VmDiskStatisticsVO; import com.cloud.user.dao.VmDiskStatisticsDao; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.vm.VirtualMachine; @Component public class VolumeJoinDaoImpl extends GenericDaoBaseWithTagInformation implements VolumeJoinDao { @@ -372,4 +376,37 @@ public List 
searchByIds(Long... volIds) { return uvList; } + @Override + public List listByInstanceId(long instanceId) { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("vmId", SearchCriteria.Op.EQ, instanceId); + return search(sc, null); + } + + @Override + public List listByHypervisorTypeAndOwners(Hypervisor.HypervisorType hypervisorType, + List accountIds, String domainPath, Filter filter) { + SearchBuilder sb = createSearchBuilder(); + sb.and("vmType", sb.entity().getVmType(), SearchCriteria.Op.EQ); + sb.and("hypervisorType", sb.entity().getHypervisorType(), SearchCriteria.Op.EQ); + boolean accountIdsNotEmpty = CollectionUtils.isNotEmpty(accountIds); + boolean domainPathNotBlank = StringUtils.isNotBlank(domainPath); + if (accountIdsNotEmpty || domainPathNotBlank) { + sb.and().op("account", sb.entity().getAccountId(), SearchCriteria.Op.IN); + sb.or("domainPath", sb.entity().getDomainPath(), SearchCriteria.Op.LIKE); + sb.cp(); + } + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("vmType", VirtualMachine.Type.User); + sc.setParameters("hypervisorType", hypervisorType); + if (accountIdsNotEmpty) { + sc.setParameters("account", accountIds.toArray()); + } + if (domainPathNotBlank) { + sc.setParameters("domainPath", domainPath + "%"); + } + return search(sc, filter); + } + } diff --git a/server/src/main/java/com/cloud/api/query/vo/UserVmJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/UserVmJoinVO.java index 0b60d99adc2c..3e50f70e5e24 100644 --- a/server/src/main/java/com/cloud/api/query/vo/UserVmJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/UserVmJoinVO.java @@ -131,6 +131,9 @@ public class UserVmJoinVO extends BaseViewWithTagInformationVO implements Contro @Column(name = "guest_os_uuid") private String guestOsUuid; + @Column(name = "guest_os_display_name") + private String guestOsDisplayName; + @Column(name = "hypervisor_type") @Convert(converter = HypervisorTypeConverter.class) private HypervisorType hypervisorType; @@ 
-429,7 +432,7 @@ public class UserVmJoinVO extends BaseViewWithTagInformationVO implements Contro private int jobStatus; @Column(name = "affinity_group_id") - private long affinityGroupId; + private Long affinityGroupId; @Column(name = "affinity_group_uuid") private String affinityGroupUuid; @@ -612,6 +615,10 @@ public String getGuestOsUuid() { return guestOsUuid; } + public String getGuestOsDisplayName() { + return guestOsDisplayName; + } + public HypervisorType getHypervisorType() { return hypervisorType; } @@ -1012,7 +1019,7 @@ public String getIp6Cidr() { return ip6Cidr; } - public long getAffinityGroupId() { + public Long getAffinityGroupId() { return affinityGroupId; } @@ -1057,7 +1064,7 @@ public Long getUserDataId() { return userDataId; } - public String getUserDataUUid() { + public String getUserDataUuid() { return userDataUuid; } diff --git a/server/src/main/java/com/cloud/api/query/vo/VolumeJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/VolumeJoinVO.java index 2ae720fa8524..ba932c775beb 100644 --- a/server/src/main/java/com/cloud/api/query/vo/VolumeJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/VolumeJoinVO.java @@ -23,6 +23,7 @@ import javax.persistence.Entity; import javax.persistence.EnumType; import javax.persistence.Enumerated; +import javax.persistence.Id; import javax.persistence.Table; import javax.persistence.Temporal; import javax.persistence.TemporalType; @@ -40,6 +41,10 @@ @Table(name = "volume_view") public class VolumeJoinVO extends BaseViewWithTagInformationVO implements ControlledViewEntity { + @Id + @Column(name = "id", updatable = false, nullable = false) + private long id; + @Column(name = "uuid") private String uuid; diff --git a/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java b/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java index 95fdfa0fde89..a5ca5b1bf07d 100644 --- a/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java +++ 
b/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java @@ -476,7 +476,7 @@ public ProjectAccount assignAccountToProject(Project project, long accountId, Pr return _projectAccountDao.persist(projectAccountVO); } - public ProjectAccount assignUserToProject(Project project, long userId, long accountId, Role userRole, Long projectRoleId) { + public ProjectAccount assignUserToProject(Project project, long userId, long accountId, Role userRole, Long projectRoleId) { return assignAccountToProject(project, accountId, userRole, userId, projectRoleId); } diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 17961dbd955f..1e91c300e9bb 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -639,21 +639,6 @@ private Long getDefaultCustomOfferingId(Account owner, DataCenter zone) { return null; } - private Long getCustomDiskOfferingIdForVolumeUpload(Account owner, DataCenter zone) { - Long offeringId = getDefaultCustomOfferingId(owner, zone); - if (offeringId != null) { - return offeringId; - } - List offerings = _diskOfferingDao.findCustomDiskOfferings(); - for (DiskOfferingVO offering : offerings) { - try { - _configMgr.checkDiskOfferingAccess(owner, offering, zone); - return offering.getId(); - } catch (PermissionDeniedException ignored) {} - } - return null; - } - @DB protected VolumeVO persistVolume(final Account owner, final Long zoneId, final String volumeName, final String url, final String format, final Long diskOfferingId, final Volume.State state) { return Transaction.execute(new TransactionCallbackWithException() { @@ -719,17 +704,31 @@ public VolumeVO doInTransaction(TransactionStatus status) { * If the retrieved volume name is null, empty or blank, then A random name * will be generated using getRandomVolumeName method. 
* - * @param cmd + * @param userSpecifiedName * @return Either the retrieved name or a random name. */ - public String getVolumeNameFromCommand(CreateVolumeCmd cmd) { - String userSpecifiedName = cmd.getVolumeName(); - - if (StringUtils.isBlank(userSpecifiedName)) { - userSpecifiedName = getRandomVolumeName(); + public String getVolumeNameFromCommand(String userSpecifiedName) { + if (StringUtils.isNotBlank(userSpecifiedName)) { + return userSpecifiedName; } - return userSpecifiedName; + return getRandomVolumeName(); + } + + @Override + public Long getCustomDiskOfferingIdForVolumeUpload(Account owner, DataCenter zone) { + Long offeringId = getDefaultCustomOfferingId(owner, zone); + if (offeringId != null) { + return offeringId; + } + List offerings = _diskOfferingDao.findCustomDiskOfferings(); + for (DiskOfferingVO offering : offerings) { + try { + _configMgr.checkDiskOfferingAccess(owner, offering, zone); + return offering.getId(); + } catch (PermissionDeniedException ignored) {} + } + return null; } /* @@ -741,11 +740,20 @@ public String getVolumeNameFromCommand(CreateVolumeCmd cmd) { @DB @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", create = true) public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationException { + return allocVolume(cmd.getEntityOwnerId(), cmd.getZoneId(), cmd.getDiskOfferingId(), cmd.getVirtualMachineId(), + cmd.getSnapshotId(), getVolumeNameFromCommand(cmd.getVolumeName()), cmd.getSize(), + cmd.getDisplayVolume(), cmd.getMinIops(), cmd.getMaxIops(), cmd.getCustomId()); + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", create = true) + public VolumeVO allocVolume(long ownerId, Long zoneId, Long diskOfferingId, Long vmId, Long snapshotId, + String name, Long cmdSize, Boolean displayVolume, Long cmdMinIops, Long cmdMaxIops, String customId) + throws ResourceAllocationException { Account caller = 
CallContext.current().getCallingAccount(); - long ownerId = cmd.getEntityOwnerId(); Account owner = _accountMgr.getActiveAccountById(ownerId); - Boolean displayVolume = cmd.getDisplayVolume(); // permission check _accountMgr.checkAccess(caller, null, true, _accountMgr.getActiveAccountById(ownerId)); @@ -758,8 +766,6 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept } } - Long zoneId = cmd.getZoneId(); - Long diskOfferingId = null; DiskOfferingVO diskOffering = null; Long size = null; Long minIops = null; @@ -768,13 +774,13 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept VolumeVO parentVolume = null; // validate input parameters before creating the volume - if (cmd.getSnapshotId() == null && cmd.getDiskOfferingId() == null) { + if (snapshotId == null && diskOfferingId == null) { throw new InvalidParameterValueException("At least one of disk Offering ID or snapshot ID must be passed whilst creating volume"); } // disallow passing disk offering ID with DATA disk volume snapshots - if (cmd.getSnapshotId() != null && cmd.getDiskOfferingId() != null) { - SnapshotVO snapshot = _snapshotDao.findById(cmd.getSnapshotId()); + if (snapshotId != null && diskOfferingId != null) { + SnapshotVO snapshot = _snapshotDao.findById(snapshotId); if (snapshot != null) { parentVolume = _volsDao.findByIdIncludingRemoved(snapshot.getVolumeId()); if (parentVolume != null && parentVolume.getVolumeType() != Volume.Type.ROOT) @@ -784,10 +790,8 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept } Map details = new HashMap<>(); - if (cmd.getDiskOfferingId() != null) { // create a new volume - - diskOfferingId = cmd.getDiskOfferingId(); - size = cmd.getSize(); + if (diskOfferingId != null) { // create a new volume + size = cmdSize; Long sizeInGB = size; if (size != null) { if (size > 0) { @@ -833,8 +837,8 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept if 
(isCustomizedIops != null) { if (isCustomizedIops) { - minIops = cmd.getMinIops(); - maxIops = cmd.getMaxIops(); + minIops = cmdMinIops; + maxIops = cmdMaxIops; if (minIops == null && maxIops == null) { minIops = 0L; @@ -866,8 +870,7 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept } } - if (cmd.getSnapshotId() != null) { // create volume from snapshot - Long snapshotId = cmd.getSnapshotId(); + if (snapshotId != null) { // create volume from snapshot SnapshotVO snapshotCheck = _snapshotDao.findById(snapshotId); if (snapshotCheck == null) { throw new InvalidParameterValueException("unable to find a snapshot with id " + snapshotId); @@ -918,7 +921,6 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept // one step operation - create volume in VM's cluster and attach it // to the VM - Long vmId = cmd.getVirtualMachineId(); if (vmId != null) { // Check that the virtual machine ID is valid and it's a user vm UserVmVO vm = _userVmDao.findById(vmId); @@ -960,10 +962,10 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept throw new InvalidParameterValueException("Zone is not configured to use local storage but volume's disk offering " + diskOffering.getName() + " uses it"); } - String userSpecifiedName = getVolumeNameFromCommand(cmd); + String userSpecifiedName = getVolumeNameFromCommand(name); - return commitVolume(cmd.getSnapshotId(), caller, owner, displayVolume, zoneId, diskOfferingId, provisioningType, size, minIops, maxIops, parentVolume, userSpecifiedName, - _uuidMgr.generateUuid(Volume.class, cmd.getCustomId()), details); + return commitVolume(snapshotId, caller, owner, displayVolume, zoneId, diskOfferingId, provisioningType, size, minIops, maxIops, parentVolume, userSpecifiedName, + _uuidMgr.generateUuid(Volume.class, customId), details); } @Override @@ -1046,29 +1048,48 @@ public boolean validateVolumeSizeInBytes(long size) { return true; } + private VolumeVO 
allocateVolumeOnStorage(Long volumeId, Long storageId) throws ExecutionException, InterruptedException { + DataStore destStore = dataStoreMgr.getDataStore(storageId, DataStoreRole.Primary); + VolumeInfo destVolume = volFactory.getVolume(volumeId, destStore); + AsyncCallFuture createVolumeFuture = volService.createVolumeAsync(destVolume, destStore); + VolumeApiResult createVolumeResult = createVolumeFuture.get(); + if (createVolumeResult.isFailed()) { + throw new CloudRuntimeException("Creation of a dest volume failed: " + createVolumeResult.getResult()); + } + return _volsDao.findById(destVolume.getId()); + } + @Override @DB @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", async = true) public VolumeVO createVolume(CreateVolumeCmd cmd) { - VolumeVO volume = _volsDao.findById(cmd.getEntityId()); + return createVolume(cmd.getEntityId(), cmd.getVirtualMachineId(), cmd.getSnapshotId(), cmd.getStorageId(), + cmd.getDisplayVolume()); + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", async = true) + public VolumeVO createVolume(long volumeId, Long vmId, Long snapshotId, Long storageId, Boolean display) { + VolumeVO volume = _volsDao.findById(volumeId); boolean created = true; try { - if (cmd.getSnapshotId() != null) { - volume = createVolumeFromSnapshot(volume, cmd.getSnapshotId(), cmd.getVirtualMachineId()); + if (snapshotId != null) { + volume = createVolumeFromSnapshot(volume, snapshotId, vmId); if (volume.getState() != Volume.State.Ready) { created = false; } // if VM Id is provided, attach the volume to the VM - if (cmd.getVirtualMachineId() != null) { + if (vmId != null) { try { - attachVolumeToVM(cmd.getVirtualMachineId(), volume.getId(), volume.getDeviceId(), false); + attachVolumeToVM(vmId, volume.getId(), volume.getDeviceId(), false); } catch (Exception ex) { StringBuilder message = new StringBuilder("Volume: "); message.append(volume.getUuid()); 
message.append(" created successfully, but failed to attach the newly created volume to VM: "); - message.append(cmd.getVirtualMachineId()); + message.append(vmId); message.append(" due to error: "); message.append(ex.getMessage()); if (logger.isDebugEnabled()) { @@ -1077,18 +1098,20 @@ public VolumeVO createVolume(CreateVolumeCmd cmd) { throw new CloudRuntimeException(message.toString()); } } + } else if (storageId != null) { + volume = allocateVolumeOnStorage(volumeId, storageId); } return volume; } catch (Exception e) { created = false; - VolumeInfo vol = volFactory.getVolume(cmd.getEntityId()); + VolumeInfo vol = volFactory.getVolume(volumeId); vol.stateTransit(Volume.Event.DestroyRequested); throw new CloudRuntimeException(String.format("Failed to create volume: %s", volume), e); } finally { if (!created) { VolumeVO finalVolume = volume; logger.trace("Decrementing volume resource count for account {} as volume failed to create on the backend", () -> _accountMgr.getAccount(finalVolume.getAccountId())); - _resourceLimitMgr.decrementVolumeResourceCount(volume.getAccountId(), cmd.getDisplayVolume(), + _resourceLimitMgr.decrementVolumeResourceCount(volume.getAccountId(), display, volume.getSize(), _diskOfferingDao.findByIdIncludingRemoved(volume.getDiskOfferingId())); } } diff --git a/server/src/main/java/com/cloud/user/AccountManager.java b/server/src/main/java/com/cloud/user/AccountManager.java index 98d2419e0486..eca1a571dd88 100644 --- a/server/src/main/java/com/cloud/user/AccountManager.java +++ b/server/src/main/java/com/cloud/user/AccountManager.java @@ -204,6 +204,8 @@ void buildACLViewSearchCriteria(SearchCriteria s void checkApiAccess(Account caller, String command, String apiKey); + void checkApiAccess(Account caller, String command); + UserAccount clearUserTwoFactorAuthenticationInSetupStateOnLogin(UserAccount user); void verifyCallerPrivilegeForUserOrAccountOperations(Account userAccount); diff --git 
a/server/src/main/java/com/cloud/user/AccountManagerImpl.java b/server/src/main/java/com/cloud/user/AccountManagerImpl.java index 2011d4556465..9c7c8141f8e0 100644 --- a/server/src/main/java/com/cloud/user/AccountManagerImpl.java +++ b/server/src/main/java/com/cloud/user/AccountManagerImpl.java @@ -1535,6 +1535,12 @@ public void checkApiAccess(Account caller, String command, String apiKey) { checkApiAccess(apiCheckers, caller, command, keyPairPermissions.toArray(new ApiKeyPairPermission[0])); } + @Override + public void checkApiAccess(Account caller, String command) { + List apiCheckers = getEnabledApiCheckers(); + checkApiAccess(apiCheckers, caller, command); + } + @NotNull private List getEnabledApiCheckers() { // we are really only interested in the dynamic access checker @@ -2755,6 +2761,11 @@ public Account getActiveAccountById(long accountId) { return _accountDao.findById(accountId); } + @Override + public Account getActiveAccountByUuid(String accountUuid) { + return _accountDao.findByUuid(accountUuid); + } + @Override public Account getAccount(long accountId) { return _accountDao.findByIdIncludingRemoved(accountId); @@ -2773,6 +2784,15 @@ public User getActiveUser(long userId) { return _userDao.findById(userId); } + @Override + public User getOneActiveUserForAccount(Account account) { + List users = _userDao.listByAccount(account.getId()); + if (CollectionUtils.isEmpty(users)) { + return null; + } + return users.get(0); + } + @Override public User getUserIncludingRemoved(long userId) { return _userDao.findByIdIncludingRemoved(userId); diff --git a/server/src/main/java/com/cloud/vm/UserVmManager.java b/server/src/main/java/com/cloud/vm/UserVmManager.java index 0a744709644c..69f11b41d1fa 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManager.java +++ b/server/src/main/java/com/cloud/vm/UserVmManager.java @@ -16,13 +16,14 @@ // under the License. 
package com.cloud.vm; +import static com.cloud.user.ResourceLimitService.ResourceLimitHostTags; + import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; -import com.cloud.utils.StringUtils; import org.apache.cloudstack.api.BaseCmd.HTTPMethod; import org.apache.cloudstack.framework.config.ConfigKey; @@ -40,8 +41,7 @@ import com.cloud.template.VirtualMachineTemplate; import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; - -import static com.cloud.user.ResourceLimitService.ResourceLimitHostTags; +import com.cloud.utils.StringUtils; /** * @@ -204,4 +204,5 @@ static Set getStrictHostTags() { */ boolean isVMPartOfAnyCKSCluster(VMInstanceVO vm); + boolean isBlankInstance(VirtualMachineTemplate template); } diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 9134be3d3bd9..2ab4156a1aea 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -60,9 +60,6 @@ import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.ParserConfigurationException; -import com.cloud.serializer.GsonHelper; -import com.cloud.storage.SnapshotPolicyVO; -import com.cloud.storage.dao.SnapshotPolicyDao; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -315,6 +312,7 @@ import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; import com.cloud.resourcelimit.CheckedReservation; +import com.cloud.serializer.GsonHelper; import com.cloud.server.ManagementService; import com.cloud.server.ResourceTag; import com.cloud.service.ServiceOfferingVO; @@ -324,8 +322,10 @@ import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.GuestOSCategoryVO; import com.cloud.storage.GuestOSVO; +import 
com.cloud.storage.LaunchPermissionVO; import com.cloud.storage.ScopeType; import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotPolicyVO; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; @@ -343,7 +343,9 @@ import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.LaunchPermissionDao; import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.SnapshotPolicyDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateZoneDao; import com.cloud.storage.dao.VolumeDao; @@ -421,6 +423,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir private static final long GiB_TO_BYTES = 1024 * 1024 * 1024; + private static final String KVM_VM_DUMMY_TEMPLATE_NAME = "kvm-vm-dummy-template"; + + @Inject private EntityManager _entityMgr; @Inject @@ -617,6 +622,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Inject BackupScheduleDao backupScheduleDao; @Inject + LaunchPermissionDao launchPermissionDao; + @Inject private UserDataDao userDataDao; @Inject protected SnapshotHelper snapshotHelper; @@ -2336,9 +2343,9 @@ public HashMap getVolumeStatistics(long clusterId, Str } private List getVolumesByHost(HostVO host, StoragePool pool) { - List vmsPerHost = _vmInstanceDao.listByHostId(host.getId()); + List vmsPerHost = _vmInstanceDao.listIdsByHostIdForVolumeStats(host.getId()); return vmsPerHost.stream() - .flatMap(vm -> _volsDao.findNonDestroyedVolumesByInstanceIdAndPoolId(vm.getId(),pool.getId()).stream().map(vol -> + .flatMap(vmId -> _volsDao.findNonDestroyedVolumesByInstanceIdAndPoolId(vmId,pool.getId()).stream().map(vol -> vol.getState() == Volume.State.Ready ? (vol.getFormat() == ImageFormat.OVA ? 
vol.getChainInfo() : vol.getPath()) : null).filter(Objects::nonNull)) .collect(Collectors.toList()); } @@ -2497,7 +2504,6 @@ public boolean configure(String name, Map params) throws Configu _scaleRetry = NumbersUtil.parseInt(configs.get(Config.ScaleRetry.key()), 2); _vmIpFetchThreadExecutor = Executors.newFixedThreadPool(VmIpFetchThreadPoolMax.value(), new NamedThreadFactory("vmIpFetchThread")); - logger.info("User VM Manager is configured."); return true; @@ -3927,7 +3933,9 @@ public UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, Service _accountMgr.checkAccess(owner, _diskOfferingDao.findById(diskOfferingId), zone); // If no network is specified, find system security group enabled network - if (networkIdList == null || networkIdList.isEmpty()) { + if (isBlankInstance(template)) { + logger.debug("Blank instance for {} hypervisor, skipping network allocation in an advanced security group enabled zone", hypervisor); + } else if (networkIdList == null || networkIdList.isEmpty()) { Network networkWithSecurityGroup = _networkModel.getNetworkWithSGWithFreeIPs(owner, zone.getId()); if (networkWithSecurityGroup == null) { throw new InvalidParameterValueException("No network with security enabled is found in zone id=" + zone.getUuid()); @@ -4040,7 +4048,9 @@ public UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serv _accountMgr.checkAccess(owner, diskOffering, zone); List vpcSupportedHTypes = _vpcMgr.getSupportedVpcHypervisors(); - if (networkIdList == null || networkIdList.isEmpty()) { + if (isBlankInstance(template)) { + logger.debug("Template is a dummy template for hypervisor {}, skipping network allocation in an advanced zone", hypervisor); + } else if (networkIdList == null || networkIdList.isEmpty()) { NetworkVO defaultNetwork = getDefaultNetwork(zone, owner, false); if (defaultNetwork != null) { networkList.add(defaultNetwork); @@ -4475,7 +4485,8 @@ private UserVm getUncheckedUserVmResource(DataCenter zone, String hostName, 
Stri } } - if (TemplateType.SYSTEM.equals(template.getTemplateType()) && !CKS_NODE.equals(vmType) && !SHAREDFSVM.equals(vmType)) { + if (TemplateType.SYSTEM.equals(template.getTemplateType()) && !CKS_NODE.equals(vmType) && + !SHAREDFSVM.equals(vmType) && !isBlankInstanceDefaultTemplate(template)) { throw new InvalidParameterValueException(String.format("Unable to use system template %s to deploy a user vm", template)); } @@ -4488,7 +4499,7 @@ private UserVm getUncheckedUserVmResource(DataCenter zone, String hostName, Stri if (CollectionUtils.isEmpty(snapshotsOnZone)) { throw new InvalidParameterValueException("The snapshot does not exist on zone " + zone.getId()); } - } else { + } else if (!isBlankInstanceDefaultTemplate(template)) { List listZoneTemplate = _templateZoneDao.listByZoneTemplate(zone.getId(), template.getId()); if (listZoneTemplate == null || listZoneTemplate.isEmpty()) { throw new InvalidParameterValueException("The template " + template.getId() + " is not available for use"); @@ -4603,7 +4614,11 @@ private UserVm getUncheckedUserVmResource(DataCenter zone, String hostName, Stri // by Agent Manager in order to configure default // gateway for the vm if (defaultNetworkNumber == 0) { - throw new InvalidParameterValueException("At least 1 default network has to be specified for the vm"); + if (isBlankInstance(template)) { + logger.debug("Template is a dummy template for hypervisor {}, vm can be created without a default network", hypervisorType); + } else { + throw new InvalidParameterValueException("At least 1 default network has to be specified for the vm"); + } } else if (defaultNetworkNumber > 1) { throw new InvalidParameterValueException("Only 1 default network per vm is supported"); } @@ -4765,7 +4780,7 @@ protected long configureCustomRootDiskSize(Map customParameters, return rootDiskSize; } else { // For baremetal, size can be 0 (zero) - Long templateSize = _templateDao.findById(template.getId()).getSize(); + Long templateSize = 
template.getSize(); if (templateSize != null) { return templateSize; } @@ -5321,7 +5336,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { @ActionEvent(eventType = EventTypes.EVENT_VM_CREATE, eventDescription = "deploying Vm", async = true) public UserVm startVirtualMachine(DeployVMCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException { long vmId = cmd.getEntityId(); - if (!cmd.getStartVm()) { + if (!cmd.getStartVm() || cmd.isBlankInstance()) { return getUserVm(vmId); } Long podId = null; @@ -6469,6 +6484,16 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE (!(HypervisorType.KVM.equals(template.getHypervisorType()) || HypervisorType.KVM.equals(cmd.getHypervisor())))) { throw new InvalidParameterValueException("Deploying a virtual machine with existing volume/snapshot is supported only from KVM hypervisors"); } + boolean blankInstance = cmd.isBlankInstance(); + if (blankInstance) { + CallContext.current().putContextParameter(ApiConstants.BLANK_INSTANCE, true); + } + if (template == null && HypervisorType.KVM.equals(cmd.getHypervisor()) && blankInstance) { + template = getBlankInstanceTemplate(); + logger.info("Creating launch permission for Dummy template"); + LaunchPermissionVO launchPermission = new LaunchPermissionVO(template.getId(), owner.getId()); + launchPermissionDao.persist(launchPermission); + } // Make sure a valid template ID was specified if (template == null) { throw new InvalidParameterValueException("Unable to use template " + templateId); @@ -6627,6 +6652,12 @@ private UserVm createVirtualMachine(BaseDeployVMCmd cmd, DataCenter zone, Accoun if (isLeaseFeatureEnabled) { applyLeaseOnCreateInstance(vm, cmd.getLeaseDuration(), cmd.getLeaseExpiryAction(), svcOffering); } + + if (isBlankInstance(template) && cmd instanceof DeployVMCmd && ((DeployVMCmd) cmd).isBlankInstance()) { + logger.info("Revoking launch permission 
for Dummy template"); + launchPermissionDao.removePermissions(template.getId(), Collections.singletonList(owner.getId())); + } + return vm; } @@ -7991,7 +8022,10 @@ public UserVm moveVmToUser(final AssignVMCmd cmd) throws ResourceAllocationExcep logger.trace("Verifying if the new account [{}] has access to the specified domain [{}].", newAccount, domain); _accountMgr.checkAccess(newAccount, domain); - Network newNetwork = ensureDestinationNetwork(cmd, vm, newAccount); + Network newNetwork = null; + if (!cmd.isSkipNetwork()) { + newNetwork = ensureDestinationNetwork(cmd, vm, newAccount); + } try { Transaction.execute(new TransactionCallbackNoReturn() { @Override @@ -10061,4 +10095,33 @@ private void setVncPasswordForKvmIfAvailable(Map customParameter vm.setVncPassword(customParameters.get(VmDetailConstants.KVM_VNC_PASSWORD)); } } + + protected boolean isBlankInstanceDefaultTemplate(VirtualMachineTemplate template) { + return KVM_VM_DUMMY_TEMPLATE_NAME.equals(template.getUniqueName()); + } + + @Override + public boolean isBlankInstance(VirtualMachineTemplate template) { + if (isBlankInstanceDefaultTemplate(template)) { + return true; + } + return Boolean.TRUE.equals( + MapUtils.getBoolean(CallContext.current().getContextParameters(), ApiConstants.BLANK_INSTANCE)); + } + + VMTemplateVO getBlankInstanceTemplate() { + VMTemplateVO template = _templateDao.findByName(KVM_VM_DUMMY_TEMPLATE_NAME); + if (template != null) { + return template; + } + template = VMTemplateVO.createSystemIso(_templateDao.getNextInSequence(Long.class, "id"), + KVM_VM_DUMMY_TEMPLATE_NAME, KVM_VM_DUMMY_TEMPLATE_NAME, true, + "", true, 64, Account.ACCOUNT_ID_SYSTEM, "", + "Dummy Template for KVM VM", false, 1); + template.setState(VirtualMachineTemplate.State.Active); + template.setFormat(ImageFormat.QCOW2); + template = _templateDao.persist(template); +// _templateDao.remove(template.getId()); + return template; + } } diff --git 
a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java index db636c7f0f42..ac5476a2e122 100644 --- a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java @@ -2411,8 +2411,10 @@ public BackupResponse createBackupResponse(Backup backup, Boolean listVmDetails) backedUpVolumes = new Gson().toJson(backup.getBackedUpVolumes().toArray(), Backup.VolumeInfo[].class); } response.setVolumes(backedUpVolumes); - response.setBackupOfferingId(offering.getUuid()); - response.setBackupOffering(offering.getName()); + if (offering != null) { + response.setBackupOfferingId(offering.getUuid()); + response.setBackupOffering(offering.getName()); + } response.setAccountId(account.getUuid()); response.setAccount(account.getAccountName()); response.setDomainId(domain.getUuid()); @@ -2430,6 +2432,13 @@ public BackupResponse createBackupResponse(Backup backup, Boolean listVmDetails) response.setVmDetails(vmDetails); } + if (backup.getFromCheckpointId() != null) { + response.setFromCheckpointId(backup.getFromCheckpointId()); + } + if (backup.getToCheckpointId() != null) { + response.setToCheckpointId(backup.getToCheckpointId()); + } + response.setObjectName("backup"); return response; } diff --git a/server/src/main/java/org/apache/cloudstack/backup/KVMBackupExportServiceImpl.java b/server/src/main/java/org/apache/cloudstack/backup/KVMBackupExportServiceImpl.java new file mode 100644 index 000000000000..3b160ce4885a --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/backup/KVMBackupExportServiceImpl.java @@ -0,0 +1,1073 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. 
The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package org.apache.cloudstack.backup; + +import static org.apache.cloudstack.backup.BackupManager.BackupFrameworkEnabled; +import static org.apache.cloudstack.backup.BackupManager.BackupProviderPlugin; + +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Timer; +import java.util.TimerTask; +import java.util.UUID; +import java.util.stream.Collectors; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.command.admin.backup.CreateImageTransferCmd; +import org.apache.cloudstack.api.command.admin.backup.DeleteVmCheckpointCmd; +import org.apache.cloudstack.api.command.admin.backup.FinalizeBackupCmd; +import org.apache.cloudstack.api.command.admin.backup.FinalizeImageTransferCmd; +import org.apache.cloudstack.api.command.admin.backup.ListImageTransfersCmd; +import org.apache.cloudstack.api.command.admin.backup.ListVmCheckpointsCmd; +import org.apache.cloudstack.api.command.admin.backup.StartBackupCmd; +import org.apache.cloudstack.api.response.CheckpointResponse; +import org.apache.cloudstack.api.response.ImageTransferResponse; +import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.ImageTransferDao; +import org.apache.cloudstack.context.CallContext; 
+import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext; +import org.apache.cloudstack.framework.jobs.AsyncJobManager; +import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; +import org.apache.cloudstack.jobs.JobInfo; +import org.apache.cloudstack.managed.context.ManagedContextTimerTask; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.StringUtils; +import org.joda.time.DateTime; +import org.springframework.stereotype.Component; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.api.ApiDBUtils; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeDetailVO; +import com.cloud.storage.VolumeStats; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.user.Account; +import com.cloud.user.AccountService; +import com.cloud.user.User; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.ReflectionUse; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VMInstanceDetailVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.VmDetailConstants; +import com.cloud.vm.VmWork; +import com.cloud.vm.VmWorkConstants; +import 
com.cloud.vm.VmWorkJobHandler; +import com.cloud.vm.VmWorkJobHandlerProxy; +import com.cloud.vm.VmWorkSerializer; +import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.vm.dao.VMInstanceDetailsDao; + +@Component +public class KVMBackupExportServiceImpl extends ManagerBase implements KVMBackupExportService, VmWorkJobHandler { + public static final String VM_WORK_JOB_HANDLER = KVMBackupExportServiceImpl.class.getSimpleName(); + private static final long BACKUP_FINALIZE_WAIT_CHECK_INTERVAL = 15 * 1000L; + + @Inject + private VMInstanceDao vmInstanceDao; + + @Inject + private VMInstanceDetailsDao vmInstanceDetailsDao; + + @Inject + private BackupDao backupDao; + + @Inject + private ImageTransferDao imageTransferDao; + + @Inject + private VolumeDao volumeDao; + + @Inject + private VolumeDetailsDao volumeDetailsDao; + + @Inject + private AgentManager agentManager; + + @Inject + private HostDao hostDao; + + @Inject + private PrimaryDataStoreDao primaryDataStoreDao; + + @Inject + private StoragePoolHostDao storagePoolHostDao; + + @Inject + AccountService accountService; + + @Inject + AsyncJobManager asyncJobManager; + + private Timer imageTransferTimer; + + VmWorkJobHandlerProxy jobHandlerProxy = new VmWorkJobHandlerProxy(this); + + private boolean isKVMBackupExportServiceSupported(Long zoneId) { + return !BackupFrameworkEnabled.value() || StringUtils.equals("dummy", BackupProviderPlugin.valueIn(zoneId)); + } + + @Override + public Backup createBackup(StartBackupCmd cmd) { + Long vmId = cmd.getVmId(); + + VMInstanceVO vm = vmInstanceDao.findById(vmId); + if (vm == null) { + throw new CloudRuntimeException("VM not found: " + vmId); + } + + if (!isKVMBackupExportServiceSupported(vm.getDataCenterId())) { + throw new CloudRuntimeException("Veeam-KVM integration can not be used along with the " + BackupProviderPlugin.valueIn(vm.getDataCenterId()) + + " backup provider. 
Either set backup.framework.enabled to false or set the Zone level config backup.framework.provider.plugin to \"dummy\"."); + } + + if (vm.getState() != State.Running && vm.getState() != State.Stopped) { + throw new CloudRuntimeException("VM must be running or stopped to start backup"); + } + + Backup existingBackup = backupDao.findByVmId(vmId); + if (existingBackup != null && existingBackup.getStatus() == Backup.Status.BackingUp) { + throw new CloudRuntimeException("Backup already in progress for VM: " + vmId); + } + + BackupVO backup = new BackupVO(); + backup.setVmId(vmId); + String name = cmd.getName(); + if (StringUtils.isEmpty(name)) { + name = vmId + "-" + DateTime.now(); + } + backup.setName(name); + final String description = cmd.getDescription(); + if (StringUtils.isNotEmpty(description)) { + backup.setDescription(description); + } + backup.setAccountId(vm.getAccountId()); + backup.setDomainId(vm.getDomainId()); + backup.setZoneId(vm.getDataCenterId()); + backup.setStatus(Backup.Status.Queued); + backup.setBackupOfferingId(0L); + backup.setDate(new Date()); + + String toCheckpointId = "ckp-" + UUID.randomUUID().toString().substring(0, 8); + Map vmDetails = vmInstanceDetailsDao.listDetailsKeyPairs(vmId); + String fromCheckpointId = vmDetails.get(VmDetailConstants.ACTIVE_CHECKPOINT_ID); + + backup.setToCheckpointId(toCheckpointId); + backup.setFromCheckpointId(fromCheckpointId); + backup.setType("FULL"); + + Long hostId = vm.getHostId() != null ? 
vm.getHostId() : vm.getLastHostId(); + backup.setHostId(hostId); + + return backupDao.persist(backup); + } + + protected void removeFailedBackup(BackupVO backup) { + backup.setStatus(Backup.Status.Error); + backupDao.update(backup.getId(), backup); + backupDao.remove(backup.getId()); + } + + protected void queueBackupFinalizeWaitWorkJob(final VMInstanceVO vm, final BackupVO backup) { + final CallContext context = CallContext.current(); + final Account callingAccount = context.getCallingAccount(); + final long callingUserId = context.getCallingUserId(); + + VmWorkJobVO workJob = new VmWorkJobVO(context.getContextId()); + workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER); + workJob.setCmd(VmWorkWaitForBackupFinalize.class.getName()); + workJob.setAccountId(callingAccount.getId()); + workJob.setUserId(callingUserId); + workJob.setStep(VmWorkJobVO.Step.Starting); + workJob.setVmType(VirtualMachine.Type.User); + workJob.setVmInstanceId(vm.getId()); + workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); + + VmWorkWaitForBackupFinalize workInfo = new VmWorkWaitForBackupFinalize( + callingUserId, callingAccount.getId(), vm.getId(), VM_WORK_JOB_HANDLER, backup.getId()); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + asyncJobManager.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); + } + + @Override + public Backup startBackup(StartBackupCmd cmd) { + BackupVO backup = backupDao.findById(cmd.getEntityId()); + Long vmId = cmd.getVmId(); + VMInstanceVO vm = vmInstanceDao.findById(vmId); + if (vm == null) { + throw new CloudRuntimeException("VM not found: " + vmId); + } + List volumes = volumeDao.findByInstance(vmId); + Map diskPathUuidMap = new HashMap<>(); + for (Volume vol : volumes) { + String volumePath = getVolumePathForFileBasedBackend(vol); + diskPathUuidMap.put(volumePath, vol.getUuid()); + } + long hostId = backup.getHostId(); + + VMInstanceDetailVO lastCheckpointId = vmInstanceDetailsDao.findDetail(vmId, 
VmDetailConstants.LAST_CHECKPOINT_ID); + if (lastCheckpointId != null) { + try { + sendDeleteCheckpointCommand(vm, lastCheckpointId.getValue()); + } catch (CloudRuntimeException e) { + logger.warn("Failed to delete last checkpoint {} for VM {}, proceeding with backup start", lastCheckpointId.getValue(), vmId, e); + } + } + + Host host = hostDao.findById(hostId); + Map vmDetails = vmInstanceDetailsDao.listDetailsKeyPairs(vmId); + String activeCkpCreateTimeStr = vmDetails.get(VmDetailConstants.ACTIVE_CHECKPOINT_CREATE_TIME); + Long fromCheckpointCreateTime = activeCkpCreateTimeStr != null ? NumbersUtil.parseLong(activeCkpCreateTimeStr, 0L) : null; + StartBackupCommand startCmd = new StartBackupCommand( + vm.getInstanceName(), + backup.getToCheckpointId(), + backup.getFromCheckpointId(), + fromCheckpointCreateTime, + backup.getUuid(), + diskPathUuidMap, + vm.getState() == State.Stopped + ); + + StartBackupAnswer answer; + try { + answer = (StartBackupAnswer) agentManager.send(hostId, startCmd); + } catch (AgentUnavailableException | OperationTimedoutException e) { + removeFailedBackup(backup); + logger.error("Failed to communicate with agent on {} for {} start", host, backup, e); + throw new CloudRuntimeException("Failed to communicate with agent", e); + } + + if (!answer.getResult()) { + removeFailedBackup(backup); + logger.error("Failed to start {} due to: {}", backup, answer.getDetails()); + throw new CloudRuntimeException("Failed to start backup: " + answer.getDetails()); + } + + // Update backup with checkpoint creation time + backup.setCheckpointCreateTime(answer.getCheckpointCreateTime()); + updateBackupState(backup, Backup.Status.ReadyForTransfer); + queueBackupFinalizeWaitWorkJob(vm, backup); + return backup; + } + + protected void updateBackupState(BackupVO backup, Backup.Status newStatus) { + backup.setStatus(newStatus); + backupDao.update(backup.getId(), backup); + } + + private void updateVmCheckpoints(Long vmId, BackupVO backup) { + Map vmDetails = 
vmInstanceDetailsDao.listDetailsKeyPairs(vmId); + String oldCheckpointId = vmDetails.get(VmDetailConstants.ACTIVE_CHECKPOINT_ID); + String oldCreateTimeStr = vmDetails.get(VmDetailConstants.ACTIVE_CHECKPOINT_CREATE_TIME); + if (oldCheckpointId != null && oldCreateTimeStr != null) { + vmInstanceDetailsDao.addDetail(vmId, VmDetailConstants.LAST_CHECKPOINT_ID, oldCheckpointId, false); + vmInstanceDetailsDao.addDetail(vmId, VmDetailConstants.LAST_CHECKPOINT_CREATE_TIME, oldCreateTimeStr, false); + } + String newCheckpointId = backup.getToCheckpointId(); + Long newCreateTime = backup.getCheckpointCreateTime(); + if (newCheckpointId != null && newCreateTime != null) { + vmInstanceDetailsDao.addDetail(vmId, VmDetailConstants.ACTIVE_CHECKPOINT_ID, backup.getToCheckpointId(), false); + vmInstanceDetailsDao.addDetail(vmId, VmDetailConstants.ACTIVE_CHECKPOINT_CREATE_TIME, String.valueOf(newCreateTime), false); + } else { + logger.error("New checkpoint details are missing for backup {} and vm {}", backup.getId(), vmId); + } + } + + @Override + public Backup finalizeBackup(FinalizeBackupCmd cmd) { + Long vmId = cmd.getVmId(); + Long backupId = cmd.getBackupId(); + + // Get backup + BackupVO backup = backupDao.findById(backupId); + if (backup == null) { + throw new CloudRuntimeException("Backup not found: " + backupId); + } + + if (!backup.getVmId().equals(vmId)) { + throw new CloudRuntimeException("Backup does not belong to VM: " + vmId); + } + + VMInstanceVO vm = vmInstanceDao.findById(vmId); + if (vm == null) { + throw new CloudRuntimeException("VM not found: " + vmId); + } + + updateBackupState(backup, Backup.Status.FinalizingTransfer); + + List transfers = imageTransferDao.listByBackupId(backupId); + for (ImageTransferVO transfer : transfers) { + if (transfer.getPhase() != ImageTransferVO.Phase.finished) { + logger.warn("Finalize called for backup {} while Image transfer {} is not finalized, attempting to finalize it", backup.getUuid(), transfer.getUuid()); + 
finalizeImageTransfer(transfer.getId()); + } + } + + if (vm.getState() == State.Running) { + StopBackupCommand stopCmd = new StopBackupCommand(vm.getInstanceName(), vmId, backupId); + + StopBackupAnswer answer; + try { + answer = (StopBackupAnswer) agentManager.send(backup.getHostId(), stopCmd); + } catch (AgentUnavailableException | OperationTimedoutException e) { + removeFailedBackup(backup); + throw new CloudRuntimeException("Failed to communicate with agent", e); + } + + if (!answer.getResult()) { + removeFailedBackup(backup); + throw new CloudRuntimeException("Failed to stop backup: " + answer.getDetails()); + } + } + + updateVmCheckpoints(vmId, backup); + + updateBackupState(backup, Backup.Status.BackedUp); + backupDao.remove(backup.getId()); + + return backup; + + } + + private ImageTransferVO createDownloadImageTransfer(Long backupId, VolumeVO volume, ImageTransfer.Backend backend) { + final String direction = ImageTransfer.Direction.download.toString(); + BackupVO backup = backupDao.findById(backupId); + if (backup == null) { + throw new CloudRuntimeException("Backup not found: " + backupId); + } + if (ImageTransfer.Backend.file.equals(backend)) { + throw new CloudRuntimeException("File backend is not supported for download"); + } + + String transferId = UUID.randomUUID().toString(); + + String socket = backup.getUuid(); + VMInstanceVO vm = vmInstanceDao.findById(backup.getVmId()); + if (vm.getState() == State.Stopped) { + Map vmDetails = vmInstanceDetailsDao.listDetailsKeyPairs(backup.getVmId()); + String volumePath = getVolumePathForFileBasedBackend(volume); + startNBDServer(transferId, direction, backup.getHostId(), volume.getUuid(), volumePath, vmDetails.get(VmDetailConstants.ACTIVE_CHECKPOINT_ID)); + socket = transferId; + } + + HostVO backupHost = hostDao.findById(backup.getHostId()); + if (backupHost == null) { + throw new CloudRuntimeException("Host not found for backup: " + backupId); + } + int idleTimeoutSec = 
ImageTransferIdleTimeoutSeconds.valueIn(backupHost.getDataCenterId()); + CreateImageTransferCommand transferCmd = new CreateImageTransferCommand( + transferId, + direction, + volume.getUuid(), + socket, + backup.getFromCheckpointId(), + idleTimeoutSec); + + try { + CreateImageTransferAnswer answer; + answer = (CreateImageTransferAnswer) agentManager.send(backup.getHostId(), transferCmd); + + if (!answer.getResult()) { + throw new CloudRuntimeException("Failed to create image transfer: " + answer.getDetails()); + } + + ImageTransferVO imageTransfer = new ImageTransferVO( + transferId, + backupId, + volume.getId(), + backup.getHostId(), + socket, + ImageTransferVO.Phase.transferring, + ImageTransfer.Direction.download, + backup.getAccountId(), + backup.getDomainId(), + backup.getZoneId() + ); + imageTransfer.setTransferUrl(answer.getTransferUrl()); + imageTransfer.setSignedTicketId(answer.getImageTransferId()); + imageTransfer = imageTransferDao.persist(imageTransfer); + return imageTransfer; + + } catch (AgentUnavailableException | OperationTimedoutException e) { + throw new CloudRuntimeException("Failed to communicate with agent", e); + } + } + + private HostVO getRandomHostFromStoragePool(StoragePoolVO storagePool) { + List hosts; + switch (storagePool.getScope()) { + case CLUSTER: + hosts = hostDao.findByClusterId(storagePool.getClusterId()); + Collections.shuffle(hosts); + return hosts.get(0); + case ZONE: + hosts = hostDao.findByDataCenterId(storagePool.getDataCenterId()); + Collections.shuffle(hosts); + return hosts.get(0); + case HOST: + List storagePoolHostVOs = storagePoolHostDao.listByPoolId(storagePool.getId()); + Collections.shuffle(storagePoolHostVOs); + return hostDao.findById(storagePoolHostVOs.get(0).getHostId()); + default: + throw new CloudRuntimeException("Unsupported storage pool scope: " + storagePool.getScope()); + } + } + + private void startNBDServer(String transferId, String direction, Long hostId, String exportName, String volumePath, 
String checkpointId) { + StartNBDServerAnswer nbdServerAnswer; + if (hostId == null) { + throw new CloudRuntimeException("Host cannot be determined for starting NBD server"); + } + HostVO host = hostDao.findById(hostId); + if (host == null) { + throw new CloudRuntimeException("Host cannot be found for starting NBD server with ID: " + hostId); + } + StartNBDServerCommand nbdServerCmd = new StartNBDServerCommand( + transferId, + exportName, + volumePath, + transferId, + direction, + checkpointId + ); + try { + nbdServerAnswer = (StartNBDServerAnswer) agentManager.send(hostId, nbdServerCmd); + } catch (AgentUnavailableException | OperationTimedoutException e) { + throw new CloudRuntimeException("Failed to communicate with agent", e); + } + if (!nbdServerAnswer.getResult()) { + throw new CloudRuntimeException("Failed to start the NBD server"); + } + } + + private String getVolumePathPrefix(StoragePoolVO storagePool) { + if (ScopeType.HOST.equals(storagePool.getScope())) { + return storagePool.getPath(); + } + switch (storagePool.getPoolType()) { + case NetworkFilesystem: + return String.format("/mnt/%s", storagePool.getUuid()); + case SharedMountPoint: + return storagePool.getPath(); + default: + throw new CloudRuntimeException("Unsupported storage pool type for file based image transfer: " + storagePool.getPoolType()); + } + } + + private String getVolumePathForFileBasedBackend(Volume volume) { + StoragePoolVO storagePool = primaryDataStoreDao.findById(volume.getPoolId()); + String volumePathPrefix = getVolumePathPrefix(storagePool); + return volumePathPrefix + "/" + volume.getPath(); + } + + private ImageTransferVO createUploadImageTransfer(VolumeVO volume, ImageTransfer.Backend backend) { + final String direction = ImageTransfer.Direction.upload.toString(); + String transferId = UUID.randomUUID().toString(); + + Long poolId = volume.getPoolId(); + StoragePoolVO storagePool = poolId == null ? 
null : primaryDataStoreDao.findById(poolId); + if (storagePool == null) { + throw new CloudRuntimeException("Storage pool cannot be determined for volume: " + volume.getUuid()); + } + + Host host = getRandomHostFromStoragePool(storagePool); + String volumePath = getVolumePathForFileBasedBackend(volume); + int idleTimeoutSec = ImageTransferIdleTimeoutSeconds.valueIn(host.getDataCenterId()); + + ImageTransferVO imageTransfer; + CreateImageTransferCommand transferCmd; + if (backend.equals(ImageTransfer.Backend.file)) { + imageTransfer = new ImageTransferVO( + transferId, + volume.getId(), + host.getId(), + volumePath, + ImageTransferVO.Phase.transferring, + ImageTransfer.Direction.upload, + volume.getAccountId(), + volume.getDomainId(), + volume.getDataCenterId()); + + transferCmd = new CreateImageTransferCommand( + transferId, + direction, + transferId, + volumePath, + idleTimeoutSec); + + } else { + startNBDServer(transferId, direction, host.getId(), volume.getUuid(), volumePath, null); + imageTransfer = new ImageTransferVO( + transferId, + null, + volume.getId(), + host.getId(), + transferId, + ImageTransferVO.Phase.transferring, + ImageTransfer.Direction.upload, + volume.getAccountId(), + volume.getDomainId(), + volume.getDataCenterId()); + + transferCmd = new CreateImageTransferCommand( + transferId, + direction, + volume.getUuid(), + transferId, + null, + idleTimeoutSec); + } + CreateImageTransferAnswer transferAnswer; + try { + transferAnswer = (CreateImageTransferAnswer) agentManager.send(imageTransfer.getHostId(), transferCmd); + } catch (AgentUnavailableException | OperationTimedoutException e) { + throw new CloudRuntimeException("Failed to communicate with agent", e); + } + + if (!transferAnswer.getResult()) { + if (!backend.equals(ImageTransfer.Backend.file)) { + stopNBDServer(imageTransfer); + } + throw new CloudRuntimeException("Failed to create image transfer: " + transferAnswer.getDetails()); + } + + 
imageTransfer.setTransferUrl(transferAnswer.getTransferUrl()); + imageTransfer.setSignedTicketId(transferAnswer.getImageTransferId()); + imageTransfer = imageTransferDao.persist(imageTransfer); + return imageTransfer; + + } + + private ImageTransfer.Backend getImageTransferBackend(ImageTransfer.Format format, ImageTransfer.Direction direction) { + if (ImageTransfer.Format.cow.equals(format)) { + if (ImageTransfer.Direction.download.equals(direction)) { + logger.debug("Using NBD backend for download"); + return ImageTransfer.Backend.nbd; + } + return ImageTransfer.Backend.file; + } else { + return ImageTransfer.Backend.nbd; + } + } + + @Override + public ImageTransferResponse createImageTransfer(CreateImageTransferCmd cmd) { + ImageTransfer imageTransfer = createImageTransfer(cmd.getVolumeId(), cmd.getBackupId(), cmd.getDirection(), cmd.getFormat()); + if (imageTransfer instanceof ImageTransferVO) { + ImageTransferVO imageTransferVO = (ImageTransferVO) imageTransfer; + return toImageTransferResponse(imageTransferVO); + } + return toImageTransferResponse(imageTransferDao.findById(imageTransfer.getId())); + } + + @Override + public ImageTransfer createImageTransfer(long volumeId, Long backupId, ImageTransfer.Direction direction, ImageTransfer.Format format) { + User callingUser = CallContext.current().getCallingUser(); + ImageTransfer imageTransfer; + VolumeVO volume = volumeDao.findById(volumeId); + // Null-check before checkAccess: dereferencing a missing volume would NPE instead of + // surfacing the intended "not found" error to the caller. + if (volume == null) { + throw new CloudRuntimeException("Volume not found with the specified Id"); + } + + accountService.checkAccess(callingUser, volume); + + if (!isKVMBackupExportServiceSupported(volume.getDataCenterId())) { + throw new CloudRuntimeException("Veeam-KVM integration can not be used along with the " + BackupProviderPlugin.valueIn(volume.getDataCenterId()) + + " backup provider. 
Either set backup.framework.enabled to false or set the Zone level config backup.framework.provider.plugin to \"dummy\"."); + } + + ImageTransferVO existingTransfer = imageTransferDao.findUnfinishedByVolume(volume.getId()); + if (existingTransfer != null) { + throw new CloudRuntimeException("Image transfer already in progress for volume: " + volume.getUuid()); + } + + ImageTransfer.Backend backend = getImageTransferBackend(format, direction); + if (ImageTransfer.Direction.upload.equals(direction)) { + imageTransfer = createUploadImageTransfer(volume, backend); + } else if (ImageTransfer.Direction.download.equals(direction)) { + imageTransfer = createDownloadImageTransfer(backupId, volume, backend); + } else { + throw new CloudRuntimeException("Invalid direction: " + direction); + } + + return imageTransferDao.findById(imageTransfer.getId()); + } + + @Override + public boolean cancelImageTransfer(long imageTransferId) { + finalizeImageTransfer(imageTransferId); + return true; + } + + private void finalizeDownloadImageTransfer(ImageTransferVO imageTransfer) { + + String transferId = imageTransfer.getUuid(); + FinalizeImageTransferCommand finalizeCmd = new FinalizeImageTransferCommand(transferId); + + BackupVO backup = backupDao.findById(imageTransfer.getBackupId()); + + Answer answer; + try { + answer = agentManager.send(backup.getHostId(), finalizeCmd); + } catch (AgentUnavailableException | OperationTimedoutException e) { + throw new CloudRuntimeException("Failed to communicate with agent", e); + } + + if (!answer.getResult()) { + throw new CloudRuntimeException("Failed to finalize image transfer: " + answer.getDetails()); + } + + VMInstanceVO vm = vmInstanceDao.findById(backup.getVmId()); + if (vm.getState() == State.Stopped) { + boolean stopNbdServerResult = stopNBDServer(imageTransfer); + if (!stopNbdServerResult) { + throw new CloudRuntimeException("Failed to stop the nbd server"); + } + } + } + + private boolean stopNBDServer(ImageTransferVO imageTransfer) { + 
String transferId = imageTransfer.getUuid(); + String direction = imageTransfer.getDirection().toString(); + StopNBDServerCommand stopNbdServerCommand = new StopNBDServerCommand(transferId, direction); + Answer answer; + try { + answer = agentManager.send(imageTransfer.getHostId(), stopNbdServerCommand); + } catch (AgentUnavailableException | OperationTimedoutException e) { + logger.error("Failed to stop NBD server on image transfer finalization", e); + return false; + } + return answer.getResult(); + } + + private void finalizeUploadImageTransfer(ImageTransferVO imageTransfer) { + String transferId = imageTransfer.getUuid(); + + boolean stopNbdServerResult = stopNBDServer(imageTransfer); + if (!stopNbdServerResult) { + throw new CloudRuntimeException("Failed to stop the nbd server"); + } + + FinalizeImageTransferCommand finalizeCmd = new FinalizeImageTransferCommand(transferId); + Answer answer; + try { + answer = agentManager.send(imageTransfer.getHostId(), finalizeCmd); + } catch (AgentUnavailableException | OperationTimedoutException e) { + throw new CloudRuntimeException("Failed to communicate with agent", e); + } + + if (!answer.getResult()) { + throw new CloudRuntimeException("Failed to finalize image transfer: " + answer.getDetails()); + } + } + + @Override + public boolean finalizeImageTransfer(FinalizeImageTransferCmd cmd) { + return finalizeImageTransfer(cmd.getImageTransferId()); + } + + @Override + public boolean finalizeImageTransfer(final long imageTransferId) { + ImageTransferVO imageTransfer = imageTransferDao.findById(imageTransferId); + if (imageTransfer == null) { + throw new CloudRuntimeException("Image transfer not found: " + imageTransferId); + } + + if (imageTransfer.getDirection().equals(ImageTransfer.Direction.download)) { + finalizeDownloadImageTransfer(imageTransfer); + } else { + finalizeUploadImageTransfer(imageTransfer); + } + imageTransfer.setPhase(ImageTransferVO.Phase.finished); + imageTransferDao.update(imageTransfer.getId(), 
imageTransfer); + imageTransferDao.remove(imageTransfer.getId()); + return true; + } + + @Override + public List listImageTransfers(ListImageTransfersCmd cmd) { + Long id = cmd.getId(); + Long backupId = cmd.getBackupId(); + + List transfers; + if (id != null) { + transfers = List.of(imageTransferDao.findById(id)); + } else if (backupId != null) { + transfers = imageTransferDao.listByBackupId(backupId); + } else { + transfers = imageTransferDao.listAll(); + } + + return transfers.stream().map(this::toImageTransferResponse).collect(Collectors.toList()); + } + + private CheckpointResponse createCheckpointResponse(String checkpointId, String createTime, boolean isActive) { + CheckpointResponse response = new CheckpointResponse(); + response.setObjectName("checkpoint"); + response.setId(checkpointId); + Long createTimeSeconds = createTime != null ? NumbersUtil.parseLong(createTime, 0L) : 0L; + response.setCreated(Date.from(Instant.ofEpochSecond(createTimeSeconds))); + response.setIsActive(isActive); + return response; + } + + @Override + public List listVmCheckpoints(ListVmCheckpointsCmd cmd) { + Long vmId = cmd.getVmId(); + VMInstanceVO vm = vmInstanceDao.findById(vmId); + if (vm == null) { + throw new CloudRuntimeException("VM not found: " + vmId); + } + List responses = new ArrayList<>(); + + Map details = vmInstanceDetailsDao.listDetailsKeyPairs(vmId); + String activeCheckpointId = details.get(VmDetailConstants.ACTIVE_CHECKPOINT_ID); + if (activeCheckpointId != null) { + responses.add(createCheckpointResponse(activeCheckpointId, details.get(VmDetailConstants.ACTIVE_CHECKPOINT_CREATE_TIME), true)); + } + String lastCheckpointId = details.get(VmDetailConstants.LAST_CHECKPOINT_ID); + if (lastCheckpointId != null) { + responses.add(createCheckpointResponse(lastCheckpointId, details.get(VmDetailConstants.LAST_CHECKPOINT_CREATE_TIME), false)); + } + return responses; + } + + private void sendDeleteCheckpointCommand(VMInstanceVO vm, String checkpointId) { + Long hostId = 
vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId(); + + Map diskPathUuidMap = new HashMap<>(); + if (vm.getState() == State.Stopped) { + List volumes = volumeDao.findByInstance(vm.getId()); + for (Volume vol : volumes) { + diskPathUuidMap.put(getVolumePathForFileBasedBackend(vol), vol.getUuid()); + } + } + + DeleteVmCheckpointCommand deleteCmd = new DeleteVmCheckpointCommand( + vm.getInstanceName(), + checkpointId, + diskPathUuidMap, + vm.getState() == State.Stopped); + + Answer answer; + try { + answer = agentManager.send(hostId, deleteCmd); + } catch (AgentUnavailableException | OperationTimedoutException e) { + logger.error("Failed to communicate with agent to delete checkpoint for VM {}", vm.getId(), e); + throw new CloudRuntimeException("Failed to communicate with agent", e); + } + + if (answer == null || !answer.getResult()) { + String err = answer != null ? answer.getDetails() : "null answer"; + throw new CloudRuntimeException("Failed to delete checkpoint: " + err); + } + } + + @Override + public boolean deleteVmCheckpoint(DeleteVmCheckpointCmd cmd) { + VMInstanceVO vm = vmInstanceDao.findById(cmd.getVmId()); + if (vm == null) { + throw new CloudRuntimeException("VM not found: " + cmd.getVmId()); + } + if (!isKVMBackupExportServiceSupported(vm.getDataCenterId())) { + throw new CloudRuntimeException("Veeam-KVM integration can not be used along with the " + BackupProviderPlugin.valueIn(vm.getDataCenterId()) + + " backup provider. 
Either set backup.framework.enabled to false or set the Zone level config backup.framework.provider.plugin to \"dummy\"."); + } + + if (vm.getState() != State.Running && vm.getState() != State.Stopped) { + throw new CloudRuntimeException("VM must be running or stopped to delete checkpoint"); + } + + long vmId = cmd.getVmId(); + Map details = vmInstanceDetailsDao.listDetailsKeyPairs(vmId); + String activeCheckpointId = details.get(VmDetailConstants.ACTIVE_CHECKPOINT_ID); + if (activeCheckpointId == null || !activeCheckpointId.equals(cmd.getCheckpointId())) { + logger.error("Checkpoint ID {} to delete does not match active checkpoint ID for VM {}", cmd.getCheckpointId(), vmId); + return true; + } + + sendDeleteCheckpointCommand(vm, activeCheckpointId); + revertVmCheckpointDetailsAfterActiveDelete(vmId, details); + + return true; + } + + private void revertVmCheckpointDetailsAfterActiveDelete(long vmId, Map detailsBeforeDelete) { + String lastId = detailsBeforeDelete.get(VmDetailConstants.LAST_CHECKPOINT_ID); + String lastTime = detailsBeforeDelete.get(VmDetailConstants.LAST_CHECKPOINT_CREATE_TIME); + if (lastId != null) { + vmInstanceDetailsDao.addDetail(vmId, VmDetailConstants.ACTIVE_CHECKPOINT_ID, lastId, false); + vmInstanceDetailsDao.addDetail(vmId, VmDetailConstants.ACTIVE_CHECKPOINT_CREATE_TIME, lastTime, false); + vmInstanceDetailsDao.removeDetail(vmId, VmDetailConstants.LAST_CHECKPOINT_ID); + vmInstanceDetailsDao.removeDetail(vmId, VmDetailConstants.LAST_CHECKPOINT_CREATE_TIME); + } else { + vmInstanceDetailsDao.removeDetail(vmId, VmDetailConstants.ACTIVE_CHECKPOINT_ID); + vmInstanceDetailsDao.removeDetail(vmId, VmDetailConstants.ACTIVE_CHECKPOINT_CREATE_TIME); + } + } + + @Override + public List> getCommands() { + List> cmdList = new ArrayList<>(); + if (ExposeKVMBackupExportServiceApis.value()) { + cmdList.add(StartBackupCmd.class); + cmdList.add(FinalizeBackupCmd.class); + cmdList.add(CreateImageTransferCmd.class); + 
cmdList.add(FinalizeImageTransferCmd.class); + cmdList.add(ListImageTransfersCmd.class); + cmdList.add(ListVmCheckpointsCmd.class); + cmdList.add(DeleteVmCheckpointCmd.class); + } + return cmdList; + } + + private ImageTransferResponse toImageTransferResponse(ImageTransferVO imageTransferVO) { + ImageTransferResponse response = new ImageTransferResponse(); + response.setId(imageTransferVO.getUuid()); + Long backupId = imageTransferVO.getBackupId(); + if (backupId != null) { + Backup backup = backupDao.findByIdIncludingRemoved(backupId); + response.setBackupId(backup.getUuid()); + } + Long volumeId = imageTransferVO.getDiskId(); + Volume volume = volumeDao.findByIdIncludingRemoved(volumeId); + response.setDiskId(volume.getUuid()); + response.setTransferUrl(imageTransferVO.getTransferUrl()); + response.setPhase(imageTransferVO.getPhase().toString()); + response.setProgress(imageTransferVO.getProgress()); + response.setDirection(imageTransferVO.getDirection().toString()); + response.setCreated(imageTransferVO.getCreated()); + return response; + } + + @Override + public boolean start() { + final TimerTask imageTransferPollTask = new ManagedContextTimerTask() { + @Override + protected void runInContext() { + try { + pollImageTransferProgress(); + } catch (final Throwable t) { + logger.warn("Catch throwable in image transfer poll task ", t); + } + } + }; + + imageTransferTimer = new Timer("ImageTransferPollTask"); + long pollingInterval = ImageTransferPollingInterval.value() * 1000L; + imageTransferTimer.schedule(imageTransferPollTask, pollingInterval, pollingInterval); + return true; + } + + @Override + public boolean stop() { + if (imageTransferTimer != null) { + imageTransferTimer.cancel(); + imageTransferTimer = null; + } + return true; + } + + @ReflectionUse + public Pair orchestrateWaitForBackupFinalize(VmWorkWaitForBackupFinalize work) { + return waitForBackupTerminalState(work.getBackupId()); + } + + @Override + public Pair handleVmWorkJob(VmWork work) throws 
Exception { + return jobHandlerProxy.handleVmWorkJob(work); + } + + protected Pair waitForBackupTerminalState(final long backupId) { + while (true) { + final BackupVO backup = backupDao.findByIdIncludingRemoved(backupId); + if (backup == null) { + RuntimeException ex = new CloudRuntimeException(String.format("Backup %d not found while waiting for finalize", backupId)); + return new Pair<>(JobInfo.Status.FAILED, asyncJobManager.marshallResultObject(ex)); + } + + if (backup.getStatus() == Backup.Status.BackedUp) { + return new Pair<>(JobInfo.Status.SUCCEEDED, asyncJobManager.marshallResultObject(backup.getId())); + } + + if (backup.getStatus() == Backup.Status.Failed || backup.getStatus() == Backup.Status.Error) { + RuntimeException ex = new CloudRuntimeException(String.format("Backup %d reached terminal failure state: %s", backupId, backup.getStatus())); + return new Pair<>(JobInfo.Status.FAILED, asyncJobManager.marshallResultObject(ex)); + } + logger.debug("{} is not in a terminal state, current state: {}, waiting {}ms to check again", + backup, backup.getStatus(), BACKUP_FINALIZE_WAIT_CHECK_INTERVAL); + try { + Thread.sleep(BACKUP_FINALIZE_WAIT_CHECK_INTERVAL); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + RuntimeException ex = new CloudRuntimeException(String.format("Interrupted while waiting for backup %d finalize", backupId), e); + return new Pair<>(JobInfo.Status.FAILED, asyncJobManager.marshallResultObject(ex)); + } + } + } + + private void pollImageTransferProgress() { + try { + List transferringTransfers = imageTransferDao.listByPhaseAndDirection( + ImageTransfer.Phase.transferring, ImageTransfer.Direction.upload); + if (transferringTransfers == null || transferringTransfers.isEmpty()) { + return; + } + + Map> transfersByHost = transferringTransfers.stream() + .collect(Collectors.groupingBy(ImageTransferVO::getHostId)); + Map transferVolumeMap = new HashMap<>(); + + for (Map.Entry> entry : transfersByHost.entrySet()) { + Long 
hostId = entry.getKey(); + List hostTransfers = entry.getValue(); + + try { + List transferIds = new ArrayList<>(); + Map volumePaths = new HashMap<>(); + Map volumeSizes = new HashMap<>(); + + for (ImageTransferVO transfer : hostTransfers) { + VolumeVO volume = volumeDao.findById(transfer.getDiskId()); + if (volume == null) { + logger.warn("Volume not found for image transfer: {}", transfer.getUuid()); + imageTransferDao.remove(transfer.getId()); // ToDo: confirm if this enough? + continue; + } + transferVolumeMap.put(transfer.getId(), volume); + + String transferId = transfer.getUuid(); + transferIds.add(transferId); + + if (volume.getPath() == null) { + logger.warn("Volume path is null for image transfer: {}", transfer.getUuid()); + continue; + } + String volumePath = getVolumePathForFileBasedBackend(volume); + volumePaths.put(transferId, volumePath); + volumeSizes.put(transferId, volume.getSize()); + } + + if (transferIds.isEmpty()) { + continue; + } + + GetImageTransferProgressCommand cmd = new GetImageTransferProgressCommand(transferIds, volumePaths, volumeSizes); + GetImageTransferProgressAnswer answer = (GetImageTransferProgressAnswer) agentManager.send(hostId, cmd); + + if (answer == null || !answer.getResult() || MapUtils.isEmpty(answer.getProgressMap())) { + logger.warn("Failed to get progress for transfers on host {}: {}", hostId, + answer != null ? answer.getDetails() : "null answer"); + continue; // skip only this host; transfers on the remaining hosts must still be polled 
+ } + for (ImageTransferVO transfer : hostTransfers) { + String transferId = transfer.getUuid(); + Long currentSize = answer.getProgressMap().get(transferId); + if (currentSize == null) { + continue; + } + VolumeVO volume = transferVolumeMap.get(transfer.getId()); + long totalSize = getVolumeTotalSize(volume); + // Cap the percentage at 100 (Math.min, not Math.max, which would always report >= 100). + int progress = Math.min((int)((currentSize * 100) / totalSize), 100); + transfer.setProgress(progress); + // Compare the computed percentage, not the raw byte count, against 100. + if (progress >= 100) { + transfer.setPhase(ImageTransfer.Phase.finished); + logger.debug("Updated phase for image transfer {} to finished", transferId); + } + imageTransferDao.update(transfer.getId(), transfer); + logger.debug("Updated progress for image transfer {}: {}%", transferId, progress); + } + + } catch (AgentUnavailableException | OperationTimedoutException e) { + logger.warn("Failed to communicate with host {} for image transfer progress", hostId); + } catch (Exception e) { + logger.error("Error polling image transfer progress for host " + hostId, e); + } + } + + } catch (Exception e) { + logger.error("Error in pollImageTransferProgress", e); + } + } + + private long getVolumeTotalSize(VolumeVO volume) { + VolumeDetailVO detail = volumeDetailsDao.findDetail(volume.getId(), ApiConstants.VIRTUAL_SIZE); + if (detail != null) { + long size = NumbersUtil.parseLong(detail.getValue(), 0L); + if (size > 0) { + return size; + } + } + VolumeStats vs = null; + if (List.of(Storage.ImageFormat.VHD, Storage.ImageFormat.QCOW2, Storage.ImageFormat.RAW).contains(volume.getFormat())) { + if (volume.getPath() != null) { + vs = ApiDBUtils.getVolumeStatistics(volume.getPath()); + } + } else if (volume.getFormat() == Storage.ImageFormat.OVA) { + if (volume.getChainInfo() != null) { + vs = ApiDBUtils.getVolumeStatistics(volume.getChainInfo()); + } + } + if (vs != null && vs.getPhysicalSize() > 0) { + return vs.getPhysicalSize(); + } + return volume.getSize(); + } + + @Override + public String 
getConfigComponentName() { + return KVMBackupExportService.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[]{ + ImageTransferPollingInterval, + ImageTransferIdleTimeoutSeconds, + ExposeKVMBackupExportServiceApis + }; + } +} diff --git a/server/src/main/java/org/apache/cloudstack/backup/VmWorkWaitForBackupFinalize.java b/server/src/main/java/org/apache/cloudstack/backup/VmWorkWaitForBackupFinalize.java new file mode 100644 index 000000000000..ac64b47aa3ea --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/backup/VmWorkWaitForBackupFinalize.java @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.backup; + +import com.cloud.vm.VmWork; + +public class VmWorkWaitForBackupFinalize extends VmWork { + private static final long serialVersionUID = 2209426364298601717L; + + private final long backupId; + + public VmWorkWaitForBackupFinalize(long userId, long accountId, long vmId, String handlerName, long backupId) { + super(userId, accountId, vmId, handlerName); + this.backupId = backupId; + } + + public long getBackupId() { + return backupId; + } +} diff --git a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml index 37d32c0f3905..48fe5bb415df 100644 --- a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml @@ -347,6 +347,8 @@ + + diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java index 0575b430ef10..e014ad72cfc0 100644 --- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java +++ b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java @@ -597,26 +597,22 @@ public void testTakeSnapshotF2() throws ResourceAllocationException { @Test public void testNullGetVolumeNameFromCmd() { - when(createVol.getVolumeName()).thenReturn(null); - Assert.assertNotNull(volumeApiServiceImpl.getVolumeNameFromCommand(createVol)); + Assert.assertNotNull(volumeApiServiceImpl.getVolumeNameFromCommand(null)); } @Test public void testEmptyGetVolumeNameFromCmd() { - when(createVol.getVolumeName()).thenReturn(""); - Assert.assertNotNull(volumeApiServiceImpl.getVolumeNameFromCommand(createVol)); + Assert.assertNotNull(volumeApiServiceImpl.getVolumeNameFromCommand("")); } @Test public void testBlankGetVolumeNameFromCmd() { - 
when(createVol.getVolumeName()).thenReturn(" "); - Assert.assertNotNull(volumeApiServiceImpl.getVolumeNameFromCommand(createVol)); + Assert.assertNotNull(volumeApiServiceImpl.getVolumeNameFromCommand(" ")); } @Test public void testNonEmptyGetVolumeNameFromCmd() { - when(createVol.getVolumeName()).thenReturn("abc"); - Assert.assertSame(volumeApiServiceImpl.getVolumeNameFromCommand(createVol), "abc"); + Assert.assertSame(volumeApiServiceImpl.getVolumeNameFromCommand("abc"), "abc"); } @Test diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java index 1a38c1b0a06f..cd102a07ee5c 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java @@ -846,9 +846,7 @@ public void configureCustomRootDiskSizeTestEmptyParametersAndOfferingRootSize() private void prepareAndRunConfigureCustomRootDiskSizeTest(Map customParameters, long expectedRootDiskSize, int timesVerifyIfHypervisorSupports, Long offeringRootDiskSize) { VMTemplateVO template = Mockito.mock(VMTemplateVO.class); - Mockito.when(template.getId()).thenReturn(1l); Mockito.when(template.getSize()).thenReturn(99L * GiB_TO_BYTES); - Mockito.when(templateDao.findById(Mockito.anyLong())).thenReturn(template); DiskOfferingVO diskfferingVo = Mockito.mock(DiskOfferingVO.class); diff --git a/server/src/test/java/com/cloud/vpc/dao/MockNetworkDaoImpl.java b/server/src/test/java/com/cloud/vpc/dao/MockNetworkDaoImpl.java index 8a0bec56df7b..adcbc5d01734 100644 --- a/server/src/test/java/com/cloud/vpc/dao/MockNetworkDaoImpl.java +++ b/server/src/test/java/com/cloud/vpc/dao/MockNetworkDaoImpl.java @@ -16,6 +16,11 @@ // under the License. 
package com.cloud.vpc.dao; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; + import com.cloud.network.Network; import com.cloud.network.Network.GuestType; import com.cloud.network.Networks.TrafficType; @@ -23,14 +28,10 @@ import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; - @DB() public class MockNetworkDaoImpl extends GenericDaoBase implements NetworkDao { @@ -160,11 +161,22 @@ public boolean update(final Long networkId, final NetworkVO network, final Map listByZoneAndTrafficType(final long zoneId, final TrafficType trafficType, Filter filter) { + return null; + } + @Override public List listByZoneAndTrafficType(final long zoneId, final TrafficType trafficType) { return null; } + @Override + public List listByTrafficTypeAndOwners(final TrafficType trafficType, List accountIds, + List domainIds, Filter filter) { + return null; + } + @Override public void setCheckForGc(final long networkId) { } diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index 8dd2fa23169b..db95a58f222f 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -54,7 +54,6 @@ import javax.naming.ConfigurationException; -import com.cloud.agent.api.ConvertSnapshotCommand; import org.apache.cloudstack.framework.security.keystore.KeystoreManager; import 
org.apache.cloudstack.storage.NfsMountManagerImpl.PathParser; import org.apache.cloudstack.storage.command.CopyCmdAnswer; @@ -97,8 +96,8 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.client.utils.URLEncodedUtils; import org.apache.http.impl.client.DefaultHttpClient; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.joda.time.DateTime; import org.joda.time.format.ISODateTimeFormat; @@ -108,6 +107,7 @@ import com.cloud.agent.api.CheckHealthCommand; import com.cloud.agent.api.Command; import com.cloud.agent.api.ComputeChecksumCommand; +import com.cloud.agent.api.ConvertSnapshotCommand; import com.cloud.agent.api.DeleteSnapshotsDirCommand; import com.cloud.agent.api.GetStorageStatsAnswer; import com.cloud.agent.api.GetStorageStatsCommand; diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 292f52d809bf..9c521caf1f4c 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -223,6 +223,15 @@ 'Management': 'Management', 'Backup' : 'Backup and Recovery', 'Restore' : 'Backup and Recovery', + 'startBackup' : 'Backup and Recovery', + 'finalizeBackup' : 'Backup and Recovery', + 'createImageTransfer' : 'Backup and Recovery', + 'finalizeImageTransfer' : 'Backup and Recovery', + 'listImageTransfers' : 'Backup and Recovery', + 'listVmCheckpoints' : 'Backup and Recovery', + 'deleteVmCheckpoint' : 'Backup and Recovery', + 'ImageTransfer' : 'Backup and Recovery', + 'VmCheckpoint' : 'Backup and Recovery', 'UnmanagedInstance': 'Virtual Machine', 'KubernetesSupportedVersion': 'Kubernetes Service', 'KubernetesCluster': 'Kubernetes Service', diff --git a/utils/src/main/java/org/apache/cloudstack/utils/server/ServerPropertiesUtil.java b/utils/src/main/java/org/apache/cloudstack/utils/server/ServerPropertiesUtil.java index 14d24dbb6410..52642cf03707 100644 --- a/utils/src/main/java/org/apache/cloudstack/utils/server/ServerPropertiesUtil.java +++ 
b/utils/src/main/java/org/apache/cloudstack/utils/server/ServerPropertiesUtil.java @@ -30,8 +30,11 @@ public class ServerPropertiesUtil { private static final Logger logger = LoggerFactory.getLogger(ServerPropertiesUtil.class); + protected static final String PROPERTIES_FILE = "server.properties"; protected static final AtomicReference propertiesRef = new AtomicReference<>(); + protected static final String KEYSTORE_FILE = "https.keystore"; + protected static final String KEYSTORE_PASSWORD = "https.keystore.password"; public static String getProperty(String name) { Properties props = propertiesRef.get(); @@ -55,4 +58,12 @@ public static String getProperty(String name) { } return tempProps.getProperty(name); } + + public static String getKeystoreFile() { + return getProperty(KEYSTORE_FILE); + } + + public static String getKeystorePassword() { + return getProperty(KEYSTORE_PASSWORD); + } }