diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 000000000000..18b43dea030c
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,384 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+image: khos2ow/ci-cd-tools:latest
+
+# Define CI stages
+stages:
+ - test
+ - archive
+ - integration
+ - deploy
+
+# Global Variables
+variables:
+ GIT_DEPTH: "40"
+ MAVEN_OPTS: '-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=INFO
+ -Dorg.slf4j.simpleLogger.showDateTime=true
+ -Djava.awt.headless=true
+ -Dmaven.repo.local=/root/.m2/repository'
+
+.load_rpm_swift_function: &LOAD_REPO_SWIFT |
+ function load_swift() {
+ export OS_USERNAME=cloudops-pdion
+ export OS_TENANT_NAME=cloudops-jenkins-swift
+ export OS_PASSWORD="${SWIFT_OS_PASSWORD}" # set as a masked CI/CD variable; never commit credentials
+ export OS_AUTH_URL=https://auth-qc.cloud.ca/v2.0
+ export OS_REGION_NAME=east
+ }
+
+.load_template_swift_function: &LOAD_TEMPLATE_SWIFT |
+ function load_swift() {
+ export OS_USERNAME=cloudops-pdion
+ export OS_TENANT_NAME=cloudops-jenkins
+ export OS_PASSWORD="${SWIFT_OS_PASSWORD}" # set as a masked CI/CD variable; never commit credentials
+ export OS_AUTH_URL=https://auth-qc.cloud.ca/v2.0
+ export OS_REGION_NAME=east
+ }
+
+.yum_repo_path_function: &YUM_REPO_PATH |
+ function yum_repo_path() {
+ if [ "${CI_PROJECT_NAMESPACE}" = "dev" ]; then
+ if [ -n "${CI_COMMIT_TAG}" ]; then
+ local version=$(cd ${CI_PROJECT_DIR}; mvn -q -Dexec.executable="echo" -Dexec.args='${project.version}' --non-recursive org.codehaus.mojo:exec-maven-plugin:1.3.1:exec)
+ local major_version=`echo ${version} | cut -d. -f1`.`echo ${version} | cut -d. -f2`
+ local os_target="centos7"
+
+ case "$version" in
+ *"-SNAPSHOT") local stable_prefix="unstable" ;;
+ *) local stable_prefix="stable" ;;
+ esac
+
+ echo "${stable_prefix}/${major_version}/${os_target}"
+ elif [ "${CI_COMMIT_REF_NAME}" = "cca_4.12" -o "${CI_COMMIT_REF_NAME}" = "cca_4.11" -o "${CI_COMMIT_REF_NAME}" = "cca_4.10" ]; then
+ local version=$(cd ${CI_PROJECT_DIR}; mvn -q -Dexec.executable="echo" -Dexec.args='${project.version}' --non-recursive org.codehaus.mojo:exec-maven-plugin:1.3.1:exec)
+ local major_version=`echo ${version} | cut -d. -f1`.`echo ${version} | cut -d. -f2`
+ local os_target="centos7"
+
+ case "$version" in
+ *"-SNAPSHOT") local stable_prefix="unstable" ;;
+ *) local stable_prefix="stable" ;;
+ esac
+
+ echo "${stable_prefix}/${major_version}/${os_target}"
+ else
+ echo "dev/${CI_COMMIT_REF_NAME}"
+ fi
+ else
+ echo "dev/${CI_COMMIT_REF_NAME}"
+ fi
+ }
+
+.template_path_function: &TEMPLATE_PATH |
+ function template_path() {
+ if [ "${CI_PROJECT_NAMESPACE}" = "dev" ]; then
+ if [ "${CI_COMMIT_REF_NAME}" = "cca_4.12" -o "${CI_COMMIT_REF_NAME}" = "cca_4.11" -o "${CI_COMMIT_REF_NAME}" = "cca_4.10" ]; then
+ local version=$(cd ${CI_PROJECT_DIR}; mvn -q -Dexec.executable="echo" -Dexec.args='${project.version}' --non-recursive org.codehaus.mojo:exec-maven-plugin:1.3.1:exec)
+ local major_version=`echo ${version} | cut -d. -f1`.`echo ${version} | cut -d. -f2`
+
+ case "$version" in
+ *"-SNAPSHOT") local stable_prefix="unstable" ;;
+ *) local stable_prefix="stable" ;;
+ esac
+
+ echo "${stable_prefix}/${major_version}"
+ else
+ echo "dev/${CI_COMMIT_REF_NAME}"
+ fi
+ else
+ echo "dev/${CI_COMMIT_REF_NAME}"
+ fi
+ }
+
+# RAT checks
+Audit:
+ image:
+ name: khos2ow/cloudstack-rpm-builder:centos7
+ entrypoint: ["/bin/bash", "-l", "-c"]
+ stage: test
+ before_script:
+ - environment-info.sh
+ script:
+ - mvn --activate-profiles developer,systemvm -Dsimulator --projects='org.apache.cloudstack:cloudstack' clean org.apache.rat:apache-rat-plugin:0.12:check
+ artifacts:
+ name: "audit_report_${CI_COMMIT_REF_SLUG}"
+ paths:
+ - "target/rat.txt"
+ when: on_failure
+ expire_in: 1 day
+ only:
+ - branches
+ tags:
+ - java
+
+# Archive RPMs to object storage
+RPM:
+ image:
+ name: khos2ow/cloudstack-rpm-builder:centos7
+ entrypoint: ["/bin/bash", "-l", "-c"]
+ stage: archive
+ before_script:
+ - environment-info.sh
+ - pip3 install python-swiftclient
+ - pip3 install python-keystoneclient
+ - *LOAD_REPO_SWIFT
+ - *YUM_REPO_PATH
+ script:
+ # workaround for SSLHandshakeException issue, https://github.com/apache/cloudstack/issues/2682#issuecomment-392973706
+ - cat ${CI_PROJECT_DIR}/client/conf/java.security.ciphers.in >> /usr/lib/jvm/java-1.8.0-openjdk/jre/lib/security/java.security
+
+ # download required vhd-util file, if it doesn't exist
+ - wget https://download.cloudstack.org/tools/vhd-util --directory-prefix=${CI_PROJECT_DIR}/scripts/vm/hypervisor/xenserver
+
+ # do the packaging and creating RPMs
+ - /usr/local/bin/docker-entrypoint.sh --workspace-path ${CI_PROJECT_DIR} --distribution centos7 --use-timestamp
+
+ # upload RPMs to swift object storage
+ - |
+ load_swift
+
+ swift_path=`yum_repo_path`
+ namespace="cloudstack"
+
+ # upload to swift
+ swift post ${namespace} -r '.r:*,.rlistings'
+ swift upload ${namespace} --object-name ${swift_path} ${CI_PROJECT_DIR}/dist/rpmbuild/RPMS
+
+ mkdir -p ${CI_PROJECT_DIR}/target/rpms
+
+ # fix repo metadata
+ if [ -n "${swift_path}" ]; then
+ swift download ${namespace} --prefix ${swift_path} --output-dir=${CI_PROJECT_DIR}/target/rpms
+ createrepo --update ${CI_PROJECT_DIR}/target/rpms/${swift_path}
+ swift delete ${namespace} --prefix ${swift_path}/repodata
+ swift upload ${namespace} --object-name ${swift_path}/repodata/ ${CI_PROJECT_DIR}/target/rpms/${swift_path}/repodata/
+ fi
+ artifacts:
+ name: "test_report_${CI_COMMIT_REF_SLUG}"
+ paths:
+ - "*/target/surefire-reports"
+ - "*/*/target/surefire-reports"
+ - "*/*/*/target/surefire-reports"
+ - "*/*/*/*/target/surefire-reports"
+
+ - "*/target/checkstyle-result.xml"
+ - "*/*/target/checkstyle-result.xml"
+ - "*/*/*/target/checkstyle-result.xml"
+ - "*/*/*/*/target/checkstyle-result.xml"
+ when: on_failure
+ expire_in: 1 day
+ only:
+ - branches
+ tags:
+ - java
+
+# Archive SystemVM Template to object storage
+SysVM Template:
+ stage: archive
+ when: manual
+ variables:
+ MAVEN_OPTS: '-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=INFO
+ -Dorg.slf4j.simpleLogger.showDateTime=true
+ -Djava.awt.headless=true
+ -Dmaven.repo.local=~/maven-repo/repository'
+ before_script:
+ - *LOAD_TEMPLATE_SWIFT
+ - *TEMPLATE_PATH
+ script:
+ - |
+ export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
+
+ # get the version before going any further
+ VERSION=$(mvn -q -Dexec.executable="echo" -Dexec.args='${project.version}' --non-recursive org.codehaus.mojo:exec-maven-plugin:1.3.1:exec)
+ TIMESTAMP=$(date +%s)
+
+ # clear out BUILD_NUMBER to not be shown in system vm template file name
+ BUILD_NUMBER=""
+ MAJOR_VERSION=`echo ${VERSION} | cut -d. -f1`.`echo ${VERSION} | cut -d. -f2`
+
+ cd ${CI_PROJECT_DIR}/tools/appliance
+
+ build_appliance="systemvmtemplate"
+ build_version=$(echo $VERSION | sed 's/\-SNAPSHOT/\-'${TIMESTAMP}'/g')
+ build_branch=
+
+ chmod +x build.sh
+ ./build.sh "$build_appliance" "$build_version" "$build_branch"
+
+ # upload templates to swift object storage
+ - |
+ load_swift
+
+ swift_path=`template_path`
+ namespace="systemvm"
+
+ # move file to end destination
+ mkdir -p ${CI_PROJECT_DIR}/target/templates
+ mv ${CI_PROJECT_DIR}/tools/appliance/dist/*-xen.vhd.bz2 ${CI_PROJECT_DIR}/target/templates
+ mv ${CI_PROJECT_DIR}/tools/appliance/dist/*-kvm.qcow2.bz2 ${CI_PROJECT_DIR}/target/templates
+
+ # create md5sum file
+ full_name=`basename ${CI_PROJECT_DIR}/target/templates/*.qcow2.bz2 | sed 's/\-kvm\.qcow2\.bz2//g'`
+ partial_name=$(echo ${full_name} | sed 's/systemvmtemplate\-//g' | sed 's/systemvm64template\-//g')
+ md5sum=$(cd ${CI_PROJECT_DIR}/target/templates; md5sum * > ${full_name}.md5sum; cat ${full_name}.md5sum)
+
+ # upload to swift
+ swift post ${namespace} -r '.r:*,.rlistings'
+ swift upload ${namespace} --object-name ${swift_path} ${CI_PROJECT_DIR}/target/templates | xargs -n1 swift stat -v ${namespace} | grep URL | sort | sed 's/URL\://g' | tr -d ' '
+ after_script:
+ - git clean -fdx
+ only:
+ - branches
+ tags:
+ - systemvm
+
+# Run integration tests against live build
+Integration Test:
+ stage: integration
+ when: manual
+ before_script:
+ - environment-info.sh
+ script:
+ - echo "TODO"
+ only:
+ - branches
+ except:
+ - master
+ - cca_4.10
+ - cca_4.11
+ - cca_4.12
+
+.deploy: &DEPLOY
+ stage: deploy
+ when: manual
+ before_script:
+ - environment-info.sh
+ - *YUM_REPO_PATH
+ script:
+ - |
+ mkdir -p ~/.ssh
+
+ echo "${SSH_PRIVATE_KEY}" > ~/.ssh/id_rsa
+ chmod 700 ~/.ssh/id_rsa
+
+ ssh-keyscan ${LAB_ENV_IP} >> ~/.ssh/known_hosts
+ chmod 644 ~/.ssh/known_hosts
+
+ export REPO_BASE="https://objects-east.cloud.ca/v1/a8286006ae394ede8bc081f586ae048d/cloudstack/"
+ export REPO_PATH="`yum_repo_path`"
+
+ ssh -T ${SSH_USER}@${LAB_ENV_IP} << EOF
+ sudo su -
+
+ set -e
+
+ # stop services
+ echo -e "stopping chef-client...\n"
+ systemctl stop chef-client
+
+ echo -e "stopping cloudstack-usage...\n"
+ systemctl stop cloudstack-usage
+
+ echo -e "stopping cloudstack-management...\n"
+ systemctl stop cloudstack-management
+
+ # update yum repo
+ echo -e "updating /etc/yum.repos.d/cloudstack.repo...\n"
+
+ sed -i "s|^baseurl=${REPO_BASE}.*|baseurl=${REPO_BASE}${REPO_PATH}|gI" /etc/yum.repos.d/cloudstack.repo
+ sed -i "s/^enabled=0/enabled=1/gI" /etc/yum.repos.d/cloudstack.repo
+
+ cat /etc/yum.repos.d/cloudstack.repo
+ echo ""
+
+ # upgrade cloudstack
+ echo -e "updating cloudstack rpm...\n"
+ yum clean metadata
+ yum upgrade --assumeyes cloudstack-*
+
+ # start services
+ echo -e "starting cloudstack-management...\n"
+ systemctl start cloudstack-management
+
+ echo -e "starting cloudstack-usage...\n"
+ systemctl start cloudstack-usage
+
+ echo -e "starting chef-client...\n"
+ systemctl start chef-client
+ EOF
+ environment:
+ name: ${LAB_ENV_NAME}
+
+ccd-r1-acs1-acs01:
+ <<: *DEPLOY
+ variables:
+ LAB_ENV_NAME: "ccd-r1-acs1-acs01"
+ LAB_ENV_IP: "10.218.184.158"
+
+ccd-r1-acs2-acs01:
+ <<: *DEPLOY
+ variables:
+ LAB_ENV_NAME: "ccd-r1-acs2-acs01"
+ LAB_ENV_IP: "10.218.184.40"
+
+ccd-r1-bm1-acs01:
+ <<: *DEPLOY
+ variables:
+ LAB_ENV_NAME: "ccd-r1-bm1-acs01"
+ LAB_ENV_IP: "10.218.184.74"
+
+ccd-r1-dev1-acs01:
+ <<: *DEPLOY
+ variables:
+ LAB_ENV_NAME: "ccd-r1-dev1-acs01"
+ LAB_ENV_IP: "10.218.184.249"
+
+ccd-r1-dev2-acs01:
+ <<: *DEPLOY
+ variables:
+ LAB_ENV_NAME: "ccd-r1-dev2-acs01"
+ LAB_ENV_IP: "10.218.184.116"
+
+ccd-r1-stg-acs01:
+ <<: *DEPLOY
+ variables:
+ LAB_ENV_NAME: "ccd-r1-stg-acs01"
+ LAB_ENV_IP: "10.218.184.166"
+
+ccd-r1-stg-acs02:
+ <<: *DEPLOY
+ variables:
+ LAB_ENV_NAME: "ccd-r1-stg-acs02"
+ LAB_ENV_IP: "10.218.184.207"
+
+cca-r1-beta02-mtg02:
+ <<: *DEPLOY
+ variables:
+ LAB_ENV_NAME: "cca-r1-beta02-mtg02"
+ LAB_ENV_IP: "172.27.2.119"
+
+ccd-r1-acs3-acs01:
+ <<: *DEPLOY
+ variables:
+ LAB_ENV_NAME: "ccd-r1-acs3-acs01"
+ LAB_ENV_IP: "10.218.184.156"
+
+ccd-r1-acs4:
+ <<: *DEPLOY
+ variables:
+ LAB_ENV_NAME: "ccd-r1-acs4-acs01"
+ LAB_ENV_IP: "10.218.184.142"
diff --git a/api/pom.xml b/api/pom.xml
index 9e4b646eec67..b1879ead164e 100644
--- a/api/pom.xml
+++ b/api/pom.xml
@@ -66,6 +66,10 @@
cloud-framework-direct-download
${project.version}
+
+ com.bettercloud
+ vault-java-driver
+
diff --git a/api/src/main/java/com/cloud/agent/api/to/DataStoreTO.java b/api/src/main/java/com/cloud/agent/api/to/DataStoreTO.java
index 1c901a647cbc..7be8d3770aae 100644
--- a/api/src/main/java/com/cloud/agent/api/to/DataStoreTO.java
+++ b/api/src/main/java/com/cloud/agent/api/to/DataStoreTO.java
@@ -28,4 +28,8 @@ public interface DataStoreTO {
String getUrl();
String getPathSeparator();
+
+ default boolean isPartialBackupCapable() {
+ return true;
+ }
}
diff --git a/api/src/main/java/com/cloud/agent/api/to/DataTO.java b/api/src/main/java/com/cloud/agent/api/to/DataTO.java
index af43aa102e84..94fc89da004e 100644
--- a/api/src/main/java/com/cloud/agent/api/to/DataTO.java
+++ b/api/src/main/java/com/cloud/agent/api/to/DataTO.java
@@ -32,5 +32,7 @@ public interface DataTO {
*/
String getPath();
+ String getName();
+
long getId();
}
diff --git a/api/src/main/java/com/cloud/agent/api/to/S3TO.java b/api/src/main/java/com/cloud/agent/api/to/S3TO.java
index 233238cf793d..e3c3d40c06ce 100644
--- a/api/src/main/java/com/cloud/agent/api/to/S3TO.java
+++ b/api/src/main/java/com/cloud/agent/api/to/S3TO.java
@@ -239,6 +239,11 @@ public String getPathSeparator() {
return pathSeparator;
}
+ @Override
+ public boolean isPartialBackupCapable() {
+ return false;
+ }
+
@Override
public boolean equals(final Object thatObject) {
diff --git a/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java b/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java
index 8f58c9e1c917..ec33b9763827 100644
--- a/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java
+++ b/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java
@@ -20,6 +20,7 @@
import com.cloud.storage.StoragePool;
public class StorageFilerTO {
+ boolean isManaged;
long id;
String uuid;
String host;
@@ -36,6 +37,7 @@ public StorageFilerTO(StoragePool pool) {
this.type = pool.getPoolType();
this.uuid = pool.getUuid();
this.userInfo = pool.getUserInfo();
+ this.isManaged = pool.isManaged();
}
public long getId() {
@@ -66,6 +68,10 @@ public StoragePoolType getType() {
return type;
}
+ public boolean isManaged(){
+ return isManaged;
+ }
+
protected StorageFilerTO() {
}
diff --git a/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java b/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java
index b89dfea40e0c..e4aa8f276212 100644
--- a/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java
+++ b/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java
@@ -89,4 +89,9 @@ public String getUuid() {
public String getPathSeparator() {
return pathSeparator;
}
+
+ @Override
+ public boolean isPartialBackupCapable() {
+ return false;
+ }
}
diff --git a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java
index 5fc248343ecc..0f263de1a9f0 100644
--- a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java
+++ b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java
@@ -16,11 +16,12 @@
// under the License.
package com.cloud.agent.api.to;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.HashMap;
import com.cloud.network.element.NetworkElement;
+import com.cloud.storage.Storage;
import com.cloud.template.VirtualMachineTemplate.BootloaderType;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.Type;
@@ -82,6 +83,8 @@ public class VirtualMachineTO {
Map extraConfig = new HashMap<>();
DeployAsIsInfoTO deployAsIsInfo;
+ Storage.ImageFormat format;
+
public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer speed, long minRam, long maxRam, BootloaderType bootloader,
String os, boolean enableHA, boolean limitCpuUse, String vncPassword) {
this.id = id;
@@ -422,4 +425,12 @@ public void setDeployAsIsInfo(DeployAsIsInfoTO deployAsIsInfo) {
public String toString() {
return String.format("VM {id: \"%s\", name: \"%s\", uuid: \"%s\", type: \"%s\"}", id, name, uuid, type);
}
+
+ public Storage.ImageFormat getFormat() {
+ return format;
+ }
+
+ public void setFormat(Storage.ImageFormat format) {
+ this.format = format;
+ }
}
diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java
index 289276fe663e..a8b5fbb371d7 100644
--- a/api/src/main/java/com/cloud/event/EventTypes.java
+++ b/api/src/main/java/com/cloud/event/EventTypes.java
@@ -501,6 +501,9 @@ public class EventTypes {
public static final String EVENT_VPC_OFFERING_UPDATE = "VPC.OFFERING.UPDATE";
public static final String EVENT_VPC_OFFERING_DELETE = "VPC.OFFERING.DELETE";
+ // VPC source NAT
+ public static final String EVENT_VPC_SOURCE_NAT_UPDATE = "VPC.SOURCE.NAT.UPDATE";
+
// Private gateway
public static final String EVENT_PRIVATE_GATEWAY_CREATE = "PRIVATE.GATEWAY.CREATE";
public static final String EVENT_PRIVATE_GATEWAY_DELETE = "PRIVATE.GATEWAY.DELETE";
diff --git a/api/src/main/java/com/cloud/exception/RemoteAccessVpnException.java b/api/src/main/java/com/cloud/exception/RemoteAccessVpnException.java
new file mode 100644
index 000000000000..93a34d8ce3a8
--- /dev/null
+++ b/api/src/main/java/com/cloud/exception/RemoteAccessVpnException.java
@@ -0,0 +1,28 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.exception;
+
+/**
+ * @since 4.10.0.228-cca
+ */
+public class RemoteAccessVpnException extends ManagementServerException {
+ private static final long serialVersionUID = -5851224796385227880L;
+
+ public RemoteAccessVpnException(String message) {
+ super(message);
+ }
+}
diff --git a/api/src/main/java/com/cloud/gpu/GPU.java b/api/src/main/java/com/cloud/gpu/GPU.java
index 8aa54c0c0f87..e36d62ddbf92 100644
--- a/api/src/main/java/com/cloud/gpu/GPU.java
+++ b/api/src/main/java/com/cloud/gpu/GPU.java
@@ -32,6 +32,34 @@ public enum GPUType {
GRID_K220Q("GRID K220Q"),
GRID_K240Q("GRID K240Q"),
GRID_K260("GRID K260Q"),
+ GRID_V100D_32A("GRID V100D-32A"),
+ GRID_V100D_8Q("GRID V100D-8Q"),
+ GRID_V100D_4A("GRID V100D-4A"),
+ GRID_V100D_1B("GRID V100D-1B"),
+ GRID_V100D_2Q("GRID V100D-2Q"),
+ GRID_V100D_4Q("GRID V100D-4Q"),
+ GRID_V100D_2A("GRID V100D-2A"),
+ GRID_V100D_2B("GRID V100D-2B"),
+ GRID_V100D_32Q("GRID V100D-32Q"),
+ GRID_V100D_16A("GRID V100D-16A"),
+ GRID_V100D_1Q("GRID V100D-1Q"),
+ GRID_V100D_2B4("GRID V100D-2B4"),
+ GRID_V100D_16Q("GRID V100D-16Q"),
+ GRID_V100D_8A("GRID V100D-8A"),
+ GRID_V100D_1A("GRID V100D-1A"),
+ GRID_T4_16A("GRID T4-16A"),
+ GRID_T4_2B4("GRID T4-2B4"),
+ GRID_T4_4Q("GRID T4-4Q"),
+ GRID_T4_16Q("GRID T4-16Q"),
+ GRID_T4_4A("GRID T4-4A"),
+ GRID_T4_1A("GRID T4-1A"),
+ GRID_T4_2Q("GRID T4-2Q"),
+ GRID_T4_2B("GRID T4-2B"),
+ GRID_T4_8Q("GRID T4-8Q"),
+ GRID_T4_2A("GRID T4-2A"),
+ GRID_T4_1B("GRID T4-1B"),
+ GRID_T4_1Q("GRID T4-1Q"),
+ GRID_T4_8A("GRID T4-8A"),
passthrough("passthrough");
private String type;
diff --git a/api/src/main/java/com/cloud/network/RemoteAccessVpn.java b/api/src/main/java/com/cloud/network/RemoteAccessVpn.java
index 25b4fbbcdeba..52257e58bbea 100644
--- a/api/src/main/java/com/cloud/network/RemoteAccessVpn.java
+++ b/api/src/main/java/com/cloud/network/RemoteAccessVpn.java
@@ -32,6 +32,8 @@ enum State {
String getIpsecPresharedKey();
+ String getCaCertificate();
+
String getLocalIp();
Long getNetworkId();
@@ -42,4 +44,6 @@ enum State {
@Override
boolean isDisplay();
+
+ String getVpnType();
}
diff --git a/api/src/main/java/com/cloud/network/vpc/Vpc.java b/api/src/main/java/com/cloud/network/vpc/Vpc.java
index 432c8839ad89..985046cb37fe 100644
--- a/api/src/main/java/com/cloud/network/vpc/Vpc.java
+++ b/api/src/main/java/com/cloud/network/vpc/Vpc.java
@@ -16,12 +16,12 @@
// under the License.
package com.cloud.network.vpc;
+import java.util.Date;
+
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
-import java.util.Date;
-
public interface Vpc extends ControlledEntity, Identity, InternalIdentity {
public enum State {
@@ -56,6 +56,12 @@ public enum State {
*/
long getVpcOfferingId();
+ /**
+ *
+ * @return Network boot ip
+ */
+ String getNetworkBootIp();
+
/**
*
* @return VPC display text
@@ -95,4 +101,6 @@ public enum State {
void setRollingRestart(boolean rollingRestart);
Date getCreated();
+
+ void setNetworkBootIp(String networkBootIp);
}
diff --git a/api/src/main/java/com/cloud/network/vpc/VpcService.java b/api/src/main/java/com/cloud/network/vpc/VpcService.java
index 088239708f19..45d19cd2c9d9 100644
--- a/api/src/main/java/com/cloud/network/vpc/VpcService.java
+++ b/api/src/main/java/com/cloud/network/vpc/VpcService.java
@@ -50,7 +50,7 @@ public interface VpcService {
* @return
* @throws ResourceAllocationException TODO
*/
- public Vpc createVpc(long zoneId, long vpcOffId, long vpcOwnerId, String vpcName, String displayText, String cidr, String networkDomain, Boolean displayVpc)
+ public Vpc createVpc(long zoneId, long vpcOffId, long vpcOwnerId, String vpcName, String displayText, String cidr, String networkDomain, Boolean displayVpc, String networkBootIp)
throws ResourceAllocationException;
/**
@@ -74,7 +74,7 @@ public Vpc createVpc(long zoneId, long vpcOffId, long vpcOwnerId, String vpcName
* @param displayVpc TODO
* @return
*/
- public Vpc updateVpc(long vpcId, String vpcName, String displayText, String customId, Boolean displayVpc);
+ public Vpc updateVpc(long vpcId, String vpcName, String displayText, String customId, Boolean displayVpc, String networkBootip);
/**
* Lists VPC(s) based on the parameters passed to the method call
@@ -97,12 +97,12 @@ public Vpc createVpc(long zoneId, long vpcOffId, long vpcOwnerId, String vpcName
* @param tags TODO
* @param projectId TODO
* @param display TODO
- * @param vpc
+ * @param networkBootIp
* @return
*/
public Pair, Integer> listVpcs(Long id, String vpcName, String displayText, List supportedServicesStr, String cidr, Long vpcOffId, String state,
String accountName, Long domainId, String keyword, Long startIndex, Long pageSizeVal, Long zoneId, Boolean isRecursive, Boolean listAll, Boolean restartRequired,
- Map tags, Long projectId, Boolean display);
+ Map tags, Long projectId, Boolean display, String networkBootIp);
/**
* Starts VPC which includes starting VPC provider and applying all the neworking rules on the backend
@@ -132,12 +132,14 @@ public Pair, Integer> listVpcs(Long id, String vpcName, Stri
* @param id
* @param cleanUp
* @param makeredundant
+ * @param migrateVpn
* @return
* @throws InsufficientCapacityException
*/
+
boolean restartVpc(RestartVPCCmd cmd) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException;
- boolean restartVpc(Long networkId, boolean cleanup, boolean makeRedundant, boolean livePatch, User user) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException;
+ boolean restartVpc(Long networkId, boolean cleanup, boolean makeRedundant, boolean livePatch, User user, boolean migrateVpn) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException;
/**
* Returns a Private gateway found in the VPC by id
@@ -260,4 +262,11 @@ IpAddress associateIPToVpc(long ipId, long vpcId) throws ResourceAllocationExcep
*/
public boolean applyStaticRoute(long routeId) throws ResourceUnavailableException;
+ /**
+ *
+ * @param vpcId
+ * @return
+ * @throws InsufficientAddressCapacityException
+ */
+ boolean updateVpcSourceNAT(final long vpcId) throws InsufficientCapacityException, ResourceUnavailableException;
}
diff --git a/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java b/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java
index bbb9771d27aa..5152b348c056 100644
--- a/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java
+++ b/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java
@@ -22,6 +22,7 @@
import org.apache.cloudstack.api.command.user.vpn.ListVpnUsersCmd;
import com.cloud.exception.NetworkRuleConflictException;
+import com.cloud.exception.RemoteAccessVpnException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.network.RemoteAccessVpn;
import com.cloud.network.VpnUser;
@@ -29,9 +30,15 @@
import com.cloud.utils.Pair;
public interface RemoteAccessVpnService {
- static final String RemoteAccessVpnClientIpRangeCK = "remote.access.vpn.client.iprange";
+ enum Type {
+ L2TP, IKEV2
+ }
- RemoteAccessVpn createRemoteAccessVpn(long vpnServerAddressId, String ipRange, boolean openFirewall, Boolean forDisplay) throws NetworkRuleConflictException;
+ String RemoteAccessVpnTypeConfigKey = "remote.access.vpn.type";
+ String RemoteAccessVpnClientIpRangeCK = "remote.access.vpn.client.iprange";
+
+ RemoteAccessVpn createRemoteAccessVpn(long vpnServerAddressId, String ipRange, boolean openFirewall, Boolean forDisplay)
+ throws NetworkRuleConflictException, RemoteAccessVpnException;
boolean destroyRemoteAccessVpnForIp(long ipId, Account caller, boolean forceCleanup) throws ResourceUnavailableException;
@@ -59,4 +66,6 @@ public interface RemoteAccessVpnService {
RemoteAccessVpn updateRemoteAccessVpn(long id, String customId, Boolean forDisplay);
+ boolean migrateRemoteAccessVpn(long accountId, long vpcId);
+
}
diff --git a/api/src/main/java/com/cloud/offering/DiskOffering.java b/api/src/main/java/com/cloud/offering/DiskOffering.java
index 8f2a0c9f761c..a1b8a133e627 100644
--- a/api/src/main/java/com/cloud/offering/DiskOffering.java
+++ b/api/src/main/java/com/cloud/offering/DiskOffering.java
@@ -16,13 +16,12 @@
// under the License.
package com.cloud.offering;
-import java.util.Date;
-
+import com.cloud.storage.Storage.ProvisioningType;
import org.apache.cloudstack.acl.InfrastructureEntity;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
-import com.cloud.storage.Storage.ProvisioningType;
+import java.util.Date;
/**
* Represents a disk offering that specifies what the end user needs in
@@ -138,6 +137,22 @@ public String toString() {
Long getIopsWriteRateMaxLength();
+ Long getMinIopsPerGb();
+
+ void setMinIopsPerGb(Long minIopsPerGB);
+
+ Long getMaxIopsPerGb();
+
+ void setMaxIopsPerGb(Long maxIopsPerGB);
+
+ Long getHighestMinIops();
+
+ void setHighestMinIops(Long highestMinIops);
+
+ Long getHighestMaxIops();
+
+ void setHighestMaxIops(Long highestMaxIops);
+
void setHypervisorSnapshotReserve(Integer hypervisorSnapshotReserve);
Integer getHypervisorSnapshotReserve();
diff --git a/api/src/main/java/com/cloud/storage/Storage.java b/api/src/main/java/com/cloud/storage/Storage.java
index 300944559d62..fd9b9f737550 100644
--- a/api/src/main/java/com/cloud/storage/Storage.java
+++ b/api/src/main/java/com/cloud/storage/Storage.java
@@ -34,7 +34,8 @@ public static enum ImageFormat {
VDI(true, true, false, "vdi"),
TAR(false, false, false, "tar"),
ZIP(false, false, false, "zip"),
- DIR(false, false, false, "dir");
+ DIR(false, false, false, "dir"),
+ PXEBOOT(false, false, false, "PXEBOOT");
private final boolean supportThinProvisioning;
private final boolean supportSparse;
diff --git a/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java b/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java
index 95d1ebf0b87a..d7b3d6428ea5 100644
--- a/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java
+++ b/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java
@@ -97,6 +97,8 @@ public enum TemplateFilter {
boolean isRequiresHvm();
+ String getBootFilename();
+
String getDisplayText();
boolean isEnablePassword();
diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
index 55002f70b1b2..3b8b0b2205fd 100644
--- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
+++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
@@ -45,6 +45,7 @@ public class ApiConstants {
public static final String BASE64_IMAGE = "base64image";
public static final String BITS = "bits";
public static final String BOOTABLE = "bootable";
+ public static final String BOOT_FILENAME = "bootfilename";
public static final String BIND_DN = "binddn";
public static final String BIND_PASSWORD = "bindpass";
public static final String BYTES_READ_RATE = "bytesreadrate";
@@ -668,6 +669,7 @@ public class ApiConstants {
public static final String REGION_ID = "regionid";
public static final String VPC_OFF_ID = "vpcofferingid";
public static final String VPC_OFF_NAME = "vpcofferingname";
+ public static final String NETWORK_BOOT_IP = "networkbootip";
public static final String NETWORK = "network";
public static final String VPC_ID = "vpcid";
public static final String VPC_NAME = "vpcname";
@@ -679,6 +681,7 @@ public class ApiConstants {
public static final String S2S_VPN_GATEWAY_ID = "s2svpngatewayid";
public static final String S2S_CUSTOMER_GATEWAY_ID = "s2scustomergatewayid";
public static final String IPSEC_PSK = "ipsecpsk";
+ public static final String MIGRATE_VPN = "migratevpn";
public static final String GUEST_IP = "guestip";
public static final String REMOVED = "removed";
public static final String COMPLETED = "completed";
@@ -742,6 +745,19 @@ public class ApiConstants {
public static final String AUTOSCALE_USER_ID = "autoscaleuserid";
public static final String BAREMETAL_DISCOVER_NAME = "baremetaldiscovername";
public static final String BAREMETAL_RCT_URL = "baremetalrcturl";
+ public static final String BAREMETAL_MAAS = "baremetalmaas";
+ public static final String BAREMETAL_MAAS_ACTION = "baremetalmaasaction";
+ public static final String BAREMETAL_MAAS_ACTION_CREATE = "baremetalmaascreate";
+ public static final String BAREMETAL_MAAS_ACTION_IMPORT = "baremetalmaasimport";
+ public static final String BAREMETAL_MAAS_HOST = "baremetalmaashost";
+ public static final String BAREMETAL_MAAS_KEY = "baremetalmaaskey";
+ public static final String BAREMETAL_MAAS_POOL = "baremetalmaaspool";
+ public static final String BAREMETAL_MAAS_NODE_ID = "baremetalmaasnodeid";
+ public static final String BAREMETAL_MAAS_OFFERING_ID = "offeringid";
+ public static final String BAREMETAL_MAAS_OFFERING_NAME = "offeringname";
+ public static final String BAREMETAL_MAAS_AVIALBALE_COUNT = "available";
+ public static final String BAREMETAL_MAAS_TOTAL_COUNT = "total";
+ public static final String BAREMETAL_MAAS_ERASING_COUNT = "erasing";
public static final String UCS_DN = "ucsdn";
public static final String GSLB_PROVIDER = "gslbprovider";
public static final String EXCLUSIVE_GSLB_PROVIDER = "isexclusivegslbprovider";
@@ -835,6 +851,10 @@ public class ApiConstants {
public static final String NETSCALER_CONTROLCENTER_ID = "netscalercontrolcenterid";
public static final String NETSCALER_SERVICEPACKAGE_ID = "netscalerservicepackageid";
public static final String FETCH_ROUTER_HEALTH_CHECK_RESULTS = "fetchhealthcheckresults";
+ public static final String MIN_IOPS_PER_GB = "miniopspergb";
+ public static final String MAX_IOPS_PER_GB = "maxiopspergb";
+ public static final String HIGHEST_MIN_IOPS = "highestminiops";
+ public static final String HIGHEST_MAX_IOPS = "highestmaxiops";
public static final String ZONE_ID_LIST = "zoneids";
public static final String DESTINATION_ZONE_ID_LIST = "destzoneids";
diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java
index 08f390f19724..85394a24bd25 100644
--- a/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java
@@ -79,6 +79,9 @@ public abstract class BaseUpdateTemplateOrIsoCmd extends BaseCmd {
description = "optional boolean field, which indicates if details should be cleaned up or not (if set to true, details removed for this resource, details field ignored; if false or not set, no action)")
private Boolean cleanupDetails;
+ @Parameter(name = ApiConstants.BOOT_FILENAME, type = CommandType.STRING, description = "PXE boot filename on the TFTP server.")
+ private String bootFilename;
+
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@@ -143,4 +146,6 @@ public Map getDetails() {
public boolean isCleanupDetails(){
return cleanupDetails == null ? false : cleanupDetails.booleanValue();
}
+
+ public String getBootFilename() { return bootFilename; }
}
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java
index e258d72ca381..782088df3b60 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java
@@ -111,10 +111,22 @@ public class AddClusterCmd extends BaseCmd {
@Parameter(name = ApiConstants.OVM3_POOL, type = CommandType.STRING, required = false, description = "Ovm3 native pooling enabled for cluster")
private String ovm3pool;
+
@Parameter(name = ApiConstants.OVM3_CLUSTER, type = CommandType.STRING, required = false, description = "Ovm3 native OCFS2 clustering enabled for cluster")
private String ovm3cluster;
+
@Parameter(name = ApiConstants.OVM3_VIP, type = CommandType.STRING, required = false, description = "Ovm3 vip to use for pool (and cluster)")
private String ovm3vip;
+
+ @Parameter(name = ApiConstants.BAREMETAL_MAAS_HOST, type = CommandType.STRING, required = false, description = "The hostname or IP address of the MaaS server")
+ private String baremetalMaasHost;
+
+ @Parameter(name = ApiConstants.BAREMETAL_MAAS_KEY, type = CommandType.STRING, required = false, description = "Administrator API key to access MaaS server")
+ private String baremetalMaasKey;
+
+ @Parameter(name = ApiConstants.BAREMETAL_MAAS_POOL, type = CommandType.STRING, required = false, description = "Pool name in MaaS server to correspond this cluster with")
+ private String baremetalMaasPool;
+
public String getOvm3Pool() {
return ovm3pool;
}
@@ -207,6 +219,18 @@ public void setAllocationState(String allocationState) {
this.allocationState = allocationState;
}
+ public String getBaremetalMaasHost() {
+ return baremetalMaasHost;
+ }
+
+ public String getBaremetalMaasKey() {
+ return baremetalMaasKey;
+ }
+
+ public String getBaremetalMaasPool() {
+ return baremetalMaasPool;
+ }
+
@Override
public ApiCommandResourceType getApiResourceType() {
return ApiCommandResourceType.Cluster;
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java
index b628ce44f1aa..91f05fd23f2b 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java
@@ -144,6 +144,18 @@ public class CreateDiskOfferingCmd extends BaseCmd {
@Parameter(name = ApiConstants.MAX_IOPS, type = CommandType.LONG, required = false, description = "max iops of the disk offering")
private Long maxIops;
+ @Parameter(name = ApiConstants.MIN_IOPS_PER_GB, type = CommandType.LONG, required = false, description = "IOPS/GB rate for min IOPS. miniops = size * miniopspergb")
+ private Long minIopsPerGb;
+
+ @Parameter(name = ApiConstants.MAX_IOPS_PER_GB, type = CommandType.LONG, required = false, description = "IOPS/GB rate for max IOPS. maxiops = size * maxiopspergb")
+ private Long maxIopsPerGb;
+
+ @Parameter(name = ApiConstants.HIGHEST_MIN_IOPS, type = CommandType.LONG, required = false, description = "Highest Min IOPS value that is allowed for this offering")
+ private Long highestMinIops;
+
+ @Parameter(name = ApiConstants.HIGHEST_MAX_IOPS, type = CommandType.LONG, required = false, description = "Highest Max IOPS value that is allowed for this offering")
+ private Long highestMaxIops;
+
@Parameter(name = ApiConstants.HYPERVISOR_SNAPSHOT_RESERVE,
type = CommandType.INTEGER,
required = false,
@@ -310,6 +322,21 @@ public boolean getDiskSizeStrictness() {
return diskSizeStrictness != null ? diskSizeStrictness : false;
}
+ public Long getMinIopsPerGb() {
+ return minIopsPerGb;
+ }
+
+ public Long getMaxIopsPerGb() {
+ return maxIopsPerGb;
+ }
+
+ public Long getHighestMinIops() {
+ return highestMinIops;
+ }
+
+ public Long getHighestMaxIops() {
+ return highestMaxIops;
+ }
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCSourceNATCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCSourceNATCmd.java
new file mode 100644
index 000000000000..2d3a9db08841
--- /dev/null
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCSourceNATCmd.java
@@ -0,0 +1,88 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.admin.vpc;
+
+import com.cloud.event.EventTypes;
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.ResourceUnavailableException;
+import com.cloud.user.Account;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseAsyncCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.SuccessResponse;
+import org.apache.cloudstack.api.response.VpcResponse;
+import org.apache.log4j.Logger;
+
+@APICommand(name = "updateVPCSourceNAT", description = "Updates VPC Source NAT", responseObject = SuccessResponse.class,
+ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
+public class UpdateVPCSourceNATCmd extends BaseAsyncCmd {
+ public static final Logger s_logger = Logger.getLogger(UpdateVPCSourceNATCmd.class.getName());
+ private static final String s_name = "updatevpcsourcenatresponse";
+
+ /////////////////////////////////////////////////////
+ //////////////// API parameters /////////////////////
+ /////////////////////////////////////////////////////
+
+ @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = VpcResponse.class, required = true, description = "the UUID of the VPC")
+ private long id;
+
+
+ /////////////////////////////////////////////////////
+ /////////////////// Accessors ///////////////////////
+ /////////////////////////////////////////////////////
+
+ public long getId() {
+ return id;
+ }
+
+ /////////////////////////////////////////////////////
+ /////////////// API Implementation///////////////////
+ /////////////////////////////////////////////////////
+ @Override
+ public String getCommandName() {
+ return s_name;
+ }
+
+ @Override
+ public long getEntityOwnerId() {
+ return Account.ACCOUNT_ID_SYSTEM;
+ }
+
+ @Override
+ public void execute() throws InsufficientCapacityException, ResourceUnavailableException {
+ boolean result = _vpcService.updateVpcSourceNAT(getId());
+ if (result) {
+ SuccessResponse response = new SuccessResponse(getCommandName());
+ setResponseObject(response);
+ } else {
+ throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update vpc source nat.");
+ }
+ }
+
+ @Override
+ public String getEventType() {
+ return EventTypes.EVENT_VPC_SOURCE_NAT_UPDATE;
+ }
+
+ @Override
+ public String getEventDescription() {
+ return "Updating VPC Source NAT id=" + getId();
+ }
+}
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java
index 255b11aaa248..ada0ee0634b4 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java
@@ -104,9 +104,12 @@ public class RegisterTemplateCmd extends BaseCmd implements UserCmd {
@Parameter(name = ApiConstants.REQUIRES_HVM, type = CommandType.BOOLEAN, description = "true if this template requires HVM")
private Boolean requiresHvm;
+ @Parameter(name = ApiConstants.BOOT_FILENAME, type = CommandType.STRING, description = "PXE boot filename on the TFTP server.")
+ private String bootFilename;
+
@Parameter(name = ApiConstants.URL,
type = CommandType.STRING,
- required = true,
+ required = false,
length = 2048,
description = "the URL of where the template is hosted. Possible URL include http:// and https://")
private String url;
@@ -219,6 +222,10 @@ public Boolean getRequiresHvm() {
return requiresHvm;
}
+ public String getBootFilename() {
+ return bootFilename;
+ }
+
public String getUrl() {
return url;
}
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java
index 8f6568fbe594..45dcad6a9aef 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java
@@ -83,6 +83,9 @@ public class CreateVPCCmd extends BaseAsyncCreateCmd implements UserCmd {
required = true, description = "the ID of the VPC offering")
private Long vpcOffering;
+ @Parameter(name = ApiConstants.NETWORK_BOOT_IP, type = CommandType.STRING, description = "the network boot ip of the VPC.")
+ private String networkBootIp;
+
@Parameter(name = ApiConstants.NETWORK_DOMAIN, type = CommandType.STRING,
description = "VPC network domain. All networks inside the VPC will belong to this domain")
private String networkDomain;
@@ -127,6 +130,8 @@ public Long getVpcOffering() {
return vpcOffering;
}
+ public String getNetworkBootIp() { return networkBootIp; }
+
public String getNetworkDomain() {
return networkDomain;
}
@@ -144,7 +149,7 @@ public Boolean getDisplayVpc() {
@Override
public void create() throws ResourceAllocationException {
- Vpc vpc = _vpcService.createVpc(getZoneId(), getVpcOffering(), getEntityOwnerId(), getVpcName(), getDisplayText(), getCidr(), getNetworkDomain(), getDisplayVpc());
+ Vpc vpc = _vpcService.createVpc(getZoneId(), getVpcOffering(), getEntityOwnerId(), getVpcName(), getDisplayText(), getCidr(), getNetworkDomain(), getDisplayVpc(), getNetworkBootIp());
if (vpc != null) {
setEntityId(vpc.getId());
setEntityUuid(vpc.getUuid());
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCsCmd.java
index b230603f852f..0f01cb05c2d0 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCsCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCsCmd.java
@@ -19,8 +19,6 @@
import java.util.ArrayList;
import java.util.List;
-import com.cloud.server.ResourceIcon;
-import com.cloud.server.ResourceTag;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
@@ -36,6 +34,8 @@
import org.apache.log4j.Logger;
import com.cloud.network.vpc.Vpc;
+import com.cloud.server.ResourceIcon;
+import com.cloud.server.ResourceTag;
import com.cloud.utils.Pair;
@@ -67,6 +67,9 @@ public class ListVPCsCmd extends BaseListTaggedResourcesCmd implements UserCmd {
@Parameter(name = ApiConstants.VPC_OFF_ID, type = CommandType.UUID, entityType = VpcOfferingResponse.class, description = "list by ID of the VPC offering")
private Long VpcOffId;
+ @Parameter(name = ApiConstants.NETWORK_BOOT_IP, type = CommandType.STRING, description = "the network boot ip of the VPC.")
+ private String networkBootIp;
+
@Parameter(name = ApiConstants.SUPPORTED_SERVICES, type = CommandType.LIST, collectionType = CommandType.STRING, description = "list VPC supporting certain services")
private List supportedServices;
@@ -107,6 +110,8 @@ public Long getVpcOffId() {
return VpcOffId;
}
+ public String getNetworkBootIp() { return networkBootIp; }
+
public Long getId() {
return id;
}
@@ -144,7 +149,7 @@ public void execute() {
Pair, Integer> vpcs =
_vpcService.listVpcs(getId(), getVpcName(), getDisplayText(), getSupportedServices(), getCidr(), getVpcOffId(), getState(), getAccountName(), getDomainId(),
getKeyword(), getStartIndex(), getPageSizeVal(), getZoneId(), isRecursive(), listAll(), getRestartRequired(), getTags(),
- getProjectId(), getDisplay());
+ getProjectId(), getDisplay(), getNetworkBootIp());
ListResponse response = new ListResponse();
List vpcResponses = new ArrayList();
for (Vpc vpc : vpcs.first()) {
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java
index a3fd58aa0704..a9aa3e3c2d1b 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java
@@ -52,6 +52,9 @@ public class RestartVPCCmd extends BaseAsyncCmd {
@Parameter(name = ApiConstants.CLEANUP, type = CommandType.BOOLEAN, required = false, description = "If cleanup old network elements")
private Boolean cleanup = false;
+ @Parameter(name = ApiConstants.MIGRATE_VPN, type = CommandType.BOOLEAN, required = false, description = "If migrate remote access VPN config")
+ private Boolean migrateVpn;
+
@Parameter(name = ApiConstants.MAKEREDUNDANT, type = CommandType.BOOLEAN, required = false, description = "Turn a single VPC into a redundant one.")
private Boolean makeredundant = false;
@@ -72,6 +75,13 @@ public Boolean getCleanup() {
return cleanup;
}
+ public Boolean isMigrateVpn() {
+ if (migrateVpn != null) {
+ return migrateVpn;
+ }
+ return true;
+ }
+
public Boolean getMakeredundant() {
return makeredundant;
}
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java
index 190a0e5cbc5d..909b86a6ae99 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java
@@ -59,6 +59,9 @@ public class UpdateVPCCmd extends BaseAsyncCustomIdCmd implements UserCmd {
@Parameter(name = ApiConstants.FOR_DISPLAY, type = CommandType.BOOLEAN, description = "an optional field, whether to the display the vpc to the end user or not", since = "4.4", authorized = {RoleType.Admin})
private Boolean display;
+ @Parameter(name = ApiConstants.NETWORK_BOOT_IP, type = CommandType.STRING, description = "the network boot ip of the VPC.")
+ private String networkBootIp;
+
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@@ -79,6 +82,8 @@ public Boolean isDisplayVpc() {
return display;
}
+ public String getNetworkBootIp() { return networkBootIp; }
+
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@@ -99,7 +104,7 @@ public long getEntityOwnerId() {
@Override
public void execute() {
- Vpc result = _vpcService.updateVpc(getId(), getVpcName(), getDisplayText(), getCustomId(), isDisplayVpc());
+ Vpc result = _vpcService.updateVpc(getId(), getVpcName(), getDisplayText(), getCustomId(), isDisplayVpc(), getNetworkBootIp());
if (result != null) {
VpcResponse response = _responseGenerator.createVpcResponse(getResponseView(), result);
response.setResponseName(getCommandName());
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java
index b6ea5cc426cc..dc9af9e1de49 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java
@@ -34,6 +34,7 @@
import com.cloud.event.EventTypes;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.NetworkRuleConflictException;
+import com.cloud.exception.RemoteAccessVpnException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.network.IpAddress;
import com.cloud.network.RemoteAccessVpn;
@@ -157,6 +158,10 @@ public void create() {
s_logger.info("Network rule conflict: " + e.getMessage());
s_logger.trace("Network Rule Conflict: ", e);
throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, e.getMessage());
+ } catch (RemoteAccessVpnException e) {
+ s_logger.info("Create vpn internal error: " + e.getMessage());
+ s_logger.trace("Create vpn internal error: ", e);
+ throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
}
}
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnCaCertificateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnCaCertificateCmd.java
new file mode 100644
index 000000000000..88b35f2156ac
--- /dev/null
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnCaCertificateCmd.java
@@ -0,0 +1,104 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.user.vpn;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.response.CertificateResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.cloudstack.pki.PkiDetail;
+import org.apache.cloudstack.pki.PkiManager;
+
+import com.cloud.domain.Domain;
+import com.cloud.exception.RemoteAccessVpnException;
+import com.cloud.user.DomainService;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+/**
+ * @author Khosrow Moossavi
+ * @since 4.10.0.228-cca
+ */
+@APICommand(name = ListVpnCaCertificateCmd.APINAME,
+ description = "Lists the CA public certificate(s) as support by the configured/provided CA plugin",
+ responseObject = CertificateResponse.class,
+ requestHasSensitiveInfo = false,
+ responseHasSensitiveInfo = false,
+ since = "4.10.0.228-cca",
+ authorized = {
+ RoleType.Admin,
+ RoleType.ResourceAdmin,
+ RoleType.DomainAdmin,
+ RoleType.User
+})
+public class ListVpnCaCertificateCmd extends BaseCmd {
+ public static final String APINAME = "listVpnCaCertificate";
+
+ @Inject
+ private DomainService domainService;
+
+ @Inject
+ private PkiManager pkiManager;
+
+ /////////////////////////////////////////////////////
+ //////////////// API parameters /////////////////////
+ /////////////////////////////////////////////////////
+
+ @Parameter(name = ApiConstants.DOMAIN, type = CommandType.STRING, description = "Name of the domain whose CA certificate is requested")
+ private String domain;
+
+ /////////////////////////////////////////////////////
+ /////////////////// Accessors ///////////////////////
+ /////////////////////////////////////////////////////
+
+ public String getDomain() {
+ return domain;
+ }
+
+ /////////////////////////////////////////////////////
+ /////////////// API Implementation///////////////////
+ /////////////////////////////////////////////////////
+
+ @Override
+ public void execute() {
+ final PkiDetail certificate;
+ try {
+ Domain domain = domainService.getDomain(getDomain());
+ certificate = pkiManager.getCertificate(domain);
+ } catch (final RemoteAccessVpnException e) {
+ throw new CloudRuntimeException("Failed to get CA certificates for given domain", e);
+ }
+ final CertificateResponse certificateResponse = new CertificateResponse("cacertificates");
+ certificateResponse.setCertificate(certificate.getIssuingCa());
+ certificateResponse.setResponseName(getCommandName());
+ setResponseObject(certificateResponse);
+ }
+
+ @Override
+ public String getCommandName() {
+ return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX;
+ }
+
+ @Override
+ public long getEntityOwnerId() {
+ return CallContext.current().getCallingAccountId();
+ }
+}
diff --git a/api/src/main/java/org/apache/cloudstack/api/response/CertificateResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/CertificateResponse.java
index f8c3ecc74044..292d45d9710b 100644
--- a/api/src/main/java/org/apache/cloudstack/api/response/CertificateResponse.java
+++ b/api/src/main/java/org/apache/cloudstack/api/response/CertificateResponse.java
@@ -23,6 +23,9 @@
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
+/**
+ * @since 4.10.0.228-cca
+ */
public class CertificateResponse extends BaseResponse {
@SerializedName(ApiConstants.CERTIFICATE)
@Param(description = "The client certificate")
diff --git a/api/src/main/java/org/apache/cloudstack/api/response/DiskOfferingResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/DiskOfferingResponse.java
index 1bea164d359b..bee386193310 100644
--- a/api/src/main/java/org/apache/cloudstack/api/response/DiskOfferingResponse.java
+++ b/api/src/main/java/org/apache/cloudstack/api/response/DiskOfferingResponse.java
@@ -164,6 +164,22 @@ public class DiskOfferingResponse extends BaseResponseWithAnnotations {
@Param(description = "additional key/value details tied with this disk offering", since = "4.17")
private Map details;
+ @SerializedName(ApiConstants.MIN_IOPS_PER_GB)
+ @Param(description = "IOPS/GB rate for min IOPS. miniops = size * miniopspergb")
+ private Long minIopsPerGb;
+
+ @SerializedName(ApiConstants.MAX_IOPS_PER_GB)
+ @Param(description = "IOPS/GB rate for max IOPS. maxiops = size * maxiopspergb")
+ private Long maxIopsPerGb;
+
+ @SerializedName(ApiConstants.HIGHEST_MIN_IOPS)
+ @Param(description = "Highest Min IOPS value that is allowed for this offering")
+ private Long highestMinIops;
+
+ @SerializedName(ApiConstants.HIGHEST_MAX_IOPS)
+ @Param(description = "Highest Max IOPS value that is allowed for this offering")
+ private Long highestMaxIops;
+
public Boolean getDisplayOffering() {
return displayOffering;
}
@@ -289,6 +305,38 @@ public Integer getHypervisorSnapshotReserve() {
return hypervisorSnapshotReserve;
}
+ public Long getMinIopsPerGb() {
+ return minIopsPerGb;
+ }
+
+ public void setMinIopsPerGb(Long minIopsPerGb) {
+ this.minIopsPerGb = minIopsPerGb;
+ }
+
+ public Long getMaxIopsPerGb() {
+ return maxIopsPerGb;
+ }
+
+ public void setMaxIopsPerGb(Long maxIopsPerGb) {
+ this.maxIopsPerGb = maxIopsPerGb;
+ }
+
+ public Long getHighestMinIops() {
+ return highestMinIops;
+ }
+
+ public void setHighestMinIops(Long highestMinIops) {
+ this.highestMinIops = highestMinIops;
+ }
+
+ public Long getHighestMaxIops() {
+ return highestMaxIops;
+ }
+
+ public void setHighestMaxIops(Long highestMaxIops) {
+ this.highestMaxIops = highestMaxIops;
+ }
+
public void setHypervisorSnapshotReserve(Integer hypervisorSnapshotReserve) {
this.hypervisorSnapshotReserve = hypervisorSnapshotReserve;
}
diff --git a/api/src/main/java/org/apache/cloudstack/api/response/RemoteAccessVpnResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/RemoteAccessVpnResponse.java
index 0e078bea5bd7..baf2e7623f36 100644
--- a/api/src/main/java/org/apache/cloudstack/api/response/RemoteAccessVpnResponse.java
+++ b/api/src/main/java/org/apache/cloudstack/api/response/RemoteAccessVpnResponse.java
@@ -77,6 +77,14 @@ public class RemoteAccessVpnResponse extends BaseResponse implements ControlledE
@Param(description = "is vpn for display to the regular user", since = "4.4", authorized = {RoleType.Admin})
private Boolean forDisplay;
+ @SerializedName(ApiConstants.TYPE)
+ @Param(description = "the type of remote access vpn implementation")
+ private String type;
+
+ @SerializedName(ApiConstants.CERTIFICATE)
+ @Param(description = "the client certificate")
+ private String certificate;
+
public void setPublicIp(String publicIp) {
this.publicIp = publicIp;
}
@@ -129,4 +137,12 @@ public void setId(String id) {
public void setForDisplay(Boolean forDisplay) {
this.forDisplay = forDisplay;
}
+
+ public void setType(String type) {
+ this.type = type;
+ }
+
+ public void setCertificate(String certificate) {
+ this.certificate = certificate;
+ }
}
diff --git a/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java
index 892b5b85262d..d4961103dcbb 100644
--- a/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java
+++ b/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java
@@ -227,6 +227,10 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements
@Param(description = "Base64 string representation of the resource icon", since = "4.16.0.0")
ResourceIconResponse icon;
+ @SerializedName(ApiConstants.BOOT_FILENAME)
+ @Param(description = "The boot file name to use when pxe booting.")
+ private String bootFilename;
+
public TemplateResponse() {
tags = new LinkedHashSet<>();
}
@@ -467,4 +471,12 @@ public void setUrl(String url) {
public void setResourceIconResponse(ResourceIconResponse icon) {
this.icon = icon;
}
+
+ public String getBootFilename() {
+ return bootFilename;
+ }
+
+ public void setBootFilename(String bootFilename) {
+ this.bootFilename = bootFilename;
+ }
}
diff --git a/api/src/main/java/org/apache/cloudstack/api/response/VpcResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/VpcResponse.java
index 3b5661f8a80c..d34969bfb42b 100644
--- a/api/src/main/java/org/apache/cloudstack/api/response/VpcResponse.java
+++ b/api/src/main/java/org/apache/cloudstack/api/response/VpcResponse.java
@@ -72,6 +72,10 @@ public class VpcResponse extends BaseResponseWithAnnotations implements Controll
@Param(description = "vpc offering name the VPC is created from", since = "4.13.2")
private String vpcOfferingName;
+ @SerializedName(ApiConstants.NETWORK_BOOT_IP)
+ @Param(description = "The network boot ip of VPC")
+ private String networkBootIp;
+
@SerializedName(ApiConstants.CREATED)
@Param(description = "the date this VPC was created")
private Date created;
@@ -205,6 +209,8 @@ public void setVpcOfferingName(final String vpcOfferingName) {
this.vpcOfferingName = vpcOfferingName;
}
+ public void setNetworkBootIp(String networkBootIp) { this.networkBootIp = networkBootIp; }
+
public List getNetworks() {
return networks;
}
diff --git a/api/src/main/java/org/apache/cloudstack/pki/PkiDetail.java b/api/src/main/java/org/apache/cloudstack/pki/PkiDetail.java
new file mode 100644
index 000000000000..9c7b1a592e80
--- /dev/null
+++ b/api/src/main/java/org/apache/cloudstack/pki/PkiDetail.java
@@ -0,0 +1,74 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.pki;
+
+/**
+ * @author Khosrow Moossavi
+ * @since 4.10.0.228-cca
+ */
+public class PkiDetail {
+ private String certificate;
+ private String issuingCa;
+ private String privateKey;
+ private String privateKeyType;
+ private String serialNumber;
+
+ public PkiDetail certificate(final String certificate) {
+ this.certificate = certificate;
+ return this;
+ }
+
+ public PkiDetail issuingCa(final String issuingCa) {
+ this.issuingCa = issuingCa;
+ return this;
+ }
+
+ public PkiDetail privateKey(final String privateKey) {
+ this.privateKey = privateKey;
+ return this;
+ }
+
+ public PkiDetail privateKeyType(final String privateKeyType) {
+ this.privateKeyType = privateKeyType;
+ return this;
+ }
+
+ public PkiDetail serialNumber(final String serialNumber) {
+ this.serialNumber = serialNumber;
+ return this;
+ }
+
+ public String getCertificate() {
+ return certificate;
+ }
+
+ public String getIssuingCa() {
+ return issuingCa;
+ }
+
+ public String getPrivateKey() {
+ return privateKey;
+ }
+
+ public String getPrivateKeyType() {
+ return privateKeyType;
+ }
+
+ public String getSerialNumber() {
+ return serialNumber;
+ }
+}
\ No newline at end of file
diff --git a/api/src/main/java/org/apache/cloudstack/pki/PkiManager.java b/api/src/main/java/org/apache/cloudstack/pki/PkiManager.java
new file mode 100644
index 000000000000..b3686ba7dd78
--- /dev/null
+++ b/api/src/main/java/org/apache/cloudstack/pki/PkiManager.java
@@ -0,0 +1,55 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.pki;
+
+import com.cloud.domain.Domain;
+import com.cloud.exception.RemoteAccessVpnException;
+import com.cloud.utils.net.Ip;
+
+/**
+ * @author Khosrow Moossavi
+ * @since 4.10.0.228-cca
+ */
+public interface PkiManager {
+ String CREDENTIAL_ISSUING_CA = "credential.issuing.ca";
+ String CREDENTIAL_SERIAL_NUMBER = "credential.serial.number";
+ String CREDENTIAL_CERTIFICATE = "credential.certificate";
+ String CREDENTIAL_PRIVATE_KEY = "credential.private.key";
+
+ /**
+ * Issue a Certificate for a specific IP and a specific Domain acting as the CA
+ *
+ * @param domain object to extract name and id to be used for issuing the CA
+ * @param publicIp to be included in the certificate
+ *
+ * @return detail about just signed PKI, including issuing CA, certificate, private key and serial number
+ *
+ * @throws RemoteAccessVpnException
+ */
+ PkiDetail issueCertificate(Domain domain, Ip publicIp) throws RemoteAccessVpnException;
+
+ /**
+ * Get a Certificate for a specific Domain acting as the CA
+ *
+ * @param domain object to extract its id to find the issuing CA
+ *
+ * @return details about signed PKI, including issuing CA, certificate and serial number
+ *
+ * @throws RemoteAccessVpnException
+ */
+ PkiDetail getCertificate(Domain domain) throws RemoteAccessVpnException;
+}
diff --git a/core/src/main/java/com/cloud/agent/api/baremetal/DestroyCommand.java b/core/src/main/java/com/cloud/agent/api/baremetal/DestroyCommand.java
new file mode 100644
index 000000000000..6440b6f9f844
--- /dev/null
+++ b/core/src/main/java/com/cloud/agent/api/baremetal/DestroyCommand.java
@@ -0,0 +1,45 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+package com.cloud.agent.api.baremetal;
+
+import com.cloud.agent.api.Command;
+import com.cloud.agent.api.to.VirtualMachineTO;
+
+public class DestroyCommand extends Command {
+
+ VirtualMachineTO vm;
+ boolean executeInSequence;
+
+ public DestroyCommand(VirtualMachineTO vm, boolean executeInSequence) {
+ this.vm = vm;
+ this.executeInSequence = executeInSequence;
+ }
+
+ @Override
+ public boolean executeInSequence() {
+
+ if (vm.getName() != null && vm.getName().startsWith("r-")) {
+ return false;
+ }
+ return executeInSequence;
+ }
+
+ public VirtualMachineTO getVm() {
+ return vm;
+ }
+}
diff --git a/core/src/main/java/com/cloud/agent/api/routing/DhcpEntryCommand.java b/core/src/main/java/com/cloud/agent/api/routing/DhcpEntryCommand.java
index 7fb65fe15cf9..06aab4a6e14c 100644
--- a/core/src/main/java/com/cloud/agent/api/routing/DhcpEntryCommand.java
+++ b/core/src/main/java/com/cloud/agent/api/routing/DhcpEntryCommand.java
@@ -37,6 +37,9 @@ public class DhcpEntryCommand extends NetworkElementCommand {
boolean executeInSequence = false;
boolean remove;
+ private String bootFilename;
+ private String networkBootIp;
+
public boolean isRemove() {
return remove;
}
@@ -152,4 +155,20 @@ public boolean isDefault() {
public void setDefault(boolean isDefault) {
this.isDefault = isDefault;
}
+
+ public String getBootFilename() {
+ return bootFilename;
+ }
+
+ public void setBootFilename(String bootFilename) {
+ this.bootFilename = bootFilename;
+ }
+
+ public String getNetworkBootIp() {
+ return networkBootIp;
+ }
+
+ public void setNetworkBootIp(String networkBootIp) {
+ this.networkBootIp = networkBootIp;
+ }
}
diff --git a/core/src/main/java/com/cloud/agent/api/routing/RemoteAccessVpnCfgCommand.java b/core/src/main/java/com/cloud/agent/api/routing/RemoteAccessVpnCfgCommand.java
index c7dabe5b14d8..cb85597fbd1b 100644
--- a/core/src/main/java/com/cloud/agent/api/routing/RemoteAccessVpnCfgCommand.java
+++ b/core/src/main/java/com/cloud/agent/api/routing/RemoteAccessVpnCfgCommand.java
@@ -30,6 +30,12 @@ public class RemoteAccessVpnCfgCommand extends NetworkElementCommand {
private String localCidr;
private String publicInterface;
+ // items related to VPN IKEv2 implementation
+ private String vpnType;
+ private String caCert;
+ private String serverCert;
+ private String serverKey;
+
protected RemoteAccessVpnCfgCommand() {
this.create = false;
}
@@ -43,7 +49,7 @@ public boolean executeInSequence() {
return true;
}
- public RemoteAccessVpnCfgCommand(boolean create, String vpnServerAddress, String localIp, String ipRange, String ipsecPresharedKey, boolean vpcEnabled) {
+ public RemoteAccessVpnCfgCommand(boolean create, String vpnServerAddress, String localIp, String ipRange, String ipsecPresharedKey, boolean vpcEnabled, String vpnType, String caCert, String serverCert, String serverKey) {
this.vpnServerIp = vpnServerAddress;
this.ipRange = ipRange;
this.presharedKey = ipsecPresharedKey;
@@ -55,6 +61,10 @@ public RemoteAccessVpnCfgCommand(boolean create, String vpnServerAddress, String
} else {
this.setPublicInterface("eth2");
}
+ this.vpnType = vpnType;
+ this.caCert = caCert;
+ this.serverCert = serverCert;
+ this.serverKey = serverKey;
}
public String getVpnServerIp() {
@@ -109,4 +119,36 @@ public void setPublicInterface(String publicInterface) {
this.publicInterface = publicInterface;
}
+ public String getVpnType() {
+ return vpnType;
+ }
+
+ public void setVpnType(String vpnType) {
+ this.vpnType = vpnType;
+ }
+
+ public String getCaCert() {
+ return caCert;
+ }
+
+ public void setCaCert(String caCert) {
+ this.caCert = caCert;
+ }
+
+ public String getServerCert() {
+ return serverCert;
+ }
+
+ public void setServerCert(String serverCert) {
+ this.serverCert = serverCert;
+ }
+
+ public String getServerKey() {
+ return serverKey;
+ }
+
+ public void setServerKey(String serverKey) {
+ this.serverKey = serverKey;
+ }
+
}
diff --git a/core/src/main/java/com/cloud/agent/api/routing/VpnUsersCfgCommand.java b/core/src/main/java/com/cloud/agent/api/routing/VpnUsersCfgCommand.java
index 3510d14fad52..7ca4bd72ace3 100644
--- a/core/src/main/java/com/cloud/agent/api/routing/VpnUsersCfgCommand.java
+++ b/core/src/main/java/com/cloud/agent/api/routing/VpnUsersCfgCommand.java
@@ -79,12 +79,13 @@ public String getUsernamePassword() {
}
UsernamePassword[] userpwds;
+ private String vpnType;
protected VpnUsersCfgCommand() {
}
- public VpnUsersCfgCommand(List addUsers, List removeUsers) {
+ public VpnUsersCfgCommand(List addUsers, List removeUsers, String vpnType) {
userpwds = new UsernamePassword[addUsers.size() + removeUsers.size()];
int i = 0;
for (VpnUser vpnUser : removeUsers) {
@@ -93,6 +94,8 @@ public VpnUsersCfgCommand(List addUsers, List removeUsers) {
for (VpnUser vpnUser : addUsers) {
userpwds[i++] = new UsernamePassword(vpnUser.getUsername(), vpnUser.getPassword(), true);
}
+
+ this.vpnType = vpnType;
}
@Override
@@ -104,4 +107,7 @@ public UsernamePassword[] getUserpwds() {
return userpwds;
}
+ public String getVpnType() {
+ return vpnType;
+ }
}
diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/DhcpEntryConfigItem.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/DhcpEntryConfigItem.java
index 0710ecc1dcd5..01f86790824b 100644
--- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/DhcpEntryConfigItem.java
+++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/DhcpEntryConfigItem.java
@@ -37,6 +37,9 @@ public List generateConfig(final NetworkElementCommand cmd) {
final VmDhcpConfig vmDhcpConfig = new VmDhcpConfig(command.getVmName(), command.getVmMac(), command.getVmIpAddress(), command.getVmIp6Address(), command.getDuid(), command.getDefaultDns(),
command.getDefaultRouter(), command.getStaticRoutes(), command.isDefault(), command.isRemove());
+ vmDhcpConfig.setBootFilename(command.getBootFilename());
+ vmDhcpConfig.setNetworkBootIp(command.getNetworkBootIp());
+
return generateConfigItems(vmDhcpConfig);
}
diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/RemoteAccessVpnConfigItem.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/RemoteAccessVpnConfigItem.java
index be51c30745b0..3586eecdbb95 100644
--- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/RemoteAccessVpnConfigItem.java
+++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/RemoteAccessVpnConfigItem.java
@@ -32,10 +32,21 @@ public class RemoteAccessVpnConfigItem extends AbstractConfigItemFacade {
@Override
public List generateConfig(final NetworkElementCommand cmd) {
- final RemoteAccessVpnCfgCommand command = (RemoteAccessVpnCfgCommand) cmd;
+ final RemoteAccessVpnCfgCommand command = (RemoteAccessVpnCfgCommand)cmd;
+
+ final RemoteAccessVpn remoteAccessVpn = new RemoteAccessVpn(
+ command.isCreate(),
+ command.getIpRange(),
+ command.getPresharedKey(),
+ command.getVpnServerIp(),
+ command.getLocalIp(),
+ command.getLocalCidr(),
+ command.getPublicInterface(),
+ command.getVpnType(),
+ command.getCaCert(),
+ command.getServerCert(),
+ command.getServerKey());
- final RemoteAccessVpn remoteAccessVpn = new RemoteAccessVpn(command.isCreate(), command.getIpRange(), command.getPresharedKey(), command.getVpnServerIp(), command.getLocalIp(), command.getLocalCidr(),
- command.getPublicInterface());
return generateConfigItems(remoteAccessVpn);
}
diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/VpnUsersConfigItem.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/VpnUsersConfigItem.java
index c98a93e2d3d0..2dd87c6c1810 100644
--- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/VpnUsersConfigItem.java
+++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/VpnUsersConfigItem.java
@@ -41,7 +41,7 @@ public List generateConfig(final NetworkElementCommand cmd) {
vpnUsers.add(new VpnUser(userpwd.getUsername(), userpwd.getPassword(), userpwd.isAdd()));
}
- final VpnUserList vpnUserList = new VpnUserList(vpnUsers);
+ final VpnUserList vpnUserList = new VpnUserList(vpnUsers, command.getVpnType());
return generateConfigItems(vpnUserList);
}
diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/RemoteAccessVpn.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/RemoteAccessVpn.java
index 5b5c05bf7fd7..e025b915814c 100644
--- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/RemoteAccessVpn.java
+++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/RemoteAccessVpn.java
@@ -24,11 +24,17 @@ public class RemoteAccessVpn extends ConfigBase {
public boolean create;
public String ipRange, presharedKey, vpnServerIp, localIp, localCidr, publicInterface;
+ // items related to VPN IKEv2 implementation
+ private String vpnType;
+ private String caCert;
+ private String serverCert;
+ private String serverKey;
+
public RemoteAccessVpn() {
super(ConfigBase.REMOTEACCESSVPN);
}
- public RemoteAccessVpn(boolean create, String ipRange, String presharedKey, String vpnServerIp, String localIp, String localCidr, String publicInterface) {
+ public RemoteAccessVpn(boolean create, String ipRange, String presharedKey, String vpnServerIp, String localIp, String localCidr, String publicInterface, String vpnType, String caCert, String serverCert, String serverKey) {
super(ConfigBase.REMOTEACCESSVPN);
this.create = create;
this.ipRange = ipRange;
@@ -37,6 +43,10 @@ public RemoteAccessVpn(boolean create, String ipRange, String presharedKey, Stri
this.localIp = localIp;
this.localCidr = localCidr;
this.publicInterface = publicInterface;
+ this.vpnType = vpnType;
+ this.caCert = caCert;
+ this.serverCert = serverCert;
+ this.serverKey = serverKey;
}
public boolean isCreate() {
@@ -95,4 +105,36 @@ public void setPublicInterface(String publicInterface) {
this.publicInterface = publicInterface;
}
+ public String getVpnType() {
+ return vpnType;
+ }
+
+ public void setVpnType(String vpnType) {
+ this.vpnType = vpnType;
+ }
+
+ public String getCaCert() {
+ return caCert;
+ }
+
+ public void setCaCert(String caCert) {
+ this.caCert = caCert;
+ }
+
+ public String getServerCert() {
+ return serverCert;
+ }
+
+ public void setServerCert(String serverCert) {
+ this.serverCert = serverCert;
+ }
+
+ public String getServerKey() {
+ return serverKey;
+ }
+
+ public void setServerKey(String serverKey) {
+ this.serverKey = serverKey;
+ }
+
}
diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VmDhcpConfig.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VmDhcpConfig.java
index d9cb8b0b2645..f4f13048a570 100644
--- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VmDhcpConfig.java
+++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VmDhcpConfig.java
@@ -30,6 +30,9 @@ public class VmDhcpConfig extends ConfigBase {
private String staticRoutes;
private boolean defaultEntry;
+ private String bootFilename;
+ private String networkBootIp;
+
// Indicate if the entry should be removed when set to true
private boolean remove;
@@ -132,4 +135,19 @@ public void setDefaultEntry(boolean defaultEntry) {
this.defaultEntry = defaultEntry;
}
+ public String getBootFilename() {
+ return bootFilename;
+ }
+
+ public void setBootFilename(String bootFilename) {
+ this.bootFilename = bootFilename;
+ }
+
+ public String getNetworkBootIp() {
+ return networkBootIp;
+ }
+
+ public void setNetworkBootIp(String networkBootIp) {
+ this.networkBootIp = networkBootIp;
+ }
}
diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VpnUserList.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VpnUserList.java
index 115fcc9bd1ef..b3e5c0e5df47 100644
--- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VpnUserList.java
+++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VpnUserList.java
@@ -23,14 +23,16 @@
public class VpnUserList extends ConfigBase {
private List vpnUsers;
+ private String vpnType;
public VpnUserList() {
super(ConfigBase.VPN_USER_LIST);
}
- public VpnUserList(List vpnUsers) {
+ public VpnUserList(List vpnUsers, String vpnType) {
super(ConfigBase.VPN_USER_LIST);
this.vpnUsers = vpnUsers;
+ this.vpnType = vpnType;
}
public List getVpnUsers() {
@@ -41,4 +43,11 @@ public void setVpnUsers(List vpnUsers) {
this.vpnUsers = vpnUsers;
}
+ public String getVpnType() {
+ return vpnType;
+ }
+
+ public void setVpnType(String vpnType) {
+ this.vpnType = vpnType;
+ }
}
diff --git a/core/src/main/java/com/cloud/network/HAProxyConfigurator.java b/core/src/main/java/com/cloud/network/HAProxyConfigurator.java
index 98eeef8d3aad..5632b5730d9c 100644
--- a/core/src/main/java/com/cloud/network/HAProxyConfigurator.java
+++ b/core/src/main/java/com/cloud/network/HAProxyConfigurator.java
@@ -511,6 +511,9 @@ private List getRulesForPool(final LoadBalancerTO lbTO, final boolean ke
if(lbTO.getLbProtocol() != null && lbTO.getLbProtocol().equals("tcp-proxy")) {
sb.append(" send-proxy");
}
+ else if(lbTO.getLbProtocol() != null && lbTO.getLbProtocol().equals("tcp-proxy-v2")) {
+ sb.append(" send-proxy-v2");
+ }
dstSubRule.add(sb.toString());
if (stickinessSubRule != null) {
sb.append(" cookie ").append(dest.getDestIp().replace(".", "_")).append('-').append(dest.getDestPort()).toString();
diff --git a/core/src/main/java/com/cloud/storage/template/SwiftVolumeDownloader.java b/core/src/main/java/com/cloud/storage/template/SwiftVolumeDownloader.java
new file mode 100644
index 000000000000..01d3be5b99cd
--- /dev/null
+++ b/core/src/main/java/com/cloud/storage/template/SwiftVolumeDownloader.java
@@ -0,0 +1,396 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.storage.template;
+
+import com.cloud.agent.api.to.SwiftTO;
+import org.apache.cloudstack.managed.context.ManagedContextRunnable;
+import org.apache.cloudstack.storage.command.DownloadCommand;
+import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.http.Header;
+import org.apache.http.HttpEntityEnclosingRequest;
+import org.apache.http.HttpRequest;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.HttpRequestRetryHandler;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.protocol.HttpClientContext;
+import org.apache.http.conn.ConnectTimeoutException;
+import org.apache.http.conn.ssl.NoopHostnameVerifier;
+import org.apache.http.conn.ssl.TrustStrategy;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.http.protocol.HttpContext;
+import org.apache.http.ssl.SSLContextBuilder;
+import org.apache.log4j.Logger;
+
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLException;
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InterruptedIOException;
+import java.net.UnknownHostException;
+import java.security.KeyManagementException;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.util.Date;
+
+/**
+ * Download a volume file using HTTP(S)
+ *
+ * This class, once instantiated, downloads a Volume to the staging NFS or cache when Swift is used as the Image Store.
+ *
+ * Execution of the instance is started when runInContext() is called.
+ */
+public class SwiftVolumeDownloader extends ManagedContextRunnable implements TemplateDownloader {
+ private static final Logger LOGGER = Logger.getLogger(SwiftVolumeDownloader.class.getName());
+ private static final int DOWNLOAD_BUFFER_SIZE_BYTES = 1024* 1024;
+
+ private final String downloadUrl;
+ private final String fileName;
+ private final String fileExtension;
+ private final long volumeId;
+ private final CloseableHttpClient httpClient;
+ private final HttpGet httpGet;
+ private final DownloadCompleteCallback downloadCompleteCallback;
+ private final SwiftTO swiftTO;
+ private String errorString = "";
+ private Status status = Status.NOT_STARTED;
+ private final ResourceType resourceType = ResourceType.VOLUME;
+ private long remoteSize;
+ private String md5sum;
+ private long downloadTime;
+ private long totalBytes;
+ private final long maxVolumeSizeInBytes;
+ private final String installPathPrefix;
+ private final String installPath;
+ private File volumeFile;
+ private boolean resume = false;
+
+ public SwiftVolumeDownloader(DownloadCommand cmd, DownloadCompleteCallback downloadCompleteCallback, long maxVolumeSizeInBytes, String installPathPrefix) {
+ this.downloadUrl = cmd.getUrl();
+ this.swiftTO = (SwiftTO) cmd.getDataStore();
+ this.maxVolumeSizeInBytes = maxVolumeSizeInBytes;
+ this.httpClient = initializeHttpClient();
+ this.downloadCompleteCallback = downloadCompleteCallback;
+ this.fileName = cmd.getName();
+ this.fileExtension = cmd.getFormat().getFileExtension();
+ this.volumeId = cmd.getId();
+ this.installPathPrefix = installPathPrefix;
+ this.installPath = cmd.getInstallPath();
+ this.httpGet = new HttpGet(downloadUrl);
+ }
+
+ private CloseableHttpClient initializeHttpClient(){
+
+ CloseableHttpClient client = null;
+ try {
+ //trust all certs
+ SSLContext sslContext = new SSLContextBuilder()
+ .loadTrustMaterial(null, (TrustStrategy) (chain, authType) -> true)
+ .build();
+ client = HttpClients.custom().setSSLContext(sslContext)
+ .setSSLHostnameVerifier(new NoopHostnameVerifier())
+ .setRetryHandler(buildRetryHandler(5))
+ .build();
+ } catch (NoSuchAlgorithmException e) {
+ e.printStackTrace();
+ } catch (KeyManagementException e) {
+ e.printStackTrace();
+ } catch (KeyStoreException e) {
+ e.printStackTrace();
+ }
+
+ return client;
+ }
+
+ private HttpRequestRetryHandler buildRetryHandler(int retryCount){
+
+ HttpRequestRetryHandler customRetryHandler = new HttpRequestRetryHandler() {
+ @Override
+ public boolean retryRequest(
+ IOException exception,
+ int executionCount,
+ HttpContext context) {
+ if (executionCount >= retryCount) {
+ // Do not retry if over max retry count
+ return false;
+ }
+ if (exception instanceof InterruptedIOException) {
+ // Timeout
+ return false;
+ }
+ if (exception instanceof UnknownHostException) {
+ // Unknown host
+ return false;
+ }
+ if (exception instanceof ConnectTimeoutException) {
+ // Connection refused
+ return false;
+ }
+ if (exception instanceof SSLException) {
+ // SSL handshake exception
+ return false;
+ }
+ HttpClientContext clientContext = HttpClientContext.adapt(context);
+ HttpRequest request = clientContext.getRequest();
+ boolean idempotent = !(request instanceof HttpEntityEnclosingRequest);
+ if (idempotent) {
+ // Retry if the request is considered idempotent
+ return true;
+ }
+ return false;
+ }
+
+ };
+ return customRetryHandler;
+ }
+
+ @Override
+ public long download(boolean resume, DownloadCompleteCallback callback) {
+ if (!status.equals(Status.NOT_STARTED)) {
+ // Only start downloading if we haven't started yet.
+ LOGGER.info("Volume download is already started, not starting again. Volume: " + downloadUrl);
+ return 0;
+ }
+
+ HttpResponse response = null;
+ try {
+ response = httpClient.execute(httpGet);
+ } catch (IOException e) {
+ e.printStackTrace();
+ errorString = "Exception while executing HttpMethod " + httpGet.getMethod() + " on URL " + downloadUrl + " "
+ + response.getStatusLine().getStatusCode() + " " + response.getStatusLine().getReasonPhrase();
+ LOGGER.error(errorString);
+ status = Status.UNRECOVERABLE_ERROR;
+ return 0;
+ }
+
+ // Headers
+ long contentLength = response.getEntity().getContentLength();
+ Header contentType = response.getEntity().getContentType();
+
+ // Check the contentLengthHeader and transferEncodingHeader.
+ if (contentLength <= 0) {
+ errorString = "The Content Length of " + downloadUrl + " is <= 0 and content Type is "+contentType.toString();
+ LOGGER.error(errorString);
+ status = Status.UNRECOVERABLE_ERROR;
+ return 0;
+ } else {
+ // The ContentLengthHeader is supplied; parse its value.
+ remoteSize = contentLength;
+ }
+
+ if (remoteSize > maxVolumeSizeInBytes) {
+ errorString = "Remote size is too large for volume " + downloadUrl + " remote size is " + remoteSize + " max allowed is " + maxVolumeSizeInBytes;
+ LOGGER.error(errorString);
+ status = Status.UNRECOVERABLE_ERROR;
+ return 0;
+ }
+
+ InputStream inputStream;
+ try {
+ inputStream = new BufferedInputStream(response.getEntity().getContent());
+ } catch (IOException e) {
+ errorString = "Exception occurred while opening InputStream for volume from " + downloadUrl;
+ LOGGER.error(errorString);
+ status = Status.UNRECOVERABLE_ERROR;
+ return 0;
+ }
+
+ String filePath = installPathPrefix + File.separator + installPath;
+ File directory = new File(filePath);
+ File srcFile = new File(filePath + File.separator + fileName);
+ try {
+ if (!directory.exists()) {
+ LOGGER.info("Creating directories "+filePath);
+ directory.mkdirs();
+ }
+ if (!srcFile.createNewFile()) {
+ LOGGER.info("Reusing existing file " + srcFile.getPath());
+ }
+ } catch (IOException e) {
+ errorString = "Exception occurred while creating temp file " + srcFile.getPath();
+ LOGGER.error(errorString);
+ status = Status.UNRECOVERABLE_ERROR;
+ return 0;
+ }
+
+ LOGGER.info("Starting download from " + downloadUrl + " to staging with size " + remoteSize + " bytes to "+filePath);
+ final Date downloadStart = new Date();
+
+ try (FileOutputStream fileOutputStream = new FileOutputStream(srcFile);) {
+ BufferedOutputStream outputStream = new BufferedOutputStream(fileOutputStream,DOWNLOAD_BUFFER_SIZE_BYTES);
+ byte[] data = new byte[DOWNLOAD_BUFFER_SIZE_BYTES];
+ int bufferLength = 0;
+ while((bufferLength = inputStream.read(data,0,DOWNLOAD_BUFFER_SIZE_BYTES)) >= 0){
+ totalBytes += bufferLength;
+ outputStream.write(data,0,bufferLength);
+ status = Status.IN_PROGRESS;
+ LOGGER.trace("Download in progress: " + getDownloadPercent() + "%");
+ if(totalBytes >= remoteSize){
+ volumeFile = srcFile;
+ status = Status.DOWNLOAD_FINISHED;
+ }
+ }
+ outputStream.close();
+ inputStream.close();
+ } catch (IOException e) {
+ LOGGER.error("Exception when downloading from url to staging nfs:" + e.getMessage(), e);
+ status = Status.RECOVERABLE_ERROR;
+ return 0;
+ }
+
+ downloadTime = new Date().getTime() - downloadStart.getTime();
+
+ try (FileInputStream fs = new FileInputStream(srcFile)) {
+ md5sum = DigestUtils.md5Hex(fs);
+ } catch (IOException e) {
+ LOGGER.error("Failed to get md5sum: " + srcFile.getAbsoluteFile());
+ }
+
+ if (status == Status.DOWNLOAD_FINISHED) {
+ LOGGER.info("Template download from " + downloadUrl + " to staging nfs, transferred " + totalBytes + " in " + (downloadTime / 1000) + " seconds, completed successfully!");
+ } else {
+ LOGGER.error("Template download from " + downloadUrl + " to staging nfs, transferred " + totalBytes + " in " + (downloadTime / 1000) + " seconds, completed with status " + status.toString());
+ }
+
+ // Close http connection
+ httpGet.releaseConnection();
+
+ // Call the callback!
+ if (callback != null) {
+ callback.downloadComplete(status);
+ }
+
+ return totalBytes;
+ }
+
+ public String getDownloadUrl() {
+ return httpGet.getURI().toString();
+ }
+
+ @Override
+ public Status getStatus() {
+ return status;
+ }
+
+ @Override
+ public long getDownloadTime() {
+ return downloadTime;
+ }
+
+ @Override
+ public long getDownloadedBytes() {
+ return totalBytes;
+ }
+
+ @Override
+ public boolean stopDownload() {
+ switch (status) {
+ case IN_PROGRESS:
+ if (httpGet != null) {
+ httpGet.abort();
+ }
+ break;
+ case UNKNOWN:
+ case NOT_STARTED:
+ case RECOVERABLE_ERROR:
+ case UNRECOVERABLE_ERROR:
+ case ABORTED:
+ case DOWNLOAD_FINISHED:
+ // Remove the object if it already has been uploaded.
+ // SwiftUtil.deleteObject(swiftTO, swiftPath);
+ break;
+ default:
+ break;
+ }
+
+ status = Status.ABORTED;
+ return true;
+ }
+
+ @Override
+ public int getDownloadPercent() {
+ if (remoteSize == 0) {
+ return 0;
+ }
+
+ return (int) (100.0 * totalBytes / remoteSize);
+ }
+
+ @Override
+ protected void runInContext() {
+ LOGGER.info("Starting download in managed context resume = " + resume + " callback = " + downloadCompleteCallback.toString());
+ download(resume, downloadCompleteCallback);
+ }
+
+ @Override
+ public void setStatus(Status status) {
+ this.status = status;
+ }
+
+ public boolean isResume() {
+ return resume;
+ }
+
+ @Override
+ public String getDownloadError() {
+ return errorString;
+ }
+
+ @Override
+ public String getDownloadLocalPath() {
+ return installPath;
+ }
+
+ @Override
+ public void setResume(boolean resume) {
+ this.resume = resume;
+ }
+
+ @Override
+ public void setDownloadError(String error) {
+ errorString = error;
+ }
+
+ @Override
+ public boolean isInited() {
+ return true;
+ }
+
+ public ResourceType getResourceType() {
+ return resourceType;
+ }
+
+ public String getFileExtension() {
+ return fileExtension;
+ }
+
+ public String getMd5sum() { return md5sum; }
+
+ public File getVolumeFile() { return volumeFile; }
+
+ public long getMaxTemplateSizeInBytes() {return maxVolumeSizeInBytes;}
+}
\ No newline at end of file
diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java
index b184a74312b5..ccd0ee15a6a9 100644
--- a/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java
+++ b/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java
@@ -37,6 +37,7 @@ public class TemplateObjectTO implements DataTO {
private long accountId;
private String checksum;
private boolean hvm;
+ private String bootFilename;
private String displayText;
private DataStoreTO imageDataStore;
private String name;
@@ -61,6 +62,7 @@ public TemplateObjectTO(VirtualMachineTemplate template) {
this.displayText = template.getDisplayText();
this.checksum = template.getChecksum();
this.hvm = template.isRequiresHvm();
+ this.bootFilename = template.getBootFilename();
this.accountId = template.getAccountId();
this.name = template.getUniqueName();
this.format = template.getFormat();
@@ -75,6 +77,7 @@ public TemplateObjectTO(TemplateInfo template) {
this.displayText = template.getDisplayText();
this.checksum = template.getChecksum();
this.hvm = template.isRequiresHvm();
+ this.bootFilename = template.getBootFilename();
this.accountId = template.getAccountId();
this.name = template.getUniqueName();
this.format = template.getFormat();
@@ -126,6 +129,14 @@ public void setRequiresHvm(boolean hvm) {
this.hvm = hvm;
}
+ public String getBootFilename() {
+ return bootFilename;
+ }
+
+ public void setBootFilename(String bootFilename) {
+ this.bootFilename = bootFilename;
+ }
+
public String getDescription() {
return displayText;
}
diff --git a/core/src/test/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResourceTest.java b/core/src/test/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResourceTest.java
index 6eb30aeeed9e..2b7a0be8b7b8 100644
--- a/core/src/test/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResourceTest.java
+++ b/core/src/test/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResourceTest.java
@@ -545,21 +545,21 @@ public void testRemoteAccessVpnCfgCommand() {
}
protected RemoteAccessVpnCfgCommand generateRemoteAccessVpnCfgCommand1() {
- final RemoteAccessVpnCfgCommand cmd = new RemoteAccessVpnCfgCommand(true, "124.10.10.10", "10.10.1.1", "10.10.1.10-10.10.1.20", "sharedkey", false);
+ final RemoteAccessVpnCfgCommand cmd = new RemoteAccessVpnCfgCommand(true, "124.10.10.10", "10.10.1.1", "10.10.1.10-10.10.1.20", "sharedkey", false, null, null, null, null);
cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, ROUTERNAME);
cmd.setLocalCidr("10.1.1.1/24");
return cmd;
}
protected RemoteAccessVpnCfgCommand generateRemoteAccessVpnCfgCommand2() {
- final RemoteAccessVpnCfgCommand cmd = new RemoteAccessVpnCfgCommand(false, "124.10.10.10", "10.10.1.1", "10.10.1.10-10.10.1.20", "sharedkey", false);
+ final RemoteAccessVpnCfgCommand cmd = new RemoteAccessVpnCfgCommand(false, "124.10.10.10", "10.10.1.1", "10.10.1.10-10.10.1.20", "sharedkey", false, null, null, null, null);
cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, ROUTERNAME);
cmd.setLocalCidr("10.1.1.1/24");
return cmd;
}
protected RemoteAccessVpnCfgCommand generateRemoteAccessVpnCfgCommand3() {
- final RemoteAccessVpnCfgCommand cmd = new RemoteAccessVpnCfgCommand(true, "124.10.10.10", "10.10.1.1", "10.10.1.10-10.10.1.20", "sharedkey", true);
+ final RemoteAccessVpnCfgCommand cmd = new RemoteAccessVpnCfgCommand(true, "124.10.10.10", "10.10.1.1", "10.10.1.10-10.10.1.20", "sharedkey", true, null, null, null, null);
cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, ROUTERNAME);
cmd.setLocalCidr("10.1.1.1/24");
return cmd;
diff --git a/core/src/test/java/com/cloud/network/HAProxyConfiguratorTest.java b/core/src/test/java/com/cloud/network/HAProxyConfiguratorTest.java
index 2a282cbeca8b..f6fb740aac51 100644
--- a/core/src/test/java/com/cloud/network/HAProxyConfiguratorTest.java
+++ b/core/src/test/java/com/cloud/network/HAProxyConfiguratorTest.java
@@ -21,6 +21,9 @@
import static org.junit.Assert.assertTrue;
+import java.util.ArrayList;
+import java.util.List;
+
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -32,9 +35,6 @@
import com.cloud.agent.api.to.LoadBalancerTO;
import com.cloud.network.lb.LoadBalancingRule.LbDestination;
-import java.util.List;
-import java.util.ArrayList;
-
/**
* @author dhoogland
*
diff --git a/deps/install-non-oss.sh b/deps/install-non-oss.sh
index c6b91e07cec4..af96c33b3349 100755
--- a/deps/install-non-oss.sh
+++ b/deps/install-non-oss.sh
@@ -24,6 +24,11 @@ mvn install:install-file -Dfile=cloud-iControl.jar -DgroupId=com.cloud.com.
# Version: unknown
mvn install:install-file -Dfile=cloud-netscaler-sdx.jar -DgroupId=com.cloud.com.citrix -DartifactId=netscaler-sdx -Dversion=1.0 -Dpackaging=jar
+# From http://support.netapp.com/ (not available online, contact your support representative)
+# Version: 4.0
+if [ -e cloud-manageontap.jar ]; then mv cloud-manageontap.jar manageontap.jar; fi
+mvn install:install-file -Dfile=manageontap.jar -DgroupId=com.cloud.com.netapp -DartifactId=manageontap -Dversion=4.0 -Dpackaging=jar
+
# From https://my.vmware.com/group/vmware/get-download?downloadGroup=VSP510-WEBSDK-510
# Version: 5.1, Release-date: 2012-09-10, Build: 774886
mvn install:install-file -Dfile=vim25_51.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-vim25 -Dversion=5.1 -Dpackaging=jar
@@ -45,3 +50,6 @@ mvn install:install-file -Dfile=pbm_65.jar -DgroupId=com.cloud.com.vmware -Darti
# From https://my.vmware.com/group/vmware/downloads/get-download?downloadGroup=VS-MGMT-SDK67
mvn install:install-file -Dfile=pbm_67.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-pbm -Dversion=6.7 -Dpackaging=jar
+
+# From https://github.com/Juniper/netconf-java/releases
+mvn install:install-file -Dfile=Netconf.jar -DgroupId=net.juniper.netconf -DartifactId=netconf-juniper -Dversion=1.0 -Dpackaging=jar
diff --git a/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java b/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java
index 61489e5f7c89..2cdd2469aed0 100644
--- a/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java
+++ b/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java
@@ -182,6 +182,9 @@ void transferPortableIP(long ipAddrId, long currentNetworkId, long newNetworkId)
PublicIp assignDedicateIpAddress(Account owner, Long guestNtwkId, Long vpcId, long dcId, boolean isSourceNat)
throws ConcurrentOperationException, InsufficientAddressCapacityException;
+ PublicIp assignDedicateIpAddress(Account owner, Long guestNtwkId, Long vpcId, long dcId, boolean isSourceNat, String ignoreIp)
+ throws ConcurrentOperationException, InsufficientAddressCapacityException;
+
IpAddress allocateIp(Account ipOwner, boolean isSystem, Account caller, long callerId, DataCenter zone, Boolean displayIp, String ipaddress)
throws ConcurrentOperationException, ResourceAllocationException, InsufficientAddressCapacityException;
diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java
index 40592bea86b8..aa58ce359a27 100644
--- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java
+++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java
@@ -288,7 +288,9 @@ public int registerForInitialConnects(final StartupCommandProcessor creator, fin
@Override
public void unregisterForHostEvents(final int id) {
s_logger.debug("Deregistering " + id);
- _hostMonitors.remove(id);
+ synchronized (_hostMonitors) {
+ _hostMonitors.remove(id);
+ }
}
private AgentControlAnswer handleControlCommand(final AgentAttache attache, final AgentControlCommand cmd) {
@@ -537,55 +539,60 @@ public void removeAgent(final AgentAttache attache, final Status nextState) {
if (removed != null) {
removed.disconnect(nextState);
}
-
- for (final Pair monitor : _hostMonitors) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName());
+ synchronized (_hostMonitors) {
+ for (final Pair monitor : _hostMonitors) {
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName());
+ }
+ monitor.second().processDisconnect(hostId, nextState);
}
- monitor.second().processDisconnect(hostId, nextState);
}
}
@Override
public void notifyMonitorsOfNewlyAddedHost(long hostId) {
- for (final Pair monitor : _hostMonitors) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Sending host added to listener: " + monitor.second().getClass().getSimpleName());
- }
+ synchronized (_hostMonitors) {
+ for (final Pair monitor : _hostMonitors) {
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("Sending host added to listener: " + monitor.second().getClass().getSimpleName());
+ }
- monitor.second().processHostAdded(hostId);
+ monitor.second().processHostAdded(hostId);
+ }
}
}
protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, final StartupCommand[] cmd, final boolean forRebalance) throws ConnectionException {
final long hostId = attache.getId();
final HostVO host = _hostDao.findById(hostId);
- for (final Pair monitor : _hostMonitors) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Sending Connect to listener: " + monitor.second().getClass().getSimpleName());
- }
- for (int i = 0; i < cmd.length; i++) {
- try {
- monitor.second().processConnect(host, cmd[i], forRebalance);
- } catch (final Exception e) {
- if (e instanceof ConnectionException) {
- final ConnectionException ce = (ConnectionException)e;
- if (ce.isSetupError()) {
- s_logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage());
- handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true);
- throw ce;
- } else {
- s_logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId + " due to " + e.getMessage());
+ synchronized (_hostMonitors) {
+ for (final Pair monitor : _hostMonitors) {
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("Sending Connect to listener: " + monitor.second().getClass().getSimpleName());
+ }
+ for (int i = 0; i < cmd.length; i++) {
+ try {
+ monitor.second().processConnect(host, cmd[i], forRebalance);
+ } catch (final Exception e) {
+ if (e instanceof ConnectionException) {
+ final ConnectionException ce = (ConnectionException) e;
+ if (ce.isSetupError()) {
+ s_logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage());
+ handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true);
+ throw ce;
+ } else {
+ s_logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId + " due to " + e.getMessage());
+ handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true);
+ return attache;
+ }
+ } else if (e instanceof HypervisorVersionChangedException) {
handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true);
- return attache;
+ throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
+ } else {
+ s_logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage(), e);
+ handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true);
+ throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
}
- } else if (e instanceof HypervisorVersionChangedException) {
- handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true);
- throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
- } else {
- s_logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage(), e);
- handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true);
- throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
}
}
}
@@ -1034,23 +1041,27 @@ public void reconnect(final long hostId) throws AgentUnavailableException {
@Override
public void notifyMonitorsOfHostAboutToBeRemoved(long hostId) {
- for (final Pair monitor : _hostMonitors) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Sending host about to be removed to listener: " + monitor.second().getClass().getSimpleName());
- }
+ synchronized (_hostMonitors) {
+ for (final Pair monitor : _hostMonitors) {
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("Sending host about to be removed to listener: " + monitor.second().getClass().getSimpleName());
+ }
- monitor.second().processHostAboutToBeRemoved(hostId);
+ monitor.second().processHostAboutToBeRemoved(hostId);
+ }
}
}
@Override
public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) {
- for (final Pair monitor : _hostMonitors) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Sending host removed to listener: " + monitor.second().getClass().getSimpleName());
- }
+ synchronized (_hostMonitors) {
+ for (final Pair monitor : _hostMonitors) {
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("Sending host removed to listener: " + monitor.second().getClass().getSimpleName());
+ }
- monitor.second().processHostRemoved(hostId, clusterId);
+ monitor.second().processHostRemoved(hostId, clusterId);
+ }
}
}
diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
index a1831a5cacdb..669333bbc7c6 100755
--- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -121,6 +121,7 @@
import com.cloud.agent.api.UnPlugNicAnswer;
import com.cloud.agent.api.UnPlugNicCommand;
import com.cloud.agent.api.UnregisterVMCommand;
+import com.cloud.agent.api.baremetal.DestroyCommand;
import com.cloud.agent.api.routing.NetworkElementCommand;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.to.DpdkTO;
@@ -486,7 +487,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) throws
}
String rootVolumeName = String.format("ROOT-%s", vmFinal.getId());
- if (template.getFormat() == ImageFormat.ISO) {
+ if (template.getFormat() == ImageFormat.ISO || template.getFormat() == ImageFormat.PXEBOOT) {
volumeMgr.allocateRawVolume(Type.ROOT, rootVolumeName, rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(),
rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vmFinal, template, owner, null);
} else if (template.getFormat() == ImageFormat.BAREMETAL) {
@@ -1013,6 +1014,11 @@ public void orchestrateStart(final String vmUuid, final Map() {
@Override
public void doInTransactionWithoutResult(final TransactionStatus status) throws CloudRuntimeException {
@@ -2200,6 +2234,29 @@ private void deleteVMSnapshots(VMInstanceVO vm, boolean expunge) {
}
}
+ private VirtualMachineTO toVmTOforBaremetal(VMInstanceVO vm) {
+ VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
+ VirtualMachineTO vmTO = toVmTO(profile);
+ List nicTOs = new ArrayList();
+
+ for (NicVO nicVO: _nicsDao.listByVmId(vm.getId())) {
+ NicTO nicTO = new NicTO();
+ nicTO.setMac(nicVO.getMacAddress());
+ nicTO.setDefaultNic(nicVO.isDefaultNic());
+ nicTO.setBroadcastUri(nicVO.getBroadcastUri());
+ Network nw = _networkDao.findById(nicVO.getNetworkId());
+ if (nw != null) {
+ nicTO.setNetworkUuid(nw.getUuid());
+ }
+
+ nicTOs.add(nicTO);
+ }
+
+ vmTO.setNics(nicTOs.toArray(new NicTO[nicTOs.size()]));
+
+ return vmTO;
+ }
+
protected boolean checkVmOnHost(final VirtualMachine vm, final long hostId) throws AgentUnavailableException, OperationTimedoutException {
final Answer answer = _agentMgr.send(hostId, new CheckVirtualMachineCommand(vm.getInstanceName()));
if (answer == null || !answer.getResult()) {
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index bf21e62c6e59..476c0880dd19 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -570,10 +570,9 @@ public VolumeInfo createVolumeFromSnapshot(Volume volume, Snapshot snapshot, Use
}
protected DiskProfile createDiskCharacteristics(VolumeInfo volumeInfo, VirtualMachineTemplate template, DataCenter dc, DiskOffering diskOffering) {
- if (volumeInfo.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat()) {
+ if (volumeInfo.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat() && ImageFormat.PXEBOOT != template.getFormat()) {
String templateToString = getReflectOnlySelectedFields(template);
String zoneToString = getReflectOnlySelectedFields(dc);
-
TemplateDataStoreVO ss = _vmTemplateStoreDao.findByTemplateZoneDownloadStatus(template.getId(), dc.getId(), VMTemplateStorageResourceAssoc.Status.DOWNLOADED);
if (ss == null) {
throw new CloudRuntimeException(String.format("Template [%s] has not been completely downloaded to the zone [%s].",
@@ -634,7 +633,8 @@ public VolumeInfo createVolume(VolumeInfo volumeInfo, VirtualMachine vm, Virtual
StoragePool pool = null;
DiskProfile dskCh = null;
- if (volumeInfo.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat()) {
+ if (volumeInfo.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat() &&
+ !ImageFormat.PXEBOOT.equals(template.getFormat())) {
dskCh = createDiskCharacteristics(volumeInfo, template, dc, diskOffering);
storageMgr.setDiskProfileThrottling(dskCh, offering, diskOffering);
} else {
diff --git a/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java b/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java
index 95e3693a99c5..dfc0d271b91c 100644
--- a/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java
+++ b/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java
@@ -69,11 +69,18 @@ public class RemoteAccessVpnVO implements RemoteAccessVpn {
@Column(name = "display", updatable = true, nullable = false)
protected boolean display = true;
+ @Column(name = "vpn_type")
+ private String vpnType;
+
+ @Encrypt
+ @Column(name = "ca_certificate", length = 8191)
+ private String caCertificate;
+
public RemoteAccessVpnVO() {
uuid = UUID.randomUUID().toString();
}
- public RemoteAccessVpnVO(long accountId, long domainId, Long networkId, long publicIpId, Long vpcId, String localIp, String ipRange, String presharedKey) {
+ public RemoteAccessVpnVO(long accountId, long domainId, Long networkId, long publicIpId, Long vpcId, String localIp, String ipRange, String presharedKey, String vpnType) {
this.accountId = accountId;
serverAddressId = publicIpId;
this.ipRange = ipRange;
@@ -84,6 +91,7 @@ public RemoteAccessVpnVO(long accountId, long domainId, Long networkId, long pub
state = State.Added;
uuid = UUID.randomUUID().toString();
this.vpcId = vpcId;
+ this.vpnType = vpnType;
}
@Override
@@ -123,6 +131,15 @@ public void setIpsecPresharedKey(String ipsecPresharedKey) {
this.ipsecPresharedKey = ipsecPresharedKey;
}
+ @Override
+ public String getCaCertificate() {
+ return caCertificate;
+ }
+
+ public void setCaCertificate(String caCertificate) {
+ this.caCertificate = caCertificate;
+ }
+
@Override
public String getLocalIp() {
return localIp;
@@ -166,6 +183,15 @@ public boolean isDisplay() {
return display;
}
+ public void setVpnType(String vpnType) {
+ this.vpnType = vpnType;
+ }
+
+ @Override
+ public String getVpnType() {
+ return vpnType;
+ }
+
@Override
public Class> getEntityType() {
return RemoteAccessVpn.class;
diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java
index 83034b3fdbe0..636713f4e705 100644
--- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java
+++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java
@@ -68,6 +68,9 @@ public class VpcVO implements Vpc {
@Column(name = "vpc_offering_id")
long vpcOfferingId;
+ @Column(name ="network_boot_ip")
+ String networkBootIp;
+
@Column(name = GenericDao.REMOVED_COLUMN)
Date removed;
@@ -98,7 +101,7 @@ public VpcVO() {
public VpcVO(final long zoneId, final String name, final String displayText, final long accountId, final long domainId,
final long vpcOffId, final String cidr, final String networkDomain, final boolean useDistributedRouter,
- final boolean regionLevelVpc, final boolean isRedundant) {
+ final boolean regionLevelVpc, final boolean isRedundant, final String networkBootIp) {
this.zoneId = zoneId;
this.name = name;
this.displayText = displayText;
@@ -112,6 +115,7 @@ public VpcVO(final long zoneId, final String name, final String displayText, fin
usesDistributedRouter = useDistributedRouter;
this.regionLevelVpc = regionLevelVpc;
redundant = isRedundant;
+ this.networkBootIp = networkBootIp;
}
@Override
@@ -167,6 +171,10 @@ public void setVpcOfferingId(final long vpcOfferingId) {
this.vpcOfferingId = vpcOfferingId;
}
+ public String getNetworkBootIp() { return networkBootIp; }
+
+ public void setNetworkBootIp(String networkBootIp) { this.networkBootIp = networkBootIp; }
+
public Date getRemoved() {
return removed;
}
diff --git a/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java b/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java
index bfdbba2a6d3b..a9c3c56499d8 100644
--- a/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java
+++ b/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java
@@ -130,6 +130,18 @@ public class DiskOfferingVO implements DiskOffering {
private Long iopsWriteRateMaxLength;
+ @Column(name = "min_iops_per_gb")
+ Long minIopsPerGb;
+
+ @Column(name = "max_iops_per_gb")
+ Long maxIopsPerGb;
+
+ @Column(name = "highest_min_iops")
+ Long highestMinIops;
+
+ @Column(name = "highest_max_iops")
+ Long highestMaxIops;
+
@Column(name = "cache_mode", updatable = true, nullable = false)
@Enumerated(value = EnumType.STRING)
private DiskCacheMode cacheMode;
@@ -558,6 +570,46 @@ public void setIopsWriteRateMaxLength(Long iopsWriteRateMaxLength) {
this.iopsWriteRateMaxLength = iopsWriteRateMaxLength;
}
+ @Override
+ public Long getMinIopsPerGb() {
+ return this.minIopsPerGb;
+ }
+
+ @Override
+ public void setMinIopsPerGb(Long minIopsPerGb) {
+ this.minIopsPerGb = minIopsPerGb;
+ }
+
+ @Override
+ public Long getMaxIopsPerGb() {
+ return maxIopsPerGb;
+ }
+
+ @Override
+ public void setMaxIopsPerGb(Long maxIopsPerGb) {
+ this.maxIopsPerGb = maxIopsPerGb;
+ }
+
+ @Override
+ public Long getHighestMinIops() {
+ return this.highestMinIops;
+ }
+
+ @Override
+ public void setHighestMinIops(Long highestMinIops) {
+ this.highestMinIops = highestMinIops;
+ }
+
+ @Override
+ public Long getHighestMaxIops() {
+ return this.highestMaxIops;
+ }
+
+ @Override
+ public void setHighestMaxIops(Long highestMaxIops) {
+ this.highestMaxIops = highestMaxIops;
+ }
+
@Override
public void setHypervisorSnapshotReserve(Integer hypervisorSnapshotReserve) {
this.hypervisorSnapshotReserve = hypervisorSnapshotReserve;
diff --git a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java
index 8f66da052e90..817a8093128a 100644
--- a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java
+++ b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java
@@ -71,6 +71,9 @@ public class VMTemplateVO implements VirtualMachineTemplate {
@Column(name = "hvm")
private boolean requiresHvm;
+ @Column(name = "boot_filename")
+ private String bootFilename;
+
@Column(name = "bits")
private int bits;
@@ -227,6 +230,37 @@ public VMTemplateVO(long id, String name, ImageFormat format, boolean isPublic,
this.deployAsIs = deployAsIs;
}
+ public VMTemplateVO(long id, String name, ImageFormat format, boolean isPublic, boolean featured,
+ boolean isExtractable, TemplateType type, String url, boolean requiresHvm, int bits, long accountId,
+ String cksum, String displayText, boolean enablePassword, long guestOSId, boolean bootable,
+ HypervisorType hyperType, String templateTag, Map details, boolean sshKeyEnabled,
+ boolean isDynamicallyScalable, boolean directDownload, boolean deployAsIs, String bootFilename) {
+ this(id,
+ name,
+ format,
+ isPublic,
+ featured,
+ isExtractable,
+ type,
+ url,
+ requiresHvm,
+ bits,
+ accountId,
+ cksum,
+ displayText,
+ enablePassword,
+ guestOSId,
+ bootable,
+ hyperType,
+ templateTag,
+ details,
+ sshKeyEnabled,
+ isDynamicallyScalable,
+ directDownload,
+ deployAsIs);
+ this.bootFilename = bootFilename;
+ }
+
public static VMTemplateVO createPreHostIso(Long id, String uniqueName, String name, ImageFormat format, boolean isPublic, boolean featured, TemplateType type,
String url, Date created, boolean requiresHvm, int bits, long accountId, String cksum, String displayText, boolean enablePassword, long guestOSId,
boolean bootable, HypervisorType hyperType) {
@@ -410,6 +444,15 @@ public boolean isRequiresHvm() {
return requiresHvm;
}
+ @Override
+ public String getBootFilename() {
+ return bootFilename;
+ }
+
+ public void setBootFilename(String bootFilename) {
+ this.bootFilename = bootFilename;
+ }
+
public void setRequiresHvm(boolean value) {
requiresHvm = value;
}
diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDao.java
index 83e19b17e258..fec6f18e05d1 100644
--- a/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDao.java
+++ b/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDao.java
@@ -16,6 +16,8 @@
// under the License.
package com.cloud.storage.dao;
+import java.util.Map;
+
import com.cloud.storage.GuestOSVO;
import com.cloud.utils.db.GenericDao;
@@ -24,4 +26,6 @@ public interface GuestOSDao extends GenericDao {
GuestOSVO listByDisplayName(String displayName);
GuestOSVO findByCategoryIdAndDisplayNameOrderByCreatedDesc(long categoryId, String displayName);
+
+ Map loadDetails(GuestOSVO guestOS);
}
diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDaoImpl.java
index 68da2b92acb5..ff06f95ed2b9 100644
--- a/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDaoImpl.java
@@ -16,9 +16,12 @@
// under the License.
package com.cloud.storage.dao;
-
import java.util.List;
+import java.util.Map;
+
+import javax.inject.Inject;
+import org.apache.cloudstack.resourcedetail.dao.GuestOsDetailsDao;
import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
@@ -31,6 +34,9 @@
@Component
public class GuestOSDaoImpl extends GenericDaoBase implements GuestOSDao {
+ @Inject
+ private GuestOsDetailsDao _guestOsDetailsDao;
+
protected final SearchBuilder Search;
public GuestOSDaoImpl() {
@@ -62,4 +68,8 @@ public GuestOSVO findByCategoryIdAndDisplayNameOrderByCreatedDesc(long categoryI
}
return null;
}
+
+ public Map loadDetails(GuestOSVO guestOS) {
+ return _guestOsDetailsDao.listDetailsKeyPairs(guestOS.getId());
+ }
}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java
index 2d351f52fe4f..24b2861b3a99 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java
@@ -29,11 +29,6 @@
import javax.inject.Inject;
-import com.cloud.upgrade.dao.Upgrade41510to41520;
-import com.cloud.upgrade.dao.Upgrade41600to41610;
-import com.cloud.upgrade.dao.Upgrade41610to41700;
-import com.cloud.upgrade.dao.Upgrade41700to41710;
-import com.cloud.upgrade.dao.Upgrade41710to41800;
import org.apache.cloudstack.utils.CloudStackVersion;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
@@ -65,18 +60,51 @@
import com.cloud.upgrade.dao.Upgrade307to410;
import com.cloud.upgrade.dao.Upgrade30to301;
import com.cloud.upgrade.dao.Upgrade40to41;
-import com.cloud.upgrade.dao.Upgrade41000to41100;
+import com.cloud.upgrade.dao.Upgrade41000to4100226;
+import com.cloud.upgrade.dao.Upgrade4100226to4100227;
+import com.cloud.upgrade.dao.Upgrade4100227to4100228;
+import com.cloud.upgrade.dao.Upgrade4100228to4100229;
+import com.cloud.upgrade.dao.Upgrade4100229to4100230;
+import com.cloud.upgrade.dao.Upgrade4100230to4100231;
+import com.cloud.upgrade.dao.Upgrade4100231to4100232;
+import com.cloud.upgrade.dao.Upgrade4100232to4100233;
+import com.cloud.upgrade.dao.Upgrade4100233to4100234;
+import com.cloud.upgrade.dao.Upgrade4100234to4100235;
+import com.cloud.upgrade.dao.Upgrade4100235to4100236;
+import com.cloud.upgrade.dao.Upgrade4100236to4100237;
+import com.cloud.upgrade.dao.Upgrade4100237to4100238;
+import com.cloud.upgrade.dao.Upgrade4100238to4100239;
+import com.cloud.upgrade.dao.Upgrade4100239to4100240;
+import com.cloud.upgrade.dao.Upgrade4100240to41100;
import com.cloud.upgrade.dao.Upgrade410to420;
import com.cloud.upgrade.dao.Upgrade41100to41110;
import com.cloud.upgrade.dao.Upgrade41110to41120;
import com.cloud.upgrade.dao.Upgrade41120to41130;
-import com.cloud.upgrade.dao.Upgrade41120to41200;
-import com.cloud.upgrade.dao.Upgrade41200to41300;
+import com.cloud.upgrade.dao.Upgrade41130to41200;
+import com.cloud.upgrade.dao.Upgrade41200to41201;
+import com.cloud.upgrade.dao.Upgrade41201to41202;
+import com.cloud.upgrade.dao.Upgrade41202to41203;
+import com.cloud.upgrade.dao.Upgrade41203to41204;
+import com.cloud.upgrade.dao.Upgrade41204to41205;
+import com.cloud.upgrade.dao.Upgrade41205to41206;
+import com.cloud.upgrade.dao.Upgrade41206to41207;
+import com.cloud.upgrade.dao.Upgrade41207to41208;
+import com.cloud.upgrade.dao.Upgrade41208to41209;
+import com.cloud.upgrade.dao.Upgrade41209to412010;
+import com.cloud.upgrade.dao.Upgrade41210to412011;
+import com.cloud.upgrade.dao.Upgrade41211to412012;
+import com.cloud.upgrade.dao.Upgrade41212to412013;
+import com.cloud.upgrade.dao.Upgrade412025to41300;
import com.cloud.upgrade.dao.Upgrade41300to41310;
import com.cloud.upgrade.dao.Upgrade41310to41400;
import com.cloud.upgrade.dao.Upgrade41400to41500;
import com.cloud.upgrade.dao.Upgrade41500to41510;
+import com.cloud.upgrade.dao.Upgrade41510to41520;
import com.cloud.upgrade.dao.Upgrade41520to41600;
+import com.cloud.upgrade.dao.Upgrade41600to41610;
+import com.cloud.upgrade.dao.Upgrade41610to41700;
+import com.cloud.upgrade.dao.Upgrade41700to41710;
+import com.cloud.upgrade.dao.Upgrade41710to41800;
import com.cloud.upgrade.dao.Upgrade420to421;
import com.cloud.upgrade.dao.Upgrade421to430;
import com.cloud.upgrade.dao.Upgrade430to440;
@@ -192,12 +220,40 @@ public DatabaseUpgradeChecker() {
.next("4.9.2.0" , new Upgrade4920to4930())
.next("4.9.3.0" , new Upgrade4930to41000())
.next("4.9.3.1" , new Upgrade4930to41000())
- .next("4.10.0.0", new Upgrade41000to41100())
+ .next("4.10.0.0", new Upgrade41000to4100226())
+ .next("4.10.0.226", new Upgrade4100226to4100227())
+ .next("4.10.0.227", new Upgrade4100227to4100228())
+ .next("4.10.0.228", new Upgrade4100228to4100229())
+ .next("4.10.0.229", new Upgrade4100229to4100230())
+ .next("4.10.0.230", new Upgrade4100230to4100231())
+ .next("4.10.0.231", new Upgrade4100231to4100232())
+ .next("4.10.0.232", new Upgrade4100232to4100233())
+ .next("4.10.0.233", new Upgrade4100233to4100234())
+ .next("4.10.0.234", new Upgrade4100234to4100235())
+ .next("4.10.0.235", new Upgrade4100235to4100236())
+ .next("4.10.0.236", new Upgrade4100236to4100237())
+ .next("4.10.0.237", new Upgrade4100237to4100238())
+ .next("4.10.0.238", new Upgrade4100238to4100239())
+ .next("4.10.0.239", new Upgrade4100239to4100240())
+ .next("4.10.0.240", new Upgrade4100240to41100())
.next("4.11.0.0", new Upgrade41100to41110())
.next("4.11.1.0", new Upgrade41110to41120())
.next("4.11.2.0", new Upgrade41120to41130())
- .next("4.11.3.0", new Upgrade41120to41200())
- .next("4.12.0.0", new Upgrade41200to41300())
+ .next("4.11.3.0", new Upgrade41130to41200())
+ .next("4.12.0.0", new Upgrade41200to41201())
+ .next("4.12.0.1", new Upgrade41201to41202())
+ .next("4.12.0.2", new Upgrade41202to41203())
+ .next("4.12.0.3", new Upgrade41203to41204())
+ .next("4.12.0.4", new Upgrade41204to41205())
+ .next("4.12.0.5", new Upgrade41205to41206())
+ .next("4.12.0.6", new Upgrade41206to41207())
+ .next("4.12.0.7", new Upgrade41207to41208())
+ .next("4.12.0.8", new Upgrade41208to41209())
+ .next("4.12.0.9", new Upgrade41209to412010())
+ .next("4.12.0.10", new Upgrade41210to412011())
+ .next("4.12.0.11", new Upgrade41211to412012())
+ .next("4.12.0.12", new Upgrade41212to412013())
+ .next("4.12.0.25", new Upgrade412025to41300())
.next("4.13.0.0", new Upgrade41300to41310())
.next("4.13.1.0", new Upgrade41310to41400())
.next("4.14.0.0", new Upgrade41400to41500())
@@ -286,7 +342,7 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer
for (DbUpgrade upgrade : upgrades) {
VersionVO version;
s_logger.debug("Running upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade
- .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion());
+ .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion());
TransactionLegacy txn = TransactionLegacy.open("Upgrade");
txn.start();
try {
@@ -323,7 +379,7 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer
txn = TransactionLegacy.open("Cleanup");
try {
s_logger.info("Cleanup upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade
- .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion());
+ .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion());
txn.start();
Connection conn;
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to4100226.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to4100226.java
new file mode 100644
index 000000000000..819ffd3e7374
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to4100226.java
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade41000to4100226 implements DbUpgrade {
+
+ final static Logger LOG = Logger.getLogger(Upgrade41000to4100226.class); // unused here; kept consistent with the other upgrade steps
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.0", "4.10.0.226"}; // {fromVersion, toVersion} bounds this step upgrades between
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.226"; // DB version recorded once this step completes
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false; // this step cannot be applied as a rolling upgrade
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-41000to4100226.sql"; // schema SQL applied during the prepare phase
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) { // intentionally empty: the SQL scripts do all the work
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-41000to4100226-cleanup.sql"; // SQL run during the cleanup phase after the upgrade
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100226to4100227.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100226to4100227.java
new file mode 100644
index 000000000000..8d189f3a5e78
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100226to4100227.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade4100226to4100227 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100226to4100227.class); // unused here; kept consistent with the other upgrade steps
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.226", "4.10.0.227"}; // {fromVersion, toVersion} bounds this step upgrades between
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.227"; // DB version recorded once this step completes
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false; // this step cannot be applied as a rolling upgrade
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-4100226to4100227.sql"; // schema SQL applied during the prepare phase
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) { // intentionally empty: the SQL scripts do all the work
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100226to4100227-cleanup.sql"; // SQL run during the cleanup phase after the upgrade
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100227to4100228.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100227to4100228.java
new file mode 100644
index 000000000000..ca5039aef949
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100227to4100228.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade4100227to4100228 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100227to4100228.class); // unused here; kept consistent with the other upgrade steps
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.227", "4.10.0.228"}; // {fromVersion, toVersion} bounds this step upgrades between
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.228"; // DB version recorded once this step completes
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false; // this step cannot be applied as a rolling upgrade
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-4100227to4100228.sql"; // schema SQL applied during the prepare phase
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) { // intentionally empty: the SQL scripts do all the work
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100227to4100228-cleanup.sql"; // SQL run during the cleanup phase after the upgrade
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100228to4100229.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100228to4100229.java
new file mode 100644
index 000000000000..8fd923cf0b75
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100228to4100229.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade4100228to4100229 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100228to4100229.class); // unused here; kept consistent with the other upgrade steps
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.228", "4.10.0.229"}; // {fromVersion, toVersion} bounds this step upgrades between
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.229"; // DB version recorded once this step completes
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false; // this step cannot be applied as a rolling upgrade
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-4100228to4100229.sql"; // schema SQL applied during the prepare phase
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) { // intentionally empty: the SQL scripts do all the work
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100228to4100229-cleanup.sql"; // SQL run during the cleanup phase after the upgrade
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100229to4100230.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100229to4100230.java
new file mode 100644
index 000000000000..41a9d5f9cb34
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100229to4100230.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade4100229to4100230 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100229to4100230.class); // unused here; kept consistent with the other upgrade steps
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.229", "4.10.0.230"}; // {fromVersion, toVersion} bounds this step upgrades between
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.230"; // DB version recorded once this step completes
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false; // this step cannot be applied as a rolling upgrade
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-4100229to4100230.sql"; // schema SQL applied during the prepare phase
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) { // intentionally empty: the SQL scripts do all the work
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100229to4100230-cleanup.sql"; // SQL run during the cleanup phase after the upgrade
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100230to4100231.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100230to4100231.java
new file mode 100644
index 000000000000..411c57ab5ebf
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100230to4100231.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade4100230to4100231 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100230to4100231.class); // unused here; kept consistent with the other upgrade steps
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.230", "4.10.0.231"}; // {fromVersion, toVersion} bounds this step upgrades between
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.231"; // DB version recorded once this step completes
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false; // this step cannot be applied as a rolling upgrade
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-4100230to4100231.sql"; // schema SQL applied during the prepare phase
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) { // intentionally empty: the SQL scripts do all the work
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100230to4100231-cleanup.sql"; // SQL run during the cleanup phase after the upgrade
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100231to4100232.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100231to4100232.java
new file mode 100644
index 000000000000..960537b579d2
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100231to4100232.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade4100231to4100232 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100231to4100232.class); // unused here; kept consistent with the other upgrade steps
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.231", "4.10.0.232"}; // {fromVersion, toVersion} bounds this step upgrades between
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.232"; // DB version recorded once this step completes
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false; // this step cannot be applied as a rolling upgrade
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-4100231to4100232.sql"; // schema SQL applied during the prepare phase
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) { // intentionally empty: the SQL scripts do all the work
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100231to4100232-cleanup.sql"; // SQL run during the cleanup phase after the upgrade
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100232to4100233.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100232to4100233.java
new file mode 100644
index 000000000000..7e4c7b755e99
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100232to4100233.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade4100232to4100233 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100232to4100233.class); // unused here; kept consistent with the other upgrade steps
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.232", "4.10.0.233"}; // {fromVersion, toVersion} bounds this step upgrades between
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.233"; // DB version recorded once this step completes
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false; // this step cannot be applied as a rolling upgrade
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-4100232to4100233.sql"; // schema SQL applied during the prepare phase
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) { // intentionally empty: the SQL scripts do all the work
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100232to4100233-cleanup.sql"; // SQL run during the cleanup phase after the upgrade
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100233to4100234.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100233to4100234.java
new file mode 100644
index 000000000000..be595c68fda6
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100233to4100234.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade4100233to4100234 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100233to4100234.class); // unused here; kept consistent with the other upgrade steps
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.233", "4.10.0.234"}; // {fromVersion, toVersion} bounds this step upgrades between
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.234"; // DB version recorded once this step completes
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false; // this step cannot be applied as a rolling upgrade
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-4100233to4100234.sql"; // schema SQL applied during the prepare phase
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) { // intentionally empty: the SQL scripts do all the work
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100233to4100234-cleanup.sql"; // SQL run during the cleanup phase after the upgrade
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100234to4100235.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100234to4100235.java
new file mode 100644
index 000000000000..19d5b936088e
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100234to4100235.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade4100234to4100235 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100234to4100235.class); // unused here; kept consistent with the other upgrade steps
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.234", "4.10.0.235"}; // {fromVersion, toVersion} bounds this step upgrades between
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.235"; // DB version recorded once this step completes
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false; // this step cannot be applied as a rolling upgrade
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-4100234to4100235.sql"; // schema SQL applied during the prepare phase
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) { // intentionally empty: the SQL scripts do all the work
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100234to4100235-cleanup.sql"; // SQL run during the cleanup phase after the upgrade
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100235to4100236.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100235to4100236.java
new file mode 100644
index 000000000000..3469c6397bbb
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100235to4100236.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade4100235to4100236 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100235to4100236.class); // unused here; kept consistent with the other upgrade steps
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.235", "4.10.0.236"}; // {fromVersion, toVersion} bounds this step upgrades between
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.236"; // DB version recorded once this step completes
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false; // this step cannot be applied as a rolling upgrade
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-4100235to4100236.sql"; // schema SQL applied during the prepare phase
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) { // intentionally empty: the SQL scripts do all the work
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100235to4100236-cleanup.sql"; // SQL run during the cleanup phase after the upgrade
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100236to4100237.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100236to4100237.java
new file mode 100644
index 000000000000..dca9ec9fa554
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100236to4100237.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade4100236to4100237 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100236to4100237.class); // unused here; kept consistent with the other upgrade steps
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.236", "4.10.0.237"}; // {fromVersion, toVersion} bounds this step upgrades between
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.237"; // DB version recorded once this step completes
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false; // this step cannot be applied as a rolling upgrade
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-4100236to4100237.sql"; // schema SQL applied during the prepare phase
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) { // intentionally empty: the SQL scripts do all the work
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100236to4100237-cleanup.sql"; // SQL run during the cleanup phase after the upgrade
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile); // fail fast if the script is not packaged on the classpath
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100237to4100238.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100237to4100238.java
new file mode 100644
index 000000000000..4684a3b26820
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100237to4100238.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+// DB schema upgrade step: 4.10.0.237 -> 4.10.0.238. Supplies the prepare and
+// cleanup SQL scripts; no Java-side data migration is performed.
+public class Upgrade4100237to4100238 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100237to4100238.class);
+
+ // Inclusive [from, to] version pair this step upgrades between.
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.237", "4.10.0.238"};
+ }
+
+ // Version string recorded once this step has been applied.
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.238";
+ }
+
+ // Rolling upgrades are not supported by this step.
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ // Loads the upgrade SQL from the classpath; fails fast when missing.
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-4100237to4100238.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ // Intentionally empty: all changes for this step live in the SQL scripts.
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ // Loads the post-upgrade cleanup SQL from the classpath.
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100237to4100238-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100238to4100239.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100238to4100239.java
new file mode 100644
index 000000000000..9c82fbc06988
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100238to4100239.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+// DB schema upgrade step: 4.10.0.238 -> 4.10.0.239. Supplies the prepare and
+// cleanup SQL scripts; no Java-side data migration is performed.
+public class Upgrade4100238to4100239 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100238to4100239.class);
+
+ // Inclusive [from, to] version pair this step upgrades between.
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.238", "4.10.0.239"};
+ }
+
+ // Version string recorded once this step has been applied.
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.239";
+ }
+
+ // Rolling upgrades are not supported by this step.
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ // Loads the upgrade SQL from the classpath; fails fast when missing.
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-4100238to4100239.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ // Intentionally empty: all changes for this step live in the SQL scripts.
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ // Loads the post-upgrade cleanup SQL from the classpath.
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100238to4100239-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100239to4100240.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100239to4100240.java
new file mode 100644
index 000000000000..467414514282
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100239to4100240.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+// DB schema upgrade step: 4.10.0.239 -> 4.10.0.240. Supplies the prepare and
+// cleanup SQL scripts; no Java-side data migration is performed.
+public class Upgrade4100239to4100240 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100239to4100240.class);
+
+ // Inclusive [from, to] version pair this step upgrades between.
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.10.0.239", "4.10.0.240"};
+ }
+
+ // Version string recorded once this step has been applied.
+ @Override
+ public String getUpgradedVersion() {
+ return "4.10.0.240";
+ }
+
+ // Rolling upgrades are not supported by this step.
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ // Loads the upgrade SQL from the classpath; fails fast when missing.
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-4100239to4100240.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ // Intentionally empty: all changes for this step live in the SQL scripts.
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ // Loads the post-upgrade cleanup SQL from the classpath.
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100239to4100240-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to41100.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100240to41100.java
similarity index 94%
rename from engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to41100.java
rename to engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100240to41100.java
index 3900cf0bf82d..457a36e7c9ee 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to41100.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100240to41100.java
@@ -14,7 +14,6 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
-
package com.cloud.upgrade.dao;
import java.io.InputStream;
@@ -30,13 +29,12 @@
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.exception.CloudRuntimeException;
-public class Upgrade41000to41100 implements DbUpgrade {
-
- final static Logger LOG = Logger.getLogger(Upgrade41000to41100.class);
+public class Upgrade4100240to41100 implements DbUpgrade {
+ final static Logger LOG = Logger.getLogger(Upgrade4100240to41100.class);
@Override
public String[] getUpgradableVersionRange() {
- return new String[] {"4.10.0.0", "4.11.0.0"};
+ return new String[] {"4.10.0.240", "4.11.0.0"};
}
@Override
@@ -51,7 +49,7 @@ public boolean supportsRollingUpgrade() {
@Override
public InputStream[] getPrepareScripts() {
- final String scriptFile = "META-INF/db/schema-41000to41100.sql";
+ final String scriptFile = "META-INF/db/schema-4100240to41100.sql";
final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
if (script == null) {
throw new CloudRuntimeException("Unable to find " + scriptFile);
@@ -66,6 +64,17 @@ public void performDataMigration(Connection conn) {
validateUserDataInBase64(conn);
}
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-4100240to41100-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
private void checkAndEnableDynamicRoles(final Connection conn) {
final Map apiMap = PropertiesUtil.processConfigFile(new String[] { "commands.properties" });
if (apiMap == null || apiMap.isEmpty()) {
@@ -120,15 +129,4 @@ private void validateUserDataInBase64(Connection conn) {
LOG.debug("Done validating base64 content of user data");
}
}
-
- @Override
- public InputStream[] getCleanupScripts() {
- final String scriptFile = "META-INF/db/schema-41000to41100-cleanup.sql";
- final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
- if (script == null) {
- throw new CloudRuntimeException("Unable to find " + scriptFile);
- }
-
- return new InputStream[] {script};
- }
}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41100to41110.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41100to41110.java
index 3703040771bd..4ad08409a0c8 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41100to41110.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41100to41110.java
@@ -31,7 +31,7 @@
import com.cloud.utils.exception.CloudRuntimeException;
public class Upgrade41100to41110 implements DbUpgrade {
- final static Logger LOG = Logger.getLogger(Upgrade41000to41100.class);
+ final static Logger LOG = Logger.getLogger(Upgrade41100to41110.class);
@Override
public String[] getUpgradableVersionRange() {
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41120to41200.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41130to41200.java
similarity index 93%
rename from engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41120to41200.java
rename to engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41130to41200.java
index f68f04a53aa8..7967c33cbd75 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41120to41200.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41130to41200.java
@@ -22,16 +22,17 @@
import java.sql.PreparedStatement;
import java.sql.SQLException;
-import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.log4j.Logger;
-public class Upgrade41120to41200 implements DbUpgrade {
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade41130to41200 implements DbUpgrade {
- final static Logger LOG = Logger.getLogger(Upgrade41120to41200.class);
+ final static Logger LOG = Logger.getLogger(Upgrade41130to41200.class);
@Override
public String[] getUpgradableVersionRange() {
- return new String[] {"4.11.2.0", "4.12.0.0"};
+ return new String[] {"4.11.3.0", "4.12.0.0"};
}
@Override
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41201.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41201.java
new file mode 100644
index 000000000000..a53d101e484c
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41201.java
@@ -0,0 +1,133 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.HashMap;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.log4j.Logger;
+
+// DB schema upgrade step: 4.12.0.0 -> 4.12.0.1. Runs the prepare/cleanup SQL
+// scripts and, as data migration, seeds XenServer dynamic-memory bounds for
+// several Ubuntu guest OS entries into guest_os_details.
+public class Upgrade41200to41201 implements DbUpgrade {
+
+ final static Logger LOG = Logger.getLogger(Upgrade41200to41201.class);
+
+ // Inclusive [from, to] version pair this step upgrades between.
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.12.0.0", "4.12.0.1"};
+ }
+
+ // Version string recorded once this step has been applied.
+ @Override
+ public String getUpgradedVersion() {
+ return "4.12.0.1";
+ }
+
+ // Rolling upgrades are not supported by this step.
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ // Loads the upgrade SQL from the classpath; fails fast when missing.
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-41200to41201.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ // Inserts per-guest-OS memory detail rows (see populateGuestOsDetails).
+ @Override
+ public void performDataMigration(Connection conn) {
+ populateGuestOsDetails(conn);
+ }
+
+ // Loads the post-upgrade cleanup SQL from the classpath.
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-41200to41201-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ // For each mapped guest OS display name, inserts xenserver.dynamicMin and
+ // xenserver.dynamicMax rows (values in bytes; see MemoryValues) into
+ // guest_os_details, keyed by the matching guest_os id.
+ private void populateGuestOsDetails(Connection conn){
+ // NOTE(review): raw HashMap (no generics); initial capacity 70 although
+ // only six entries are added here — presumably copied from a larger
+ // upgrade step. Consider HashMap<String, MemoryValues>.
+ final HashMap xenServerGuestOsMemoryMap = new HashMap(70);
+
+ // MemoryValues(min, max) takes MiB and stores bytes.
+ xenServerGuestOsMemoryMap.put("Ubuntu 18.04 (32-bit)", new MemoryValues(512l, 32 * 1024l));
+ xenServerGuestOsMemoryMap.put("Ubuntu 18.04 (64-bit)", new MemoryValues(512l, 128 * 1024l));
+ xenServerGuestOsMemoryMap.put("Ubuntu 18.10 (32-bit)", new MemoryValues(512l, 32 * 1024l));
+ xenServerGuestOsMemoryMap.put("Ubuntu 18.10 (64-bit)", new MemoryValues(512l, 128 * 1024l));
+ xenServerGuestOsMemoryMap.put("Ubuntu 19.04 (32-bit)", new MemoryValues(512l, 32 * 1024l));
+ xenServerGuestOsMemoryMap.put("Ubuntu 19.04 (64-bit)", new MemoryValues(512l, 128 * 1024l));
+
+ final String insertDynamicMemoryVal = "insert into guest_os_details(guest_os_id, name, value, display) select id,?, ?, 0 from guest_os where display_name = ?";
+
+ PreparedStatement ps = null;
+
+ try {
+ ps = conn.prepareStatement(insertDynamicMemoryVal);
+
+ // Two inserts per guest OS: dynamic minimum and dynamic maximum.
+ for (String key: xenServerGuestOsMemoryMap.keySet()){
+ ps.setString(1,"xenserver.dynamicMin");
+ ps.setString(2,String.valueOf(xenServerGuestOsMemoryMap.get(key).getMin()));
+ ps.setString(3, key);
+ ps.executeUpdate();
+
+ ps.setString(1,"xenserver.dynamicMax");
+ ps.setString(2,String.valueOf(xenServerGuestOsMemoryMap.get(key).getMax()));
+ ps.setString(3, key);
+ ps.executeUpdate();
+ }
+ } catch(SQLException e) {
+ throw new CloudRuntimeException("Unable to update guestOs details", e);
+ } finally {
+ try {
+ if (ps != null && !ps.isClosed()) {
+ ps.close();
+ }
+ } catch (SQLException e) {
+ // Deliberately ignored: failure to close the statement is non-fatal.
+ }
+ }
+ }
+
+ // Holds a min/max memory pair; constructor converts MiB to bytes.
+ private static class MemoryValues {
+ long max;
+ long min;
+
+ public MemoryValues(final long min, final long max) {
+ this.min = min * 1024 * 1024;
+ this.max = max * 1024 * 1024;
+ }
+
+ public long getMax() {
+ return max;
+ }
+
+ public long getMin() {
+ return min;
+ }
+ }
+
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41201to41202.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41201to41202.java
new file mode 100644
index 000000000000..78240b32e916
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41201to41202.java
@@ -0,0 +1,70 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.log4j.Logger;
+
+// DB schema upgrade step: 4.12.0.1 -> 4.12.0.2. Supplies the prepare and
+// cleanup SQL scripts; no Java-side data migration is performed.
+public class Upgrade41201to41202 implements DbUpgrade {
+
+ final static Logger LOG = Logger.getLogger(Upgrade41201to41202.class);
+
+ // Inclusive [from, to] version pair this step upgrades between.
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.12.0.1", "4.12.0.2"};
+ }
+
+ // Version string recorded once this step has been applied.
+ @Override
+ public String getUpgradedVersion() {
+ return "4.12.0.2";
+ }
+
+ // Rolling upgrades are not supported by this step.
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ // Loads the upgrade SQL from the classpath; fails fast when missing.
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-41201to41202.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ // Intentionally empty: all changes for this step live in the SQL scripts.
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ // Loads the post-upgrade cleanup SQL from the classpath.
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-41201to41202-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41300.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade412025to41300.java
similarity index 87%
rename from engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41300.java
rename to engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade412025to41300.java
index 2de8dc983587..ff5028227772 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41300.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade412025to41300.java
@@ -22,11 +22,11 @@
import com.cloud.utils.exception.CloudRuntimeException;
-public class Upgrade41200to41300 implements DbUpgrade {
+public class Upgrade412025to41300 implements DbUpgrade {
@Override
public String[] getUpgradableVersionRange() {
- return new String[] {"4.12.0.0", "4.13.0.0"};
+ return new String[] {"4.12.0.25", "4.13.0.0"};
}
@Override
@@ -41,7 +41,7 @@ public boolean supportsRollingUpgrade() {
@Override
public InputStream[] getPrepareScripts() {
- final String scriptFile = "META-INF/db/schema-41200to41300.sql";
+ final String scriptFile = "META-INF/db/schema-412025to41300.sql";
final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
if (script == null) {
throw new CloudRuntimeException("Unable to find " + scriptFile);
@@ -56,7 +56,7 @@ public void performDataMigration(Connection conn) {
@Override
public InputStream[] getCleanupScripts() {
- final String scriptFile = "META-INF/db/schema-41200to41300-cleanup.sql";
+ final String scriptFile = "META-INF/db/schema-412025to41300-cleanup.sql";
final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
if (script == null) {
throw new CloudRuntimeException("Unable to find " + scriptFile);
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41202to41203.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41202to41203.java
new file mode 100644
index 000000000000..a67751999ca0
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41202to41203.java
@@ -0,0 +1,70 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.log4j.Logger;
+
+// DB schema upgrade step: 4.12.0.2 -> 4.12.0.3. Supplies the prepare and
+// cleanup SQL scripts; no Java-side data migration is performed.
+public class Upgrade41202to41203 implements DbUpgrade {
+
+ final static Logger LOG = Logger.getLogger(Upgrade41202to41203.class);
+
+ // Inclusive [from, to] version pair this step upgrades between.
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.12.0.2", "4.12.0.3"};
+ }
+
+ // Version string recorded once this step has been applied.
+ @Override
+ public String getUpgradedVersion() {
+ return "4.12.0.3";
+ }
+
+ // Rolling upgrades are not supported by this step.
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ // Loads the upgrade SQL from the classpath; fails fast when missing.
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-41202to41203.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ // Intentionally empty: all changes for this step live in the SQL scripts.
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ // Loads the post-upgrade cleanup SQL from the classpath.
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-41202to41203-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41203to41204.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41203to41204.java
new file mode 100644
index 000000000000..b1933f05a426
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41203to41204.java
@@ -0,0 +1,70 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.log4j.Logger;
+
+// DB schema upgrade step: 4.12.0.3 -> 4.12.0.4. Supplies the prepare and
+// cleanup SQL scripts; no Java-side data migration is performed.
+public class Upgrade41203to41204 implements DbUpgrade {
+
+ final static Logger LOG = Logger.getLogger(Upgrade41203to41204.class);
+
+ // Inclusive [from, to] version pair this step upgrades between.
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.12.0.3", "4.12.0.4"};
+ }
+
+ // Version string recorded once this step has been applied.
+ @Override
+ public String getUpgradedVersion() {
+ return "4.12.0.4";
+ }
+
+ // Rolling upgrades are not supported by this step.
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ // Loads the upgrade SQL from the classpath; fails fast when missing.
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-41203to41204.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ // Intentionally empty: all changes for this step live in the SQL scripts.
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ // Loads the post-upgrade cleanup SQL from the classpath.
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-41203to41204-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41204to41205.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41204to41205.java
new file mode 100644
index 000000000000..75aaa66ad69c
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41204to41205.java
@@ -0,0 +1,70 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.log4j.Logger;
+
+// DB schema upgrade step: 4.12.0.4 -> 4.12.0.5. Supplies the prepare and
+// cleanup SQL scripts; no Java-side data migration is performed.
+public class Upgrade41204to41205 implements DbUpgrade {
+
+ final static Logger LOG = Logger.getLogger(Upgrade41204to41205.class);
+
+ // Inclusive [from, to] version pair this step upgrades between.
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.12.0.4", "4.12.0.5"};
+ }
+
+ // Version string recorded once this step has been applied.
+ @Override
+ public String getUpgradedVersion() {
+ return "4.12.0.5";
+ }
+
+ // Rolling upgrades are not supported by this step.
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ // Loads the upgrade SQL from the classpath; fails fast when missing.
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-41204to41205.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ // Intentionally empty: all changes for this step live in the SQL scripts.
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ // Loads the post-upgrade cleanup SQL from the classpath.
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-41204to41205-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41205to41206.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41205to41206.java
new file mode 100644
index 000000000000..c00ea4f0e849
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41205to41206.java
@@ -0,0 +1,70 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.log4j.Logger;
+
+// DB schema upgrade step: 4.12.0.5 -> 4.12.0.6. Supplies the prepare and
+// cleanup SQL scripts; no Java-side data migration is performed.
+public class Upgrade41205to41206 implements DbUpgrade {
+
+ final static Logger LOG = Logger.getLogger(Upgrade41205to41206.class);
+
+ // Inclusive [from, to] version pair this step upgrades between.
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.12.0.5", "4.12.0.6"};
+ }
+
+ // Version string recorded once this step has been applied.
+ @Override
+ public String getUpgradedVersion() {
+ return "4.12.0.6";
+ }
+
+ // Rolling upgrades are not supported by this step.
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ // Loads the upgrade SQL from the classpath; fails fast when missing.
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-41205to41206.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ // Intentionally empty: all changes for this step live in the SQL scripts.
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ // Loads the post-upgrade cleanup SQL from the classpath.
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-41205to41206-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41206to41207.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41206to41207.java
new file mode 100644
index 000000000000..34ffe6c16100
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41206to41207.java
@@ -0,0 +1,70 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.upgrade.dao;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.log4j.Logger;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+public class Upgrade41206to41207 implements DbUpgrade {
+
+ final static Logger LOG = Logger.getLogger(Upgrade41206to41207.class);
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.12.0.6", "4.12.0.7"};
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.12.0.7";
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-41206to41207.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-41206to41207-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41207to41208.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41207to41208.java
new file mode 100644
index 000000000000..13372dfa1b92
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41207to41208.java
@@ -0,0 +1,70 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.upgrade.dao;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.log4j.Logger;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+public class Upgrade41207to41208 implements DbUpgrade {
+
+ final static Logger LOG = Logger.getLogger(Upgrade41207to41208.class);
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.12.0.7", "4.12.0.8"};
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.12.0.8";
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-41207to41208.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-41207to41208-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41208to41209.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41208to41209.java
new file mode 100644
index 000000000000..f4daaa4a13c4
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41208to41209.java
@@ -0,0 +1,70 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.upgrade.dao;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.log4j.Logger;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+public class Upgrade41208to41209 implements DbUpgrade {
+
+ final static Logger LOG = Logger.getLogger(Upgrade41208to41209.class);
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.12.0.8", "4.12.0.9"};
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.12.0.9";
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-41208to41209.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-41208to41209-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41209to412010.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41209to412010.java
new file mode 100644
index 000000000000..3336a7b88a18
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41209to412010.java
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade41209to412010 implements DbUpgrade {
+
+ final static Logger LOG = Logger.getLogger(Upgrade41209to412010.class);
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.12.0.9", "4.12.0.10"};
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.12.0.10";
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-41209to412010.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-41209to412010-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41210to412011.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41210to412011.java
new file mode 100644
index 000000000000..66eca27847f8
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41210to412011.java
@@ -0,0 +1,70 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.upgrade.dao;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.log4j.Logger;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+public class Upgrade41210to412011 implements DbUpgrade {
+
+ final static Logger LOG = Logger.getLogger(Upgrade41210to412011.class);
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.12.0.10", "4.12.0.11"};
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.12.0.11";
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-41210to412011.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-41210to412011-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41211to412012.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41211to412012.java
new file mode 100644
index 000000000000..dafffa19c4a9
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41211to412012.java
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade41211to412012 implements DbUpgrade {
+
+ final static Logger LOG = Logger.getLogger(Upgrade41211to412012.class);
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.12.0.11", "4.12.0.12"};
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.12.0.12";
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-41211to412012.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-41211to412012-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41212to412013.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41212to412013.java
new file mode 100644
index 000000000000..b86c619ab7a5
--- /dev/null
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41212to412013.java
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.upgrade.dao;
+
+import java.io.InputStream;
+import java.sql.Connection;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class Upgrade41212to412013 implements DbUpgrade {
+
+ final static Logger LOG = Logger.getLogger(Upgrade41212to412013.class);
+
+ @Override
+ public String[] getUpgradableVersionRange() {
+ return new String[] {"4.12.0.12", "4.12.0.13"};
+ }
+
+ @Override
+ public String getUpgradedVersion() {
+ return "4.12.0.13";
+ }
+
+ @Override
+ public boolean supportsRollingUpgrade() {
+ return false;
+ }
+
+ @Override
+ public InputStream[] getPrepareScripts() {
+ final String scriptFile = "META-INF/db/schema-41212to412013.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+
+ @Override
+ public void performDataMigration(Connection conn) {
+ }
+
+ @Override
+ public InputStream[] getCleanupScripts() {
+ final String scriptFile = "META-INF/db/schema-41212to412013-cleanup.sql";
+ final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile);
+ if (script == null) {
+ throw new CloudRuntimeException("Unable to find " + scriptFile);
+ }
+
+ return new InputStream[] {script};
+ }
+}
diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/RemoteAccessVpnDetailVO.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/RemoteAccessVpnDetailVO.java
index 5fb01a25c2a9..86e50a38787e 100644
--- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/RemoteAccessVpnDetailVO.java
+++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/RemoteAccessVpnDetailVO.java
@@ -39,7 +39,7 @@ public class RemoteAccessVpnDetailVO implements ResourceDetail {
@Column(name = "name")
private String name;
- @Column(name = "value", length = 1024)
+ @Column(name = "value", length = 8191)
private String value;
@Column(name = "display")
diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDao.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDao.java
index 297b7f614c12..d0fdbef84171 100644
--- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDao.java
+++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDao.java
@@ -16,11 +16,13 @@
// under the License.
package org.apache.cloudstack.resourcedetail.dao;
+import java.util.Map;
+
import org.apache.cloudstack.resourcedetail.RemoteAccessVpnDetailVO;
import org.apache.cloudstack.resourcedetail.ResourceDetailsDao;
import com.cloud.utils.db.GenericDao;
public interface RemoteAccessVpnDetailsDao extends GenericDao, ResourceDetailsDao {
-
+ Map getDetails(long vpnId);
}
diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDaoImpl.java
index a71b006254e5..7fe1e08a7d24 100644
--- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDaoImpl.java
+++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDaoImpl.java
@@ -16,17 +16,43 @@
// under the License.
package org.apache.cloudstack.resourcedetail.dao;
+import java.util.Map;
+import java.util.stream.Collectors;
import org.springframework.stereotype.Component;
+import com.cloud.utils.crypt.DBEncryptionUtil;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
+
import org.apache.cloudstack.resourcedetail.RemoteAccessVpnDetailVO;
import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase;
@Component
public class RemoteAccessVpnDetailsDaoImpl extends ResourceDetailsDaoBase implements RemoteAccessVpnDetailsDao {
+ protected final SearchBuilder vpnSearch;
+
+ public RemoteAccessVpnDetailsDaoImpl() {
+ super();
+
+ vpnSearch = createSearchBuilder();
+ vpnSearch.and("remote_access_vpn", vpnSearch.entity().getResourceId(), SearchCriteria.Op.EQ);
+ vpnSearch.done();
+ }
+
@Override
public void addDetail(long resourceId, String key, String value, boolean display) {
- super.addDetail(new RemoteAccessVpnDetailVO(resourceId, key, value, display));
+ super.addDetail(new RemoteAccessVpnDetailVO(resourceId, key, DBEncryptionUtil.encrypt(value), display));
+ }
+
+ @Override
+ public Map getDetails(long vpnId) {
+ SearchCriteria sc = vpnSearch.create();
+ sc.setParameters("remote_access_vpn", vpnId);
+
+ return listBy(sc).stream().collect(Collectors.toMap(RemoteAccessVpnDetailVO::getName, detail -> {
+ return DBEncryptionUtil.decrypt(detail.getValue());
+ }));
}
}
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41000to4100226-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41000to4100226-cleanup.sql
new file mode 100644
index 000000000000..60c94b7bdf3c
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41000to4100226-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.0 to 4.10.0.226
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41000to4100226.sql b/engine/schema/src/main/resources/META-INF/db/schema-41000to4100226.sql
new file mode 100644
index 000000000000..a9af8d3cd03c
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41000to4100226.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.0 to 4.10.0.226
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100226to4100227-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100226to4100227-cleanup.sql
new file mode 100644
index 000000000000..d0e8990dc970
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100226to4100227-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.226 to 4.10.0.227;
+--;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100226to4100227.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100226to4100227.sql
new file mode 100644
index 000000000000..c5be382e2a70
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100226to4100227.sql
@@ -0,0 +1,67 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.226 to 4.10.0.227;
+--;
+
+-- VDI-per-LUN
+ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `min_iops_per_gb` int unsigned DEFAULT NULL COMMENT 'Min IOPS per GB';
+ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `max_iops_per_gb` int unsigned DEFAULT NULL COMMENT 'Max IOPS per GB';
+ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `highest_min_iops` int unsigned DEFAULT NULL COMMENT 'Highest Min IOPS for the offering';
+ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `highest_max_iops` int unsigned DEFAULT NULL COMMENT 'Highest Max IOPS for the offering';
+DROP VIEW IF EXISTS `cloud`.`disk_offering_view`;
+CREATE VIEW `cloud`.`disk_offering_view` AS
+ select
+ disk_offering.id,
+ disk_offering.uuid,
+ disk_offering.name,
+ disk_offering.display_text,
+ disk_offering.provisioning_type,
+ disk_offering.disk_size,
+ disk_offering.min_iops,
+ disk_offering.max_iops,
+ disk_offering.created,
+ disk_offering.tags,
+ disk_offering.customized,
+ disk_offering.customized_iops,
+ disk_offering.removed,
+ disk_offering.use_local_storage,
+ disk_offering.system_use,
+ disk_offering.hv_ss_reserve,
+ disk_offering.bytes_read_rate,
+ disk_offering.bytes_write_rate,
+ disk_offering.iops_read_rate,
+ disk_offering.iops_write_rate,
+ disk_offering.min_iops_per_gb,
+ disk_offering.max_iops_per_gb,
+ disk_offering.highest_min_iops,
+ disk_offering.highest_max_iops,
+ disk_offering.cache_mode,
+ disk_offering.sort_key,
+ disk_offering.type,
+ disk_offering.display_offering,
+ domain.id domain_id,
+ domain.uuid domain_uuid,
+ domain.name domain_name,
+ domain.path domain_path
+ from
+ `cloud`.`disk_offering`
+ left join
+ `cloud`.`domain` ON disk_offering.domain_id = domain.id
+ where
+ disk_offering.state='ACTIVE';
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100227to4100228-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100227to4100228-cleanup.sql
new file mode 100644
index 000000000000..f7b189eab586
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100227to4100228-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.227 to 4.10.0.228;
+--;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100227to4100228.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100227to4100228.sql
new file mode 100644
index 000000000000..075c70d10b14
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100227to4100228.sql
@@ -0,0 +1,227 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.227 to 4.10.0.228;
+--;
+
+-- VPN implementation based on IKEv2
+ALTER TABLE `cloud`.`remote_access_vpn` CHANGE COLUMN `ipsec_psk` `ipsec_psk` VARCHAR(256) NULL ;
+ALTER TABLE `cloud`.`remote_access_vpn`
+ ADD COLUMN `vpn_type` VARCHAR(8) NOT NULL AFTER `display`,
+ ADD COLUMN `ca_certificate` VARCHAR(8191) NULL AFTER `vpn_type`;
+
+ALTER TABLE `cloud`.`remote_access_vpn_details` CHANGE COLUMN `value` `value` VARCHAR(8191) NOT NULL ;
+
+-- XenServer 7.1.1 support update
+INSERT INTO `cloud`.`hypervisor_capabilities`(
+ uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported)
+values
+ (UUID(), 'XenServer', '7.1.1', 500, 13, 1);
+
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 4.5 (32-bit)', 1, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 4.6 (32-bit)', 2, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 4.7 (32-bit)', 3, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 4.8 (32-bit)', 4, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 5, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 6, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 7, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 8, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 9, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 10, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 11, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 12, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 13, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 14, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 111, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 112, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 141, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 142, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 161, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 162, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 173, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 174, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 175, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 176, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 231, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 232, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 139, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 140, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 143, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 144, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 177, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 178, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 179, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 180, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 171, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 172, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 181, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 182, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 227, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 228, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 248, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 249, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 7', 246, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Debian Squeeze 6.0 (32-bit)', 132, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Debian Squeeze 6.0 (64-bit)', 133, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Debian Wheezy 7.0 (32-bit)', 183, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Debian Wheezy 7.0 (64-bit)', 184, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 16, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 17, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 18, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 19, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 20, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 21, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 22, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 23, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 24, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 25, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 134, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 135, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 145, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 146, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 207, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 208, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 209, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 210, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 211, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 212, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 233, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 234, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (32-bit)', 147, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (64-bit)', 148, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (32-bit)', 213, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (64-bit)', 214, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (32-bit)', 215, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (64-bit)', 216, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (32-bit)', 217, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (64-bit)', 218, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (32-bit)', 219, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (64-bit)', 220, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (32-bit)', 235, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (64-bit)', 236, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (32-bit)', 250, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (64-bit)', 251, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Linux 7', 247, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 4.5 (32-bit)', 26, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 4.6 (32-bit)', 27, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 4.7 (32-bit)', 28, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 4.8 (32-bit)', 29, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 30, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 31, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 32, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 33, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 34, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 35, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 36, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 37, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 38, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 39, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 113, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 114, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 149, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 150, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 189, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 190, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 191, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 192, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 193, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 194, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 237, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 238, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 136, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 137, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 195, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 196, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 197, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 198, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 199, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 204, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 205, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 206, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 239, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 240, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 7', 245, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP1 (32-bit)', 41, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP1 (64-bit)', 42, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP2 (32-bit)', 43, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP2 (64-bit)', 44, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP3 (32-bit)', 151, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP3 (64-bit)', 45, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP4 (32-bit)', 153, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP4 (64-bit)', 152, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 11 (32-bit)', 46, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 11 (64-bit)', 47, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 11 SP1 (32-bit)', 155, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 11 SP2 (32-bit)', 186, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 11 SP2 (64-bit)', 185, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 11 SP3 (32-bit)', 188, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 11 SP3 (64-bit)', 187, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 12 (64-bit)', 244, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows 7 (32-bit)', 48, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows 7 (64-bit)', 49, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows 8 (32-bit)', 165, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows 8 (64-bit)', 166, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 51, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 87, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 88, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 89, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 90, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows Server 2008 (32-bit)', 52, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows Server 2008 (64-bit)', 53, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows Server 2008 R2 (64-bit)', 54, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows Server 2012 (64-bit)', 167, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows Server 2012 R2 (64-bit)', 168, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 58, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Lucid Lynx 10.04 (32-bit)', 121, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Lucid Lynx 10.04 (64-bit)', 126, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Maverick Meerkat 10.10 (32-bit) (experimental)', 156, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Maverick Meerkat 10.10 (64-bit) (experimental)', 157, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Precise Pangolin 12.04 (32-bit)', 163, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Precise Pangolin 12.04 (64-bit)', 164, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Trusty Tahr 14.04', 241, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Trusty Tahr 14.04', 254, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 169, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 170, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 98, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 99, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 60, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 103, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 200, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 201, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 59, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 100, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 202, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 203, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Trusty Tahr 14.04', 255, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Xenial Xerus 16.04', 256, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows 10 (32-bit)', 257, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows 10 (64-bit)', 258, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows Server 2016 (64-bit)', 259, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 7', 260, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 261, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 262, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 263, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 264, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 265, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 266, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 267, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 268, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CoreOS', 271, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 7', 272, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 7', 273, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 7', 274, now(), 0);
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100228to4100229-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100228to4100229-cleanup.sql
new file mode 100644
index 000000000000..3eed7064b625
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100228to4100229-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.228 to 4.10.0.229;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100228to4100229.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100228to4100229.sql
new file mode 100644
index 000000000000..ebc46205b7f1
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100228to4100229.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.228 to 4.10.0.229;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100229to4100230-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100229to4100230-cleanup.sql
new file mode 100644
index 000000000000..bb33c45055c1
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100229to4100230-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.229 to 4.10.0.230;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100229to4100230.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100229to4100230.sql
new file mode 100644
index 000000000000..dd8399b172db
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100229to4100230.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.229 to 4.10.0.230;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100230to4100231-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100230to4100231-cleanup.sql
new file mode 100644
index 000000000000..4ac2c75c3c46
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100230to4100231-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.230 to 4.10.0.231;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100230to4100231.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100230to4100231.sql
new file mode 100644
index 000000000000..2e52107fb872
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100230to4100231.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.230 to 4.10.0.231;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100231to4100232-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100231to4100232-cleanup.sql
new file mode 100644
index 000000000000..302903b70e88
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100231to4100232-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.231 to 4.10.0.232;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100231to4100232.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100231to4100232.sql
new file mode 100644
index 000000000000..6aae124bd2a4
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100231to4100232.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.231 to 4.10.0.232;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100232to4100233-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100232to4100233-cleanup.sql
new file mode 100644
index 000000000000..72980235e3d1
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100232to4100233-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.232 to 4.10.0.233;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100232to4100233.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100232to4100233.sql
new file mode 100644
index 000000000000..e7ede878fe0f
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100232to4100233.sql
@@ -0,0 +1,219 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.232 to 4.10.0.233;
+--;
+
+-- XenServer 7.6 support update
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(
+ uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported)
+values
+ (UUID(), 'XenServer', '7.6.0', 500, 13, 1);
+
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 4.5 (32-bit)', 1, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 4.6 (32-bit)', 2, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 4.7 (32-bit)', 3, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 4.8 (32-bit)', 4, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 5, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 6, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 7, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 8, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 9, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 10, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 11, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 12, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 13, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 14, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 111, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 112, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 141, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 142, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 161, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 162, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 173, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 174, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 175, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 176, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 231, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 232, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 139, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 140, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 143, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 144, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 177, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 178, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 179, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 180, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 171, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 172, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 181, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 182, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 227, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 228, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 248, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 249, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 7', 246, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Squeeze 6.0 (32-bit)', 132, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Squeeze 6.0 (64-bit)', 133, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Wheezy 7.0 (32-bit)', 183, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Wheezy 7.0 (64-bit)', 184, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 16, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 17, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 18, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 19, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 20, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 21, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 22, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 23, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 24, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 25, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 134, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 135, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 145, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 146, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 207, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 208, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 209, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 210, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 211, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 212, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 233, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 234, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (32-bit)', 147, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (64-bit)', 148, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (32-bit)', 213, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (64-bit)', 214, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (32-bit)', 215, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (64-bit)', 216, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (32-bit)', 217, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (64-bit)', 218, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (32-bit)', 219, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (64-bit)', 220, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (32-bit)', 235, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (64-bit)', 236, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (32-bit)', 250, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (64-bit)', 251, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Linux 7', 247, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 4.5 (32-bit)', 26, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 4.6 (32-bit)', 27, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 4.7 (32-bit)', 28, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 4.8 (32-bit)', 29, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 30, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 31, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 32, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 33, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 34, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 35, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 36, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 37, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 38, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 39, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 113, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 114, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 149, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 150, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 189, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 190, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 191, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 192, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 193, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 194, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 237, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 238, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 136, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 137, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 195, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 196, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 197, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 198, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 199, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 204, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 205, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 206, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 239, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 240, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 7', 245, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP1 (32-bit)', 41, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP1 (64-bit)', 42, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP2 (32-bit)', 43, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP2 (64-bit)', 44, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP3 (32-bit)', 151, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP3 (64-bit)', 45, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP4 (32-bit)', 153, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP4 (64-bit)', 152, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 11 (32-bit)', 46, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 11 (64-bit)', 47, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 11 SP1 (32-bit)', 155, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 11 SP2 (32-bit)', 186, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 11 SP2 (64-bit)', 185, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 11 SP3 (32-bit)', 188, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 11 SP3 (64-bit)', 187, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 12 (64-bit)', 244, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows 7 (32-bit)', 48, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows 7 (64-bit)', 49, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows 8 (32-bit)', 165, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows 8 (64-bit)', 166, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 51, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 87, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 88, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 89, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 90, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows Server 2008 (32-bit)', 52, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows Server 2008 (64-bit)', 53, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows Server 2008 R2 (64-bit)', 54, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows Server 2012 (64-bit)', 167, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows Server 2012 R2 (64-bit)', 168, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 58, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Lucid Lynx 10.04 (32-bit)', 121, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Lucid Lynx 10.04 (64-bit)', 126, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Maverick Meerkat 10.10 (32-bit) (experimental)', 156, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Maverick Meerkat 10.10 (64-bit) (experimental)', 157, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Precise Pangolin 12.04 (32-bit)', 163, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Precise Pangolin 12.04 (64-bit)', 164, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Trusty Tahr 14.04', 241, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Trusty Tahr 14.04', 254, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 169, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 170, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 98, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 99, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 60, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 103, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 200, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 201, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 59, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 100, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 202, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 203, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Trusty Tahr 14.04', 255, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Xenial Xerus 16.04', 256, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows 10 (32-bit)', 257, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows 10 (64-bit)', 258, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows Server 2016 (64-bit)', 259, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 7', 260, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 261, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 262, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 263, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 264, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 265, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 266, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 267, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 268, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CoreOS', 271, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 7', 272, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 7', 273, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 7', 274, now(), 0);
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100233to4100234-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100233to4100234-cleanup.sql
new file mode 100644
index 000000000000..bb04f30d55e0
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100233to4100234-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.233 to 4.10.0.234;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100233to4100234.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100233to4100234.sql
new file mode 100644
index 000000000000..e70d255b1987
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100233to4100234.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.233 to 4.10.0.234;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100234to4100235-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100234to4100235-cleanup.sql
new file mode 100644
index 000000000000..153707f2d180
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100234to4100235-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.234 to 4.10.0.235;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100234to4100235.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100234to4100235.sql
new file mode 100644
index 000000000000..889bcd21c8d9
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100234to4100235.sql
@@ -0,0 +1,219 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.234 to 4.10.0.235;
+--;
+
+-- XenServer 7.1.2 support update
+INSERT INTO `cloud`.`hypervisor_capabilities`(
+ uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported)
+VALUES
+ (UUID(), 'XenServer', '7.1.2', 500, 13, 1);
+
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 4.5 (32-bit)', 1, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 4.6 (32-bit)', 2, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 4.7 (32-bit)', 3, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 4.8 (32-bit)', 4, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 5, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 6, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 7, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 8, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 9, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 10, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 11, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 12, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 13, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 14, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 111, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 112, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 141, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 142, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 161, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 162, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 173, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 174, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 175, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 176, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 231, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 232, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 139, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 140, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 143, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 144, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 177, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 178, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 179, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 180, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 171, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 172, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 181, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 182, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 227, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 228, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 248, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 249, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 7', 246, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Squeeze 6.0 (32-bit)', 132, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Squeeze 6.0 (64-bit)', 133, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Wheezy 7.0 (32-bit)', 183, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Wheezy 7.0 (64-bit)', 184, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 16, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 17, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 18, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 19, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 20, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 21, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 22, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 23, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 24, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 25, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 134, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 135, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 145, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 146, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 207, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 208, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 209, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 210, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 211, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 212, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 233, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 234, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (32-bit)', 147, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (64-bit)', 148, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (32-bit)', 213, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (64-bit)', 214, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (32-bit)', 215, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (64-bit)', 216, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (32-bit)', 217, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (64-bit)', 218, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (32-bit)', 219, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (64-bit)', 220, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (32-bit)', 235, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (64-bit)', 236, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (32-bit)', 250, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (64-bit)', 251, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Linux 7', 247, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 4.5 (32-bit)', 26, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 4.6 (32-bit)', 27, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 4.7 (32-bit)', 28, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 4.8 (32-bit)', 29, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 30, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 31, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 32, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 33, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 34, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 35, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 36, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 37, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 38, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 39, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 113, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 114, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 149, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 150, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 189, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 190, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 191, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 192, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 193, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 194, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 237, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 238, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 136, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 137, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 195, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 196, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 197, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 198, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 199, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 204, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 205, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 206, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 239, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 240, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 7', 245, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP1 (32-bit)', 41, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP1 (64-bit)', 42, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP2 (32-bit)', 43, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP2 (64-bit)', 44, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP3 (32-bit)', 151, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP3 (64-bit)', 45, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP4 (32-bit)', 153, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP4 (64-bit)', 152, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 11 (32-bit)', 46, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 11 (64-bit)', 47, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 11 SP1 (32-bit)', 155, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 11 SP2 (32-bit)', 186, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 11 SP2 (64-bit)', 185, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 11 SP3 (32-bit)', 188, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 11 SP3 (64-bit)', 187, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 12 (64-bit)', 244, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows 7 (32-bit)', 48, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows 7 (64-bit)', 49, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows 8 (32-bit)', 165, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows 8 (64-bit)', 166, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 51, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 87, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 88, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 89, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 90, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2008 (32-bit)', 52, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2008 (64-bit)', 53, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2008 R2 (64-bit)', 54, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2012 (64-bit)', 167, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2012 R2 (64-bit)', 168, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 58, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Lucid Lynx 10.04 (32-bit)', 121, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Lucid Lynx 10.04 (64-bit)', 126, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Maverick Meerkat 10.10 (32-bit) (experimental)', 156, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Maverick Meerkat 10.10 (64-bit) (experimental)', 157, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Precise Pangolin 12.04 (32-bit)', 163, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Precise Pangolin 12.04 (64-bit)', 164, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Trusty Tahr 14.04', 241, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Trusty Tahr 14.04', 254, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 169, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 170, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 98, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 99, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 60, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 103, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 200, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 201, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 59, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 100, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 202, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 203, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Trusty Tahr 14.04', 255, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Xenial Xerus 16.04', 256, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows 10 (32-bit)', 257, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows 10 (64-bit)', 258, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2016 (64-bit)', 259, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 7', 260, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 261, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 262, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 263, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 264, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 265, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 266, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 267, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 268, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CoreOS', 271, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 7', 272, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 7', 273, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 7', 274, now(), 0);
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100235to4100236-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100235to4100236-cleanup.sql
new file mode 100644
index 000000000000..a816d28f929e
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100235to4100236-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.235 to 4.10.0.236;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100235to4100236.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100235to4100236.sql
new file mode 100644
index 000000000000..ea0e7c4b9266
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100235to4100236.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.235 to 4.10.0.236;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100236to4100237-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100236to4100237-cleanup.sql
new file mode 100644
index 000000000000..7ffdcad75012
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100236to4100237-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.236 to 4.10.0.237;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100236to4100237.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100236to4100237.sql
new file mode 100644
index 000000000000..2dc50cb3a92d
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100236to4100237.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.236 to 4.10.0.237;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100237to4100238-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100237to4100238-cleanup.sql
new file mode 100644
index 000000000000..d6cc24aae28d
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100237to4100238-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.237 to 4.10.0.238;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100237to4100238.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100237to4100238.sql
new file mode 100644
index 000000000000..d9fdd706736a
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100237to4100238.sql
@@ -0,0 +1,22 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.237 to 4.10.0.238;
+--;
+
+INSERT IGNORE INTO `cloud`.`configuration` (`category`, `instance`, `component`, `name`, `value`, `description`) VALUES ('Network', 'DEFAULT', 'management-server', 'vpc.usage.whitelist.cidr', null, 'List of CIDRs to track usage separately in VPCs');
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100238to4100239-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100238to4100239-cleanup.sql
new file mode 100644
index 000000000000..eb704d49eb9b
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100238to4100239-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.238 to 4.10.0.239;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100238to4100239.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100238to4100239.sql
new file mode 100644
index 000000000000..33285f34a9cc
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100238to4100239.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.238 to 4.10.0.239;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100239to4100240-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100239to4100240-cleanup.sql
new file mode 100644
index 000000000000..64daa6bb0252
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100239to4100240-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.10.0.239 to 4.10.0.240;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100239to4100240.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100239to4100240.sql
new file mode 100644
index 000000000000..65f7954ccc89
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100239to4100240.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.239 to 4.10.0.240;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41000to41100-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100240to41100-cleanup.sql
similarity index 96%
rename from engine/schema/src/main/resources/META-INF/db/schema-41000to41100-cleanup.sql
rename to engine/schema/src/main/resources/META-INF/db/schema-4100240to41100-cleanup.sql
index f8d9ce9b73bb..1657a0802e1d 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-41000to41100-cleanup.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100240to41100-cleanup.sql
@@ -16,7 +16,7 @@
-- under the License.
--;
--- Schema upgrade cleanup from 4.10.0.0 to 4.11.0.0
+-- Schema upgrade cleanup from 4.10.0.240 to 4.11.0.0;
--;
DELETE FROM `cloud`.`configuration` WHERE name='snapshot.backup.rightafter';
@@ -66,4 +66,4 @@ CREATE VIEW `cloud`.`user_view` AS
left join
`cloud`.`async_job` ON async_job.instance_id = user.id
and async_job.instance_type = 'User'
- and async_job.job_status = 0;
\ No newline at end of file
+ and async_job.job_status = 0;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41000to41100.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100240to41100.sql
similarity index 99%
rename from engine/schema/src/main/resources/META-INF/db/schema-41000to41100.sql
rename to engine/schema/src/main/resources/META-INF/db/schema-4100240to41100.sql
index 2db644f927cc..2608cbd6fdb3 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-41000to41100.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4100240to41100.sql
@@ -16,7 +16,7 @@
-- under the License.
--;
--- Schema upgrade from 4.10.0.0 to 4.11.0.0
+-- Schema upgrade from 4.10.0.240 to 4.11.0.0;
--;
--;
@@ -25,7 +25,7 @@
DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_COLUMN`;
CREATE PROCEDURE `cloud`.`IDEMPOTENT_ADD_COLUMN` (
- IN in_table_name VARCHAR(200)
+ IN in_table_name VARCHAR(200)
, IN in_column_name VARCHAR(200)
, IN in_column_definition VARCHAR(1000)
)
@@ -36,7 +36,7 @@ BEGIN
DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_DROP_FOREIGN_KEY`;
CREATE PROCEDURE `cloud`.`IDEMPOTENT_DROP_FOREIGN_KEY` (
- IN in_table_name VARCHAR(200)
+ IN in_table_name VARCHAR(200)
, IN in_foreign_key_name VARCHAR(200)
)
BEGIN
@@ -46,7 +46,7 @@ BEGIN
DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_DROP_INDEX`;
CREATE PROCEDURE `cloud`.`IDEMPOTENT_DROP_INDEX` (
- IN in_index_name VARCHAR(200)
+ IN in_index_name VARCHAR(200)
, IN in_table_name VARCHAR(200)
)
BEGIN
@@ -56,7 +56,7 @@ BEGIN
DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_CREATE_UNIQUE_INDEX`;
CREATE PROCEDURE `cloud`.`IDEMPOTENT_CREATE_UNIQUE_INDEX` (
- IN in_index_name VARCHAR(200)
+ IN in_index_name VARCHAR(200)
, IN in_table_name VARCHAR(200)
, IN in_index_definition VARCHAR(1000)
)
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41200to41201-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41200to41201-cleanup.sql
new file mode 100644
index 000000000000..137776090809
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41200to41201-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.12.0.0 to 4.12.0.1;
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41200to41201.sql b/engine/schema/src/main/resources/META-INF/db/schema-41200to41201.sql
new file mode 100644
index 000000000000..2b089cf48226
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41200to41201.sql
@@ -0,0 +1,153 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.12.0.0 to 4.12.0.1
+--;
+
+-- add KVM / qemu io bursting options PR 3133
+ALTER VIEW `cloud`.`disk_offering_view` AS
+ SELECT
+ `disk_offering`.`id` AS `id`,
+ `disk_offering`.`uuid` AS `uuid`,
+ `disk_offering`.`name` AS `name`,
+ `disk_offering`.`display_text` AS `display_text`,
+ `disk_offering`.`provisioning_type` AS `provisioning_type`,
+ `disk_offering`.`disk_size` AS `disk_size`,
+ `disk_offering`.`min_iops` AS `min_iops`,
+ `disk_offering`.`max_iops` AS `max_iops`,
+ `disk_offering`.`created` AS `created`,
+ `disk_offering`.`tags` AS `tags`,
+ `disk_offering`.`customized` AS `customized`,
+ `disk_offering`.`customized_iops` AS `customized_iops`,
+ `disk_offering`.`removed` AS `removed`,
+ `disk_offering`.`use_local_storage` AS `use_local_storage`,
+ `disk_offering`.`system_use` AS `system_use`,
+ `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`,
+ `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`,
+ `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`,
+ `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`,
+ `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`,
+ `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`,
+ `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`,
+ `disk_offering`.`iops_read_rate` AS `iops_read_rate`,
+ `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`,
+ `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`,
+ `disk_offering`.`iops_write_rate` AS `iops_write_rate`,
+ `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`,
+ `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`,
+ `disk_offering`.`min_iops_per_gb` AS `min_iops_per_gb`,
+ `disk_offering`.`max_iops_per_gb` AS `max_iops_per_gb`,
+ `disk_offering`.`highest_min_iops` AS `highest_min_iops`,
+ `disk_offering`.`highest_max_iops` AS `highest_max_iops`,
+ `disk_offering`.`cache_mode` AS `cache_mode`,
+ `disk_offering`.`sort_key` AS `sort_key`,
+ `disk_offering`.`type` AS `type`,
+ `disk_offering`.`display_offering` AS `display_offering`,
+ `domain`.`id` AS `domain_id`,
+ `domain`.`uuid` AS `domain_uuid`,
+ `domain`.`name` AS `domain_name`,
+ `domain`.`path` AS `domain_path`
+ FROM
+ (`disk_offering`
+ LEFT JOIN `domain` ON ((`disk_offering`.`domain_id` = `domain`.`id`)))
+ WHERE
+ (`disk_offering`.`state` = 'ACTIVE');
+
+-- Add Ubuntu 18.04 LTS as supported guest os
+INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (277, UUID(), 10, 'Ubuntu 18.04 (32-bit)', utc_timestamp());
+INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (278, UUID(), 10, 'Ubuntu 18.04 (64-bit)', utc_timestamp());
+-- Ubuntu 18.04 KVM guest os mapping
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'Ubuntu 18.04', 277, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'Ubuntu 18.04', 278, utc_timestamp(), 0);
+-- Ubuntu 18.04 XenServer guest os mapping
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '6.5.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '6.5.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.0.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.0.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.1', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.1', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.2.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.2.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.3.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.3.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.4.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.4.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.5.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.5.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0);
+
+-- Add Ubuntu 18.10 as supported guest os
+INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (279, UUID(), 10, 'Ubuntu 18.10 (32-bit)', utc_timestamp());
+INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (280, UUID(), 10, 'Ubuntu 18.10 (64-bit)', utc_timestamp());
+-- Ubuntu 18.10 KVM guest os mapping
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'Ubuntu 18.10', 279, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'Ubuntu 18.10', 280, utc_timestamp(), 0);
+-- Ubuntu 18.10 XenServer guest os mapping
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '6.5.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '6.5.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.0.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.0.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.1', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.1', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.2.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.2.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.3.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.3.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.4.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.4.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.5.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.5.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0);
+
+-- Add Ubuntu 19.04 as supported guest os
+INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (281, UUID(), 10, 'Ubuntu 19.04 (32-bit)', utc_timestamp());
+INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (282, UUID(), 10, 'Ubuntu 19.04 (64-bit)', utc_timestamp());
+-- Ubuntu 19.04 KVM guest os mapping
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'Ubuntu 19.04', 281, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'Ubuntu 19.04', 282, utc_timestamp(), 0);
+-- Ubuntu 19.04 XenServer guest os mapping
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '6.5.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '6.5.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.0.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.0.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.1', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.1', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.2.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.2.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.3.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.3.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.4.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.4.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.5.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.5.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0);
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41200to41300.sql b/engine/schema/src/main/resources/META-INF/db/schema-41200to41300.sql
deleted file mode 100644
index 0b368eb310df..000000000000
--- a/engine/schema/src/main/resources/META-INF/db/schema-41200to41300.sql
+++ /dev/null
@@ -1,542 +0,0 @@
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements. See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership. The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License. You may obtain a copy of the License at
---
--- http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing,
--- software distributed under the License is distributed on an
--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
--- KIND, either express or implied. See the License for the
--- specific language governing permissions and limitations
--- under the License.
-
---;
--- Schema upgrade from 4.12.0.0 to 4.13.0.0
---;
-
--- Add XenServer 7.1.2, 7.6 and 8.0 hypervisor capabilities
-INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '7.6.0', 1000, 253, 64, 1);
-INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '8.0.0', 1000, 253, 64, 1);
-INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '7.1.1', 1000, 253, 64, 1);
-INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '7.1.2', 1000, 253, 64, 1);
-
--- Add VMware 6.7 hypervisor capabilities
-INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7', '1024', '0', '59', '64', '1', '1');
-INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7.1', '1024', '0', '59', '64', '1', '1');
-INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7.2', '1024', '0', '59', '64', '1', '1');
-INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7.3', '1024', '0', '59', '64', '1', '1');
-
--- Update VMware 6.x hypervisor capabilities
-UPDATE `cloud`.`hypervisor_capabilities` SET max_guests_limit='1024', max_data_volumes_limit='59', max_hosts_per_cluster='64' WHERE (hypervisor_type='VMware' AND hypervisor_version='6.0' );
-UPDATE `cloud`.`hypervisor_capabilities` SET max_guests_limit='1024', max_data_volumes_limit='59', max_hosts_per_cluster='64' WHERE (hypervisor_type='VMware' AND hypervisor_version='6.5' );
-
--- Add new OS versions
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('277', UUID(), '1', 'Ubuntu 17.04', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('278', UUID(), '1', 'Ubuntu 17.10', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('279', UUID(), '1', 'Ubuntu 18.04 LTS', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('280', UUID(), '1', 'Ubuntu 18.10', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('281', UUID(), '1', 'Ubuntu 19.04', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('282', UUID(), '1', 'Red Hat Enterprise Linux 7.3', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('283', UUID(), '1', 'Red Hat Enterprise Linux 7.4', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('284', UUID(), '1', 'Red Hat Enterprise Linux 7.5', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('285', UUID(), '1', 'Red Hat Enterprise Linux 7.6', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('286', UUID(), '1', 'Red Hat Enterprise Linux 8.0', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('289', UUID(), '2', 'Debian GNU/Linux 9 (32-bit)', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('290', UUID(), '2', 'Debian GNU/Linux 9 (64-bit)', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('291', UUID(), '5', 'SUSE Linux Enterprise Server 15 (64-bit)', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('292', UUID(), '2', 'Debian GNU/Linux 10 (32-bit)', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('293', UUID(), '2', 'Debian GNU/Linux 10 (64-bit)', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('294', UUID(), '2', 'Linux 4.x Kernel (32-bit)', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('295', UUID(), '2', 'Linux 4.x Kernel (64-bit)', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('296', UUID(), '3', 'Oracle Linux 8', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('297', UUID(), '1', 'CentOS 8', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('298', UUID(), '9', 'FreeBSD 11 (32-bit)', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('299', UUID(), '9', 'FreeBSD 11 (64-bit)', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('300', UUID(), '9', 'FreeBSD 12 (32-bit)', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('301', UUID(), '9', 'FreeBSD 12 (64-bit)', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('302', UUID(), '1', 'CentOS 6.8', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('303', UUID(), '1', 'CentOS 6.9', now(), '0');
-INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('304', UUID(), '1', 'CentOS 6.10', now(), '0');
-
--- Add New and missing VMware 6.5 Guest OSes
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 235, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 236, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 147, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 148, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 213, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 214, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 215, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 216, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 217, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 218, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 219, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 220, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 250, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 251, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux7_64Guest', 247, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntuGuest', 255, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 256, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 277, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 278, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 279, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 280, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 282, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 283, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 284, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 285, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'windows9Server64Guest', 276, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian9Guest', 289, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian9_64Guest', 290, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian10Guest', 282, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian10_64Guest', 293, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'sles15_64Guest', 291, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos6_64Guest', 302, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos6_64Guest', 303, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos6_64Guest', 304, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel8_64Guest', 286, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 281, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'other4xLinuxGuest', 294, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'other4xLinux64Guest', 295, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux8_64Guest', 296, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos8_64Guest', 297, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd11Guest', 298, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd11_64Guest', 299, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd12Guest', 300, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd12_64Guest', 301, now(), 0);
-
--- Copy VMware 6.5 Guest OSes to VMware 6.7
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.5';
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7.1', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.7';
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7.2', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.7.1';
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7.3', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.7.2';
-
--- Copy XenServer 7.1.0 to XenServer 7.1.1
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.1.1', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.1.0';
-
--- Copy XenServer 7.1.1 to XenServer 7.1.2
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.1.2', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.1.1';
-
--- Add New XenServer 7.1.2 Guest OSes
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Stretch 9.0', 289, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Stretch 9.0', 290, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Bionic Beaver 18.04', 279, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2019 (64-bit)', 276, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit', 303, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 7', 283, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 7', 284, now(), 0);
--- Copy XenServer 7.5 hypervisor guest OS mappings to XenServer 7.6
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.6.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.5.0';
-
--- Add New XenServer 7.6 Guest OSes
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Jessie 8.0', 269, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Jessie 8.0', 270, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Stretch 9.0', 289, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Stretch 9.0', 290, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Xenial Xerus 16.04', 255, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Xenial Xerus 16.04', 256, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Bionic Beaver 18.04', 279, now(), 0);
-
--- Copy XenServer 7.6 hypervisor guest OS mappings to XenServer8.0
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '8.0.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.6.0';
-
--- Add New XenServer 8.0 Guest OSes
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '8.0.0', 'Windows Server 2019 (64-bit)', 276, now(), 0);
-
--- Add Missing KVM Guest OSes
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.6', 262, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.7', 263, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.7', 264, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.8', 302, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.9', 303, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.10', 304, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.2', 269, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.3', 282, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.4', 283, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.5', 284, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.6', 285, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 8', 286, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 17.04', 277, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 17.10', 278, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 18.04 LTS', 279, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 18.10', 280, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 19.04', 281, now(), 0);
-
--- DPDK client and server mode support
-ALTER TABLE `cloud`.`service_offering_details` CHANGE COLUMN `value` `value` TEXT NOT NULL;
-
-ALTER TABLE `cloud`.`vpc_offerings` ADD COLUMN `sort_key` int(32) NOT NULL default 0 COMMENT 'sort key used for customising sort method';
-
--- Add `sort_key` column to data_center
-ALTER TABLE `cloud`.`data_center` ADD COLUMN `sort_key` INT(32) NOT NULL DEFAULT 0;
-
--- Move domain_id to disk offering details and drop the domain_id column
-INSERT INTO `cloud`.`disk_offering_details` (offering_id, name, value, display) SELECT id, 'domainid', domain_id, 0 FROM `cloud`.`disk_offering` WHERE domain_id IS NOT NULL AND type='Disk';
-INSERT INTO `cloud`.`service_offering_details` (service_offering_id, name, value, display) SELECT id, 'domainid', domain_id, 0 FROM `cloud`.`disk_offering` WHERE domain_id IS NOT NULL AND type='Service';
-
-ALTER TABLE `cloud`.`disk_offering` DROP COLUMN `domain_id`;
-
-ALTER TABLE `cloud`.`service_offering_details` DROP FOREIGN KEY `fk_service_offering_details__service_offering_id`, DROP KEY `uk_service_offering_id_name`;
-ALTER TABLE `cloud`.`service_offering_details` ADD CONSTRAINT `fk_service_offering_details__service_offering_id` FOREIGN KEY (`service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE;
-
--- Disk offering with multi-domains and multi-zones
-DROP VIEW IF EXISTS `cloud`.`disk_offering_view`;
-CREATE VIEW `cloud`.`disk_offering_view` AS
- SELECT
- `disk_offering`.`id` AS `id`,
- `disk_offering`.`uuid` AS `uuid`,
- `disk_offering`.`name` AS `name`,
- `disk_offering`.`display_text` AS `display_text`,
- `disk_offering`.`provisioning_type` AS `provisioning_type`,
- `disk_offering`.`disk_size` AS `disk_size`,
- `disk_offering`.`min_iops` AS `min_iops`,
- `disk_offering`.`max_iops` AS `max_iops`,
- `disk_offering`.`created` AS `created`,
- `disk_offering`.`tags` AS `tags`,
- `disk_offering`.`customized` AS `customized`,
- `disk_offering`.`customized_iops` AS `customized_iops`,
- `disk_offering`.`removed` AS `removed`,
- `disk_offering`.`use_local_storage` AS `use_local_storage`,
- `disk_offering`.`system_use` AS `system_use`,
- `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`,
- `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`,
- `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`,
- `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`,
- `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`,
- `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`,
- `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`,
- `disk_offering`.`iops_read_rate` AS `iops_read_rate`,
- `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`,
- `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`,
- `disk_offering`.`iops_write_rate` AS `iops_write_rate`,
- `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`,
- `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`,
- `disk_offering`.`cache_mode` AS `cache_mode`,
- `disk_offering`.`sort_key` AS `sort_key`,
- `disk_offering`.`type` AS `type`,
- `disk_offering`.`display_offering` AS `display_offering`,
- `disk_offering`.`state` AS `state`,
- GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id,
- GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid,
- GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name,
- GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path,
- GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id,
- GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid,
- GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name
- FROM
- `cloud`.`disk_offering`
- LEFT JOIN
- `cloud`.`disk_offering_details` AS `domain_details` ON `domain_details`.`offering_id` = `disk_offering`.`id` AND `domain_details`.`name`='domainid'
- LEFT JOIN
- `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`)
- LEFT JOIN
- `cloud`.`disk_offering_details` AS `zone_details` ON `zone_details`.`offering_id` = `disk_offering`.`id` AND `zone_details`.`name`='zoneid'
- LEFT JOIN
- `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`)
- WHERE
- `disk_offering`.`state`='Active'
- GROUP BY
- `disk_offering`.`id`;
-
--- Service offering with multi-domains and multi-zones
-DROP VIEW IF EXISTS `cloud`.`service_offering_view`;
-CREATE VIEW `cloud`.`service_offering_view` AS
- SELECT
- `service_offering`.`id` AS `id`,
- `disk_offering`.`uuid` AS `uuid`,
- `disk_offering`.`name` AS `name`,
- `disk_offering`.`display_text` AS `display_text`,
- `disk_offering`.`provisioning_type` AS `provisioning_type`,
- `disk_offering`.`created` AS `created`,
- `disk_offering`.`tags` AS `tags`,
- `disk_offering`.`removed` AS `removed`,
- `disk_offering`.`use_local_storage` AS `use_local_storage`,
- `disk_offering`.`system_use` AS `system_use`,
- `disk_offering`.`customized_iops` AS `customized_iops`,
- `disk_offering`.`min_iops` AS `min_iops`,
- `disk_offering`.`max_iops` AS `max_iops`,
- `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`,
- `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`,
- `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`,
- `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`,
- `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`,
- `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`,
- `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`,
- `disk_offering`.`iops_read_rate` AS `iops_read_rate`,
- `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`,
- `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`,
- `disk_offering`.`iops_write_rate` AS `iops_write_rate`,
- `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`,
- `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`,
- `disk_offering`.`cache_mode` AS `cache_mode`,
- `service_offering`.`cpu` AS `cpu`,
- `service_offering`.`speed` AS `speed`,
- `service_offering`.`ram_size` AS `ram_size`,
- `service_offering`.`nw_rate` AS `nw_rate`,
- `service_offering`.`mc_rate` AS `mc_rate`,
- `service_offering`.`ha_enabled` AS `ha_enabled`,
- `service_offering`.`limit_cpu_use` AS `limit_cpu_use`,
- `service_offering`.`host_tag` AS `host_tag`,
- `service_offering`.`default_use` AS `default_use`,
- `service_offering`.`vm_type` AS `vm_type`,
- `service_offering`.`sort_key` AS `sort_key`,
- `service_offering`.`is_volatile` AS `is_volatile`,
- `service_offering`.`deployment_planner` AS `deployment_planner`,
- GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id,
- GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid,
- GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name,
- GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path,
- GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id,
- GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid,
- GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name
- FROM
- `cloud`.`service_offering`
- INNER JOIN
- `cloud`.`disk_offering_view` AS `disk_offering` ON service_offering.id = disk_offering.id
- LEFT JOIN
- `cloud`.`service_offering_details` AS `domain_details` ON `domain_details`.`service_offering_id` = `disk_offering`.`id` AND `domain_details`.`name`='domainid'
- LEFT JOIN
- `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`)
- LEFT JOIN
- `cloud`.`service_offering_details` AS `zone_details` ON `zone_details`.`service_offering_id` = `disk_offering`.`id` AND `zone_details`.`name`='zoneid'
- LEFT JOIN
- `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`)
- WHERE
- `disk_offering`.`state`='Active'
- GROUP BY
- `service_offering`.`id`;
-
--- Add display column for network offering details table
-ALTER TABLE `cloud`.`network_offering_details` ADD COLUMN `display` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'True if the detail can be displayed to the end user';
-
--- Network offering with multi-domains and multi-zones
-DROP VIEW IF EXISTS `cloud`.`network_offering_view`;
-CREATE VIEW `cloud`.`network_offering_view` AS
- SELECT
- `network_offerings`.`id` AS `id`,
- `network_offerings`.`uuid` AS `uuid`,
- `network_offerings`.`name` AS `name`,
- `network_offerings`.`unique_name` AS `unique_name`,
- `network_offerings`.`display_text` AS `display_text`,
- `network_offerings`.`nw_rate` AS `nw_rate`,
- `network_offerings`.`mc_rate` AS `mc_rate`,
- `network_offerings`.`traffic_type` AS `traffic_type`,
- `network_offerings`.`tags` AS `tags`,
- `network_offerings`.`system_only` AS `system_only`,
- `network_offerings`.`specify_vlan` AS `specify_vlan`,
- `network_offerings`.`service_offering_id` AS `service_offering_id`,
- `network_offerings`.`conserve_mode` AS `conserve_mode`,
- `network_offerings`.`created` AS `created`,
- `network_offerings`.`removed` AS `removed`,
- `network_offerings`.`default` AS `default`,
- `network_offerings`.`availability` AS `availability`,
- `network_offerings`.`dedicated_lb_service` AS `dedicated_lb_service`,
- `network_offerings`.`shared_source_nat_service` AS `shared_source_nat_service`,
- `network_offerings`.`sort_key` AS `sort_key`,
- `network_offerings`.`redundant_router_service` AS `redundant_router_service`,
- `network_offerings`.`state` AS `state`,
- `network_offerings`.`guest_type` AS `guest_type`,
- `network_offerings`.`elastic_ip_service` AS `elastic_ip_service`,
- `network_offerings`.`eip_associate_public_ip` AS `eip_associate_public_ip`,
- `network_offerings`.`elastic_lb_service` AS `elastic_lb_service`,
- `network_offerings`.`specify_ip_ranges` AS `specify_ip_ranges`,
- `network_offerings`.`inline` AS `inline`,
- `network_offerings`.`is_persistent` AS `is_persistent`,
- `network_offerings`.`internal_lb` AS `internal_lb`,
- `network_offerings`.`public_lb` AS `public_lb`,
- `network_offerings`.`egress_default_policy` AS `egress_default_policy`,
- `network_offerings`.`concurrent_connections` AS `concurrent_connections`,
- `network_offerings`.`keep_alive_enabled` AS `keep_alive_enabled`,
- `network_offerings`.`supports_streched_l2` AS `supports_streched_l2`,
- `network_offerings`.`supports_public_access` AS `supports_public_access`,
- `network_offerings`.`for_vpc` AS `for_vpc`,
- `network_offerings`.`service_package_id` AS `service_package_id`,
- GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id,
- GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid,
- GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name,
- GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path,
- GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id,
- GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid,
- GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name
- FROM
- `cloud`.`network_offerings`
- LEFT JOIN
- `cloud`.`network_offering_details` AS `domain_details` ON `domain_details`.`network_offering_id` = `network_offerings`.`id` AND `domain_details`.`name`='domainid'
- LEFT JOIN
- `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`)
- LEFT JOIN
- `cloud`.`network_offering_details` AS `zone_details` ON `zone_details`.`network_offering_id` = `network_offerings`.`id` AND `zone_details`.`name`='zoneid'
- LEFT JOIN
- `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`)
- GROUP BY
- `network_offerings`.`id`;
-
--- Create VPC offering details table
-CREATE TABLE `vpc_offering_details` (
- `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
- `offering_id` bigint(20) unsigned NOT NULL COMMENT 'vpc offering id',
- `name` varchar(255) NOT NULL,
- `value` varchar(1024) NOT NULL,
- `display` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'True if the detail can be displayed to the end user',
- PRIMARY KEY (`id`),
- KEY `fk_vpc_offering_details__vpc_offering_id` (`offering_id`),
- CONSTRAINT `fk_vpc_offering_details__vpc_offering_id` FOREIGN KEY (`offering_id`) REFERENCES `vpc_offerings` (`id`) ON DELETE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
--- VPC offering with multi-domains and multi-zones
-DROP VIEW IF EXISTS `cloud`.`vpc_offering_view`;
-CREATE VIEW `cloud`.`vpc_offering_view` AS
- SELECT
- `vpc_offerings`.`id` AS `id`,
- `vpc_offerings`.`uuid` AS `uuid`,
- `vpc_offerings`.`name` AS `name`,
- `vpc_offerings`.`unique_name` AS `unique_name`,
- `vpc_offerings`.`display_text` AS `display_text`,
- `vpc_offerings`.`state` AS `state`,
- `vpc_offerings`.`default` AS `default`,
- `vpc_offerings`.`created` AS `created`,
- `vpc_offerings`.`removed` AS `removed`,
- `vpc_offerings`.`service_offering_id` AS `service_offering_id`,
- `vpc_offerings`.`supports_distributed_router` AS `supports_distributed_router`,
- `vpc_offerings`.`supports_region_level_vpc` AS `supports_region_level_vpc`,
- `vpc_offerings`.`redundant_router_service` AS `redundant_router_service`,
- `vpc_offerings`.`sort_key` AS `sort_key`,
- GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id,
- GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid,
- GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name,
- GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path,
- GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id,
- GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid,
- GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name
- FROM
- `cloud`.`vpc_offerings`
- LEFT JOIN
- `cloud`.`vpc_offering_details` AS `domain_details` ON `domain_details`.`offering_id` = `vpc_offerings`.`id` AND `domain_details`.`name`='domainid'
- LEFT JOIN
- `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`)
- LEFT JOIN
- `cloud`.`vpc_offering_details` AS `zone_details` ON `zone_details`.`offering_id` = `vpc_offerings`.`id` AND `zone_details`.`name`='zoneid'
- LEFT JOIN
- `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`)
- GROUP BY
- `vpc_offerings`.`id`;
-
--- Recreate data_center_view
-DROP VIEW IF EXISTS `cloud`.`data_center_view`;
-CREATE VIEW `cloud`.`data_center_view` AS
- select
- data_center.id,
- data_center.uuid,
- data_center.name,
- data_center.is_security_group_enabled,
- data_center.is_local_storage_enabled,
- data_center.description,
- data_center.dns1,
- data_center.dns2,
- data_center.ip6_dns1,
- data_center.ip6_dns2,
- data_center.internal_dns1,
- data_center.internal_dns2,
- data_center.guest_network_cidr,
- data_center.domain,
- data_center.networktype,
- data_center.allocation_state,
- data_center.zone_token,
- data_center.dhcp_provider,
- data_center.removed,
- data_center.sort_key,
- domain.id domain_id,
- domain.uuid domain_uuid,
- domain.name domain_name,
- domain.path domain_path,
- dedicated_resources.affinity_group_id,
- dedicated_resources.account_id,
- affinity_group.uuid affinity_group_uuid
- from
- `cloud`.`data_center`
- left join
- `cloud`.`domain` ON data_center.domain_id = domain.id
- left join
- `cloud`.`dedicated_resources` ON data_center.id = dedicated_resources.data_center_id
- left join
- `cloud`.`affinity_group` ON dedicated_resources.affinity_group_id = affinity_group.id;
-
--- Remove key/value tags from project_view
-DROP VIEW IF EXISTS `cloud`.`project_view`;
-CREATE VIEW `cloud`.`project_view` AS
- select
- projects.id,
- projects.uuid,
- projects.name,
- projects.display_text,
- projects.state,
- projects.removed,
- projects.created,
- projects.project_account_id,
- account.account_name owner,
- pacct.account_id,
- domain.id domain_id,
- domain.uuid domain_uuid,
- domain.name domain_name,
- domain.path domain_path
- from
- `cloud`.`projects`
- inner join
- `cloud`.`domain` ON projects.domain_id = domain.id
- inner join
- `cloud`.`project_account` ON projects.id = project_account.project_id
- and project_account.account_role = 'Admin'
- inner join
- `cloud`.`account` ON account.id = project_account.account_id
- left join
- `cloud`.`project_account` pacct ON projects.id = pacct.project_id;
-
--- KVM: Add background task to upload certificates for direct download
-CREATE TABLE `cloud`.`direct_download_certificate` (
- `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
- `uuid` varchar(40) NOT NULL,
- `alias` varchar(255) NOT NULL,
- `certificate` text NOT NULL,
- `hypervisor_type` varchar(45) NOT NULL,
- `zone_id` bigint(20) unsigned NOT NULL,
- PRIMARY KEY (`id`),
- KEY `i_direct_download_certificate_alias` (`alias`),
- KEY `fk_direct_download_certificate__zone_id` (`zone_id`),
- CONSTRAINT `fk_direct_download_certificate__zone_id` FOREIGN KEY (`zone_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-CREATE TABLE `cloud`.`direct_download_certificate_host_map` (
- `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
- `certificate_id` bigint(20) unsigned NOT NULL,
- `host_id` bigint(20) unsigned NOT NULL,
- `revoked` int(1) NOT NULL DEFAULT 0,
- PRIMARY KEY (`id`),
- KEY `fk_direct_download_certificate_host_map__host_id` (`host_id`),
- KEY `fk_direct_download_certificate_host_map__certificate_id` (`certificate_id`),
- CONSTRAINT `fk_direct_download_certificate_host_map__host_id` FOREIGN KEY (`host_id`) REFERENCES `host` (`id`) ON DELETE CASCADE,
- CONSTRAINT `fk_direct_download_certificate_host_map__certificate_id` FOREIGN KEY (`certificate_id`) REFERENCES `direct_download_certificate` (`id`) ON DELETE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
--- [Vmware] Allow configuring appliances on the VM instance wizard when OVF properties are available
-CREATE TABLE `cloud`.`template_ovf_properties` (
- `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
- `template_id` bigint(20) unsigned NOT NULL,
- `key` VARCHAR(100) NOT NULL,
- `type` VARCHAR(45) DEFAULT NULL,
- `value` VARCHAR(100) DEFAULT NULL,
- `password` TINYINT(1) NOT NULL DEFAULT '0',
- `qualifiers` TEXT DEFAULT NULL,
- `user_configurable` TINYINT(1) NOT NULL DEFAULT '0',
- `label` TEXT DEFAULT NULL,
- `description` TEXT DEFAULT NULL,
- PRIMARY KEY (`id`),
- CONSTRAINT `fk_template_ovf_properties__template_id` FOREIGN KEY (`template_id`) REFERENCES `vm_template`(`id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
--- Add VM snapshot ID on usage helper tables
-ALTER TABLE `cloud_usage`.`usage_vmsnapshot` ADD COLUMN `vm_snapshot_id` BIGINT(20) NULL DEFAULT NULL AFTER `processed`;
-ALTER TABLE `cloud_usage`.`usage_snapshot_on_primary` ADD COLUMN `vm_snapshot_id` BIGINT(20) NULL DEFAULT NULL AFTER `deleted`;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41201to41202-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41201to41202-cleanup.sql
new file mode 100644
index 000000000000..744bc7bb2c09
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41201to41202-cleanup.sql
@@ -0,0 +1,82 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.12.0.1 to 4.12.0.2
+--;
+
+-- Ubuntu 18.04 fixes
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '6.5.0' AND `guest_os_id` = 277;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '6.5.0' AND `guest_os_id` = 278;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.0.0' AND `guest_os_id` = 277;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.0.0' AND `guest_os_id` = 278;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.0' AND `guest_os_id` = 277;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.0' AND `guest_os_id` = 278;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.1' AND `guest_os_id` = 277;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.1' AND `guest_os_id` = 278;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.2.0' AND `guest_os_id` = 277;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.2.0' AND `guest_os_id` = 278;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.3.0' AND `guest_os_id` = 277;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.3.0' AND `guest_os_id` = 278;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.4.0' AND `guest_os_id` = 277;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.4.0' AND `guest_os_id` = 278;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.5.0' AND `guest_os_id` = 277;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.5.0' AND `guest_os_id` = 278;
+
+-- Ubuntu 18.10 fixes
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '6.5.0' AND `guest_os_id` = 279;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '6.5.0' AND `guest_os_id` = 280;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.0.0' AND `guest_os_id` = 279;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.0.0' AND `guest_os_id` = 280;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.0' AND `guest_os_id` = 279;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.0' AND `guest_os_id` = 280;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.1' AND `guest_os_id` = 279;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.1' AND `guest_os_id` = 280;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.2' AND `guest_os_id` = 279;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.2' AND `guest_os_id` = 280;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.2.0' AND `guest_os_id` = 279;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.2.0' AND `guest_os_id` = 280;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.3.0' AND `guest_os_id` = 279;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.3.0' AND `guest_os_id` = 280;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.4.0' AND `guest_os_id` = 279;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.4.0' AND `guest_os_id` = 280;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.5.0' AND `guest_os_id` = 279;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.5.0' AND `guest_os_id` = 280;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.6.0' AND `guest_os_id` = 279;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.6.0' AND `guest_os_id` = 280;
+
+-- Ubuntu 19.04 fixes
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '6.5.0' AND `guest_os_id` = 281;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '6.5.0' AND `guest_os_id` = 282;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.0.0' AND `guest_os_id` = 281;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.0.0' AND `guest_os_id` = 282;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.0' AND `guest_os_id` = 281;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.0' AND `guest_os_id` = 282;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.1' AND `guest_os_id` = 281;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.1' AND `guest_os_id` = 282;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.2' AND `guest_os_id` = 281;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.2' AND `guest_os_id` = 282;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.2.0' AND `guest_os_id` = 281;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.2.0' AND `guest_os_id` = 282;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.3.0' AND `guest_os_id` = 281;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.3.0' AND `guest_os_id` = 282;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.4.0' AND `guest_os_id` = 281;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.4.0' AND `guest_os_id` = 282;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.5.0' AND `guest_os_id` = 281;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.5.0' AND `guest_os_id` = 282;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.6.0' AND `guest_os_id` = 281;
+UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.6.0' AND `guest_os_id` = 282;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41201to41202.sql b/engine/schema/src/main/resources/META-INF/db/schema-41201to41202.sql
new file mode 100644
index 000000000000..335a326980e5
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41201to41202.sql
@@ -0,0 +1,32 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.12.0.1 to 4.12.0.2
+--;
+
+-- Windows Server 2019 XenServer guest os mapping
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '6.5.0', 'Other install media', 276, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.0.0', 'Other install media', 276, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.0', 'Other install media', 276, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.1', 'Other install media', 276, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Other install media', 276, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.2.0', 'Other install media', 276, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.3.0', 'Other install media', 276, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.4.0', 'Other install media', 276, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.5.0', 'Other install media', 276, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Other install media', 276, utc_timestamp(), 0);
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41200to41300-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-412025to41300-cleanup.sql
similarity index 100%
rename from engine/schema/src/main/resources/META-INF/db/schema-41200to41300-cleanup.sql
rename to engine/schema/src/main/resources/META-INF/db/schema-412025to41300-cleanup.sql
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-412025to41300.sql b/engine/schema/src/main/resources/META-INF/db/schema-412025to41300.sql
new file mode 100644
index 000000000000..3d1920ca492d
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-412025to41300.sql
@@ -0,0 +1,668 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.12.0.0 to 4.13.0.0
+--;
+
+-- Add XenServer 7.1.1, 7.1.2, 7.6 and 8.0 hypervisor capabilities
+-- (INSERT IGNORE keeps the upgrade idempotent if a capability row already exists)
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '7.6.0', 1000, 253, 64, 1);
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '8.0.0', 1000, 253, 64, 1);
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '7.1.1', 1000, 253, 64, 1);
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '7.1.2', 1000, 253, 64, 1);
+
+-- Add VMware 6.7 hypervisor capabilities (6.7 base plus the 6.7.1/6.7.2/6.7.3 updates)
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7', '1024', '0', '59', '64', '1', '1');
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7.1', '1024', '0', '59', '64', '1', '1');
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7.2', '1024', '0', '59', '64', '1', '1');
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7.3', '1024', '0', '59', '64', '1', '1');
+
+-- Update VMware 6.x hypervisor capabilities
+-- (raise guest/volume/host limits on the existing 6.0 and 6.5 rows to the same values
+-- as the 6.7 rows inserted above)
+UPDATE `cloud`.`hypervisor_capabilities` SET max_guests_limit='1024', max_data_volumes_limit='59', max_hosts_per_cluster='64' WHERE (hypervisor_type='VMware' AND hypervisor_version='6.0' );
+UPDATE `cloud`.`hypervisor_capabilities` SET max_guests_limit='1024', max_data_volumes_limit='59', max_hosts_per_cluster='64' WHERE (hypervisor_type='VMware' AND hypervisor_version='6.5' );
+
+
+-- Copy from 41520to41600 - PR#4699 Drop the procedure `ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` if it already exist.
+DROP PROCEDURE IF EXISTS `cloud`.`ADD_GUEST_OS_AND_HYPERVISOR_MAPPING`;
+
+-- Copy from 41520to41600 - PR#4699 Create the procedure `ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` to add guest_os and guest_os_hypervisor mapping.
+-- Parameters:
+--   guest_os_category_id                   - cloud.guest_os.category_id of the OS to register
+--   guest_os_display_name                  - cloud.guest_os.display_name of the OS to register
+--   guest_os_hypervisor_hypervisor_type    - hypervisor type for the mapping (e.g. 'VMware', 'Xenserver')
+--   guest_os_hypervisor_hypervisor_version - hypervisor version for the mapping
+--   guest_os_hypervisor_guest_os_name      - hypervisor-side guest OS name for the mapping
+-- Behaviour: inserts the guest_os row only if no row with the same
+-- (category_id, display_name) exists, then inserts the guest_os_hypervisor mapping
+-- only if an identical mapping does not already exist (idempotent on rerun).
+-- NOTE(review): the statement separators are written at the start of the next line
+-- ("; INSERT", ";END;") -- presumably so the upgrade script's statement splitter does
+-- not cut the procedure body; confirm against the script runner before reformatting.
+CREATE PROCEDURE `cloud`.`ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` (
+ IN guest_os_category_id bigint(20) unsigned,
+ IN guest_os_display_name VARCHAR(255),
+ IN guest_os_hypervisor_hypervisor_type VARCHAR(32),
+ IN guest_os_hypervisor_hypervisor_version VARCHAR(32),
+ IN guest_os_hypervisor_guest_os_name VARCHAR(255)
+)
+BEGIN
+ INSERT INTO cloud.guest_os (uuid, category_id, display_name, created)
+ SELECT UUID(), guest_os_category_id, guest_os_display_name, now()
+ FROM DUAL
+ WHERE not exists( SELECT 1
+ FROM cloud.guest_os
+ WHERE cloud.guest_os.category_id = guest_os_category_id
+ AND cloud.guest_os.display_name = guest_os_display_name)
+
+; INSERT INTO cloud.guest_os_hypervisor (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created)
+ SELECT UUID(), guest_os_hypervisor_hypervisor_type, guest_os_hypervisor_hypervisor_version, guest_os_hypervisor_guest_os_name, guest_os.id, now()
+ FROM cloud.guest_os
+ WHERE guest_os.category_id = guest_os_category_id
+ AND guest_os.display_name = guest_os_display_name
+ AND NOT EXISTS (SELECT 1
+ FROM cloud.guest_os_hypervisor as hypervisor
+ WHERE hypervisor_type = guest_os_hypervisor_hypervisor_type
+ AND hypervisor_version = guest_os_hypervisor_hypervisor_version
+ AND hypervisor.guest_os_id = guest_os.id
+ AND hypervisor.guest_os_name = guest_os_hypervisor_guest_os_name)
+;END;
+
+-- Recreate `ADD_GUEST_OS_ONLY`: inserts a cloud.guest_os row for the given
+-- (category_id, display_name) pair only when no such row exists yet (idempotent).
+-- Same leading-`;` separator style as ADD_GUEST_OS_AND_HYPERVISOR_MAPPING above.
+DROP PROCEDURE IF EXISTS `cloud`.`ADD_GUEST_OS_ONLY`;
+CREATE PROCEDURE `cloud`.`ADD_GUEST_OS_ONLY` (
+ IN guest_os_category_id bigint(20) unsigned,
+ IN guest_os_display_name VARCHAR(255)
+)
+BEGIN
+ INSERT INTO cloud.guest_os (uuid, category_id, display_name, created)
+ SELECT UUID(), guest_os_category_id, guest_os_display_name, now()
+ FROM DUAL
+ WHERE not exists( SELECT 1
+ FROM cloud.guest_os
+ WHERE cloud.guest_os.category_id = guest_os_category_id
+ AND cloud.guest_os.display_name = guest_os_display_name)
+;END;
+
+---- Add new OS versions
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('277', UUID(), '1', 'Ubuntu 17.04', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('278', UUID(), '1', 'Ubuntu 17.10', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('279', UUID(), '1', 'Ubuntu 18.04 LTS', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('280', UUID(), '1', 'Ubuntu 18.10', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('281', UUID(), '1', 'Ubuntu 19.04', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('282', UUID(), '1', 'Red Hat Enterprise Linux 7.3', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('283', UUID(), '1', 'Red Hat Enterprise Linux 7.4', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('284', UUID(), '1', 'Red Hat Enterprise Linux 7.5', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('285', UUID(), '1', 'Red Hat Enterprise Linux 7.6', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('286', UUID(), '1', 'Red Hat Enterprise Linux 8.0', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('289', UUID(), '2', 'Debian GNU/Linux 9 (32-bit)', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('290', UUID(), '2', 'Debian GNU/Linux 9 (64-bit)', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('291', UUID(), '5', 'SUSE Linux Enterprise Server 15 (64-bit)', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('292', UUID(), '2', 'Debian GNU/Linux 10 (32-bit)', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('293', UUID(), '2', 'Debian GNU/Linux 10 (64-bit)', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('294', UUID(), '2', 'Linux 4.x Kernel (32-bit)', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('295', UUID(), '2', 'Linux 4.x Kernel (64-bit)', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('296', UUID(), '3', 'Oracle Linux 8', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('297', UUID(), '1', 'CentOS 8', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('298', UUID(), '9', 'FreeBSD 11 (32-bit)', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('299', UUID(), '9', 'FreeBSD 11 (64-bit)', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('300', UUID(), '9', 'FreeBSD 12 (32-bit)', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('301', UUID(), '9', 'FreeBSD 12 (64-bit)', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('302', UUID(), '1', 'CentOS 6.8', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('303', UUID(), '1', 'CentOS 6.9', now(), '0');
+--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('304', UUID(), '1', 'CentOS 6.10', now(), '0');
+--
+---- Add New and missing VMware 6.5 Guest OSes
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 235, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 236, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 147, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 148, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 213, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 214, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 215, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 216, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 217, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 218, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 219, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 220, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 250, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 251, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux7_64Guest', 247, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntuGuest', 255, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 256, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 277, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 278, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 279, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 280, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 282, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 283, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 284, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 285, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'windows9Server64Guest', 276, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian9Guest', 289, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian9_64Guest', 290, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian10Guest', 292, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian10_64Guest', 293, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'sles15_64Guest', 291, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos6_64Guest', 302, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos6_64Guest', 303, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos6_64Guest', 304, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel8_64Guest', 286, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 281, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'other4xLinuxGuest', 294, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'other4xLinux64Guest', 295, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux8_64Guest', 296, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos8_64Guest', 297, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd11Guest', 298, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd11_64Guest', 299, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd12Guest', 300, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd12_64Guest', 301, now(), 0);
+
+-- Register guest OSes (creating the cloud.guest_os row when missing) and their
+-- VMware 6.5 hypervisor mappings; the 6.7.x versions are copied from 6.5 below.
+-- NOTE(review): Ubuntu 16.04 uses category 10 while the newer Ubuntu/RHEL/CentOS
+-- entries use category 1 -- this mirrors the commented-out INSERTs above, but
+-- verify the ids against cloud.guest_os_category before release.
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6 (32-bit)', 'VMware', '6.5', 'oracleLinux6Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6 (64-bit)', 'VMware', '6.5', 'oracleLinux6_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.0 (32-bit)', 'VMware', '6.5', 'oracleLinux6Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.0 (64-bit)', 'VMware', '6.5', 'oracleLinux6_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.1 (32-bit)', 'VMware', '6.5', 'oracleLinux6Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.1 (64-bit)', 'VMware', '6.5', 'oracleLinux6_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.2 (32-bit)', 'VMware', '6.5', 'oracleLinux6Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.2 (64-bit)', 'VMware', '6.5', 'oracleLinux6_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.3 (32-bit)', 'VMware', '6.5', 'oracleLinux6Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.3 (64-bit)', 'VMware', '6.5', 'oracleLinux6_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.4 (32-bit)', 'VMware', '6.5', 'oracleLinux6Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.4 (64-bit)', 'VMware', '6.5', 'oracleLinux6_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.5 (32-bit)', 'VMware', '6.5', 'oracleLinux6Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.5 (64-bit)', 'VMware', '6.5', 'oracleLinux6_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Linux 7', 'VMware', '6.5', 'oracleLinux7_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 16.04 (32-bit)', 'VMware', '6.5', 'ubuntuGuest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 16.04 (64-bit)', 'VMware', '6.5', 'ubuntu64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2019 (64-bit)', 'VMware', '6.5', 'windows9Server64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 17.04', 'VMware', '6.5', 'ubuntu64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 17.10', 'VMware', '6.5', 'ubuntu64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 18.04 LTS', 'VMware', '6.5', 'ubuntu64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 18.10', 'VMware', '6.5', 'ubuntu64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 19.04', 'VMware', '6.5', 'ubuntu64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.3', 'VMware', '6.5', 'rhel7_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.4', 'VMware', '6.5', 'rhel7_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.5', 'VMware', '6.5', 'rhel7_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.6', 'VMware', '6.5', 'rhel7_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 8.0', 'VMware', '6.5', 'rhel8_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 9 (32-bit)', 'VMware', '6.5', 'debian9Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 9 (64-bit)', 'VMware', '6.5', 'debian9_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (5, 'SUSE Linux Enterprise Server 15 (64-bit)', 'VMware', '6.5', 'sles15_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 10 (32-bit)', 'VMware', '6.5', 'debian10Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 10 (64-bit)', 'VMware', '6.5', 'debian10_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Linux 4.x Kernel (32-bit)', 'VMware', '6.5', 'other4xLinuxGuest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Linux 4.x Kernel (64-bit)', 'VMware', '6.5', 'other4xLinux64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Linux 8', 'VMware', '6.5', 'oracleLinux8_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 8', 'VMware', '6.5', 'centos8_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (9, 'FreeBSD 11 (32-bit)', 'VMware', '6.5', 'freebsd11Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (9, 'FreeBSD 11 (64-bit)', 'VMware', '6.5', 'freebsd11_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (9, 'FreeBSD 12 (32-bit)', 'VMware', '6.5', 'freebsd12Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (9, 'FreeBSD 12 (64-bit)', 'VMware', '6.5', 'freebsd12_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.8', 'VMware', '6.5', 'centos6_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.9', 'VMware', '6.5', 'centos6_64Guest');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.10', 'VMware', '6.5', 'centos6_64Guest');
+
+-- Copy VMware 6.5 Guest OSes to VMware 6.7
+-- (each 6.7.x update copies from the previous version, so the copies chain:
+--  6.5 -> 6.7 -> 6.7.1 -> 6.7.2 -> 6.7.3; INSERT IGNORE skips pre-existing rows)
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.5';
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7.1', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.7';
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7.2', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.7.1';
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7.3', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.7.2';
+
+-- Copy XenServer 7.1.0 to XenServer 7.1.1
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.1.1', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.1.0';
+
+-- Copy XenServer 7.1.1 to XenServer 7.1.2
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.1.2', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.1.1';
+
+---- Add New XenServer 7.1.2 Guest OSes
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Stretch 9.0', 289, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Stretch 9.0', 290, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Bionic Beaver 18.04', 279, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2019 (64-bit)', 276, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 303, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 7', 283, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 7', 284, now(), 0);
+-- Register the new XenServer 7.1.2 mappings (creating guest_os rows when missing).
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 9 (32-bit)', 'Xenserver', '7.1.2', 'Debian Stretch 9.0');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 9 (64-bit)', 'Xenserver', '7.1.2', 'Debian Stretch 9.0');
+-- Fixed: use the existing 'Ubuntu 18.04 LTS' display name (the guest_os row created
+-- for the VMware 6.5 mapping above, id 279 in the commented-out INSERT) instead of
+-- creating a duplicate 'Ubuntu 18.04' guest_os row.
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 18.04 LTS', 'Xenserver', '7.1.2', 'Ubuntu Bionic Beaver 18.04');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2019 (64-bit)', 'Xenserver', '7.1.2', 'Windows Server 2019 (64-bit)');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.9', 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.4', 'Xenserver', '7.1.2', 'CentOS 7');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.5', 'Xenserver', '7.1.2', 'CentOS 7');
+
+-- Copy XenServer 7.5 hypervisor guest OS mappings to XenServer 7.6
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.6.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.5.0';
+
+---- Add New XenServer 7.6 Guest OSes
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Jessie 8.0', 269, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Jessie 8.0', 270, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Stretch 9.0', 289, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Stretch 9.0', 290, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Xenial Xerus 16.04', 255, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Xenial Xerus 16.04', 256, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Bionic Beaver 18.04', 279, now(), 0);
+-- Register the new XenServer 7.6 mappings (creating guest_os rows when missing).
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 8 (32-bit)', 'Xenserver', '7.6.0', 'Debian Jessie 8.0');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 8 (64-bit)', 'Xenserver', '7.6.0', 'Debian Jessie 8.0');
+-- Fixed: Debian 9 is "Stretch", not "Jessie" -- use the template name given in the
+-- commented-out INSERTs above (and in the 7.1.2 section), otherwise the mapping
+-- points at a guest OS name that does not exist.
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 9 (32-bit)', 'Xenserver', '7.6.0', 'Debian Stretch 9.0');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 9 (64-bit)', 'Xenserver', '7.6.0', 'Debian Stretch 9.0');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 16.04 (32-bit)', 'Xenserver', '7.6.0', 'Ubuntu Xenial Xerus 16.04');
+-- Fixed: second entry covers the 64-bit flavour (ids 255/256 in the commented-out
+-- INSERTs); it was a copy-paste duplicate of the 32-bit line.
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 16.04 (64-bit)', 'Xenserver', '7.6.0', 'Ubuntu Xenial Xerus 16.04');
+-- Fixed: use the existing 'Ubuntu 18.04 LTS' display name (id 279) instead of
+-- creating a duplicate 'Ubuntu 18.04' guest_os row.
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 18.04 LTS', 'Xenserver', '7.6.0', 'Ubuntu Bionic Beaver 18.04');
+
+-- Copy XenServer 7.6 hypervisor guest OS mappings to XenServer 8.0
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '8.0.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.6.0';
+
+-- Add New XenServer 8.0 Guest OSes
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2019 (64-bit)', 'Xenserver', '8.0.0', 'Windows Server 2019 (64-bit)');
+
+-- Add Missing KVM Guest OSes
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.6', 262, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.7', 263, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.7', 264, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.8', 302, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.9', 303, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.10', 304, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.2', 269, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.3', 282, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.4', 283, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.5', 284, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.6', 285, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 8', 286, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 17.04', 277, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 17.10', 278, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 18.04 LTS', 279, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 18.10', 280, now(), 0);
+--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 19.04', 281, now(), 0);
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.6 (64-bit)', 'KVM', 'default', 'CentOS 6.6');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.7 (32-bit)', 'KVM', 'default', 'CentOS 6.7');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.7 (64-bit)', 'KVM', 'default', 'CentOS 6.7');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.8', 'KVM', 'default', 'CentOS 6.8');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.9', 'KVM', 'default', 'CentOS 6.9');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.10', 'KVM', 'default', 'CentOS 6.10');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.2', 'KVM', 'default', 'Red Hat Enterprise Linux 7.2'); -- NOTE(review): category 1 is what the CentOS rows above use; confirm the RHEL rows should not use a Red Hat category instead
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.3', 'KVM', 'default', 'Red Hat Enterprise Linux 7.3');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.4', 'KVM', 'default', 'Red Hat Enterprise Linux 7.4');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.5', 'KVM', 'default', 'Red Hat Enterprise Linux 7.5');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.6', 'KVM', 'default', 'Red Hat Enterprise Linux 7.6');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 8', 'KVM', 'default', 'Red Hat Enterprise Linux 8');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 17.04', 'KVM', 'default', 'Ubuntu 17.04'); -- NOTE(review): the XenServer Ubuntu 16.04 mappings earlier in this file use category 10; confirm category 1 is intended for these Ubuntu rows
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 17.10', 'KVM', 'default', 'Ubuntu 17.10');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 18.04 LTS', 'KVM', 'default', 'Ubuntu 18.04 LTS');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 18.10', 'KVM', 'default', 'Ubuntu 18.10');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 19.04', 'KVM', 'default', 'Ubuntu 19.04');
+
+-- DPDK client and server mode support
+ALTER TABLE `cloud`.`service_offering_details` CHANGE COLUMN `value` `value` TEXT NOT NULL; -- widen value so long DPDK settings fit
+
+ALTER TABLE `cloud`.`vpc_offerings` ADD COLUMN `sort_key` int(32) NOT NULL default 0 COMMENT 'sort key used for customising sort method';
+
+-- Add `sort_key` column to data_center
+ALTER TABLE `cloud`.`data_center` ADD COLUMN `sort_key` INT(32) NOT NULL DEFAULT 0; -- exposed via data_center_view below
+
+-- Move domain_id to disk offering details and drop the domain_id column
+INSERT INTO `cloud`.`disk_offering_details` (offering_id, name, value, display) SELECT id, 'domainid', domain_id, 0 FROM `cloud`.`disk_offering` WHERE domain_id IS NOT NULL AND type='Disk'; -- disk offerings: keep the domain restriction as a 'domainid' detail row
+INSERT INTO `cloud`.`service_offering_details` (service_offering_id, name, value, display) SELECT id, 'domainid', domain_id, 0 FROM `cloud`.`disk_offering` WHERE domain_id IS NOT NULL AND type='Service'; -- service offerings: same, stored in service_offering_details
+
+ALTER TABLE `cloud`.`disk_offering` DROP COLUMN `domain_id`; -- must run after the two INSERT ... SELECTs above
+
+ALTER TABLE `cloud`.`service_offering_details` DROP FOREIGN KEY `fk_service_offering_details__service_offering_id`, DROP KEY `uk_service_offering_id_name`;
+ALTER TABLE `cloud`.`service_offering_details` ADD CONSTRAINT `fk_service_offering_details__service_offering_id` FOREIGN KEY (`service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE; -- recreate the cascade FK dropped above; NOTE(review): the unique key is presumably dropped to allow repeated names per offering -- confirm
+
+-- Disk offering with multi-domains and multi-zones
+DROP VIEW IF EXISTS `cloud`.`disk_offering_view`;
+CREATE VIEW `cloud`.`disk_offering_view` AS -- one row per active disk offering, with its domain/zone restrictions aggregated
+ SELECT
+ `disk_offering`.`id` AS `id`,
+ `disk_offering`.`uuid` AS `uuid`,
+ `disk_offering`.`name` AS `name`,
+ `disk_offering`.`display_text` AS `display_text`,
+ `disk_offering`.`provisioning_type` AS `provisioning_type`,
+ `disk_offering`.`disk_size` AS `disk_size`,
+ `disk_offering`.`min_iops` AS `min_iops`,
+ `disk_offering`.`max_iops` AS `max_iops`,
+ `disk_offering`.`created` AS `created`,
+ `disk_offering`.`tags` AS `tags`,
+ `disk_offering`.`customized` AS `customized`,
+ `disk_offering`.`customized_iops` AS `customized_iops`,
+ `disk_offering`.`removed` AS `removed`,
+ `disk_offering`.`use_local_storage` AS `use_local_storage`,
+ `disk_offering`.`system_use` AS `system_use`,
+ `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`,
+ `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`,
+ `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`,
+ `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`,
+ `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`,
+ `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`,
+ `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`,
+ `disk_offering`.`iops_read_rate` AS `iops_read_rate`,
+ `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`,
+ `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`,
+ `disk_offering`.`iops_write_rate` AS `iops_write_rate`,
+ `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`,
+ `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`,
+ `disk_offering`.`cache_mode` AS `cache_mode`,
+ `disk_offering`.`sort_key` AS `sort_key`,
+ `disk_offering`.`type` AS `type`,
+ `disk_offering`.`display_offering` AS `display_offering`,
+ `disk_offering`.`state` AS `state`,
+ GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id,
+ GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid,
+ GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name,
+ GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path,
+ GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id,
+ GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid,
+ GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name
+ FROM
+ `cloud`.`disk_offering`
+ LEFT JOIN
+ `cloud`.`disk_offering_details` AS `domain_details` ON `domain_details`.`offering_id` = `disk_offering`.`id` AND `domain_details`.`name`='domainid' -- the 'domainid' detail holds a comma-separated domain id list (see migration above)
+ LEFT JOIN
+ `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) -- FIND_IN_SET expands each id in that comma-separated value
+ LEFT JOIN
+ `cloud`.`disk_offering_details` AS `zone_details` ON `zone_details`.`offering_id` = `disk_offering`.`id` AND `zone_details`.`name`='zoneid'
+ LEFT JOIN
+ `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`)
+ WHERE
+ `disk_offering`.`state`='Active' -- only active offerings are exposed
+ GROUP BY
+ `disk_offering`.`id`;
+
+-- Service offering with multi-domains and multi-zones
+DROP VIEW IF EXISTS `cloud`.`service_offering_view`;
+CREATE VIEW `cloud`.`service_offering_view` AS -- service offerings enriched with their disk-offering attributes plus aggregated domain/zone restrictions
+ SELECT
+ `service_offering`.`id` AS `id`,
+ `disk_offering`.`uuid` AS `uuid`,
+ `disk_offering`.`name` AS `name`,
+ `disk_offering`.`display_text` AS `display_text`,
+ `disk_offering`.`provisioning_type` AS `provisioning_type`,
+ `disk_offering`.`created` AS `created`,
+ `disk_offering`.`tags` AS `tags`,
+ `disk_offering`.`removed` AS `removed`,
+ `disk_offering`.`use_local_storage` AS `use_local_storage`,
+ `disk_offering`.`system_use` AS `system_use`,
+ `disk_offering`.`customized_iops` AS `customized_iops`,
+ `disk_offering`.`min_iops` AS `min_iops`,
+ `disk_offering`.`max_iops` AS `max_iops`,
+ `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`,
+ `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`,
+ `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`,
+ `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`,
+ `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`,
+ `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`,
+ `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`,
+ `disk_offering`.`iops_read_rate` AS `iops_read_rate`,
+ `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`,
+ `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`,
+ `disk_offering`.`iops_write_rate` AS `iops_write_rate`,
+ `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`,
+ `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`,
+ `disk_offering`.`cache_mode` AS `cache_mode`,
+ `service_offering`.`cpu` AS `cpu`,
+ `service_offering`.`speed` AS `speed`,
+ `service_offering`.`ram_size` AS `ram_size`,
+ `service_offering`.`nw_rate` AS `nw_rate`,
+ `service_offering`.`mc_rate` AS `mc_rate`,
+ `service_offering`.`ha_enabled` AS `ha_enabled`,
+ `service_offering`.`limit_cpu_use` AS `limit_cpu_use`,
+ `service_offering`.`host_tag` AS `host_tag`,
+ `service_offering`.`default_use` AS `default_use`,
+ `service_offering`.`vm_type` AS `vm_type`,
+ `service_offering`.`sort_key` AS `sort_key`,
+ `service_offering`.`is_volatile` AS `is_volatile`,
+ `service_offering`.`deployment_planner` AS `deployment_planner`,
+ GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id,
+ GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid,
+ GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name,
+ GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path,
+ GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id,
+ GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid,
+ GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name
+ FROM
+ `cloud`.`service_offering`
+ INNER JOIN
+ `cloud`.`disk_offering_view` AS `disk_offering` ON service_offering.id = disk_offering.id -- joins on equal ids: assumes each service offering shares its id with a disk_offering row -- confirm against schema convention
+ LEFT JOIN
+ `cloud`.`service_offering_details` AS `domain_details` ON `domain_details`.`service_offering_id` = `disk_offering`.`id` AND `domain_details`.`name`='domainid'
+ LEFT JOIN
+ `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`)
+ LEFT JOIN
+ `cloud`.`service_offering_details` AS `zone_details` ON `zone_details`.`service_offering_id` = `disk_offering`.`id` AND `zone_details`.`name`='zoneid'
+ LEFT JOIN
+ `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`)
+ WHERE
+ `disk_offering`.`state`='Active' -- inherits the active-only filter via disk_offering_view as well
+ GROUP BY
+ `service_offering`.`id`;
+
+-- Add display column for network offering details table
+ALTER TABLE `cloud`.`network_offering_details` ADD COLUMN `display` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'True if the detail can be displayed to the end user';
+
+-- Network offering with multi-domains and multi-zones
+DROP VIEW IF EXISTS `cloud`.`network_offering_view`;
+CREATE VIEW `cloud`.`network_offering_view` AS -- one row per network offering (no state filter), domain/zone restrictions aggregated
+ SELECT
+ `network_offerings`.`id` AS `id`,
+ `network_offerings`.`uuid` AS `uuid`,
+ `network_offerings`.`name` AS `name`,
+ `network_offerings`.`unique_name` AS `unique_name`,
+ `network_offerings`.`display_text` AS `display_text`,
+ `network_offerings`.`nw_rate` AS `nw_rate`,
+ `network_offerings`.`mc_rate` AS `mc_rate`,
+ `network_offerings`.`traffic_type` AS `traffic_type`,
+ `network_offerings`.`tags` AS `tags`,
+ `network_offerings`.`system_only` AS `system_only`,
+ `network_offerings`.`specify_vlan` AS `specify_vlan`,
+ `network_offerings`.`service_offering_id` AS `service_offering_id`,
+ `network_offerings`.`conserve_mode` AS `conserve_mode`,
+ `network_offerings`.`created` AS `created`,
+ `network_offerings`.`removed` AS `removed`,
+ `network_offerings`.`default` AS `default`,
+ `network_offerings`.`availability` AS `availability`,
+ `network_offerings`.`dedicated_lb_service` AS `dedicated_lb_service`,
+ `network_offerings`.`shared_source_nat_service` AS `shared_source_nat_service`,
+ `network_offerings`.`sort_key` AS `sort_key`,
+ `network_offerings`.`redundant_router_service` AS `redundant_router_service`,
+ `network_offerings`.`state` AS `state`,
+ `network_offerings`.`guest_type` AS `guest_type`,
+ `network_offerings`.`elastic_ip_service` AS `elastic_ip_service`,
+ `network_offerings`.`eip_associate_public_ip` AS `eip_associate_public_ip`,
+ `network_offerings`.`elastic_lb_service` AS `elastic_lb_service`,
+ `network_offerings`.`specify_ip_ranges` AS `specify_ip_ranges`,
+ `network_offerings`.`inline` AS `inline`,
+ `network_offerings`.`is_persistent` AS `is_persistent`,
+ `network_offerings`.`internal_lb` AS `internal_lb`,
+ `network_offerings`.`public_lb` AS `public_lb`,
+ `network_offerings`.`egress_default_policy` AS `egress_default_policy`,
+ `network_offerings`.`concurrent_connections` AS `concurrent_connections`,
+ `network_offerings`.`keep_alive_enabled` AS `keep_alive_enabled`,
+ `network_offerings`.`supports_streched_l2` AS `supports_streched_l2`,
+ `network_offerings`.`supports_public_access` AS `supports_public_access`,
+ `network_offerings`.`for_vpc` AS `for_vpc`,
+ `network_offerings`.`service_package_id` AS `service_package_id`,
+ GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id,
+ GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid,
+ GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name,
+ GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path,
+ GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id,
+ GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid,
+ GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name
+ FROM
+ `cloud`.`network_offerings`
+ LEFT JOIN
+ `cloud`.`network_offering_details` AS `domain_details` ON `domain_details`.`network_offering_id` = `network_offerings`.`id` AND `domain_details`.`name`='domainid' -- comma-separated domain ids in the detail value
+ LEFT JOIN
+ `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`)
+ LEFT JOIN
+ `cloud`.`network_offering_details` AS `zone_details` ON `zone_details`.`network_offering_id` = `network_offerings`.`id` AND `zone_details`.`name`='zoneid'
+ LEFT JOIN
+ `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`)
+ GROUP BY
+ `network_offerings`.`id`;
+
+-- Create VPC offering details table
+CREATE TABLE `cloud`.`vpc_offering_details` ( -- fixed: qualify with `cloud` like every other CREATE TABLE here; vpc_offering_view below reads `cloud`.`vpc_offering_details`
+ `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ `offering_id` bigint(20) unsigned NOT NULL COMMENT 'vpc offering id',
+ `name` varchar(255) NOT NULL,
+ `value` varchar(1024) NOT NULL,
+ `display` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'True if the detail can be displayed to the end user',
+ PRIMARY KEY (`id`),
+ KEY `fk_vpc_offering_details__vpc_offering_id` (`offering_id`),
+ CONSTRAINT `fk_vpc_offering_details__vpc_offering_id` FOREIGN KEY (`offering_id`) REFERENCES `vpc_offerings` (`id`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- VPC offering with multi-domains and multi-zones
+DROP VIEW IF EXISTS `cloud`.`vpc_offering_view`;
+CREATE VIEW `cloud`.`vpc_offering_view` AS -- one row per VPC offering, domain/zone restrictions aggregated from vpc_offering_details
+ SELECT
+ `vpc_offerings`.`id` AS `id`,
+ `vpc_offerings`.`uuid` AS `uuid`,
+ `vpc_offerings`.`name` AS `name`,
+ `vpc_offerings`.`unique_name` AS `unique_name`,
+ `vpc_offerings`.`display_text` AS `display_text`,
+ `vpc_offerings`.`state` AS `state`,
+ `vpc_offerings`.`default` AS `default`,
+ `vpc_offerings`.`created` AS `created`,
+ `vpc_offerings`.`removed` AS `removed`,
+ `vpc_offerings`.`service_offering_id` AS `service_offering_id`,
+ `vpc_offerings`.`supports_distributed_router` AS `supports_distributed_router`,
+ `vpc_offerings`.`supports_region_level_vpc` AS `supports_region_level_vpc`,
+ `vpc_offerings`.`redundant_router_service` AS `redundant_router_service`,
+ `vpc_offerings`.`sort_key` AS `sort_key`,
+ GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id,
+ GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid,
+ GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name,
+ GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path,
+ GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id,
+ GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid,
+ GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name
+ FROM
+ `cloud`.`vpc_offerings`
+ LEFT JOIN
+ `cloud`.`vpc_offering_details` AS `domain_details` ON `domain_details`.`offering_id` = `vpc_offerings`.`id` AND `domain_details`.`name`='domainid' -- comma-separated domain ids in the detail value
+ LEFT JOIN
+ `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`)
+ LEFT JOIN
+ `cloud`.`vpc_offering_details` AS `zone_details` ON `zone_details`.`offering_id` = `vpc_offerings`.`id` AND `zone_details`.`name`='zoneid'
+ LEFT JOIN
+ `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`)
+ GROUP BY
+ `vpc_offerings`.`id`;
+
+-- Recreate data_center_view
+DROP VIEW IF EXISTS `cloud`.`data_center_view`;
+CREATE VIEW `cloud`.`data_center_view` AS -- rebuilt to expose the new sort_key column added above
+ select
+ data_center.id,
+ data_center.uuid,
+ data_center.name,
+ data_center.is_security_group_enabled,
+ data_center.is_local_storage_enabled,
+ data_center.description,
+ data_center.dns1,
+ data_center.dns2,
+ data_center.ip6_dns1,
+ data_center.ip6_dns2,
+ data_center.internal_dns1,
+ data_center.internal_dns2,
+ data_center.guest_network_cidr,
+ data_center.domain,
+ data_center.networktype,
+ data_center.allocation_state,
+ data_center.zone_token,
+ data_center.dhcp_provider,
+ data_center.removed,
+ data_center.sort_key,
+ domain.id domain_id,
+ domain.uuid domain_uuid,
+ domain.name domain_name,
+ domain.path domain_path,
+ dedicated_resources.affinity_group_id,
+ dedicated_resources.account_id,
+ affinity_group.uuid affinity_group_uuid
+ from
+ `cloud`.`data_center`
+ left join
+ `cloud`.`domain` ON data_center.domain_id = domain.id
+ left join
+ `cloud`.`dedicated_resources` ON data_center.id = dedicated_resources.data_center_id -- dedication info (account / affinity group) is optional, hence left joins
+ left join
+ `cloud`.`affinity_group` ON dedicated_resources.affinity_group_id = affinity_group.id;
+
+-- Remove key/value tags from project_view
+DROP VIEW IF EXISTS `cloud`.`project_view`;
+CREATE VIEW `cloud`.`project_view` AS -- project summary with its owning (Admin-role) account and domain; key/value tags removed from this view
+ select
+ projects.id,
+ projects.uuid,
+ projects.name,
+ projects.display_text,
+ projects.state,
+ projects.removed,
+ projects.created,
+ projects.project_account_id,
+ account.account_name owner, -- the Admin-role account joined below
+ pacct.account_id,
+ domain.id domain_id,
+ domain.uuid domain_uuid,
+ domain.name domain_name,
+ domain.path domain_path
+ from
+ `cloud`.`projects`
+ inner join
+ `cloud`.`domain` ON projects.domain_id = domain.id
+ inner join
+ `cloud`.`project_account` ON projects.id = project_account.project_id
+ and project_account.account_role = 'Admin' -- restrict the unaliased join to the project owner
+ inner join
+ `cloud`.`account` ON account.id = project_account.account_id
+ left join
+ `cloud`.`project_account` pacct ON projects.id = pacct.project_id; -- second, unrestricted join: one row per project member
+
+-- KVM: Add background task to upload certificates for direct download
+CREATE TABLE `cloud`.`direct_download_certificate` ( -- certificates to be uploaded to hosts for direct download (per zone)
+ `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ `uuid` varchar(40) NOT NULL,
+ `alias` varchar(255) NOT NULL,
+ `certificate` text NOT NULL,
+ `hypervisor_type` varchar(45) NOT NULL,
+ `zone_id` bigint(20) unsigned NOT NULL,
+ PRIMARY KEY (`id`),
+ KEY `i_direct_download_certificate_alias` (`alias`),
+ KEY `fk_direct_download_certificate__zone_id` (`zone_id`),
+ CONSTRAINT `fk_direct_download_certificate__zone_id` FOREIGN KEY (`zone_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `cloud`.`direct_download_certificate_host_map` ( -- maps each certificate to the hosts it was uploaded to
+ `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ `certificate_id` bigint(20) unsigned NOT NULL,
+ `host_id` bigint(20) unsigned NOT NULL,
+ `revoked` int(1) NOT NULL DEFAULT 0,
+ PRIMARY KEY (`id`),
+ KEY `fk_direct_download_certificate_host_map__host_id` (`host_id`),
+ KEY `fk_direct_download_certificate_host_map__certificate_id` (`certificate_id`),
+ CONSTRAINT `fk_direct_download_certificate_host_map__host_id` FOREIGN KEY (`host_id`) REFERENCES `host` (`id`) ON DELETE CASCADE,
+ CONSTRAINT `fk_direct_download_certificate_host_map__certificate_id` FOREIGN KEY (`certificate_id`) REFERENCES `direct_download_certificate` (`id`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- [Vmware] Allow configuring appliances on the VM instance wizard when OVF properties are available
+CREATE TABLE `cloud`.`template_ovf_properties` ( -- per-template OVF properties shown in the VM instance wizard (Vmware appliances)
+ `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ `template_id` bigint(20) unsigned NOT NULL,
+ `key` VARCHAR(100) NOT NULL,
+ `type` VARCHAR(45) DEFAULT NULL,
+ `value` VARCHAR(100) DEFAULT NULL,
+ `password` TINYINT(1) NOT NULL DEFAULT '0',
+ `qualifiers` TEXT DEFAULT NULL,
+ `user_configurable` TINYINT(1) NOT NULL DEFAULT '0',
+ `label` TEXT DEFAULT NULL,
+ `description` TEXT DEFAULT NULL,
+ PRIMARY KEY (`id`),
+ CONSTRAINT `fk_template_ovf_properties__template_id` FOREIGN KEY (`template_id`) REFERENCES `vm_template`(`id`) -- NOTE(review): no ON DELETE CASCADE, unlike the other new FKs in this upgrade -- confirm intended
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- Add VM snapshot ID on usage helper tables
+ALTER TABLE `cloud_usage`.`usage_vmsnapshot` ADD COLUMN `vm_snapshot_id` BIGINT(20) NULL DEFAULT NULL AFTER `processed`; -- nullable so existing usage rows need no backfill
+ALTER TABLE `cloud_usage`.`usage_snapshot_on_primary` ADD COLUMN `vm_snapshot_id` BIGINT(20) NULL DEFAULT NULL AFTER `deleted`; -- same addition for snapshots on primary storage
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41202to41203-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41202to41203-cleanup.sql
new file mode 100644
index 000000000000..2e6817e906b2
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41202to41203-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.12.0.2 to 4.12.0.3
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41202to41203.sql b/engine/schema/src/main/resources/META-INF/db/schema-41202to41203.sql
new file mode 100644
index 000000000000..d0697a2e4161
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41202to41203.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.12.0.2 to 4.12.0.3
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41203to41204-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41203to41204-cleanup.sql
new file mode 100644
index 000000000000..f330ef6e0a87
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41203to41204-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.12.0.3 to 4.12.0.4
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41203to41204.sql b/engine/schema/src/main/resources/META-INF/db/schema-41203to41204.sql
new file mode 100644
index 000000000000..49d2a3454de1
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41203to41204.sql
@@ -0,0 +1,23 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.12.0.3 to 4.12.0.4
+--;
+INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (283, UUID(), 10, 'Citrix ADC VPX', utc_timestamp()); -- NOTE(review): hard-coded id 283 is referenced as 'Red Hat Enterprise Linux 7.4' in the 4.12.0.0 KVM mappings earlier in this patch -- confirm no id collision; IGNORE would silently skip this insert if 283 already exists
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Other install media', 283, utc_timestamp(), 0); -- map the new OS to XenServer 'Other install media'
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Other install media', 283, utc_timestamp(), 0); -- same mapping for XenServer 7.6
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41204to41205-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41204to41205-cleanup.sql
new file mode 100644
index 000000000000..94a5a4613805
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41204to41205-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.12.0.4 to 4.12.0.5
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41204to41205.sql b/engine/schema/src/main/resources/META-INF/db/schema-41204to41205.sql
new file mode 100644
index 000000000000..e90cb53897b8
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41204to41205.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.12.0.4 to 4.12.0.5
+--;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41205to41206-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41205to41206-cleanup.sql
new file mode 100644
index 000000000000..7bb083d3bd6e
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41205to41206-cleanup.sql
@@ -0,0 +1,50 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.12.0.5 to 4.12.0.6
+--;
+
+-- Remove key/value tags from project_view
+DROP VIEW IF EXISTS `cloud`.`project_view`;
+CREATE VIEW `cloud`.`project_view` AS
+ select
+ projects.id,
+ projects.uuid,
+ projects.name,
+ projects.display_text,
+ projects.state,
+ projects.removed,
+ projects.created,
+ projects.project_account_id,
+ account.account_name owner,
+ pacct.account_id,
+ domain.id domain_id,
+ domain.uuid domain_uuid,
+ domain.name domain_name,
+ domain.path domain_path
+ from
+ `cloud`.`projects`
+ inner join
+ `cloud`.`domain` ON projects.domain_id = domain.id
+ inner join
+ `cloud`.`project_account` ON projects.id = project_account.project_id
+ and project_account.account_role = 'Admin'
+ inner join
+ `cloud`.`account` ON account.id = project_account.account_id
+ left join
+ `cloud`.`project_account` pacct ON projects.id = pacct.project_id;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41205to41206.sql b/engine/schema/src/main/resources/META-INF/db/schema-41205to41206.sql
new file mode 100644
index 000000000000..5f11dee9f852
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41205to41206.sql
@@ -0,0 +1,38 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.12.0.5 to 4.12.0.6
+--;
+
+-- Add XenServer (Citrix Hypervisor) 8.0 and 8.1 hypervisor capabilities
+INSERT INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported)
+values (UUID(), 'Citrix Hypervisor', '8.0.0', 500, 13, 1);
+INSERT INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported)
+values (UUID(), 'Citrix Hypervisor', '8.1.0', 500, 13, 1);
+
+-- Copy XenServer 7.6 hypervisor guest OS mappings to XenServer 8.0
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined)
+SELECT UUID(),'Citrix Hypervisor', '8.0.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.6.0';
+
+-- Add New XenServer 8.0 Guest OSes
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined)
+VALUES (UUID(), 'Citrix Hypervisor', '8.0.0', 'Windows Server 2019 (64-bit)', 276, now(), 0);
+
+-- Copy XenServer 8.0 hypervisor guest OS mappings to XenServer 8.1
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined)
+SELECT UUID(),'Citrix Hypervisor', '8.1.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Citrix Hypervisor' AND hypervisor_version='8.0.0';
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41206to41207-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41206to41207-cleanup.sql
new file mode 100644
index 000000000000..95473c23789c
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41206to41207-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.12.0.6 to 4.12.0.7
+--;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41206to41207.sql b/engine/schema/src/main/resources/META-INF/db/schema-41206to41207.sql
new file mode 100644
index 000000000000..204f6af8919c
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41206to41207.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.12.0.6 to 4.12.0.7
+--;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41207to41208-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41207to41208-cleanup.sql
new file mode 100644
index 000000000000..95473c23789c
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41207to41208-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.12.0.7 to 4.12.0.8
+--;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41207to41208.sql b/engine/schema/src/main/resources/META-INF/db/schema-41207to41208.sql
new file mode 100644
index 000000000000..204f6af8919c
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41207to41208.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.12.0.7 to 4.12.0.8
+--;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41208to41209-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41208to41209-cleanup.sql
new file mode 100644
index 000000000000..95473c23789c
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41208to41209-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.12.0.8 to 4.12.0.9
+--;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41208to41209.sql b/engine/schema/src/main/resources/META-INF/db/schema-41208to41209.sql
new file mode 100644
index 000000000000..204f6af8919c
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41208to41209.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.12.0.8 to 4.12.0.9
+--;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41209to412010-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41209to412010-cleanup.sql
new file mode 100644
index 000000000000..ed9aba697ec3
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41209to412010-cleanup.sql
@@ -0,0 +1,127 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.12.0.9 to 4.12.0.10
+--;
+
+-- Changes to template_view
+DROP VIEW IF EXISTS `cloud`.`template_view`;
+CREATE VIEW `cloud`.`template_view` AS
+ SELECT
+ `vm_template`.`id` AS `id`,
+ `vm_template`.`uuid` AS `uuid`,
+ `vm_template`.`unique_name` AS `unique_name`,
+ `vm_template`.`name` AS `name`,
+ `vm_template`.`public` AS `public`,
+ `vm_template`.`featured` AS `featured`,
+ `vm_template`.`type` AS `type`,
+ `vm_template`.`hvm` AS `hvm`,
+ `vm_template`.`boot_filename` AS `boot_filename`,
+ `vm_template`.`bits` AS `bits`,
+ `vm_template`.`url` AS `url`,
+ `vm_template`.`format` AS `format`,
+ `vm_template`.`created` AS `created`,
+ `vm_template`.`checksum` AS `checksum`,
+ `vm_template`.`display_text` AS `display_text`,
+ `vm_template`.`enable_password` AS `enable_password`,
+ `vm_template`.`dynamically_scalable` AS `dynamically_scalable`,
+ `vm_template`.`state` AS `template_state`,
+ `vm_template`.`guest_os_id` AS `guest_os_id`,
+ `guest_os`.`uuid` AS `guest_os_uuid`,
+ `guest_os`.`display_name` AS `guest_os_name`,
+ `vm_template`.`bootable` AS `bootable`,
+ `vm_template`.`prepopulate` AS `prepopulate`,
+ `vm_template`.`cross_zones` AS `cross_zones`,
+ `vm_template`.`hypervisor_type` AS `hypervisor_type`,
+ `vm_template`.`extractable` AS `extractable`,
+ `vm_template`.`template_tag` AS `template_tag`,
+ `vm_template`.`sort_key` AS `sort_key`,
+ `vm_template`.`removed` AS `removed`,
+ `vm_template`.`enable_sshkey` AS `enable_sshkey`,
+ `parent_template`.`id` AS `parent_template_id`,
+ `parent_template`.`uuid` AS `parent_template_uuid`,
+ `source_template`.`id` AS `source_template_id`,
+ `source_template`.`uuid` AS `source_template_uuid`,
+ `account`.`id` AS `account_id`,
+ `account`.`uuid` AS `account_uuid`,
+ `account`.`account_name` AS `account_name`,
+ `account`.`type` AS `account_type`,
+ `domain`.`id` AS `domain_id`,
+ `domain`.`uuid` AS `domain_uuid`,
+ `domain`.`name` AS `domain_name`,
+ `domain`.`path` AS `domain_path`,
+ `projects`.`id` AS `project_id`,
+ `projects`.`uuid` AS `project_uuid`,
+ `projects`.`name` AS `project_name`,
+ `data_center`.`id` AS `data_center_id`,
+ `data_center`.`uuid` AS `data_center_uuid`,
+ `data_center`.`name` AS `data_center_name`,
+ `launch_permission`.`account_id` AS `lp_account_id`,
+ `template_store_ref`.`store_id` AS `store_id`,
+ `image_store`.`scope` AS `store_scope`,
+ `template_store_ref`.`state` AS `state`,
+ `template_store_ref`.`download_state` AS `download_state`,
+ `template_store_ref`.`download_pct` AS `download_pct`,
+ `template_store_ref`.`error_str` AS `error_str`,
+ `template_store_ref`.`size` AS `size`,
+ `template_store_ref`.physical_size AS `physical_size`,
+ `template_store_ref`.`destroyed` AS `destroyed`,
+ `template_store_ref`.`created` AS `created_on_store`,
+ `vm_template_details`.`name` AS `detail_name`,
+ `vm_template_details`.`value` AS `detail_value`,
+ `resource_tags`.`id` AS `tag_id`,
+ `resource_tags`.`uuid` AS `tag_uuid`,
+ `resource_tags`.`key` AS `tag_key`,
+ `resource_tags`.`value` AS `tag_value`,
+ `resource_tags`.`domain_id` AS `tag_domain_id`,
+ `domain`.`uuid` AS `tag_domain_uuid`,
+ `domain`.`name` AS `tag_domain_name`,
+ `resource_tags`.`account_id` AS `tag_account_id`,
+ `account`.`account_name` AS `tag_account_name`,
+ `resource_tags`.`resource_id` AS `tag_resource_id`,
+ `resource_tags`.`resource_uuid` AS `tag_resource_uuid`,
+ `resource_tags`.`resource_type` AS `tag_resource_type`,
+ `resource_tags`.`customer` AS `tag_customer`,
+ CONCAT(`vm_template`.`id`,
+ '_',
+ IFNULL(`data_center`.`id`, 0)) AS `temp_zone_pair`,
+ `vm_template`.`direct_download` AS `direct_download`
+ FROM
+ (((((((((((((`vm_template`
+ JOIN `guest_os` ON ((`guest_os`.`id` = `vm_template`.`guest_os_id`)))
+ JOIN `account` ON ((`account`.`id` = `vm_template`.`account_id`)))
+ JOIN `domain` ON ((`domain`.`id` = `account`.`domain_id`)))
+ LEFT JOIN `projects` ON ((`projects`.`project_account_id` = `account`.`id`)))
+ LEFT JOIN `vm_template_details` ON ((`vm_template_details`.`template_id` = `vm_template`.`id`)))
+ LEFT JOIN `vm_template` `source_template` ON ((`source_template`.`id` = `vm_template`.`source_template_id`)))
+ LEFT JOIN `template_store_ref` ON (((`template_store_ref`.`template_id` = `vm_template`.`id`)
+ AND (`template_store_ref`.`store_role` = 'Image')
+ AND (`template_store_ref`.`destroyed` = 0))))
+ LEFT JOIN `vm_template` `parent_template` ON ((`parent_template`.`id` = `vm_template`.`parent_template_id`)))
+ LEFT JOIN `image_store` ON ((ISNULL(`image_store`.`removed`)
+ AND (`template_store_ref`.`store_id` IS NOT NULL)
+ AND (`image_store`.`id` = `template_store_ref`.`store_id`))))
+ LEFT JOIN `template_zone_ref` ON (((`template_zone_ref`.`template_id` = `vm_template`.`id`)
+ AND ISNULL(`template_store_ref`.`store_id`)
+ AND ISNULL(`template_zone_ref`.`removed`))))
+ LEFT JOIN `data_center` ON (((`image_store`.`data_center_id` = `data_center`.`id`)
+ OR (`template_zone_ref`.`zone_id` = `data_center`.`id`))))
+ LEFT JOIN `launch_permission` ON ((`launch_permission`.`template_id` = `vm_template`.`id`)))
+ LEFT JOIN `resource_tags` ON (((`resource_tags`.`resource_id` = `vm_template`.`id`)
+ AND ((`resource_tags`.`resource_type` = 'Template')
+ OR (`resource_tags`.`resource_type` = 'ISO')))));
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41209to412010.sql b/engine/schema/src/main/resources/META-INF/db/schema-41209to412010.sql
new file mode 100644
index 000000000000..57b53e4de5af
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41209to412010.sql
@@ -0,0 +1,32 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.12.0.9 to 4.12.0.10
+--;
+
+-- Add PXE boot columns (template boot filename and VPC network boot IP)
+ALTER TABLE `cloud`.`vm_template` ADD `boot_filename` varchar(255) NULL default NULL COMMENT 'the url where the template exists externally';
+ALTER TABLE `cloud`.`vpc` ADD `network_boot_ip` char(40) NULL default NULL COMMENT 'Network Boot Ip';
+
+-- Add XenServer 8.2 hypervisor capabilities
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported)
+values (UUID(), 'Citrix Hypervisor', '8.2.0', 500, 13, 1);
+
+-- Copy XenServer 8.1 hypervisor guest OS mappings to XenServer 8.2
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined)
+SELECT UUID(),'Citrix Hypervisor', '8.2.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Citrix Hypervisor' AND hypervisor_version='8.1.0';
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41210to412011-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41210to412011-cleanup.sql
new file mode 100644
index 000000000000..95473c23789c
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41210to412011-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.12.0.10 to 4.12.0.11
+--;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41210to412011.sql b/engine/schema/src/main/resources/META-INF/db/schema-41210to412011.sql
new file mode 100644
index 000000000000..204f6af8919c
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41210to412011.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.12.0.10 to 4.12.0.11
+--;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41211to412012-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41211to412012-cleanup.sql
new file mode 100644
index 000000000000..2fd0e630fde2
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41211to412012-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.12.0.11 to 4.12.0.12
+--;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41211to412012.sql b/engine/schema/src/main/resources/META-INF/db/schema-41211to412012.sql
new file mode 100644
index 000000000000..735dc776fcfc
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41211to412012.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.12.0.11 to 4.12.0.12
+--;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41212to412013-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41212to412013-cleanup.sql
new file mode 100644
index 000000000000..2fd0e630fde2
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41212to412013-cleanup.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.12.0.12 to 4.12.0.13
+--;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41212to412013.sql b/engine/schema/src/main/resources/META-INF/db/schema-41212to412013.sql
new file mode 100644
index 000000000000..735dc776fcfc
--- /dev/null
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41212to412013.sql
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.12.0.12 to 4.12.0.13
+--;
\ No newline at end of file
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41710to41800.sql b/engine/schema/src/main/resources/META-INF/db/schema-41710to41800.sql
index 4ec812cc1c0a..93b7669482b7 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-41710to41800.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41710to41800.sql
@@ -27,3 +27,274 @@ WHERE so.default_use = 1 AND so.vm_type IN ('domainrouter', 'secondarystoragevm'
-- Add cidr_list column to load_balancing_rules
ALTER TABLE `cloud`.`load_balancing_rules`
ADD cidr_list VARCHAR(4096);
+
+-- Fixes for custom schema changes
+DROP VIEW IF EXISTS `cloud`.`template_view`;
+CREATE VIEW `cloud`.`template_view` AS
+ SELECT
+ `vm_template`.`id` AS `id`,
+ `vm_template`.`uuid` AS `uuid`,
+ `vm_template`.`unique_name` AS `unique_name`,
+ `vm_template`.`name` AS `name`,
+ `vm_template`.`public` AS `public`,
+ `vm_template`.`featured` AS `featured`,
+ `vm_template`.`type` AS `type`,
+ `vm_template`.`hvm` AS `hvm`,
+ `vm_template`.`bits` AS `bits`,
+ `vm_template`.`url` AS `url`,
+ `vm_template`.`format` AS `format`,
+ `vm_template`.`created` AS `created`,
+ `vm_template`.`checksum` AS `checksum`,
+ `vm_template`.`display_text` AS `display_text`,
+ `vm_template`.`enable_password` AS `enable_password`,
+ `vm_template`.`dynamically_scalable` AS `dynamically_scalable`,
+ `vm_template`.`state` AS `template_state`,
+ `vm_template`.`guest_os_id` AS `guest_os_id`,
+ `guest_os`.`uuid` AS `guest_os_uuid`,
+ `guest_os`.`display_name` AS `guest_os_name`,
+ `vm_template`.`bootable` AS `bootable`,
+ `vm_template`.`prepopulate` AS `prepopulate`,
+ `vm_template`.`cross_zones` AS `cross_zones`,
+ `vm_template`.`hypervisor_type` AS `hypervisor_type`,
+ `vm_template`.`extractable` AS `extractable`,
+ `vm_template`.`template_tag` AS `template_tag`,
+ `vm_template`.`sort_key` AS `sort_key`,
+ `vm_template`.`removed` AS `removed`,
+ `vm_template`.`enable_sshkey` AS `enable_sshkey`,
+ `vm_template`.`boot_filename` AS `boot_filename`,
+ `parent_template`.`id` AS `parent_template_id`,
+ `parent_template`.`uuid` AS `parent_template_uuid`,
+ `source_template`.`id` AS `source_template_id`,
+ `source_template`.`uuid` AS `source_template_uuid`,
+ `account`.`id` AS `account_id`,
+ `account`.`uuid` AS `account_uuid`,
+ `account`.`account_name` AS `account_name`,
+ `account`.`type` AS `account_type`,
+ `domain`.`id` AS `domain_id`,
+ `domain`.`uuid` AS `domain_uuid`,
+ `domain`.`name` AS `domain_name`,
+ `domain`.`path` AS `domain_path`,
+ `projects`.`id` AS `project_id`,
+ `projects`.`uuid` AS `project_uuid`,
+ `projects`.`name` AS `project_name`,
+ `data_center`.`id` AS `data_center_id`,
+ `data_center`.`uuid` AS `data_center_uuid`,
+ `data_center`.`name` AS `data_center_name`,
+ `launch_permission`.`account_id` AS `lp_account_id`,
+ `template_store_ref`.`store_id` AS `store_id`,
+ `image_store`.`scope` AS `store_scope`,
+ `template_store_ref`.`state` AS `state`,
+ `template_store_ref`.`download_state` AS `download_state`,
+ `template_store_ref`.`download_pct` AS `download_pct`,
+ `template_store_ref`.`error_str` AS `error_str`,
+ `template_store_ref`.`size` AS `size`,
+ `template_store_ref`.physical_size AS `physical_size`,
+ `template_store_ref`.`destroyed` AS `destroyed`,
+ `template_store_ref`.`created` AS `created_on_store`,
+ `vm_template_details`.`name` AS `detail_name`,
+ `vm_template_details`.`value` AS `detail_value`,
+ `resource_tags`.`id` AS `tag_id`,
+ `resource_tags`.`uuid` AS `tag_uuid`,
+ `resource_tags`.`key` AS `tag_key`,
+ `resource_tags`.`value` AS `tag_value`,
+ `resource_tags`.`domain_id` AS `tag_domain_id`,
+ `domain`.`uuid` AS `tag_domain_uuid`,
+ `domain`.`name` AS `tag_domain_name`,
+ `resource_tags`.`account_id` AS `tag_account_id`,
+ `account`.`account_name` AS `tag_account_name`,
+ `resource_tags`.`resource_id` AS `tag_resource_id`,
+ `resource_tags`.`resource_uuid` AS `tag_resource_uuid`,
+ `resource_tags`.`resource_type` AS `tag_resource_type`,
+ `resource_tags`.`customer` AS `tag_customer`,
+ CONCAT(`vm_template`.`id`,
+ '_',
+ IFNULL(`data_center`.`id`, 0)) AS `temp_zone_pair`,
+ `vm_template`.`direct_download` AS `direct_download`,
+ `vm_template`.`deploy_as_is` AS `deploy_as_is`
+ FROM
+ (((((((((((((`vm_template`
+ JOIN `guest_os` ON ((`guest_os`.`id` = `vm_template`.`guest_os_id`)))
+ JOIN `account` ON ((`account`.`id` = `vm_template`.`account_id`)))
+ JOIN `domain` ON ((`domain`.`id` = `account`.`domain_id`)))
+ LEFT JOIN `projects` ON ((`projects`.`project_account_id` = `account`.`id`)))
+ LEFT JOIN `vm_template_details` ON ((`vm_template_details`.`template_id` = `vm_template`.`id`)))
+ LEFT JOIN `vm_template` `source_template` ON ((`source_template`.`id` = `vm_template`.`source_template_id`)))
+ LEFT JOIN `template_store_ref` ON (((`template_store_ref`.`template_id` = `vm_template`.`id`)
+ AND (`template_store_ref`.`store_role` = 'Image')
+ AND (`template_store_ref`.`destroyed` = 0))))
+ LEFT JOIN `vm_template` `parent_template` ON ((`parent_template`.`id` = `vm_template`.`parent_template_id`)))
+ LEFT JOIN `image_store` ON ((ISNULL(`image_store`.`removed`)
+ AND (`template_store_ref`.`store_id` IS NOT NULL)
+ AND (`image_store`.`id` = `template_store_ref`.`store_id`))))
+ LEFT JOIN `template_zone_ref` ON (((`template_zone_ref`.`template_id` = `vm_template`.`id`)
+ AND ISNULL(`template_store_ref`.`store_id`)
+ AND ISNULL(`template_zone_ref`.`removed`))))
+ LEFT JOIN `data_center` ON (((`image_store`.`data_center_id` = `data_center`.`id`)
+ OR (`template_zone_ref`.`zone_id` = `data_center`.`id`))))
+ LEFT JOIN `launch_permission` ON ((`launch_permission`.`template_id` = `vm_template`.`id`)))
+ LEFT JOIN `resource_tags` ON (((`resource_tags`.`resource_id` = `vm_template`.`id`)
+ AND ((`resource_tags`.`resource_type` = 'Template')
+ OR (`resource_tags`.`resource_type` = 'ISO')))));
+
+DROP VIEW IF EXISTS `cloud`.`disk_offering_view`;
+CREATE VIEW `cloud`.`disk_offering_view` AS
+ SELECT
+ `disk_offering`.`id` AS `id`,
+ `disk_offering`.`uuid` AS `uuid`,
+ `disk_offering`.`name` AS `name`,
+ `disk_offering`.`display_text` AS `display_text`,
+ `disk_offering`.`provisioning_type` AS `provisioning_type`,
+ `disk_offering`.`disk_size` AS `disk_size`,
+ `disk_offering`.`min_iops` AS `min_iops`,
+ `disk_offering`.`max_iops` AS `max_iops`,
+ `disk_offering`.`created` AS `created`,
+ `disk_offering`.`tags` AS `tags`,
+ `disk_offering`.`customized` AS `customized`,
+ `disk_offering`.`customized_iops` AS `customized_iops`,
+ `disk_offering`.`removed` AS `removed`,
+ `disk_offering`.`use_local_storage` AS `use_local_storage`,
+ `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`,
+ `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`,
+ `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`,
+ `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`,
+ `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`,
+ `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`,
+ `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`,
+ `disk_offering`.`iops_read_rate` AS `iops_read_rate`,
+ `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`,
+ `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`,
+ `disk_offering`.`iops_write_rate` AS `iops_write_rate`,
+ `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`,
+ `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`,
+ `disk_offering`.`cache_mode` AS `cache_mode`,
+ `disk_offering`.`sort_key` AS `sort_key`,
+ `disk_offering`.`compute_only` AS `compute_only`,
+ `disk_offering`.`display_offering` AS `display_offering`,
+ `disk_offering`.`state` AS `state`,
+ `disk_offering`.`disk_size_strictness` AS `disk_size_strictness`,
+ `disk_offering`.`min_iops_per_gb` AS `min_iops_per_gb`,
+ `disk_offering`.`max_iops_per_gb` AS `max_iops_per_gb`,
+ `disk_offering`.`highest_min_iops` AS `highest_min_iops`,
+ `disk_offering`.`highest_max_iops` AS `highest_max_iops`,
+ `vsphere_storage_policy`.`value` AS `vsphere_storage_policy`,
+ GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id,
+ GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid,
+ GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name,
+ GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path,
+ GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id,
+ GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid,
+ GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name
+ FROM
+ `cloud`.`disk_offering`
+ LEFT JOIN
+ `cloud`.`disk_offering_details` AS `domain_details` ON `domain_details`.`offering_id` = `disk_offering`.`id` AND `domain_details`.`name`='domainid'
+ LEFT JOIN
+ `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`)
+ LEFT JOIN
+ `cloud`.`disk_offering_details` AS `zone_details` ON `zone_details`.`offering_id` = `disk_offering`.`id` AND `zone_details`.`name`='zoneid'
+ LEFT JOIN
+ `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`)
+ LEFT JOIN
+ `cloud`.`disk_offering_details` AS `vsphere_storage_policy` ON `vsphere_storage_policy`.`offering_id` = `disk_offering`.`id` AND `vsphere_storage_policy`.`name` = 'storagepolicy'
+ WHERE
+ `disk_offering`.`state`='Active'
+ GROUP BY
+ `disk_offering`.`id`;
+
+DROP VIEW IF EXISTS `cloud`.`service_offering_view`;
+CREATE VIEW `cloud`.`service_offering_view` AS
+ SELECT
+ `service_offering`.`id` AS `id`,
+ `service_offering`.`uuid` AS `uuid`,
+ `service_offering`.`name` AS `name`,
+ `service_offering`.`display_text` AS `display_text`,
+ `disk_offering`.`provisioning_type` AS `provisioning_type`,
+ `service_offering`.`created` AS `created`,
+ `disk_offering`.`tags` AS `tags`,
+ `service_offering`.`removed` AS `removed`,
+ `disk_offering`.`use_local_storage` AS `use_local_storage`,
+ `service_offering`.`system_use` AS `system_use`,
+ `disk_offering`.`id` AS `disk_offering_id`,
+ `disk_offering`.`name` AS `disk_offering_name`,
+ `disk_offering`.`uuid` AS `disk_offering_uuid`,
+ `disk_offering`.`display_text` AS `disk_offering_display_text`,
+ `disk_offering`.`customized_iops` AS `customized_iops`,
+ `disk_offering`.`min_iops` AS `min_iops`,
+ `disk_offering`.`max_iops` AS `max_iops`,
+ `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`,
+ `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`,
+ `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`,
+ `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`,
+ `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`,
+ `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`,
+ `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`,
+ `disk_offering`.`iops_read_rate` AS `iops_read_rate`,
+ `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`,
+ `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`,
+ `disk_offering`.`iops_write_rate` AS `iops_write_rate`,
+ `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`,
+ `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`,
+ `disk_offering`.`cache_mode` AS `cache_mode`,
+ `disk_offering`.`disk_size` AS `root_disk_size`,
+ `disk_offering`.`min_iops_per_gb` AS `min_iops_per_gb`,
+ `disk_offering`.`max_iops_per_gb` AS `max_iops_per_gb`,
+ `disk_offering`.`highest_min_iops` AS `highest_min_iops`,
+ `disk_offering`.`highest_max_iops` AS `highest_max_iops`,
+ `service_offering`.`cpu` AS `cpu`,
+ `service_offering`.`speed` AS `speed`,
+ `service_offering`.`ram_size` AS `ram_size`,
+ `service_offering`.`nw_rate` AS `nw_rate`,
+ `service_offering`.`mc_rate` AS `mc_rate`,
+ `service_offering`.`ha_enabled` AS `ha_enabled`,
+ `service_offering`.`limit_cpu_use` AS `limit_cpu_use`,
+ `service_offering`.`host_tag` AS `host_tag`,
+ `service_offering`.`default_use` AS `default_use`,
+ `service_offering`.`vm_type` AS `vm_type`,
+ `service_offering`.`sort_key` AS `sort_key`,
+ `service_offering`.`is_volatile` AS `is_volatile`,
+ `service_offering`.`deployment_planner` AS `deployment_planner`,
+ `service_offering`.`dynamic_scaling_enabled` AS `dynamic_scaling_enabled`,
+ `service_offering`.`disk_offering_strictness` AS `disk_offering_strictness`,
+ `vsphere_storage_policy`.`value` AS `vsphere_storage_policy`,
+ GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id,
+ GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid,
+ GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name,
+ GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path,
+ GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id,
+ GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid,
+ GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name,
+ IFNULL(`min_compute_details`.`value`, `cpu`) AS min_cpu,
+ IFNULL(`max_compute_details`.`value`, `cpu`) AS max_cpu,
+ IFNULL(`min_memory_details`.`value`, `ram_size`) AS min_memory,
+ IFNULL(`max_memory_details`.`value`, `ram_size`) AS max_memory
+ FROM
+ `cloud`.`service_offering`
+ INNER JOIN
+ `cloud`.`disk_offering_view` AS `disk_offering` ON service_offering.disk_offering_id = disk_offering.id
+ LEFT JOIN
+ `cloud`.`service_offering_details` AS `domain_details` ON `domain_details`.`service_offering_id` = `service_offering`.`id` AND `domain_details`.`name`='domainid'
+ LEFT JOIN
+ `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`)
+ LEFT JOIN
+ `cloud`.`service_offering_details` AS `zone_details` ON `zone_details`.`service_offering_id` = `service_offering`.`id` AND `zone_details`.`name`='zoneid'
+ LEFT JOIN
+ `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`)
+ LEFT JOIN
+ `cloud`.`service_offering_details` AS `min_compute_details` ON `min_compute_details`.`service_offering_id` = `service_offering`.`id`
+ AND `min_compute_details`.`name` = 'mincpunumber'
+ LEFT JOIN
+ `cloud`.`service_offering_details` AS `max_compute_details` ON `max_compute_details`.`service_offering_id` = `service_offering`.`id`
+ AND `max_compute_details`.`name` = 'maxcpunumber'
+ LEFT JOIN
+ `cloud`.`service_offering_details` AS `min_memory_details` ON `min_memory_details`.`service_offering_id` = `service_offering`.`id`
+ AND `min_memory_details`.`name` = 'minmemory'
+ LEFT JOIN
+ `cloud`.`service_offering_details` AS `max_memory_details` ON `max_memory_details`.`service_offering_id` = `service_offering`.`id`
+ AND `max_memory_details`.`name` = 'maxmemory'
+ LEFT JOIN
+ `cloud`.`service_offering_details` AS `vsphere_storage_policy` ON `vsphere_storage_policy`.`service_offering_id` = `service_offering`.`id`
+ AND `vsphere_storage_policy`.`name` = 'storagepolicy'
+ WHERE
+ `service_offering`.`state`='Active'
+ GROUP BY
+ `service_offering`.`id`;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4930to41000.sql b/engine/schema/src/main/resources/META-INF/db/schema-4930to41000.sql
index dc0cd6d4d75a..b5eb8d361250 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-4930to41000.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-4930to41000.sql
@@ -49,7 +49,7 @@ INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervi
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.0', 'windows9_64Guest', 258, now(), 0);
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Windows 10', 258, now(), 0);
-INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '6.5.0', 'Windows Server 2016 (64-bit)', 259, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '6.5.0', 'Other install media', 259, now(), 0);
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.0.0', 'Windows Server 2016 (64-bit)', 259, now(), 0);
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.0', 'windows9Server64Guest', 259, now(), 0);
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Windows Server 2016', 259, now(), 0);
@@ -235,6 +235,205 @@ WHERE (o.cpu is null AND o.speed IS NULL AND o.ram_size IS NULL) AND
-- CLOUDSTACK-9827: Storage tags stored in multiple places
DROP VIEW IF EXISTS `cloud`.`storage_tag_view`;
+-- XenServer 7.1 support update
+INSERT INTO `cloud`.`hypervisor_capabilities`(
+ uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported)
+values
+ (UUID(), 'XenServer', '7.1.0', 500, 13, 1);
+
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 4.5 (32-bit)', 1, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 4.6 (32-bit)', 2, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 4.7 (32-bit)', 3, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 4.8 (32-bit)', 4, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 5, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 6, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 7, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 8, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 9, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 10, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 11, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 12, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 13, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 14, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 111, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 112, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 141, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 142, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 161, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 162, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 173, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 174, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 175, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 176, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 231, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 232, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 139, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 140, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 143, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 144, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 177, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 178, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 179, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 180, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 171, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 172, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 181, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 182, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 227, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 228, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 248, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 249, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 7', 246, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Debian Squeeze 6.0 (32-bit)', 132, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Debian Squeeze 6.0 (64-bit)', 133, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Debian Wheezy 7.0 (32-bit)', 183, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Debian Wheezy 7.0 (64-bit)', 184, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 16, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 17, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 18, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 19, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 20, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 21, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 22, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 23, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 24, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 25, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 134, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 135, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 145, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 146, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 207, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 208, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 209, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 210, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 211, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 212, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 233, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 234, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (32-bit)', 147, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (64-bit)', 148, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (32-bit)', 213, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (64-bit)', 214, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (32-bit)', 215, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (64-bit)', 216, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (32-bit)', 217, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (64-bit)', 218, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (32-bit)', 219, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (64-bit)', 220, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (32-bit)', 235, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (64-bit)', 236, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (32-bit)', 250, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (64-bit)', 251, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Linux 7', 247, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 4.5 (32-bit)', 26, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 4.6 (32-bit)', 27, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 4.7 (32-bit)', 28, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 4.8 (32-bit)', 29, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 30, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 31, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 32, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 33, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 34, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 35, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 36, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 37, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 38, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 39, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 113, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 114, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 149, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 150, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 189, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 190, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 191, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 192, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 193, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 194, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 237, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 238, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 136, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 137, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 195, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 196, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 197, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 198, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 199, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 204, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 205, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 206, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 239, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 240, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 7', 245, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP1 (32-bit)', 41, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP1 (64-bit)', 42, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP2 (32-bit)', 43, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP2 (64-bit)', 44, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP3 (32-bit)', 151, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP3 (64-bit)', 45, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP4 (32-bit)', 153, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP4 (64-bit)', 152, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 (32-bit)', 46, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 (64-bit)', 47, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 SP1 (32-bit)', 155, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 SP2 (32-bit)', 186, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 SP2 (64-bit)', 185, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 SP3 (32-bit)', 188, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 SP3 (64-bit)', 187, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 12 (64-bit)', 244, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows 7 (32-bit)', 48, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows 7 (64-bit)', 49, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows 8 (32-bit)', 165, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows 8 (64-bit)', 166, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 51, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 87, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 88, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 89, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 90, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows Server 2008 (32-bit)', 52, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows Server 2008 (64-bit)', 53, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows Server 2008 R2 (64-bit)', 54, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows Server 2012 (64-bit)', 167, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows Server 2012 R2 (64-bit)', 168, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 58, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Lucid Lynx 10.04 (32-bit)', 121, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Lucid Lynx 10.04 (64-bit)', 126, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Maverick Meerkat 10.10 (32-bit) (experimental)', 156, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Maverick Meerkat 10.10 (64-bit) (experimental)', 157, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Precise Pangolin 12.04 (32-bit)', 163, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Precise Pangolin 12.04 (64-bit)', 164, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Trusty Tahr 14.04', 241, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Trusty Tahr 14.04', 254, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 169, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 170, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 98, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 99, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 60, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 103, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 200, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 201, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 59, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 100, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 202, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 203, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Trusty Tahr 14.04', 255, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Xenial Xerus 16.04', 256, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows 10 (32-bit)', 257, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows 10 (64-bit)', 258, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows Server 2016 (64-bit)', 259, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 7', 260, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 261, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 262, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 263, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 264, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 265, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 266, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 267, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 268, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CoreOS', 271, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 7', 272, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 7', 273, now(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 7', 274, now(), 0);
+
CREATE TABLE IF NOT EXISTS `cloud`.`guest_os_details` (
`id` bigint unsigned NOT NULL auto_increment,
`guest_os_id` bigint unsigned NOT NULL COMMENT 'VPC gateway id',
diff --git a/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java b/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java
index 982a386161b6..f8ba468c0874 100644
--- a/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java
+++ b/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java
@@ -27,11 +27,15 @@
import com.cloud.upgrade.DatabaseUpgradeChecker.NoopDbUpgrade;
import com.cloud.upgrade.dao.DbUpgrade;
-import com.cloud.upgrade.dao.Upgrade41000to41100;
+import com.cloud.upgrade.dao.Upgrade41000to4100226;
+import com.cloud.upgrade.dao.Upgrade4100226to4100227;
+import com.cloud.upgrade.dao.Upgrade4100227to4100228;
+import com.cloud.upgrade.dao.Upgrade4100228to4100229;
+import com.cloud.upgrade.dao.Upgrade4100229to4100230;
import com.cloud.upgrade.dao.Upgrade41100to41110;
import com.cloud.upgrade.dao.Upgrade41110to41120;
import com.cloud.upgrade.dao.Upgrade41120to41130;
-import com.cloud.upgrade.dao.Upgrade41120to41200;
+import com.cloud.upgrade.dao.Upgrade41130to41200;
import com.cloud.upgrade.dao.Upgrade452to453;
import com.cloud.upgrade.dao.Upgrade453to460;
import com.cloud.upgrade.dao.Upgrade460to461;
@@ -96,14 +100,18 @@ public void testCalculateUpgradePath410to412() {
assertNotNull(upgrades);
assertTrue(upgrades.length >= 1);
- assertTrue(upgrades[0] instanceof Upgrade41000to41100);
- assertTrue(upgrades[1] instanceof Upgrade41100to41110);
- assertTrue(upgrades[2] instanceof Upgrade41110to41120);
- assertTrue(upgrades[3] instanceof Upgrade41120to41130);
- assertTrue(upgrades[4] instanceof Upgrade41120to41200);
-
- assertTrue(Arrays.equals(new String[] {"4.11.0.0", "4.11.1.0"}, upgrades[1].getUpgradableVersionRange()));
- assertEquals(currentVersion.toString(), upgrades[4].getUpgradedVersion());
+ assertTrue(upgrades[0] instanceof Upgrade41000to4100226);
+ assertTrue(upgrades[1] instanceof Upgrade4100226to4100227);
+ assertTrue(upgrades[2] instanceof Upgrade4100227to4100228);
+ assertTrue(upgrades[3] instanceof Upgrade4100228to4100229);
+ assertTrue(upgrades[4] instanceof Upgrade4100229to4100230);
+ assertTrue(upgrades[16] instanceof Upgrade41100to41110);
+ assertTrue(upgrades[17] instanceof Upgrade41110to41120);
+ assertTrue(upgrades[18] instanceof Upgrade41120to41130);
+ assertTrue(upgrades[19] instanceof Upgrade41130to41200);
+
+ assertTrue(Arrays.equals(new String[] {"4.11.0.0", "4.11.1.0"}, upgrades[16].getUpgradableVersionRange()));
+ assertEquals(currentVersion.toString(), upgrades[19].getUpgradedVersion());
}
diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
index 2639968f261a..9b01e5351864 100644
--- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
+++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
@@ -18,14 +18,14 @@
*/
package org.apache.cloudstack.storage.motion;
+import static com.cloud.storage.snapshot.SnapshotManager.BackupSnapshotAfterTakingSnapshot;
+
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
-import com.cloud.agent.api.to.DiskTO;
-import com.cloud.storage.Storage;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
@@ -60,18 +60,19 @@
import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
+import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.configuration.Config;
import com.cloud.host.Host;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.DataStoreRole;
-import com.cloud.storage.StorageManager;
+import com.cloud.storage.Storage;
import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
-import static com.cloud.storage.snapshot.SnapshotManager.BackupSnapshotAfterTakingSnapshot;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.db.DB;
import com.cloud.utils.exception.CloudRuntimeException;
diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java
index b48ae6d22dc0..9ed95b2388f5 100644
--- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java
+++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java
@@ -2453,6 +2453,7 @@ private Map getVolumeDetails(VolumeInfo volumeInfo) {
volumeDetails.put(DiskTO.PROTOCOL_TYPE, (volumeVO.getPoolType() != null) ? volumeVO.getPoolType().toString() : null);
volumeDetails.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(storagePoolVO.getId())));
+ volumeDetails.put(DiskTO.PATH, volumeVO.getPath());
volumeDetails.put(DiskTO.VOLUME_SIZE, String.valueOf(volumeVO.getSize()));
volumeDetails.put(DiskTO.SCSI_NAA_DEVICE_ID, getVolumeProperty(volumeInfo.getId(), DiskTO.SCSI_NAA_DEVICE_ID));
diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
index 4aefccc67abd..231fe272461e 100644
--- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
+++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
@@ -183,6 +183,10 @@ public void createTemplateAsync(TemplateInfo template, DataStore store, AsyncCom
// update template_store_ref and template state
try {
templateOnStore.processEvent(ObjectInDataStoreStateMachine.Event.CreateOnlyRequested);
+ if(template.getFormat().equals(ImageFormat.PXEBOOT)) {
+ templateOnStore.processEvent(ObjectInDataStoreStateMachine.Event.OperationSuccessed);
+ templateOnStore.setSize(0L);
+ }
} catch (Exception e) {
TemplateApiResult result = new TemplateApiResult(templateOnStore);
result.setResult(e.toString());
@@ -798,7 +802,7 @@ private boolean createChildDataDiskTemplate(DatadiskTO dataDiskTemplate, VMTempl
String templateName = dataDiskTemplate.isIso() ? dataDiskTemplate.getPath().substring(dataDiskTemplate.getPath().lastIndexOf(File.separator) + 1) : template.getName() + suffix + diskCount;
VMTemplateVO templateVO = new VMTemplateVO(templateId, templateName, format, false, false, false, ttype, template.getUrl(),
template.requiresHvm(), template.getBits(), template.getAccountId(), null, templateName, false, guestOsId, false, template.getHypervisorType(), null,
- null, false, false, false, false);
+ null, false, false, false, false, template.getBootFilename());
if (dataDiskTemplate.isIso()){
templateVO.setUniqueName(templateName);
}
diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java
index 0151a7cdedb0..f32f5e517fc0 100644
--- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java
+++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java
@@ -436,6 +436,11 @@ public boolean isRequiresHvm() {
return imageVO.isRequiresHvm();
}
+ @Override
+ public String getBootFilename() {
+ return imageVO.getBootFilename();
+ }
+
@Override
public String getDisplayText() {
return imageVO.getDisplayText();
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
index 3ef9fbc4225e..eb5bef52df47 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
@@ -175,6 +175,14 @@ protected Long getMaxTemplateSizeInBytes() {
}
}
+ protected Long getMaxVolumeSizeInBytes() {
+ try {
+ return Long.parseLong(configDao.getValue("storage.max.volume.upload.size")) * 1024L * 1024L * 1024L;
+ } catch (NumberFormatException e) {
+ return null;
+ }
+ }
+
@Override
public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback callback) {
CreateContext context = new CreateContext(callback, data);
diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java
index 53fa21f3a794..cc6965d6cc3a 100644
--- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java
+++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java
@@ -23,8 +23,6 @@
import javax.inject.Inject;
-import com.cloud.storage.VMTemplateVO;
-import com.cloud.storage.dao.VMTemplateDao;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
@@ -34,7 +32,10 @@
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VolumeDao;
public class VolumeDataFactoryImpl implements VolumeDataFactory {
@@ -83,7 +84,7 @@ public VolumeInfo getVolume(long volumeId) {
return null;
}
VolumeObject vol = null;
- if (volumeVO.getPoolId() == null) {
+ if (volumeVO.getPoolId() == null || volumeVO.getState() == Volume.State.Uploaded) {
DataStore store = null;
VolumeDataStoreVO volumeStore = volumeStoreDao.findByVolume(volumeId);
if (volumeStore != null) {
diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
index f74ef7a38771..a2df3ec570bf 100644
--- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
+++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
@@ -1776,7 +1776,7 @@ public AsyncCallFuture copyVolume(VolumeInfo srcVolume, DataSto
return copyVolumeFromImageToPrimary(srcVolume, destStore);
}
- if (destStore.getRole() == DataStoreRole.Image) {
+ if (destStore.getRole() == DataStoreRole.Image || destStore.getRole() == DataStoreRole.ImageCache) {
return copyVolumeFromPrimaryToImage(srcVolume, destStore);
}
diff --git a/packaging/centos7/cloud.spec b/packaging/centos7/cloud.spec
index 431dbee93024..42aa21f3eb01 100644
--- a/packaging/centos7/cloud.spec
+++ b/packaging/centos7/cloud.spec
@@ -212,6 +212,15 @@ if [ \"%{_temp}\" != "" ]; then
FLAGS="$FLAGS `rpm --eval %{?_temp}`"
fi
+if [ "%{_tests}" == "SKIP" ] ; then
+ echo "Adding skipTests flag to the maven build"
+ FLAGS="$FLAGS -DskipTests"
+fi
+
+# Installing missing deps
+curl -fL https://github.com/Juniper/netconf-java/releases/download/1.0.0/Netconf.jar --output netconf-java.jar
+mvn install:install-file -Dfile=netconf-java.jar -DgroupId=net.juniper.netconf -DartifactId=netconf-juniper -Dversion=1.0 -Dpackaging=jar
+
mvn -Psystemvm,developer $FLAGS clean package
cd ui && npm install && npm run build && cd ..
diff --git a/packaging/package.sh b/packaging/package.sh
index bf95f84a11a7..0be1a8802d20 100755
--- a/packaging/package.sh
+++ b/packaging/package.sh
@@ -35,6 +35,7 @@ Optional arguments:
-r, --release integer Set the package release version (default is 1 for normal and prereleases, empty for SNAPSHOT)
-s, --simulator string Build package for Simulator ("default"|"DEFAULT"|"simulator"|"SIMULATOR") (default "default")
-b, --brand string Set branding to be used in package name (it will override any branding string in POM version)
+ -S, --skip-tests Set the flag to skip unit tests (if not provided tests will be executed)
-T, --use-timestamp Use epoch timestamp instead of SNAPSHOT in the package name (if not provided, use "SNAPSHOT")
-t --templates Passes necessary flag to package the required templates. Comma separated string - kvm,xen,vmware,ovm,hyperv
@@ -78,6 +79,9 @@ function packaging() {
else
INDICATOR="SNAPSHOT"
fi
+ if [ "$SKIP_TESTS" == "true" ]; then
+ DEFTESTS="-D_tests SKIP"
+ fi
DISTRO=$3
@@ -173,7 +177,7 @@ function packaging() {
echo ". executing rpmbuild"
cp "$PWD/$DISTRO/cloud.spec" "$RPMDIR/SPECS"
- (cd "$RPMDIR"; rpmbuild --define "_topdir ${RPMDIR}" "${DEFVER}" "${DEFFULLVER}" "${DEFREL}" ${DEFPRE+"$DEFPRE"} ${DEFOSSNOSS+"$DEFOSSNOSS"} ${DEFSIM+"$DEFSIM"} ${DEFTEMP+"$DEFTEMP"} -bb SPECS/cloud.spec)
+ (cd "$RPMDIR"; rpmbuild --define "_topdir ${RPMDIR}" "${DEFVER}" "${DEFFULLVER}" "${DEFREL}" ${DEFPRE+"$DEFPRE"} ${DEFOSSNOSS+"$DEFOSSNOSS"} ${DEFSIM+"$DEFSIM"} ${DEFTEMP+"$DEFTEMP"} ${DEFTESTS+"$DEFTESTS"} -bb SPECS/cloud.spec)
if [ $? -ne 0 ]; then
if [ "$USE_TIMESTAMP" == "true" ]; then
(cd $PWD/../; git reset --hard)
@@ -194,6 +198,7 @@ SIM=""
PACKAGEVAL=""
RELEASE=""
BRANDING=""
+SKIP_TESTS="false"
USE_TIMESTAMP="false"
unrecognized_flags=""
@@ -253,6 +258,11 @@ while [ -n "$1" ]; do
shift 2
;;
+ -S | --skip-tests)
+ SKIP_TESTS="true"
+ shift 1
+ ;;
+
-T | --use-timestamp)
USE_TIMESTAMP="true"
shift 1
diff --git a/packaging/suse15/cloud.spec b/packaging/suse15/cloud.spec
index 9f2dc3782197..5661e9afe906 100644
--- a/packaging/suse15/cloud.spec
+++ b/packaging/suse15/cloud.spec
@@ -210,6 +210,11 @@ fi
+if [ "%{_tests}" == "SKIP" ] ; then
+  echo "Adding skipTests flag to the maven build"
+  FLAGS="$FLAGS -DskipTests"
+fi
+
 mvn -Psystemvm,developer $FLAGS clean package
 cd ui && npm install && npm run build && cd ..
%install
[ ${RPM_BUILD_ROOT} != "/" ] && rm -rf ${RPM_BUILD_ROOT}
# Common directories
diff --git a/plugins/ca/root-ca/src/test/java/org/apache/cloudstack/ca/provider/RootCAProviderTest.java b/plugins/ca/root-ca/src/test/java/org/apache/cloudstack/ca/provider/RootCAProviderTest.java
index 15514b91c785..faaca5a8b147 100644
--- a/plugins/ca/root-ca/src/test/java/org/apache/cloudstack/ca/provider/RootCAProviderTest.java
+++ b/plugins/ca/root-ca/src/test/java/org/apache/cloudstack/ca/provider/RootCAProviderTest.java
@@ -29,22 +29,15 @@
import java.security.cert.X509Certificate;
import java.util.Arrays;
-import javax.net.ssl.SSLEngine;
-
import org.apache.cloudstack.framework.ca.Certificate;
-import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.utils.security.CertUtils;
-import org.apache.cloudstack.utils.security.SSLUtils;
import org.joda.time.DateTime;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
-
import org.mockito.junit.MockitoJUnitRunner;
-import org.mockito.Mockito;
-
@RunWith(MockitoJUnitRunner.class)
public class RootCAProviderTest {
@@ -128,22 +121,22 @@ public void testRevokeCertificate() throws Exception {
Assert.assertTrue(provider.revokeCertificate(CertUtils.generateRandomBigInt(), "anyString"));
}
- @Test
- public void testCreateSSLEngineWithoutAuthStrictness() throws Exception {
- provider.rootCAAuthStrictness = Mockito.mock(ConfigKey.class);
- Mockito.when(provider.rootCAAuthStrictness.value()).thenReturn(Boolean.FALSE);
- final SSLEngine e = provider.createSSLEngine(SSLUtils.getSSLContext(), "/1.2.3.4:5678", null);
- Assert.assertTrue(e.getWantClientAuth());
- Assert.assertFalse(e.getNeedClientAuth());
- }
-
- @Test
- public void testCreateSSLEngineWithAuthStrictness() throws Exception {
- provider.rootCAAuthStrictness = Mockito.mock(ConfigKey.class);
- Mockito.when(provider.rootCAAuthStrictness.value()).thenReturn(Boolean.TRUE);
- final SSLEngine e = provider.createSSLEngine(SSLUtils.getSSLContext(), "/1.2.3.4:5678", null);
- Assert.assertTrue(e.getNeedClientAuth());
- }
+// @Test
+// public void testCreateSSLEngineWithoutAuthStrictness() throws Exception {
+// provider.rootCAAuthStrictness = Mockito.mock(ConfigKey.class);
+// Mockito.when(provider.rootCAAuthStrictness.value()).thenReturn(Boolean.FALSE);
+// final SSLEngine e = provider.createSSLEngine(SSLUtils.getSSLContext(), "/1.2.3.4:5678", null);
+// Assert.assertTrue(e.getWantClientAuth());
+// Assert.assertFalse(e.getNeedClientAuth());
+// }
+//
+// @Test
+// public void testCreateSSLEngineWithAuthStrictness() throws Exception {
+// provider.rootCAAuthStrictness = Mockito.mock(ConfigKey.class);
+// Mockito.when(provider.rootCAAuthStrictness.value()).thenReturn(Boolean.TRUE);
+// final SSLEngine e = provider.createSSLEngine(SSLUtils.getSSLContext(), "/1.2.3.4:5678", null);
+// Assert.assertTrue(e.getNeedClientAuth());
+// }
@Test
public void testGetProviderName() throws Exception {
diff --git a/plugins/hypervisors/baremetal/pom.xml b/plugins/hypervisors/baremetal/pom.xml
index a6d3fbf374cf..f26909892f91 100755
--- a/plugins/hypervisors/baremetal/pom.xml
+++ b/plugins/hypervisors/baremetal/pom.xml
@@ -47,5 +47,31 @@
jaxb-impl
${cs.jaxb.version}
-
+
+ org.apache.httpcomponents
+ httpcore
+
+
+ org.apache.httpcomponents
+ httpclient
+
+
+ com.google.code.gson
+ gson
+ 2.8.0
+
+
+ com.github.scribejava
+ scribejava-apis
+ 3.4.1
+
+
+ net.juniper.netconf
+ netconf-juniper
+ 1.0
+
+
+
+
+
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/database/BaremetalRctVO.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/database/BaremetalRctVO.java
index 3a24cf4554d8..b3610d208b9d 100644
--- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/database/BaremetalRctVO.java
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/database/BaremetalRctVO.java
@@ -45,7 +45,7 @@ public class BaremetalRctVO implements InternalIdentity, Identity {
@Column(name = "url")
private String url;
- @Column(name = "rct")
+ @Column(name = "rct", length = 65535)
private String rct;
public long getId() {
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java
index 3bdd2e81fb51..dcfacfc8a450 100644
--- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java
@@ -27,31 +27,40 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Optional;
import java.util.UUID;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.api.ApiConstants;
+import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
+import com.cloud.agent.AgentManager;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupRoutingCommand;
-import com.cloud.baremetal.networkservice.BareMetalResourceBase;
+import com.cloud.api.query.dao.UserVmJoinDao;
+import com.cloud.baremetal.database.BaremetalRctDao;
import com.cloud.configuration.Config;
+import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenterVO;
import com.cloud.exception.DiscoveryException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.host.dao.HostDetailsDao;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.network.Network;
+import com.cloud.network.dao.NetworkDao;
import com.cloud.resource.Discoverer;
import com.cloud.resource.DiscovererBase;
import com.cloud.resource.ResourceStateAdapter;
import com.cloud.resource.ServerResource;
import com.cloud.resource.UnableDeleteHostException;
+import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import com.cloud.utils.script.Script2;
@@ -65,6 +74,16 @@ public class BareMetalDiscoverer extends DiscovererBase implements Discoverer, R
@Inject
protected VMInstanceDao _vmDao = null;
+ @Inject BaremetalVlanManager vlanMgr;
+ @Inject NetworkDao networkDao;
+ @Inject HostDao hostDao;
+ @Inject VMTemplateDao templateDao;
+ @Inject HostDetailsDao hostDetailsDao;
+ @Inject ClusterDetailsDao clusterDetailsDao;
+ @Inject BaremetalRctDao rctDao;
+ @Inject AgentManager agentManager;
+ @Inject UserVmJoinDao userVmJoinDao;
+
@Override
public boolean configure(String name, Map params) throws ConfigurationException {
_resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this);
@@ -81,13 +100,12 @@ public boolean stop() {
public Map extends ServerResource, Map> find(long dcId, Long podId, Long clusterId, URI url, String username, String password, List hostTags)
throws DiscoveryException {
- /* Enable this after we decide to use addBaremetalHostCmd instead of addHostCmd
String discoverName = _params.get(ApiConstants.BAREMETAL_DISCOVER_NAME);
if (!this.getClass().getName().equals(discoverName)) {
return null;
- } */
+ }
- Map> resources = new HashMap>();
+ Map> resources = new HashMap>();
Map details = new HashMap();
if (!url.getScheme().equals("http")) {
@@ -95,6 +113,7 @@ public Map extends ServerResource, Map> find(long dcId, Long p
s_logger.debug(msg);
return null;
}
+
if (clusterId == null) {
String msg = "must specify cluster Id when add host";
s_logger.debug(msg);
@@ -125,22 +144,24 @@ public Map extends ServerResource, Map> find(long dcId, Long p
String ipmiIp = ia.getHostAddress();
String guid = UUID.nameUUIDFromBytes(ipmiIp.getBytes()).toString();
- String injectScript = "scripts/util/ipmi.py";
- String scriptPath = Script.findScript("", injectScript);
- if (scriptPath == null) {
- throw new CloudRuntimeException("Unable to find key ipmi script "
- + injectScript);
- }
+ // only check the URL when the host is not a Baremetal of type MaaS
+ if (StringUtils.isEmpty(_params.get(ApiConstants.BAREMETAL_MAAS))) {
+ String injectScript = "scripts/util/ipmi.py";
+ String scriptPath = Script.findScript("", injectScript);
+ if (scriptPath == null) {
+ throw new CloudRuntimeException("Unable to find key ipmi script " + injectScript);
+ }
- final Script2 command = new Script2(scriptPath, s_logger);
- command.add("ping");
- command.add("hostname="+ipmiIp);
- command.add("usrname="+username);
- command.add("password="+password, ParamType.PASSWORD);
- final String result = command.execute();
- if (result != null) {
- s_logger.warn(String.format("Can not set up ipmi connection(ip=%1$s, username=%2$s, password=%3$s, args) because %4$s", ipmiIp, username, "******", result));
- return null;
+ final Script2 command = new Script2(scriptPath, s_logger);
+ command.add("ping");
+ command.add("hostname=" + ipmiIp);
+ command.add("usrname=" + username);
+ command.add("password=" + password, ParamType.PASSWORD);
+ final String result = command.execute();
+ if (result != null) {
+ s_logger.warn(String.format("Can not set up ipmi connection(ip=%1$s, username=%2$s, password=%3$s, args) because %4$s", ipmiIp, username, "******", result));
+ return null;
+ }
}
ClusterVO clu = _clusterDao.findById(clusterId);
@@ -158,28 +179,28 @@ public Map extends ServerResource, Map> find(long dcId, Long p
params.put(ApiConstants.PRIVATE_IP, ipmiIp);
params.put(ApiConstants.USERNAME, username);
params.put(ApiConstants.PASSWORD, password);
- params.put("vmDao", _vmDao);
- params.put("configDao", _configDao);
String resourceClassName = _configDao.getValue(Config.ExternalBaremetalResourceClassName.key());
- BareMetalResourceBase resource = null;
+ BareMetalResource resource = null;
if (resourceClassName != null) {
Class> clazz = Class.forName(resourceClassName);
- resource = (BareMetalResourceBase) clazz.newInstance();
+ resource = (BareMetalResource) clazz.newInstance();
String externalUrl = _configDao.getValue(Config.ExternalBaremetalSystemUrl.key());
- if (externalUrl == null) {
+ if (externalUrl == null && resourceClassName != "org.apache.cloudstack.compute.maas.MaasResourceProvider") {
throw new IllegalArgumentException(String.format("You must specify ExternalBaremetalSystemUrl in global config page as ExternalBaremetalResourceClassName is not null"));
}
details.put(BaremetalManager.ExternalBaremetalSystemUrl, externalUrl);
} else {
- resource = new BareMetalResourceBase();
+ resource = new BareMetalResourceProvider();
}
+
resource.configure("Bare Metal Agent", params);
+ resource.start();
- String memCapacity = (String)params.get(ApiConstants.MEMORY);
- String cpuCapacity = (String)params.get(ApiConstants.CPU_SPEED);
- String cpuNum = (String)params.get(ApiConstants.CPU_NUMBER);
- String mac = (String)params.get(ApiConstants.HOST_MAC);
+ String memCapacity = Optional.ofNullable((String)params.get(ApiConstants.MEMORY)).orElse(String.valueOf(resource.getMemCapacity()));
+ String cpuCapacity = Optional.ofNullable((String)params.get(ApiConstants.CPU_SPEED)).orElse(String.valueOf(resource.getCpuCapacity()));
+ String cpuNum = Optional.ofNullable((String)params.get(ApiConstants.CPU_NUMBER)).orElse(String.valueOf(resource.getCpuNum()));
+ String mac = Optional.ofNullable((String)params.get(ApiConstants.HOST_MAC)).orElse(resource.getMac());
if (hostTags != null && hostTags.size() != 0) {
details.put("hostTag", hostTags.get(0));
}
@@ -198,7 +219,6 @@ public Map extends ServerResource, Map> find(long dcId, Long p
details.put(BaremetalManager.EchoSecurityGroupAgent, isEchoScAgent);
resources.put(resource, details);
- resource.start();
zone.setGatewayProvider(Network.Provider.ExternalGateWay.getName());
zone.setDnsProvider(Network.Provider.ExternalDhcpServer.getName());
@@ -273,8 +293,6 @@ protected HashMap buildConfigParams(HostVO host) {
HashMap params = super.buildConfigParams(host);
params.put("hostId", host.getId());
params.put("ipaddress", host.getPrivateIpAddress());
- params.put("vmDao", _vmDao);
- params.put("configDao", _configDao);
return params;
}
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java
index c37b51df5e9b..929c8efc7c8e 100644
--- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java
@@ -22,8 +22,8 @@
import javax.inject.Inject;
import javax.naming.ConfigurationException;
-import com.cloud.utils.NumbersUtil;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import com.cloud.capacity.CapacityManager;
@@ -46,9 +46,12 @@
import com.cloud.offering.ServiceOffering;
import com.cloud.org.Cluster;
import com.cloud.resource.ResourceManager;
+import com.cloud.utils.NumbersUtil;
import com.cloud.utils.component.AdapterBase;
+import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
+import com.cloud.vm.dao.VMInstanceDao;
public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner {
private static final Logger s_logger = Logger.getLogger(BareMetalPlanner.class);
@@ -68,6 +71,8 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner {
protected ResourceManager _resourceMgr;
@Inject
protected ClusterDetailsDao _clusterDetailsDao;
+ @Inject
+ protected VMInstanceDao _vmDao;
@Override
public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException {
@@ -104,8 +109,7 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl
hosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId());
if (hostTag != null) {
for (HostVO h : hosts) {
- _hostDao.loadDetails(h);
- if (h.getDetail("hostTag") != null && h.getDetail("hostTag").equalsIgnoreCase(hostTag)) {
+ if (hasHostCorrectTag(h, hostTag)) {
target = h;
break;
}
@@ -137,7 +141,9 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl
Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
- if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) {
+ if (hasHostCorrectTag(h, hostTag) && _capacityMgr.checkIfHostHasCapacity(h.getId(),
+ cpu_requested, ram_requested, false,
+ cpuOvercommitRatio, memoryOvercommitRatio, true) && isHostAvailable(h)) {
s_logger.debug("Find host " + h.getId() + " has enough capacity");
DataCenter dc = _dcDao.findById(h.getDataCenterId());
Pod pod = _podDao.findById(h.getPodId());
@@ -150,6 +156,26 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl
return null;
}
+ private boolean isHostAvailable(HostVO h) {
+ List vmsRunningOnHost = _vmDao.listByHostId(h.getId());
+ List vmsStoppedOnHost = _vmDao.listByLastHostId(h.getId());
+ return vmsRunningOnHost.isEmpty() && vmsStoppedOnHost.isEmpty();
+ }
+
+ private boolean hasHostCorrectTag(HostVO h, String tag) {
+ _hostDao.loadDetails(h);
+ if (StringUtils.isEmpty(tag)) {
+ return true;
+ }
+ if (StringUtils.isEmpty(h.getDetail("hostTag"))) {
+ return false;
+ }
+ if (h.getDetail("hostTag").equalsIgnoreCase(tag)) {
+ return true;
+ }
+ return false;
+ }
+
@Override
public boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) {
return vm.getHypervisorType() == HypervisorType.BareMetal;
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalResource.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalResource.java
new file mode 100644
index 000000000000..b58a8bb8216e
--- /dev/null
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalResource.java
@@ -0,0 +1,31 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Automatically generated by addcopyright.py at 01/29/2013
+// Apache License, Version 2.0 (the "License"); you may not use this
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+package com.cloud.baremetal.manager;
+
+import com.cloud.resource.ServerResource;
+
+public interface BareMetalResource extends ServerResource {
+ long getMemCapacity();
+ long getCpuCapacity();
+ long getCpuNum();
+ String getMac();
+
+}
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalResourceProvider.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalResourceProvider.java
new file mode 100644
index 000000000000..dc5275e60dc1
--- /dev/null
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalResourceProvider.java
@@ -0,0 +1,107 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Automatically generated by addcopyright.py at 01/29/2013
+// Apache License, Version 2.0 (the "License"); you may not use this
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+package com.cloud.baremetal.manager;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.log4j.Logger;
+import org.springframework.beans.factory.annotation.Configurable;
+
+import com.cloud.agent.api.HostVmStateReportEntry;
+import com.cloud.agent.api.StartupCommand;
+import com.cloud.agent.api.StartupRoutingCommand;
+import com.cloud.baremetal.networkservice.BareMetalResourceBase;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.dao.VMInstanceDao;
+
+@Configurable
+public class BareMetalResourceProvider extends BareMetalResourceBase implements BareMetalResource {
+ private static final Logger s_logger = Logger.getLogger(BareMetalResourceProvider.class);
+
+ @Inject protected ConfigurationDao configDao;
+ @Inject protected VMInstanceDao vmDao;
+
+ @Override
+ public boolean configure(String name, Map params) throws ConfigurationException {
+ ipmiIface = "default";
+ configure(name, params, configDao, vmDao);
+
+ if (params.keySet().size() == 0) {
+ return true;
+ }
+
+ return true;
+ }
+
+ @Override
+ public boolean start() {
+ return true;
+ }
+
+ @Override
+ public boolean stop() {
+ return true;
+ }
+
+ @Override
+ public StartupCommand[] initialize() {
+ StartupRoutingCommand cmd = new StartupRoutingCommand(0, 0, 0, 0, null, Hypervisor.HypervisorType.BareMetal,
+ new HashMap());
+
+ cmd.setDataCenter(_zone);
+ cmd.setPod(_pod);
+ cmd.setCluster(_cluster);
+ cmd.setGuid(_uuid);
+ cmd.setName(_ip);
+ cmd.setPrivateIpAddress(_ip);
+ cmd.setStorageIpAddress(_ip);
+ cmd.setVersion(BareMetalResourceBase.class.getPackage().getImplementationVersion());
+ cmd.setCpus((int) _cpuNum);
+ cmd.setSpeed(_cpuCapacity);
+ cmd.setMemory(_memCapacity);
+ cmd.setPrivateMacAddress(_mac);
+ cmd.setPublicMacAddress(_mac);
+ return new StartupCommand[] { cmd };
+ }
+
+ protected Map getHostVmStateReport() {
+ Map states = new HashMap();
+ if (hostId != null) {
+ final List extends VMInstanceVO> vms = vmDao.listByHostId(hostId);
+ for (VMInstanceVO vm : vms) {
+ states.put(
+ vm.getInstanceName(),
+ new HostVmStateReportEntry(
+ vm.getPowerState(), "host-" + hostId
+ )
+ );
+ }
+ }
+ return states;
+ }
+}
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java
index 8265f951f8a8..764a724fda2f 100644
--- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java
@@ -184,6 +184,14 @@ public boolean delete(TemplateProfile profile) {
}
}
+ if (profile.getZoneId() > 0) {
+ VMTemplateZoneVO templateZone = _tmpltZoneDao.findByZoneTemplate(profile.getZoneId(), templateId);
+
+ if (templateZone != null) {
+ _tmpltZoneDao.remove(templateZone.getId());
+ }
+ }
+
s_logger.debug("Successfully marked template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName);
// If there are no more non-destroyed template host entries for this template, delete it
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java
index b1aafc692ef1..b3ce3e8a2156 100644
--- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java
@@ -32,6 +32,8 @@
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.api.BaremetalProvisionDoneNotificationCmd;
+import org.apache.cloudstack.framework.config.ConfigKey;
+import org.apache.cloudstack.framework.config.Configurable;
import org.apache.log4j.Logger;
import org.apache.cloudstack.api.AddBaremetalHostCmd;
@@ -45,7 +47,7 @@
import com.cloud.vm.VirtualMachine.Event;
import com.cloud.vm.VirtualMachine.State;
-public class BaremetalManagerImpl extends ManagerBase implements BaremetalManager, StateListener {
+public class BaremetalManagerImpl extends ManagerBase implements BaremetalManager, StateListener, Configurable {
private static final Logger s_logger = Logger.getLogger(BaremetalManagerImpl.class);
@Inject
@@ -53,6 +55,12 @@ public class BaremetalManagerImpl extends ManagerBase implements BaremetalManage
@Inject
protected VMInstanceDao vmDao;
+ public static final ConfigKey diskEraseOnDestroy = new ConfigKey(Integer.class, "baremetal.disk.erase.destroy", "Advanced", String.valueOf(0),
+ "Erase disk on destroy baremetal VM (0=No erase, 1=Quick erase, 2=Full erase)", false, ConfigKey.Scope.Global, null);
+
+ public static final ConfigKey pxeVlan = new ConfigKey(Integer.class, "baremetal.pxe.vlan", "Advanced", null,
+ "VLAN of the PXE network", false, ConfigKey.Scope.Global, null);
+
@Override
public boolean configure(String name, Map params) throws ConfigurationException {
VirtualMachine.State.getStateMachine().registerListener(this);
@@ -93,7 +101,7 @@ public boolean postStateTransitionEvent(StateMachine2.Transition t
HostVO host = _hostDao.findById(vo.getHostId());
if (host == null) {
- s_logger.debug("Skip oldState " + oldState + " to " + "newState " + newState + " transimtion");
+ s_logger.debug("Skip state transition from " + oldState + " to " + newState);
return true;
}
_hostDao.loadDetails(host);
@@ -153,4 +161,14 @@ public void notifyProvisionDone(BaremetalProvisionDoneNotificationCmd cmd) {
s_logger.debug(String.format("received baremetal provision done notification for vm[id:%s name:%s] running on host[mac:%s, ip:%s]",
vm.getId(), vm.getInstanceName(), host.getPrivateMacAddress(), host.getPrivateIpAddress()));
}
+
+ @Override
+ public String getConfigComponentName() {
+ return BaremetalManager.class.getSimpleName();
+ }
+
+ @Override
+ public ConfigKey>[] getConfigKeys() {
+ return new ConfigKey>[] {diskEraseOnDestroy, pxeVlan};
+ }
}
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManager.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManager.java
index b6311f79aab5..a1168b6c0623 100644
--- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManager.java
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManager.java
@@ -19,11 +19,8 @@
import com.cloud.baremetal.networkservice.BaremetalRctResponse;
import com.cloud.baremetal.networkservice.BaremetalSwitchBackend;
-import com.cloud.deploy.DeployDestination;
-import com.cloud.network.Network;
import com.cloud.utils.component.Manager;
import com.cloud.utils.component.PluggableService;
-import com.cloud.vm.VirtualMachineProfile;
import org.apache.cloudstack.api.AddBaremetalRctCmd;
import org.apache.cloudstack.api.DeleteBaremetalRctCmd;
@@ -31,13 +28,15 @@ public interface BaremetalVlanManager extends Manager, PluggableService {
BaremetalRctResponse addRct(AddBaremetalRctCmd cmd);
- void prepareVlan(Network nw, DeployDestination destHost);
-
- void releaseVlan(Network nw, VirtualMachineProfile vm);
-
void registerSwitchBackend(BaremetalSwitchBackend backend);
void deleteRct(DeleteBaremetalRctCmd cmd);
+ void prepareVlan(int vlanId, String macAddress, VlanType type);
+
+ void releaseVlan(int vlanId, String macAddress, VlanType type);
+
+ void releaseAllVlan(String macAddress, VlanType type);
+
BaremetalRctResponse listRct();
}
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManagerImpl.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManagerImpl.java
index 274962562bec..a2b74265e066 100644
--- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManagerImpl.java
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManagerImpl.java
@@ -17,16 +17,28 @@
//
package com.cloud.baremetal.manager;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.AddBaremetalRctCmd;
+import org.apache.cloudstack.api.DeleteBaremetalRctCmd;
+import org.apache.cloudstack.api.ListBaremetalRctCmd;
+import org.apache.cloudstack.utils.baremetal.BaremetalUtils;
+import org.springframework.web.client.RestTemplate;
+
import com.cloud.baremetal.database.BaremetalRctDao;
import com.cloud.baremetal.database.BaremetalRctVO;
import com.cloud.baremetal.networkservice.BaremetalRctResponse;
import com.cloud.baremetal.networkservice.BaremetalSwitchBackend;
import com.cloud.baremetal.networkservice.BaremetalVlanStruct;
-import com.cloud.deploy.DeployDestination;
-import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
-import com.cloud.network.Network;
-import com.cloud.network.Networks;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.user.AccountVO;
@@ -38,22 +50,7 @@
import com.cloud.utils.db.QueryBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.exception.CloudRuntimeException;
-import com.cloud.vm.VirtualMachineProfile;
import com.google.gson.Gson;
-import org.apache.cloudstack.acl.RoleType;
-import org.apache.cloudstack.api.AddBaremetalRctCmd;
-import org.apache.cloudstack.api.DeleteBaremetalRctCmd;
-import org.apache.cloudstack.api.ListBaremetalRctCmd;
-import org.apache.cloudstack.utils.baremetal.BaremetalUtils;
-import org.springframework.web.client.RestTemplate;
-
-import javax.inject.Inject;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
/**
* Created by frank on 5/8/14.
@@ -120,7 +117,8 @@ public BaremetalRctResponse addRct(AddBaremetalRctCmd cmd) {
}
@Override
- public void prepareVlan(Network nw, DeployDestination destHost) {
+ public void prepareVlan(int vlan, String macAddress, VlanType type) {
+
List vos = rctDao.listAll();
if (vos.isEmpty()) {
throw new CloudRuntimeException("no rack configuration found, please call addBaremetalRct to add one");
@@ -129,12 +127,11 @@ public void prepareVlan(Network nw, DeployDestination destHost) {
BaremetalRctVO vo = vos.get(0);
BaremetalRct rct = gson.fromJson(vo.getRct(), BaremetalRct.class);
- RackPair rp = findRack(rct, destHost.getHost().getPrivateMacAddress());
+ RackPair rp = findRack(rct, macAddress);
if (rp == null) {
- throw new CloudRuntimeException(String.format("cannot find any rack contains host[mac:%s], please double check your rack configuration file, update it and call addBaremetalRct again", destHost.getHost().getPrivateMacAddress()));
+ throw new CloudRuntimeException(String.format("cannot find any rack contains host[mac:%s], please double check your rack configuration file, update it and call addBaremetalRct again", macAddress));
}
- int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(nw.getBroadcastUri()));
BaremetalSwitchBackend backend = getSwitchBackend(rp.rack.getL2Switch().getType());
BaremetalVlanStruct struct = new BaremetalVlanStruct();
struct.setHostMac(rp.host.getMac());
@@ -144,11 +141,12 @@ public void prepareVlan(Network nw, DeployDestination destHost) {
struct.setSwitchType(rp.rack.getL2Switch().getType());
struct.setSwitchUsername(rp.rack.getL2Switch().getUsername());
struct.setVlan(vlan);
+ struct.setVlanType(type);
backend.prepareVlan(struct);
}
@Override
- public void releaseVlan(Network nw, VirtualMachineProfile vm) {
+ public void releaseVlan(int vlanId, String macAddress, VlanType type) {
List vos = rctDao.listAll();
if (vos.isEmpty()) {
throw new CloudRuntimeException("no rack configuration found, please call addBaremetalRct to add one");
@@ -156,11 +154,10 @@ public void releaseVlan(Network nw, VirtualMachineProfile vm) {
BaremetalRctVO vo = vos.get(0);
BaremetalRct rct = gson.fromJson(vo.getRct(), BaremetalRct.class);
- HostVO host = hostDao.findById(vm.getVirtualMachine().getHostId());
- RackPair rp = findRack(rct, host.getPrivateMacAddress());
+
+ RackPair rp = findRack(rct, macAddress);
assert rp != null : String.format("where is my rack???");
- int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(nw.getBroadcastUri()));
BaremetalVlanStruct struct = new BaremetalVlanStruct();
struct.setHostMac(rp.host.getMac());
struct.setPort(rp.host.getPort());
@@ -168,7 +165,35 @@ public void releaseVlan(Network nw, VirtualMachineProfile vm) {
struct.setSwitchPassword(rp.rack.getL2Switch().getPassword());
struct.setSwitchType(rp.rack.getL2Switch().getType());
struct.setSwitchUsername(rp.rack.getL2Switch().getUsername());
- struct.setVlan(vlan);
+ struct.setVlan(vlanId);
+ struct.setVlanType(type);
+ struct.setRemoveAll(false);
+ BaremetalSwitchBackend backend = getSwitchBackend(rp.rack.getL2Switch().getType());
+ backend.removePortFromVlan(struct);
+ }
+
+ @Override
+ public void releaseAllVlan(String macAddress, VlanType type) {
+ List vos = rctDao.listAll();
+ if (vos.isEmpty()) {
+ throw new CloudRuntimeException("no rack configuration found, please call addBaremetalRct to add one");
+ }
+
+ BaremetalRctVO vo = vos.get(0);
+ BaremetalRct rct = gson.fromJson(vo.getRct(), BaremetalRct.class);
+
+ RackPair rp = findRack(rct, macAddress);
+ assert rp != null : String.format("where is my rack???");
+
+ BaremetalVlanStruct struct = new BaremetalVlanStruct();
+ struct.setHostMac(rp.host.getMac());
+ struct.setPort(rp.host.getPort());
+ struct.setSwitchIp(rp.rack.getL2Switch().getIp());
+ struct.setSwitchPassword(rp.rack.getL2Switch().getPassword());
+ struct.setSwitchType(rp.rack.getL2Switch().getType());
+ struct.setSwitchUsername(rp.rack.getL2Switch().getUsername());
+ struct.setVlanType(type);
+ struct.setRemoveAll(true);
BaremetalSwitchBackend backend = getSwitchBackend(rp.rack.getL2Switch().getType());
backend.removePortFromVlan(struct);
}
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/VlanType.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/VlanType.java
new file mode 100644
index 000000000000..ce406771a904
--- /dev/null
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/VlanType.java
@@ -0,0 +1,22 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+package com.cloud.baremetal.manager;
+
+public enum VlanType {
+ TAGGED, UNTAGGED
+}
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalResourceBase.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalResourceBase.java
index cc419f4158f7..a1fde144ecb1 100644
--- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalResourceBase.java
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalResourceBase.java
@@ -22,8 +22,6 @@
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.baremetal.networkservice;
-import java.util.HashMap;
-import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
@@ -40,13 +38,14 @@
import com.cloud.agent.api.CheckVirtualMachineAnswer;
import com.cloud.agent.api.CheckVirtualMachineCommand;
import com.cloud.agent.api.Command;
-import com.cloud.agent.api.HostVmStateReportEntry;
import com.cloud.agent.api.MaintainAnswer;
import com.cloud.agent.api.MaintainCommand;
import com.cloud.agent.api.MigrateAnswer;
import com.cloud.agent.api.MigrateCommand;
import com.cloud.agent.api.PingCommand;
import com.cloud.agent.api.PingRoutingCommand;
+import com.cloud.agent.api.PlugNicAnswer;
+import com.cloud.agent.api.PlugNicCommand;
import com.cloud.agent.api.PrepareForMigrationAnswer;
import com.cloud.agent.api.PrepareForMigrationCommand;
import com.cloud.agent.api.ReadyAnswer;
@@ -56,10 +55,11 @@
import com.cloud.agent.api.SecurityGroupRulesCmd;
import com.cloud.agent.api.StartAnswer;
import com.cloud.agent.api.StartCommand;
-import com.cloud.agent.api.StartupCommand;
-import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.agent.api.StopAnswer;
import com.cloud.agent.api.StopCommand;
+import com.cloud.agent.api.UnPlugNicAnswer;
+import com.cloud.agent.api.UnPlugNicCommand;
+import com.cloud.agent.api.baremetal.DestroyCommand;
import com.cloud.agent.api.baremetal.IpmISetBootDevCommand;
import com.cloud.agent.api.baremetal.IpmISetBootDevCommand.BootDev;
import com.cloud.agent.api.baremetal.IpmiBootorResetCommand;
@@ -67,7 +67,6 @@
import com.cloud.baremetal.manager.BaremetalManager;
import com.cloud.configuration.Config;
import com.cloud.host.Host.Type;
-import com.cloud.hypervisor.Hypervisor;
import com.cloud.resource.ServerResource;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.db.QueryBuilder;
@@ -82,7 +81,7 @@
import com.cloud.vm.VirtualMachine.PowerState;
import com.cloud.vm.dao.VMInstanceDao;
-public class BareMetalResourceBase extends ManagerBase implements ServerResource {
+public abstract class BareMetalResourceBase extends ManagerBase implements ServerResource {
private static final Logger s_logger = Logger.getLogger(BareMetalResourceBase.class);
protected String _uuid;
protected String _zone;
@@ -108,26 +107,49 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource
protected Script2 _forcePowerOffCommand;
protected Script2 _bootOrRebootCommand;
protected String _vmName;
+ protected String ipmiIface;
protected int ipmiRetryTimes = 5;
+ protected long ipmiRetryDelay = 1;
+ protected long ipmiTimeout = 0;
protected boolean provisionDoneNotificationOn = false;
protected int isProvisionDoneNotificationTimeout = 1800;
- protected ConfigurationDao configDao;
- protected VMInstanceDao vmDao;
+ public long getMemCapacity() {
+ return _memCapacity;
+ }
+ public long getCpuCapacity() {
+ return _cpuCapacity;
+ }
- @Override
- public boolean configure(String name, Map params) throws ConfigurationException {
+ public long getCpuNum() {
+ return _cpuNum;
+ }
+
+ public String getMac() {
+ return _mac;
+ }
+
+ public boolean configure(String name, Map params, ConfigurationDao configDao, VMInstanceDao vmDao) throws ConfigurationException {
setName(name);
+
+ if (params.keySet().size() == 0) {
+ return true;
+ }
+
_uuid = (String) params.get("guid");
- try {
- _memCapacity = Long.parseLong((String) params.get(ApiConstants.MEMORY)) * 1024L * 1024L;
- _cpuCapacity = Long.parseLong((String) params.get(ApiConstants.CPU_SPEED));
- _cpuNum = Long.parseLong((String) params.get(ApiConstants.CPU_NUMBER));
- } catch (NumberFormatException e) {
- throw new ConfigurationException(String.format("Unable to parse number of CPU or memory capacity "
- + "or cpu capacity(cpu number = %1$s memCapacity=%2$s, cpuCapacity=%3$s", params.get(ApiConstants.CPU_NUMBER),
- params.get(ApiConstants.MEMORY), params.get(ApiConstants.CPU_SPEED)));
+
+ // MaaS Create Node
+ if (ApiConstants.BAREMETAL_MAAS_ACTION_CREATE.equals((String) params.get(ApiConstants.BAREMETAL_MAAS_ACTION))) {
+ try {
+ _memCapacity = Long.parseLong((String) params.get(ApiConstants.MEMORY)) * 1024L * 1024L;
+ _cpuCapacity = Long.parseLong((String) params.get(ApiConstants.CPU_SPEED));
+ _cpuNum = Long.parseLong((String) params.get(ApiConstants.CPU_NUMBER));
+ } catch (NumberFormatException e) {
+ throw new ConfigurationException(String.format("Unable to parse number of CPU or memory capacity "
+ + "or cpu capacity(cpu number = %1$s memCapacity=%2$s, cpuCapacity=%3$s", params.get(ApiConstants.CPU_NUMBER),
+ params.get(ApiConstants.MEMORY), params.get(ApiConstants.CPU_SPEED)));
+ }
}
_zone = (String) params.get("zone");
@@ -140,8 +162,6 @@ public boolean configure(String name, Map params) throws Configu
_password = (String) params.get(ApiConstants.PASSWORD);
_vmName = (String) params.get("vmName");
String echoScAgent = (String) params.get(BaremetalManager.EchoSecurityGroupAgent);
- vmDao = (VMInstanceDao) params.get("vmDao");
- configDao = (ConfigurationDao) params.get("configDao");
if (_pod == null) {
throw new ConfigurationException("Unable to get the pod");
@@ -155,13 +175,16 @@ public boolean configure(String name, Map params) throws Configu
throw new ConfigurationException("Unable to get the host address");
}
- if (_mac.equalsIgnoreCase("unknown")) {
- throw new ConfigurationException("Unable to get the host mac address");
- }
+ // MaaS Create Node
+ if (ApiConstants.BAREMETAL_MAAS_ACTION_CREATE.equals((String) params.get(ApiConstants.BAREMETAL_MAAS_ACTION))) {
+ if (_mac.equalsIgnoreCase("unknown")) {
+ throw new ConfigurationException("Unable to get the host mac address");
+ }
- if (_mac.split(":").length != 6) {
- throw new ConfigurationException("Wrong MAC format(" + _mac
- + "). It must be in format of for example 00:11:ba:33:aa:dd which is not case sensitive");
+ if (_mac.split(":").length != 6) {
+ throw new ConfigurationException("Wrong MAC format(" + _mac
+ + "). It must be in format of for example 00:11:ba:33:aa:dd which is not case sensitive");
+ }
}
if (_uuid == null) {
@@ -172,24 +195,37 @@ public boolean configure(String name, Map params) throws Configu
_isEchoScAgent = Boolean.valueOf(echoScAgent);
}
- String ipmiIface = "default";
- try {
- ipmiIface = configDao.getValue(Config.BaremetalIpmiLanInterface.key());
- } catch (Exception e) {
- s_logger.debug(e.getMessage(), e);
- }
+ if (configDao != null) {
+ try {
+ ipmiIface = configDao.getValue(Config.BaremetalIpmiLanInterface.key());
+ } catch (Exception e) {
+ s_logger.debug(e.getMessage(), e);
+ }
- try {
- ipmiRetryTimes = Integer.parseInt(configDao.getValue(Config.BaremetalIpmiRetryTimes.key()));
- } catch (Exception e) {
- s_logger.debug(e.getMessage(), e);
- }
+ try {
+ ipmiRetryTimes = Integer.parseInt(configDao.getValue(Config.BaremetalIpmiRetryTimes.key()));
+ } catch (Exception e) {
+ s_logger.debug(e.getMessage(), e);
+ }
- try {
- provisionDoneNotificationOn = Boolean.valueOf(configDao.getValue(Config.BaremetalProvisionDoneNotificationEnabled.key()));
- isProvisionDoneNotificationTimeout = Integer.parseInt(configDao.getValue(Config.BaremetalProvisionDoneNotificationTimeout.key()));
- } catch (Exception e) {
- s_logger.debug(e.getMessage(), e);
+ try {
+ ipmiRetryDelay = Long.parseLong(configDao.getValue(Config.BaremetalIpmiRetryDelay.key()));
+ } catch (Exception e) {
+ s_logger.debug(e.getMessage(), e);
+ }
+
+ try {
+ ipmiTimeout = Long.parseLong(configDao.getValue(Config.BaremetalIpmiTimeout.key()));
+ } catch (Exception e) {
+ s_logger.debug(e.getMessage(), e);
+ }
+
+ try {
+ provisionDoneNotificationOn = Boolean.valueOf(configDao.getValue(Config.BaremetalProvisionDoneNotificationEnabled.key()));
+ isProvisionDoneNotificationTimeout = Integer.parseInt(configDao.getValue(Config.BaremetalProvisionDoneNotificationTimeout.key()));
+ } catch (Exception e) {
+ s_logger.debug(e.getMessage(), e);
+ }
}
String injectScript = "scripts/util/ipmi.py";
@@ -198,7 +234,7 @@ public boolean configure(String name, Map params) throws Configu
throw new ConfigurationException("Cannot find ping script " + scriptPath);
}
String pythonPath = "/usr/bin/python";
- _pingCommand = new Script2(pythonPath, s_logger);
+ _pingCommand = new Script2(pythonPath, ipmiTimeout, s_logger);
_pingCommand.add(scriptPath);
_pingCommand.add("ping");
_pingCommand.add("interface=" + ipmiIface);
@@ -278,19 +314,25 @@ public boolean configure(String name, Map params) throws Configu
return true;
}
- protected boolean doScript(Script cmd) {
- return doScript(cmd, null);
+ @Override
+ public Type getType() {
+ return com.cloud.host.Host.Type.Routing;
}
- protected boolean doScript(Script cmd, int retry) {
- return doScript(cmd, null, retry);
+ @Override
+ public void disconnected() {
+
+ }
+
+ protected boolean doScript(Script cmd) {
+ return doScript(cmd, null);
}
protected boolean doScript(Script cmd, OutputInterpreter interpreter) {
- return doScript(cmd, interpreter, ipmiRetryTimes);
+ return doScript(cmd, interpreter, ipmiRetryTimes, 1);
}
- protected boolean doScript(Script cmd, OutputInterpreter interpreter, int retry) {
+ protected boolean doScript(Script cmd, OutputInterpreter interpreter, int retry, long retryDelay) {
String res = null;
while (retry-- > 0) {
if (interpreter == null) {
@@ -301,7 +343,7 @@ protected boolean doScript(Script cmd, OutputInterpreter interpreter, int retry)
if (res != null && res.startsWith("Error: Unable to establish LAN")) {
s_logger.warn("IPMI script timeout(" + cmd.toString() + "), will retry " + retry + " times");
try {
- TimeUnit.SECONDS.sleep(1);
+ TimeUnit.SECONDS.sleep(retryDelay);
} catch (InterruptedException e) {
s_logger.debug("[ignored] interrupted while waiting to retry running script.");
}
@@ -317,94 +359,8 @@ protected boolean doScript(Script cmd, OutputInterpreter interpreter, int retry)
return false;
}
- @Override
- public boolean start() {
- return true;
- }
-
- @Override
- public boolean stop() {
- return true;
- }
-
- @Override
- public Type getType() {
- return com.cloud.host.Host.Type.Routing;
- }
-
- protected Map getHostVmStateReport() {
- Map states = new HashMap();
- if (hostId != null) {
- final List extends VMInstanceVO> vms = vmDao.listByHostId(hostId);
- for (VMInstanceVO vm : vms) {
- states.put(
- vm.getInstanceName(),
- new HostVmStateReportEntry(
- vm.getPowerState(), "host-" + hostId
- )
- );
- }
- }
- return states;
- }
-
- @Override
- public StartupCommand[] initialize() {
- StartupRoutingCommand cmd = new StartupRoutingCommand(0, 0, 0, 0, null, Hypervisor.HypervisorType.BareMetal,
- new HashMap());
-
- cmd.setDataCenter(_zone);
- cmd.setPod(_pod);
- cmd.setCluster(_cluster);
- cmd.setGuid(_uuid);
- cmd.setName(_ip);
- cmd.setPrivateIpAddress(_ip);
- cmd.setStorageIpAddress(_ip);
- cmd.setVersion(BareMetalResourceBase.class.getPackage().getImplementationVersion());
- cmd.setCpus((int) _cpuNum);
- cmd.setSpeed(_cpuCapacity);
- cmd.setMemory(_memCapacity);
- cmd.setPrivateMacAddress(_mac);
- cmd.setPublicMacAddress(_mac);
- return new StartupCommand[] { cmd };
- }
-
- private boolean ipmiPing() {
- return doScript(_pingCommand);
- }
-
- @Override
- public PingCommand getCurrentStatus(long id) {
- try {
- if (!ipmiPing()) {
- Thread.sleep(1000);
- if (!ipmiPing()) {
- s_logger.warn("Cannot ping ipmi nic " + _ip);
- return null;
- }
- }
- } catch (Exception e) {
- s_logger.debug("Cannot ping ipmi nic " + _ip, e);
- return null;
- }
-
- return new PingRoutingCommand(getType(), id, null);
-
- /*
- if (hostId != null) {
- final List extends VMInstanceVO> vms = vmDao.listByHostId(hostId);
- if (vms.isEmpty()) {
- return new PingRoutingCommand(getType(), id, null);
- } else {
- VMInstanceVO vm = vms.get(0);
- SecurityGroupHttpClient client = new SecurityGroupHttpClient();
- HashMap> nwGrpStates = client.sync(vm.getInstanceName(), vm.getId(), vm.getPrivateIpAddress());
- return new PingRoutingWithNwGroupsCommand(getType(), id, null, nwGrpStates);
- }
- } else {
- return new PingRoutingCommand(getType(), id, null);
- }
- */
+ protected boolean ipmiPing() {
+ return doScript(_pingCommand, null, ipmiRetryTimes, ipmiRetryDelay);
}
protected Answer execute(IpmISetBootDevCommand cmd) {
@@ -463,40 +419,16 @@ protected Answer execute(SecurityGroupRulesCmd cmd) {
return hc.call(cmd.getGuestIp(), cmd);
}
- @Override
- public Answer executeRequest(Command cmd) {
- try {
- if (cmd instanceof ReadyCommand) {
- return execute((ReadyCommand) cmd);
- } else if (cmd instanceof StartCommand) {
- return execute((StartCommand) cmd);
- } else if (cmd instanceof StopCommand) {
- return execute((StopCommand) cmd);
- } else if (cmd instanceof RebootCommand) {
- return execute((RebootCommand) cmd);
- } else if (cmd instanceof IpmISetBootDevCommand) {
- return execute((IpmISetBootDevCommand) cmd);
- } else if (cmd instanceof MaintainCommand) {
- return execute((MaintainCommand) cmd);
- } else if (cmd instanceof PrepareForMigrationCommand) {
- return execute((PrepareForMigrationCommand) cmd);
- } else if (cmd instanceof MigrateCommand) {
- return execute((MigrateCommand) cmd);
- } else if (cmd instanceof CheckVirtualMachineCommand) {
- return execute((CheckVirtualMachineCommand) cmd);
- } else if (cmd instanceof IpmiBootorResetCommand) {
- return execute((IpmiBootorResetCommand) cmd);
- } else if (cmd instanceof SecurityGroupRulesCmd) {
- return execute((SecurityGroupRulesCmd) cmd);
- } else if (cmd instanceof CheckNetworkCommand) {
- return execute((CheckNetworkCommand) cmd);
- } else {
- return Answer.createUnsupportedCommandAnswer(cmd);
- }
- } catch (Throwable t) {
- s_logger.debug(t.getMessage(), t);
- return new Answer(cmd, false, t.getMessage());
- }
+ protected Answer execute(DestroyCommand cmd) {
+ return new Answer(cmd, true, "Success");
+ }
+
+ protected PlugNicAnswer execute(PlugNicCommand cmd) {
+ return new PlugNicAnswer(cmd, false, "Adding NIC not supported");
+ }
+
+ protected UnPlugNicAnswer execute(UnPlugNicCommand cmd) {
+ return new UnPlugNicAnswer(cmd, false, "Removing NIC not supported");
}
protected boolean isPowerOn(String str) {
@@ -512,10 +444,10 @@ protected boolean isPowerOn(String str) {
protected RebootAnswer execute(final RebootCommand cmd) {
String infoStr = "Command not supported in present state";
OutputInterpreter.AllLinesParser interpreter = new OutputInterpreter.AllLinesParser();
- if (!doScript(_rebootCommand, interpreter, 10)) {
+ if (!doScript(_rebootCommand, interpreter, 10, 1)) {
if (interpreter.getLines().contains(infoStr)) {
// try again, this error should be temporary
- if (!doScript(_rebootCommand, interpreter, 10)) {
+ if (!doScript(_rebootCommand, interpreter, 10, 1)) {
return new RebootAnswer(cmd, "IPMI reboot failed", false);
}
} else {
@@ -632,11 +564,6 @@ protected ReadyAnswer execute(ReadyCommand cmd) {
return new ReadyAnswer(cmd);
}
- @Override
- public void disconnected() {
-
- }
-
@Override
public IAgentControl getAgentControl() {
return _agentControl;
@@ -647,4 +574,60 @@ public void setAgentControl(IAgentControl agentControl) {
_agentControl = agentControl;
}
+ @Override
+ public Answer executeRequest(Command cmd) {
+ try {
+ if (cmd instanceof ReadyCommand) {
+ return execute((ReadyCommand) cmd);
+ } else if (cmd instanceof StartCommand) {
+ return execute((StartCommand) cmd);
+ } else if (cmd instanceof StopCommand) {
+ return execute((StopCommand) cmd);
+ } else if (cmd instanceof RebootCommand) {
+ return execute((RebootCommand) cmd);
+ } else if (cmd instanceof IpmISetBootDevCommand) {
+ return execute((IpmISetBootDevCommand) cmd);
+ } else if (cmd instanceof MaintainCommand) {
+ return execute((MaintainCommand) cmd);
+ } else if (cmd instanceof PrepareForMigrationCommand) {
+ return execute((PrepareForMigrationCommand) cmd);
+ } else if (cmd instanceof MigrateCommand) {
+ return execute((MigrateCommand) cmd);
+ } else if (cmd instanceof CheckVirtualMachineCommand) {
+ return execute((CheckVirtualMachineCommand) cmd);
+ } else if (cmd instanceof IpmiBootorResetCommand) {
+ return execute((IpmiBootorResetCommand) cmd);
+ } else if (cmd instanceof SecurityGroupRulesCmd) {
+ return execute((SecurityGroupRulesCmd) cmd);
+ } else if (cmd instanceof CheckNetworkCommand) {
+ return execute((CheckNetworkCommand) cmd);
+ } else if (cmd instanceof DestroyCommand) {
+ return execute((DestroyCommand) cmd);
+ } else if (cmd instanceof PlugNicCommand) {
+ return execute((PlugNicCommand) cmd);
+ } else if (cmd instanceof UnPlugNicCommand) {
+ return execute((UnPlugNicCommand) cmd);
+ } else {
+ return Answer.createUnsupportedCommandAnswer(cmd);
+ }
+ } catch (Throwable t) {
+ s_logger.debug(t.getMessage(), t);
+ return new Answer(cmd, false, t.getMessage());
+ }
+ }
+
+ @Override
+ public PingCommand getCurrentStatus(long id) {
+ try {
+ if (!ipmiPing()) {
+ s_logger.warn("Cannot ping ipmi nic " + _ip);
+ return null;
+ }
+ } catch (Exception e) {
+ s_logger.debug("Cannot ping ipmi nic " + _ip, e);
+ return null;
+ }
+
+ return new PingRoutingCommand(getType(), id, null);
+ }
}
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java
index 71e7ae766dc0..62fdf553edea 100644
--- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java
@@ -162,6 +162,7 @@ protected PreparePxeServerAnswer execute(PreparePxeServerCommand cmd) {
if (!SSHCmdHelper.sshExecuteCmd(sshConnection, script)) {
return new PreparePxeServerAnswer(cmd, "prepare PING at " + _ip + " failed, command:" + script);
}
+
s_logger.debug("Prepare Ping PXE server successfully");
return new PreparePxeServerAnswer(cmd);
@@ -190,6 +191,7 @@ protected Answer execute(PrepareCreateTemplateCommand cmd) {
if (!SSHCmdHelper.sshExecuteCmd(sshConnection, script)) {
return new Answer(cmd, false, "prepare for creating template failed, command:" + script);
}
+
s_logger.debug("Prepare for creating template successfully");
return new Answer(cmd, true, "Success");
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeElement.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeElement.java
index 17ec90210163..724bed768955 100644
--- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeElement.java
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeElement.java
@@ -20,6 +20,7 @@
import com.cloud.baremetal.database.BaremetalPxeVO;
import com.cloud.baremetal.manager.BaremetalVlanManager;
+import com.cloud.baremetal.manager.VlanType;
import com.cloud.dc.DataCenter;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.Pod;
@@ -28,12 +29,15 @@
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.ResourceUnavailableException;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.network.Network;
import com.cloud.network.Network.Capability;
import com.cloud.network.Network.GuestType;
import com.cloud.network.Network.Provider;
import com.cloud.network.Network.Service;
+import com.cloud.network.Networks;
import com.cloud.network.Networks.TrafficType;
import com.cloud.network.PhysicalNetworkServiceProvider;
import com.cloud.network.element.NetworkElement;
@@ -72,6 +76,8 @@ public class BaremetalPxeElement extends AdapterBase implements NetworkElement {
BaremetalVlanManager vlanMgr;
@Inject
DataCenterDao zoneDao;
+ @Inject
+ HostDao hostDao;
static {
Capability cap = new Capability(BaremetalPxeManager.BAREMETAL_PXE_CAPABILITY);
@@ -146,7 +152,10 @@ public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm
}
private void prepareVlan(Network network, DeployDestination dest) {
- vlanMgr.prepareVlan(network, dest);
+
+ String macAddress = dest.getHost().getPrivateMacAddress();
+ int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(network.getBroadcastUri()));
+ vlanMgr.prepareVlan(vlan, macAddress, VlanType.UNTAGGED);
}
@Override
@@ -164,7 +173,11 @@ public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm
}
private void releaseVlan(Network network, VirtualMachineProfile vm) {
- vlanMgr.releaseVlan(network, vm);
+
+ HostVO host = hostDao.findById(vm.getVirtualMachine().getHostId());
+ int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(network.getBroadcastUri()));
+
+ vlanMgr.releaseVlan(vlan, host.getPrivateMacAddress(), VlanType.UNTAGGED);
}
@Override
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalVlanStruct.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalVlanStruct.java
index 32d9b33a3448..cc060de0f989 100644
--- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalVlanStruct.java
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalVlanStruct.java
@@ -22,6 +22,8 @@
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.baremetal.networkservice;
+import com.cloud.baremetal.manager.VlanType;
+
/**
* Created by frank on 9/2/14.
*/
@@ -33,6 +35,8 @@ public class BaremetalVlanStruct {
private String hostMac;
private String port;
private int vlan;
+ private VlanType type;
+ private boolean removeAll;
public String getSwitchType() {
return switchType;
@@ -89,4 +93,20 @@ public int getVlan() {
public void setVlan(int vlan) {
this.vlan = vlan;
}
+
+ public void setVlanType(VlanType type){
+ this.type = type;
+ }
+
+ public VlanType getVlanType(){
+ return type;
+ }
+
+ public boolean isRemoveAll() {
+ return removeAll;
+ }
+
+ public void setRemoveAll(boolean removeAll) {
+ this.removeAll = removeAll;
+ }
}
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BrocadeFastIronBaremetalSwitchBackend.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BrocadeFastIronBaremetalSwitchBackend.java
new file mode 100644
index 000000000000..ebcc88ba0883
--- /dev/null
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BrocadeFastIronBaremetalSwitchBackend.java
@@ -0,0 +1,204 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+package com.cloud.baremetal.networkservice;
+
+import com.cloud.baremetal.manager.VlanType;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.jcraft.jsch.Channel;
+import com.jcraft.jsch.JSch;
+import com.jcraft.jsch.JSchException;
+import com.jcraft.jsch.Session;
+import org.apache.log4j.Logger;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.UnknownHostException;
+
+public class BrocadeFastIronBaremetalSwitchBackend implements BaremetalSwitchBackend {
+
+ private static final Logger s_logger = Logger.getLogger(BrocadeFastIronBaremetalSwitchBackend.class);
+ public static final String TYPE = "Brocade";
+
+ @Override
+ public String getSwitchBackendType() {
+ return TYPE;
+ }
+
+ @Override
+ public void prepareVlan(BaremetalVlanStruct struct) {
+ try {
+ BrocadeManager bm = new BrocadeManager(struct.getSwitchIp(), struct.getSwitchUsername(), struct.getSwitchPassword());
+ bm.assignVlanToPort(struct.getPort(), struct.getVlan(), struct.getVlanType());
+ } catch (InterruptedException | JSchException | IOException e) {
+ s_logger.warn("Error assigning VLAN to PORT", e);
+ throw new CloudRuntimeException(e);
+ }
+ }
+
+ @Override
+ public void removePortFromVlan(BaremetalVlanStruct struct) {
+ try {
+ BrocadeManager bm = new BrocadeManager(struct.getSwitchIp(), struct.getSwitchUsername(), struct.getSwitchPassword());
+ bm.removePortFromVlan(struct.getPort(), struct.getVlan(), struct.getVlanType());
+ } catch (InterruptedException | JSchException | IOException e) {
+ s_logger.warn("Error removing VLAN", e);
+ throw new CloudRuntimeException(e);
+ }
+ }
+
+ private class BrocadeManager {
+ String user;
+ String password;
+ String ip;
+ int port;
+
+ public BrocadeManager(String ip, String user, String password) throws UnknownHostException {
+ this.user = user;
+ this.password = password;
+ this.ip = ip;
+ this.port = 22;
+
+ }
+
+ public void assignVlanToPort(String port, int vlanId, VlanType vlanType) throws IOException, JSchException, InterruptedException {
+
+ String[] dualModeCmds = {
+ "en\n",
+ this.password + "\n",
+ "config t\n",
+ "int e " + port + "\n",
+ "dual-mode " + Integer.toString(vlanId) + "\n",
+ "end\n",
+ "exit\n",
+ "exit\n"
+ };
+
+ String[] tagCommands = {
+ "en\n",
+ this.password + "\n",
+ "config t\n",
+ "vlan " + Integer.toString(vlanId) + "\n",
+ "tagged e " + port + "\n",
+ "end\n",
+ "exit\n",
+ "exit\n"
+ };
+
+ executeCommands(tagCommands);
+
+ // If it is an untagged VLAN, change the interface to dual mode and add it as a default VLAN
+ if (vlanType.equals(VlanType.UNTAGGED)) {
+ executeCommands(dualModeCmds);
+ }
+
+ // TODO: Check if vlan assignment was successful
+ }
+
+ public void removePortFromVlan(String port, int vlanId, VlanType vlanType) throws JSchException, InterruptedException {
+
+ String[] dualModeCmds = {
+ "en\n",
+ this.password + "\n",
+ "config t\n",
+ "int e " + port + "\n",
+ "no dual-mode " + Integer.toString(vlanId) + "\n",
+ "end\n",
+ "exit\n",
+ "exit\n"
+ };
+
+ String[] untagCmds = {
+ "en\n",
+ this.password + "\n",
+ "config t\n",
+ "vlan " + Integer.toString(vlanId) + "\n",
+ "no tagged " + " e " + port + "\n",
+ "end\n",
+ "exit\n",
+ "exit\n"
+ };
+
+ if(vlanType.equals(VlanType.UNTAGGED)){
+ executeCommands(dualModeCmds);
+ }
+ executeCommands(untagCmds);
+
+ // TODO: Check if vlan removal was successful
+ }
+
+ private void executeCommands(String[] cmds) throws JSchException, InterruptedException {
+
+ CommandInputStream cs = new CommandInputStream(cmds);
+
+ JSch jsch=new JSch();
+ Session session=jsch.getSession(user, ip, port);
+ session.setPassword(password);
+ session.setConfig("StrictHostKeyChecking", "no");
+
+ session.connect(300000);
+
+ Channel channel = session.openChannel("shell");
+ channel.setInputStream(cs);
+ channel.connect(3 * 300000);
+
+ while (!channel.isClosed()){
+ Thread.sleep(1000);
+ }
+ }
+ }
+
+ private class CommandInputStream extends InputStream {
+
+ private final String[] cmds;
+ private int curCmd;
+ private int curIdx;
+
+ CommandInputStream(String[] cmds) {
+ this.cmds = cmds;
+ this.curCmd = 0;
+ this.curIdx = 0;
+ }
+ @Override
+ public int read() throws IOException {
+
+ if (curCmd >= cmds.length)
+ return -1;
+
+
+ String cmd = cmds[curCmd];
+
+ char ch = cmd.charAt(curIdx);
+ curIdx += 1;
+
+ if (ch == '\n'){
+
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+
+ curCmd++;
+ curIdx = 0;
+ s_logger.info("[BrocadeSwitchCmd] " + cmd);
+ }
+
+ return (int)ch;
+ }
+ }
+}
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/JuniperBaremetalSwitchBackend.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/JuniperBaremetalSwitchBackend.java
new file mode 100644
index 000000000000..cc8c7b0addc9
--- /dev/null
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/JuniperBaremetalSwitchBackend.java
@@ -0,0 +1,228 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Automatically generated by addcopyright.py at 01/29/2013
+// Apache License, Version 2.0 (the "License"); you may not use this
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+
+package com.cloud.baremetal.networkservice;
+
+import com.cloud.baremetal.manager.BaremetalManagerImpl;
+import com.cloud.baremetal.manager.VlanType;
+import com.cloud.utils.exception.CloudRuntimeException;
+import net.juniper.netconf.Device;
+import net.juniper.netconf.NetconfException;
+import net.juniper.netconf.XML;
+import net.juniper.netconf.XMLBuilder;
+import org.apache.log4j.Logger;
+import org.w3c.dom.Document;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.xml.sax.SAXException;
+
+import javax.xml.parsers.ParserConfigurationException;
+import javax.xml.xpath.XPath;
+import javax.xml.xpath.XPathConstants;
+import javax.xml.xpath.XPathExpression;
+import javax.xml.xpath.XPathExpressionException;
+import javax.xml.xpath.XPathFactory;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class JuniperBaremetalSwitchBackend implements BaremetalSwitchBackend {
+
+ private static final Logger s_logger = Logger.getLogger(JuniperBaremetalSwitchBackend.class);
+ public static final String TYPE = "Juniper";
+ public static final int NETCONF_PORT = 22;
+
+    /** @return the backend type identifier this plugin handles ("Juniper"). */
+    @Override
+    public String getSwitchBackendType() {
+        return TYPE;
+    }
+
+    /**
+     * Opens a NETCONF session to the switch and adds the requested VLAN to the
+     * port. Any failure is logged, the session is closed, and the error is then
+     * re-thrown as a CloudRuntimeException.
+     */
+    @Override
+    public synchronized void prepareVlan(BaremetalVlanStruct struct) {
+        JuniperDevice juniper = null;
+        CloudRuntimeException failure = null;
+        try {
+            juniper = new JuniperDevice(struct.getSwitchIp(), NETCONF_PORT, struct.getSwitchUsername(), struct.getSwitchPassword());
+            juniper.addVlanToInterface(struct.getPort(), struct.getVlan(), struct.getVlanType());
+        } catch (ParserConfigurationException e) {
+            String mesg = "Invalid configuration to initiate netconf session to the backend switch";
+            s_logger.error(mesg, e);
+            failure = new CloudRuntimeException(mesg, e);
+        } catch (SAXException | IOException | XPathExpressionException e) {
+            String mesg = "Unable to add VLAN to Port";
+            s_logger.error(mesg, e);
+            failure = new CloudRuntimeException(mesg, e);
+        }
+        closeConnection(juniper, failure);
+    }
+
+    /**
+     * Removes either one VLAN or (when removeAll is set) every VLAN from the port
+     * described by the struct. The NETCONF session is always closed before any
+     * failure is re-thrown as a CloudRuntimeException.
+     */
+    @Override
+    public synchronized void removePortFromVlan(BaremetalVlanStruct struct) {
+        JuniperDevice juniper = null;
+        CloudRuntimeException cloudRuntimeException = null;
+        try {
+            juniper = new JuniperDevice(struct.getSwitchIp(), NETCONF_PORT, struct.getSwitchUsername(), struct.getSwitchPassword());
+            if (struct.isRemoveAll()) {
+                juniper.clearAllVlansFromInterface(struct.getPort());
+            } else {
+                juniper.removeVlanFromInterface(struct.getPort(), struct.getVlan(), struct.getVlanType());
+            }
+        } catch (ParserConfigurationException e) {
+            String mesg = "Invalid configuration to initiate netconf session to the backend switch";
+            s_logger.error(mesg, e);
+            cloudRuntimeException = new CloudRuntimeException(mesg, e);
+        } catch (SAXException | IOException | XPathExpressionException e) {
+            // Fix: XPathExpressionException was previously printStackTrace()'d and
+            // silently swallowed; it now fails the operation like the other errors.
+            String mesg = String.format("Unable to remove VLAN %d from Port: %s, type : %s", struct.getVlan(), struct.getPort(), struct.getVlanType());
+            s_logger.error(mesg, e);
+            cloudRuntimeException = new CloudRuntimeException(mesg, e);
+        }
+        closeConnection(juniper, cloudRuntimeException);
+    }
+
+    /**
+     * Closes the NETCONF session (if one was opened) and then re-throws the
+     * deferred failure, if any, so cleanup always happens before propagation.
+     */
+    private void closeConnection(JuniperDevice juniper, CloudRuntimeException cloudRuntimeException) {
+        if (juniper != null) {
+            juniper.close();
+        }
+        if (cloudRuntimeException == null) {
+            return;
+        }
+        throw cloudRuntimeException;
+    }
+
+ public class JuniperDevice {
+ Device device;
+
+        /**
+         * Opens a NETCONF session to the switch at host:port with the given
+         * credentials; a connect failure is logged and re-thrown to the caller.
+         */
+        public JuniperDevice(String host, int port, String user, String password) throws ParserConfigurationException, NetconfException {
+            device = new Device(host, user, password, null, port);
+            try {
+                device.connect();
+            } catch (Exception e) {
+                s_logger.error("Error while connecting to the switch", e);
+                throw e;
+            }
+        }
+
+        /** Closes the underlying NETCONF device session. */
+        protected void close() {
+            device.close();
+        }
+
+        /**
+         * Adds a VLAN to a switch port as a trunk member. For UNTAGGED the VLAN is
+         * also made the native VLAN. LLDP is enabled on the port only while it sits
+         * on the PXE VLAN and is deleted otherwise.
+         */
+        public void addVlanToInterface(String interfaceName, int vlanId, VlanType vlanType) throws IOException, SAXException, XPathExpressionException, ParserConfigurationException {
+            // Fix: format each line immediately instead of running String.format a
+            // second time over a template that already contains formatted text --
+            // the original would break if interfaceName ever contained a '%'.
+            StringBuilder config = new StringBuilder();
+            config.append(String.format("set interfaces %s unit 0 family ethernet-switching vlan members %d\n", interfaceName, vlanId));
+            config.append(String.format("set interfaces %s unit 0 family ethernet-switching interface-mode trunk\n", interfaceName));
+
+            if (vlanId == BaremetalManagerImpl.pxeVlan.value()) {
+                config.append(String.format("set protocols lldp interface %s enable\n", interfaceName));
+            } else {
+                config.append(String.format("delete protocols lldp interface %s\n", interfaceName));
+            }
+
+            if (vlanType.equals(VlanType.UNTAGGED)) {
+                config.append(String.format("set interfaces %s native-vlan-id %d", interfaceName, vlanId));
+            }
+
+            loadAndCommitConfigs(config.toString());
+        }
+
+        /**
+         * Removes one VLAN from a port; for an UNTAGGED VLAN the native-vlan-id is
+         * deleted as well. The change is staged, the candidate config is inspected,
+         * and the final config is committed in one shot.
+         *
+         * NOTE(review): the empty "" literals below look like content lost in
+         * transit (an XML filter string and a marker to search for in the candidate
+         * config). As written, contains("") is always true, so the
+         * "delete ... family ethernet-switching" line is never appended -- confirm
+         * against the original source before relying on this.
+         */
+        public void removeVlanFromInterface(String interfaceName, int vlanId, VlanType vlanType) throws SAXException, IOException {
+            String config = "";
+
+            if (vlanType.equals(VlanType.UNTAGGED)) {
+                config += String.format("delete interfaces %s native-vlan-id\n", interfaceName);
+            }
+
+            config += String.format("delete interfaces %s unit 0 family ethernet-switching vlan members %d\n", interfaceName, vlanId);
+
+            this.device.loadSetConfiguration(config);
+
+            XML candidateConfig = this.device.getCandidateConfig("" + interfaceName + "");
+
+            if (!candidateConfig.toString().contains("")) {
+                config += String.format("delete interfaces %s unit 0 family ethernet-switching", interfaceName);
+            }
+
+            loadAndCommitConfigs(config);
+        }
+
+        /**
+         * Strips every VLAN (other than the default VLAN 1) from a port, removes
+         * its native VLAN id, and finally deletes the ethernet-switching family.
+         */
+        void clearAllVlansFromInterface(String interfaceName) throws IOException, SAXException, XPathExpressionException, ParserConfigurationException {
+            StringBuilder config = new StringBuilder();
+            config.append(String.format("delete interfaces %s native-vlan-id\n", interfaceName));
+
+            for (int vl : this.getInterfaceVlans(interfaceName)) {
+                // VLAN 1 is the default VLAN and is left untouched.
+                if (vl > 1) {
+                    config.append(String.format("delete interfaces %s unit 0 family ethernet-switching vlan members %d\n", interfaceName, vl));
+                }
+            }
+
+            config.append(String.format("delete interfaces %s unit 0 family ethernet-switching", interfaceName));
+
+            loadAndCommitConfigs(config.toString());
+        }
+
+        /**
+         * Loads the given "set"-style configuration into the candidate config and
+         * commits it. On a failed commit the device's last RPC reply is logged for
+         * diagnosis before the exception is propagated.
+         */
+        private void loadAndCommitConfigs(String config) throws IOException, SAXException {
+            this.device.loadSetConfiguration(config);
+            try {
+                this.device.commit();
+            } catch (Exception e) {
+                if (device != null) {
+                    // Surface the switch's own error reply, not just the exception.
+                    s_logger.error(this.device.getLastRPCReply());
+                }
+                throw e;
+            }
+        }
+
+        /**
+         * Queries the switch for the VLAN ids currently configured on the given
+         * interface (unit 0) via the get-ethernet-switching-interface-information
+         * RPC and an XPath over //l2iff-interface-vlan-id.
+         *
+         * NOTE(review): this chunk is truncated/garbled from the for-loop down --
+         * the loop header is fused with an unrelated "timeout" assignment (generic
+         * type parameters and many intervening lines, including the end of this
+         * class and the start of the next file's class, appear to have been
+         * stripped). Restore from the original source before use.
+         */
+        private List getInterfaceVlans(String interfaceName) throws ParserConfigurationException, XPathExpressionException {
+            List interfaceVlans = new ArrayList<>();
+
+            XMLBuilder rpcBuilder = new XMLBuilder();
+            XML vlanQuery = rpcBuilder.createNewRPC("get-ethernet-switching-interface-information").append("interface-name", interfaceName + ".0");
+            XML out = getConfig(vlanQuery.toString());
+
+            assert out != null;
+
+            Document doc = out.getOwnerDocument();
+            XPathFactory xPathfactory = XPathFactory.newInstance();
+            XPath xpath = xPathfactory.newXPath();
+            XPathExpression expr = xpath.compile("//l2iff-interface-vlan-id");
+
+            NodeList nl = (NodeList) expr.evaluate(doc, XPathConstants.NODESET);
+            for (int i =0; i DEFAULT_TIMEOUT_SEC ? timeoutSec : DEFAULT_TIMEOUT_SEC;
+
+        }
+
+    /**
+     * Signs an outgoing MAAS API request with OAuth 1.0 PLAINTEXT and attaches
+     * the Authorization header.
+     *
+     * NOTE(review): the PLAINTEXT signature is built as "&" + token secret, i.e.
+     * with an empty consumer-secret part -- this matches MAAS's API-key scheme
+     * but should be confirmed. The raw Map below also appears to have lost its
+     * type parameters in transit.
+     */
+    private void signRequest(HttpRequest request) {
+
+        // OAuth timestamp is whole seconds since the epoch.
+        long timestamp = System.currentTimeMillis() / 1000;
+        Map oauthParams = new HashMap();
+
+        //oauthParams.put("realm", "");
+        oauthParams.put("oauth_version", "1.0");
+        oauthParams.put("oauth_signature_method", "PLAINTEXT");
+
+        // Nonce: random UUID with dashes removed.
+        oauthParams.put("oauth_nonce", UUID.randomUUID().toString().replaceAll("-", ""));
+        oauthParams.put("oauth_timestamp", Long.toString(timestamp));
+
+        oauthParams.put("oauth_consumer_key", conn.getConsumerKey());
+        oauthParams.put("oauth_token", conn.getKey());
+
+        String signature = "";
+        try {
+            signature = "&" + URLEncoder.encode(conn.getSecret(), ENCODING_UTF8);
+
+            oauthParams.put("oauth_signature", signature);
+
+            String oauthHeaderValue = buildOauthHeader(oauthParams);
+
+            request.setHeader(HTTP_HEADER_AUTHORIZATION, oauthHeaderValue);
+        } catch (UnsupportedEncodingException e) {
+            s_logger.warn(e.getMessage());
+            throw new CloudRuntimeException("Unable to sign request " + e.getMessage());
+        }
+    }
+
+    /**
+     * Builds the OAuth 1.0 Authorization header value from the given parameters.
+     * Each value is percent-encoded and parameters are comma separated.
+     *
+     * Fix: restores the stripped generic type parameters (raw Map made
+     * entry.getValue() an Object, which does not compile against URLEncoder),
+     * and trims the whole trailing ", " separator -- the original deleted only
+     * the comma, leaving a dangling space at the end of the header value.
+     */
+    private static String buildOauthHeader(Map<String, String> oauthParams) throws UnsupportedEncodingException {
+
+        StringBuilder header = new StringBuilder();
+        header.append("OAuth ");
+        header.append(" realm=\"\", ");
+
+        for (Map.Entry<String, String> entry : oauthParams.entrySet()) {
+            header.append(String.format("%s=\"%s\", ", entry.getKey(), URLEncoder.encode(entry.getValue(), ENCODING_UTF8)));
+        }
+
+        int len = header.length();
+        header.delete(len - 2, len);
+
+        return header.toString();
+    }
+
+    /**
+     * Sends a signed request to the MAAS API and returns the response body, or
+     * null for 204 No Content. Throws CloudRuntimeException on transport errors
+     * and on any HTTP status >= 400.
+     */
+    public String executeApiRequest(HttpRequest request) throws IOException {
+
+        // Fix: removed the dead null-check/assert (HttpClientBuilder.build() never
+        // returns null) and close the client in finally -- the original leaked a
+        // connection pool on every call.
+        CloseableHttpClient httpclient = HttpClientBuilder.create().build();
+        String response = null;
+
+        try {
+            if (request.getFirstHeader(HEADER_CONTENT_TYPE) == null) {
+                request.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_JSON);
+            }
+            request.setHeader(HEADER_ACCEPT, HEADER_VALUE_JSON);
+            request.setHeader(HEADER_ACCEPT_ENCODING, HEADER_VALUE_TEXT_PLAIN);
+
+            signRequest(request);
+
+            HttpHost target = new HttpHost(conn.getIp(), conn.getPort(), conn.getScheme());
+
+            HttpResponse httpResponse = httpclient.execute(target, request);
+
+            HttpEntity entity = httpResponse.getEntity();
+            StatusLine status = httpResponse.getStatusLine();
+
+            if (status.getStatusCode() != HttpStatus.SC_NO_CONTENT) {
+                response = EntityUtils.toString(entity);
+
+                if (status.getStatusCode() >= HttpStatus.SC_BAD_REQUEST) {
+                    // check if this is an error
+                    String errMesg = "Error: Non successful response: " + request.getRequestLine() + response;
+                    s_logger.warn(errMesg);
+                    throw new CloudRuntimeException(errMesg);
+                }
+            }
+        } catch (IOException e) {
+            String errMesg = "Error while trying to get HTTP object: " + request.getRequestLine();
+            s_logger.warn(errMesg, e);
+            throw new CloudRuntimeException("Error while sending request. Error " + e.getMessage());
+        } finally {
+            httpclient.close();
+        }
+
+        return response;
+    }
+
+    /**
+     * Enlists a new machine in MAAS using form-encoded power/NIC parameters and
+     * blocks until the node reaches the Ready state.
+     */
+    public MaasObject.MaasNode addMachine(MaasObject.AddMachineParameters addMachineParameters) throws IOException {
+
+        HttpPost addMachineReq = new HttpPost(getApiUrl("machines"));
+
+        // Fix: the original first attached a JSON StringEntity and then immediately
+        // overwrote it with the form entity below; the dead assignment is removed.
+        List params = new ArrayList<>();
+        params.add(new BasicNameValuePair("architecture", addMachineParameters.getArch()));
+        params.add(new BasicNameValuePair("power_type", addMachineParameters.getPowerType()));
+        params.add(new BasicNameValuePair("mac_addresses", addMachineParameters.getMacAddress()));
+        params.add(new BasicNameValuePair("power_parameters_power_user", addMachineParameters.getPowerUser()));
+        params.add(new BasicNameValuePair("power_parameters_power_pass", addMachineParameters.getPowerPassword()));
+        params.add(new BasicNameValuePair("power_parameters_power_address", addMachineParameters.getPowerAddress()));
+        addMachineReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8));
+        addMachineReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM);
+
+        String response = executeApiRequest(addMachineReq);
+
+        MaasObject.MaasNode node = gson.fromJson(response, MaasObject.MaasNode.class);
+
+        return waitTillReady(node.systemId);
+    }
+
+    /**
+     * Deletes the machine with the given system id from MAAS. Always returns
+     * true; failures surface as exceptions from the API call.
+     */
+    public boolean deleteMachine(String systemId) throws IOException {
+        executeApiRequest(new HttpDelete(getApiUrl("machines", systemId)));
+        s_logger.info("deleted MAAS machine");
+        return true;
+    }
+
+    /**
+     * Allocates a specific MAAS machine (identified by system id) to the calling
+     * user via the "allocate" operation.
+     */
+    public void allocateMachine(MaasObject.AllocateMachineParameters allocateMachineParameters) throws IOException {
+        HttpPost allocateReq = new HttpPost(addOperationToApiUrl(getApiUrl("machines"), "allocate"));
+
+        List params = new ArrayList<>();
+        params.add(new BasicNameValuePair("system_id", allocateMachineParameters.getSystemId()));
+        allocateReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8));
+        allocateReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM);
+
+        executeApiRequest(allocateReq);
+    }
+
+    /** Ensures the tag exists in MAAS, then attaches it to the machine. */
+    public void addTagToMachine(String systemId, String tagName) throws IOException {
+        createTagIfNotExist(tagName);
+        modifyTagsOnMachine(systemId, "add", tagName);
+    }
+
+    /** Detaches the tag from the machine and deletes it if no machine uses it anymore. */
+    public void removeTagFromMachine(String systemId, String tagName) throws IOException {
+        modifyTagsOnMachine(systemId, "remove", tagName);
+        deleteTagIfNotUsed(tagName, "machines");
+    }
+
+    /**
+     * Creates the tag in MAAS if it does not already exist. Existence is probed
+     * with a GET; note the broad Exception catch means any failure (including a
+     * transient API error) is treated as "tag missing" and triggers a create.
+     */
+    private void createTagIfNotExist(String tagName) throws IOException {
+        try {
+            // trying to see if tag exists or not
+            HttpGet req = new HttpGet(getApiUrl("tags", tagName));
+            executeApiRequest(req);
+        } catch (Exception e) {
+            // tag does not exist on MaaS server, create it now
+            HttpPost req = new HttpPost(getApiUrl("tags"));
+
+            List params = new ArrayList<>();
+            params.add(new BasicNameValuePair("name", tagName));
+            req.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8));
+            req.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM);
+
+            executeApiRequest(req);
+        }
+    }
+
+    /**
+     * Deletes the tag from MAAS when no node of the given target type (e.g.
+     * "machines") still carries it.
+     */
+    private void deleteTagIfNotUsed(String tagName, String target) throws IOException {
+        // List all nodes of the target type still tagged with tagName.
+        String response = executeApiRequest(new HttpGet(addOperationToApiUrl(getApiUrl("tags", tagName), target)));
+
+        List nodes = gson.fromJson(response, new TypeToken>(){}.getType());
+
+        if (nodes.isEmpty()) {
+            executeApiRequest(new HttpDelete(getApiUrl("tags", tagName)));
+        }
+    }
+
+    /**
+     * Adds or removes a tag on a machine via the tag's "update_nodes" operation.
+     * action must be "add" or "remove"; for "remove", the tag's existence is
+     * probed first and a missing tag makes this a silent no-op.
+     */
+    private void modifyTagsOnMachine(String systemId, String action, String tagName) throws UnsupportedEncodingException, IOException {
+        if (action.equals("remove")) {
+            try {
+                // trying to see if tag exists or not
+                HttpGet req = new HttpGet(getApiUrl("tags", tagName));
+                executeApiRequest(req);
+            } catch (Exception e) {
+                // do not try to delete a tag from a machine if the tag doesn't exist!
+                return;
+            }
+        }
+
+        String url = addOperationToApiUrl(getApiUrl("tags", tagName), "update_nodes");
+        HttpPost req = new HttpPost(url);
+
+        List params = new ArrayList<>();
+        params.add(new BasicNameValuePair(action, systemId));
+        req.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8));
+        req.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM);
+
+        executeApiRequest(req);
+    }
+
+    /**
+     * Starts OS deployment on an allocated machine and blocks until MAAS reports
+     * it as Deployed.
+     */
+    public MaasObject.MaasNode deployMachine(String systemId, MaasObject.DeployMachineParameters deployMachineParameters) throws IOException {
+        HttpPost deployMachineReq = new HttpPost(addOperationToApiUrl(getApiUrl("machines", systemId), "deploy"));
+
+        List params = new ArrayList<>();
+        params.add(new BasicNameValuePair("distro_series", deployMachineParameters.getDistroSeries()));
+        deployMachineReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8));
+        deployMachineReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM);
+
+        executeApiRequest(deployMachineReq);
+
+        return waitTillDeployed(systemId);
+    }
+
+    /**
+     * Releases a deployed machine back to the pool and waits until it is Ready.
+     * "quick_erase" is sent as the inverse of fullErase, so a full erase disables
+     * the quick path; "erase" controls whether any disk erase happens at all.
+     */
+    public MaasObject.MaasNode releaseMachine(String systemId, boolean eraseDisk, boolean fullErase) throws IOException {
+
+        String url = addOperationToApiUrl(getApiUrl("machines", systemId), "release");
+        HttpPost releaseMachineReq = new HttpPost(url);
+
+        List params = new ArrayList<>();
+        params.add(new BasicNameValuePair("erase", Boolean.toString(eraseDisk)));
+        params.add(new BasicNameValuePair("quick_erase", Boolean.toString(!fullErase)));
+        releaseMachineReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8));
+        releaseMachineReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM);
+
+        executeApiRequest(releaseMachineReq);
+
+        return waitTillReady(systemId);
+    }
+
+    /** Fetches the MAAS node with the given system id. */
+    public MaasObject.MaasNode getMaasNode(String systemId) throws IOException {
+        String response = executeApiRequest(new HttpGet(getApiUrl("machines", systemId)));
+        return gson.fromJson(response, MaasObject.MaasNode.class);
+    }
+
+ public MaasObject.MaasNode getMaasNodeByMac(String macAddress) throws IOException {
+
+ for (MaasObject.MaasNode node : getMaasNodes()) {
+ if (node.bootInterface.macAddress.equals(macAddress.toLowerCase())) {
+ return node;
+ }
+ }
+
+ return null;
+ }
+
+    /** Lists all MAAS machines regardless of resource pool. */
+    public List getMaasNodes() throws IOException {
+        return getMaasNodes(null);
+    }
+
+    /**
+     * Lists MAAS machines, optionally filtered by resource pool name.
+     *
+     * @param pool pool name to filter on, or null/empty for all machines
+     */
+    public List getMaasNodes(String pool) throws IOException {
+        String url = getApiUrl("machines");
+
+        if (StringUtils.isNotEmpty(pool)) {
+            // Fix: URL-encode the pool name so names containing spaces or other
+            // reserved characters still produce a valid query string.
+            url += "?pool=" + URLEncoder.encode(pool, ENCODING_UTF8);
+        }
+
+        HttpGet maasNodeReq = new HttpGet(url);
+
+        String response = executeApiRequest(maasNodeReq);
+
+        Type listType = new TypeToken>(){}.getType();
+        return gson.fromJson(response, listType);
+    }
+
+    /**
+     * Polls MAAS until the node reaches the Ready state or the configured timeout
+     * elapses. Returns null if the polling thread is interrupted (the interrupt
+     * flag is restored); throws CloudRuntimeException on timeout.
+     */
+    public MaasObject.MaasNode waitTillReady(String systemId) throws IOException {
+
+        int to = this.timeout;
+        MaasObject.MaasNode maasNode = null;
+        do {
+            maasNode = getMaasNode(systemId);
+            try {
+                Thread.sleep(POLL_TIMEOUT_SEC * 1000);
+            } catch (InterruptedException e) {
+                // Fix: restore the interrupt status instead of swallowing it.
+                Thread.currentThread().interrupt();
+                return null;
+            }
+            to -= POLL_TIMEOUT_SEC;
+        } while ((maasNode != null && !maasNode.statusName.equals(MaasObject.MaasState.Ready.toString())) && to > 0);
+
+        if (maasNode == null || (!maasNode.statusName.equals(MaasObject.MaasState.Ready.toString()))) {
+            throw new CloudRuntimeException("Operation Timed out: Unable to add node to MAAS with SystemID " + systemId);
+        }
+
+        return maasNode;
+    }
+
+    /**
+     * Polls MAAS until the node reaches the Deployed state or the configured
+     * timeout elapses. Returns null if the polling thread is interrupted (the
+     * interrupt flag is restored); throws CloudRuntimeException on timeout.
+     */
+    private MaasObject.MaasNode waitTillDeployed(String systemId) throws IOException {
+
+        int to = this.timeout;
+        MaasObject.MaasNode maasNode = null;
+        do {
+            maasNode = getMaasNode(systemId);
+            try {
+                Thread.sleep(POLL_TIMEOUT_SEC * 1000);
+            } catch (InterruptedException e) {
+                // Fix: restore the interrupt status instead of swallowing it.
+                Thread.currentThread().interrupt();
+                return null;
+            }
+            to -= POLL_TIMEOUT_SEC;
+        } while ((maasNode != null && !maasNode.statusName.equals(MaasObject.MaasState.Deployed.toString())) && to > 0);
+
+        if (maasNode == null || (!maasNode.statusName.equals(MaasObject.MaasState.Deployed.toString()))) {
+            throw new CloudRuntimeException("Unable to deploy node to MAAS with SystemID " + systemId);
+        }
+
+        return maasNode;
+    }
+
+    /**
+     * (Re)links a node interface to a subnet. If linkId is non-null the existing
+     * link is removed first via "unlink_subnet"; the interface is then linked to
+     * subnetId in DHCP mode or LINK_UP mode depending on enableDhcp, with
+     * force=True.
+     *
+     * NOTE(review): subnetId is dereferenced unconditionally below
+     * (Integer.toString auto-unboxes), so callers must not pass null for it.
+     */
+    public void setInterface(String systemId, int interfaceId, Integer linkId, Integer subnetId, boolean enableDhcp) throws IOException {
+        String url;
+        List params;
+
+        if (linkId != null) {
+            url = addOperationToApiUrl(
+                getApiUrl("nodes", systemId, "interfaces", Integer.toString(interfaceId)),
+                "unlink_subnet"
+            );
+
+            HttpPost unlinkReq = new HttpPost(url);
+            params = new ArrayList<>();
+            params.add(new BasicNameValuePair("id", Integer.toString(linkId)));
+            unlinkReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8));
+            unlinkReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM);
+            executeApiRequest(unlinkReq);
+        }
+
+        url = addOperationToApiUrl(
+            getApiUrl("nodes", systemId, "interfaces", Integer.toString(interfaceId)),
+            "link_subnet"
+        );
+
+        HttpPost linkReq = new HttpPost(url);
+        params = new ArrayList<>();
+        params.add(new BasicNameValuePair("subnet", Integer.toString(subnetId)));
+        params.add(new BasicNameValuePair("mode", enableDhcp ? MODE_DHCP : MODE_LINK_UP));
+        params.add(new BasicNameValuePair("force", "True"));
+        linkReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8));
+        linkReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM);
+        executeApiRequest(linkReq);
+    }
+
+    /**
+     * Returns the first subnet whose VLAN has DHCP enabled, or null when none do.
+     */
+    public MaasObject.MaasSubnet getDhcpSubnet() throws IOException {
+        String response = executeApiRequest(new HttpGet(getApiUrl("subnets")));
+
+        Type listType = new TypeToken>(){}.getType();
+        List subnets = gson.fromJson(response, listType);
+
+        for (MaasObject.MaasSubnet subnet : subnets) {
+            if (subnet.vlan.dhcpOn) {
+                return subnet;
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Bonds the given physical interfaces of a node into a single interface.
+     * Note the bond name is hard-coded to "bond0", so only one bond per node is
+     * supported by this helper.
+     */
+    public MaasObject.MaasInterface createBondInterface(String systemId, List phyInterfaceIds) throws IOException {
+        String url = addOperationToApiUrl(getApiUrl("nodes", systemId, "interfaces"), "create_bond");
+        HttpPost createBondReq = new HttpPost(url);
+
+        List params = new ArrayList<>();
+        params.add(new BasicNameValuePair("system_id", systemId));
+        params.add(new BasicNameValuePair("name", "bond0"));
+        // One "parents" parameter per physical interface id.
+        for (Integer phyId : phyInterfaceIds) {
+            params.add(new BasicNameValuePair("parents", Integer.toString(phyId)));
+        }
+
+        createBondReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8));
+        createBondReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM);
+
+        String resp = executeApiRequest(createBondReq);
+
+        return gson.fromJson(resp, MaasObject.MaasInterface.class);
+    }
+
+    /** Sets the MAC address of the given node interface. */
+    public void updateInterfaceMac(String systemId, int interfaceId, String mac) throws IOException {
+        HttpPut updateMacReq = new HttpPut(getApiUrl("nodes", systemId, "interfaces", Integer.toString(interfaceId)));
+
+        List params = new ArrayList<>();
+        params.add(new BasicNameValuePair("mac_address", mac));
+        updateMacReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8));
+        updateMacReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM);
+
+        executeApiRequest(updateMacReq);
+        s_logger.debug("updated interface mac on " + systemId + " to " + mac);
+    }
+
+    /**
+     * Renames a machine. The new hostname is sent as a JSON body; no content type
+     * is set here, so executeApiRequest's JSON default applies.
+     */
+    public void updateHostname(String systemId, String newHostName) throws IOException {
+        String url = getApiUrl("machines", systemId);
+        HttpPut updateHostnameReq = new HttpPut(url);
+        MaasObject.UpdateHostnameParams params = new MaasObject.UpdateHostnameParams(newHostName);
+        updateHostnameReq.setEntity(new StringEntity(gson.toJson(params)));
+
+        executeApiRequest(updateHostnameReq);
+
+    }
+
+    /** Lists the rack controllers registered with this MAAS deployment. */
+    private List getRackControllers() throws IOException {
+        String resp = executeApiRequest(new HttpGet(getApiUrl("rackcontrollers")));
+
+        Type listType = new TypeToken>(){}.getType();
+        return gson.fromJson(resp, listType);
+    }
+
+    /**
+     * Lists the boot images known to the first registered rack controller, or
+     * null when no rack controller exists.
+     */
+    public List listImages() throws IOException {
+        List rc = getRackControllers();
+        if (rc == null || rc.isEmpty()) {
+            return null;
+        }
+
+        // pick the first Rack Controller for now
+        String rcSystemId = rc.get(0).systemId;
+        String url = addOperationToApiUrl(getApiUrl("rackcontrollers", rcSystemId), "list_boot_images");
+
+        String resp = executeApiRequest(new HttpGet(url));
+        MaasObject.ListImagesResponse imgResp = gson.fromJson(resp, MaasObject.ListImagesResponse.class);
+        return imgResp.images;
+    }
+
+    /**
+     * Joins the MAAS API prefix and the given path segments with '/', ending the
+     * URL with a trailing slash.
+     */
+    private String getApiUrl(String... args) {
+        ArrayList urlList = new ArrayList(Arrays.asList(args));
+        urlList.add(0, API_PREFIX);
+        // The trailing "" yields the trailing '/' after the join below.
+        urlList.add(urlList.size(), "");
+        return StringUtils.join(urlList, "/");
+    }
+
+    /** Appends the MAAS "?op=" operation selector to an API URL. */
+    private String addOperationToApiUrl(String url, String op) {
+        return url + "?op=" + op;
+    }
+}
diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasHostListner.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasHostListner.java
new file mode 100644
index 000000000000..8255a839628e
--- /dev/null
+++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasHostListner.java
@@ -0,0 +1,89 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.compute.maas;
+
+import com.cloud.agent.Listener;
+import com.cloud.agent.api.AgentControlAnswer;
+import com.cloud.agent.api.AgentControlCommand;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.Command;
+import com.cloud.agent.api.StartupCommand;
+import com.cloud.exception.ConnectionException;
+import com.cloud.host.Host;
+import com.cloud.host.Status;
+
+/**
+ * Agent listener used by the MAAS resource provider. The only event of interest
+ * is host registration, which is forwarded so the provider can fill in the
+ * MAAS-specific host details; every other callback is a no-op.
+ */
+public class MaasHostListner implements Listener {
+    MaasResourceProvider maasResource;
+
+    public MaasHostListner(MaasResourceProvider maasResource) {
+        this.maasResource = maasResource;
+    }
+
+    // Forward host registration so MAAS details get attached to the new host.
+    @Override
+    public void processHostAdded(long hostId) {
+        maasResource.updateHostAddedDetails(hostId);
+    }
+
+    @Override
+    public boolean processAnswers(long agentId, long seq, Answer[] answers) {
+        return false;
+    }
+
+    @Override
+    public boolean processCommands(long agentId, long seq, Command[] commands) {
+        return false;
+    }
+
+    @Override
+    public AgentControlAnswer processControlCommand(long agentId, AgentControlCommand cmd) {
+        return null;
+    }
+
+    @Override
+    public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException {
+        // no-op
+    }
+
+    @Override
+    public boolean processDisconnect(long agentId, Status state) {
+        return false;
+    }
+
+    @Override
+    public void processHostAboutToBeRemoved(long hostId) {
+        // no-op
+    }
+
+    @Override
+    public void processHostRemoved(long hostId, long clusterId) {
+        // no-op
+    }
+
+    @Override
+    public boolean isRecurring() {
+        return false;
+    }
+
+    @Override
+    public int getTimeout() {
+        return 0;
+    }
+
+    @Override
+    public boolean processTimeout(long agentId, long seq) {
+        return false;
+    }
+}
diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasManager.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasManager.java
new file mode 100644
index 000000000000..8b8acd2c00c6
--- /dev/null
+++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasManager.java
@@ -0,0 +1,40 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Automatically generated by addcopyright.py at 01/29/2013
+// Apache License, Version 2.0 (the "License"); you may not use this
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// Automatically generated by addcopyright.py at 04/03/2012
+package org.apache.cloudstack.compute.maas;
+
+import java.io.IOException;
+import java.util.List;
+
+import javax.naming.ConfigurationException;
+
+import org.apache.cloudstack.compute.maas.api.ListMaasServiceOfferingsCmd;
+
+import com.cloud.utils.component.Manager;
+import com.cloud.utils.component.PluggableService;
+
+/**
+ * Manager interface for the MAAS compute plugin: exposes the per-cluster MAAS
+ * API client and the service-offering listing that backs the list API command.
+ */
+public interface MaasManager extends PluggableService, Manager {
+
+    // Builds an API client from the cluster's stored MAAS endpoint/credentials.
+    MaasApiClient getMaasApiClient(long clusterId) throws ConfigurationException;
+
+    // NOTE(review): the raw List return appears to have lost its generic type
+    // parameter in transit -- confirm against the original source.
+    List listMaasServiceOfferings(ListMaasServiceOfferingsCmd cmd) throws ConfigurationException, IOException;
+}
diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasManagerImpl.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasManagerImpl.java
new file mode 100644
index 000000000000..fac8a203f208
--- /dev/null
+++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasManagerImpl.java
@@ -0,0 +1,287 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Automatically generated by addcopyright.py at 01/29/2013
+// Apache License, Version 2.0 (the "License"); you may not use this
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// Automatically generated by addcopyright.py at 04/03/2012
+package org.apache.cloudstack.compute.maas;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+
+import com.cloud.agent.AgentManager;
+
+import com.cloud.api.query.dao.HostJoinDao;
+import com.cloud.api.query.dao.UserVmJoinDao;
+import com.cloud.api.query.vo.HostJoinVO;
+import com.cloud.api.query.vo.UserVmJoinVO;
+import com.cloud.configuration.Config;
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.utils.crypt.DBEncryptionUtil;
+import com.cloud.vm.VirtualMachine;
+import org.apache.cloudstack.compute.maas.api.ListMaasServiceOfferingsCmd;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.cloudstack.framework.config.ConfigKey;
+import org.apache.cloudstack.framework.config.Configurable;
+import org.apache.log4j.Logger;
+
+import com.cloud.api.query.dao.ServiceOfferingJoinDao;
+import com.cloud.api.query.vo.ServiceOfferingJoinVO;
+import com.cloud.user.AccountManager;
+import com.cloud.utils.component.ManagerBase;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
+import com.cloud.utils.db.SearchCriteria.Op;
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+
+public class MaasManagerImpl extends ManagerBase implements MaasManager, Configurable {
+
+ private static class OfferingStats {
+ int total = 0;
+ int available = 0;
+ int erasing = 0;
+ }
+
+ public static final Logger LOGGER = Logger.getLogger(MaasManagerImpl.class.getName());
+
+ @Inject private AgentManager _agentMgr;
+ @Inject private AccountManager accountMgr;
+ @Inject private DataCenterDao dcDao;
+ @Inject protected ConfigurationDao configDao;
+ @Inject private ClusterDetailsDao clusterDetailsDao;
+ @Inject private ServiceOfferingJoinDao svcOfferingJoinDao;
+ @Inject private HostJoinDao _hostJoinDao;
+ @Inject private UserVmJoinDao _userVmJoinDao;
+
+ @Override
+ public String getConfigComponentName() {
+ return MaasManager.class.getSimpleName();
+ }
+
+ @Override
+ public ConfigKey<?>[] getConfigKeys() {
+ return new ConfigKey<?>[] {};
+ }
+
+ @Override
+ public List<Class<?>> getCommands() {
+ List<Class<?>> cmds = new ArrayList<Class<?>>();
+ cmds.add(ListMaasServiceOfferingsCmd.class);
+ return cmds;
+ }
+
+ @Override
+ public List<MaasServiceOfferingsResponse> listMaasServiceOfferings(ListMaasServiceOfferingsCmd cmd) throws ConfigurationException, IOException {
+ List<MaasServiceOfferingsResponse> responses = new ArrayList<>();
+
+ SearchBuilder<ServiceOfferingJoinVO> serviceOfferingJoinVOSearchBuilder = svcOfferingJoinDao.createSearchBuilder();
+ serviceOfferingJoinVOSearchBuilder.and("networkOfferingId", serviceOfferingJoinVOSearchBuilder.entity().getDeploymentPlanner(), Op.EQ);
+ SearchCriteria<ServiceOfferingJoinVO> serviceOfferingJoinVOSearchCriteria = serviceOfferingJoinVOSearchBuilder.create();
+ serviceOfferingJoinVOSearchCriteria.addAnd("deploymentPlanner", SearchCriteria.Op.EQ, "BareMetalPlanner");
+ List<ServiceOfferingJoinVO> offerings = svcOfferingJoinDao.search(serviceOfferingJoinVOSearchCriteria, null);
+
+ if (offerings == null || offerings.size() == 0) {
+ return responses;
+ }
+
+ List<HostJoinVO> bareMetalHosts = new ArrayList<>();
+
+ if (cmd.getClusterId() != null) {
+ if (!accountMgr.isNormalUser(CallContext.current().getCallingAccount().getAccountId())) {
+ bareMetalHosts = getHostJoinVOSByClusterId(cmd);
+ }
+ } else {
+ bareMetalHosts = getHostJoinVOSByZoneId(cmd);
+ }
+
+ HashMap<String, OfferingStats> bareMetalHostsMap = new HashMap<>();
+
+ for (HostJoinVO host : bareMetalHosts) {
+ String key = createSpecKey(host.getTag(), host.getCpus(), host.getSpeed().intValue(), (host.getTotalMemory() / 1048576));
+ OfferingStats offeringStats = bareMetalHostsMap.get(key);
+
+ if(offeringStats == null) {
+ offeringStats = new OfferingStats();
+ bareMetalHostsMap.put(key, offeringStats);
+ }
+
+ offeringStats.total++;
+
+ UserVmJoinVO userVm = getVmByHostId(host.getId());
+ if(userVm == null) {
+ offeringStats.available++;
+ } else if(userVm.getState().equals(VirtualMachine.State.Expunging) || userVm.getState().equals(VirtualMachine.State.Destroyed)) {
+ offeringStats.erasing++;
+ }
+ }
+
+ offerings.forEach(svc -> {
+ String key = createSpecKey(svc.getHostTag(), svc.getCpu(), svc.getSpeed(), svc.getRamSize());
+
+ OfferingStats offeringStats = bareMetalHostsMap.get(key);
+ if(offeringStats == null) {
+ offeringStats = new OfferingStats();
+ }
+
+ MaasServiceOfferingsResponse response = new MaasServiceOfferingsResponse();
+ response.setObjectName("maasserviceoffering");
+ response.setOfferingId(svc.getUuid());
+ response.setOfferingName(svc.getName());
+ response.setAvailable(offeringStats.available);
+ if (accountMgr.isRootAdmin(CallContext.current().getCallingAccount().getAccountId())) {
+ response.setTotal(offeringStats.total);
+ response.setErasing(offeringStats.erasing);
+ }
+
+ responses.add(response);
+ });
+ return responses;
+ }
+
+ private List<HostJoinVO> getHostJoinVOSByClusterId(ListMaasServiceOfferingsCmd cmd) {
+ SearchBuilder<HostJoinVO> hostJoinVOSearchBuilder = _hostJoinDao.createSearchBuilder();
+ hostJoinVOSearchBuilder.and("hypervisor_type", hostJoinVOSearchBuilder.entity().getHypervisorType(), Op.EQ);
+ hostJoinVOSearchBuilder.and("cluster_id", hostJoinVOSearchBuilder.entity().getClusterId(), Op.EQ);
+ SearchCriteria<HostJoinVO> hostJoinVOSearchCriteria = hostJoinVOSearchBuilder.create();
+ hostJoinVOSearchCriteria.setParameters("hypervisor_type", "BareMetal");
+ hostJoinVOSearchCriteria.setParameters("cluster_id", cmd.getClusterId());
+ return _hostJoinDao.search(hostJoinVOSearchCriteria, null);
+ }
+
+ private List<HostJoinVO> getHostJoinVOSByZoneId(ListMaasServiceOfferingsCmd cmd) {
+ List<Long> zoneIds = new ArrayList<>();
+ if(cmd.getZoneId() != null) {
+ zoneIds.add(cmd.getZoneId());
+ } else {
+ for(DataCenterVO dataCenterVO : dcDao.listAllZones()) {
+ zoneIds.add(dataCenterVO.getId());
+ }
+ }
+
+ SearchBuilder<HostJoinVO> hostJoinVOSearchBuilder = _hostJoinDao.createSearchBuilder();
+ hostJoinVOSearchBuilder.and("hypervisor_type", hostJoinVOSearchBuilder.entity().getHypervisorType(), Op.EQ);
+ hostJoinVOSearchBuilder.and("data_center_id", hostJoinVOSearchBuilder.entity().getZoneId(), Op.IN);
+ SearchCriteria<HostJoinVO> hostJoinVOSearchCriteria = hostJoinVOSearchBuilder.create();
+ hostJoinVOSearchCriteria.setParameters("hypervisor_type", "BareMetal");
+ hostJoinVOSearchCriteria.setParameters("data_center_id", zoneIds.toArray(new Object[zoneIds.size()]));
+ return _hostJoinDao.search(hostJoinVOSearchCriteria, null);
+ }
+
+ private UserVmJoinVO getVmByHostId(long hostId) {
+ SearchBuilder<UserVmJoinVO> userVmJoinVOSearchBuilder = _userVmJoinDao.createSearchBuilder();
+ userVmJoinVOSearchBuilder.and("hypervisor_type", userVmJoinVOSearchBuilder.entity().getHypervisorType(), Op.EQ);
+ userVmJoinVOSearchBuilder.and().op("host_id", userVmJoinVOSearchBuilder.entity().getHostId(), Op.EQ);
+ userVmJoinVOSearchBuilder.or("last_host_id", userVmJoinVOSearchBuilder.entity().getLastHostId(), Op.EQ);
+ userVmJoinVOSearchBuilder.cp();
+ SearchCriteria<UserVmJoinVO> userVmJoinVOSearchCriteria = userVmJoinVOSearchBuilder.create();
+ userVmJoinVOSearchCriteria.setParameters("hypervisor_type", "BareMetal");
+ userVmJoinVOSearchCriteria.setParameters("host_id", hostId);
+ userVmJoinVOSearchCriteria.setParameters("last_host_id", hostId);
+ return _userVmJoinDao.findOneBy(userVmJoinVOSearchCriteria);
+ }
+
+ @Override
+ public MaasApiClient getMaasApiClient(long clusterId) throws ConfigurationException {
+ Map<String, String> clusterDetails = clusterDetailsDao.findDetails(clusterId);
+ String maasUrl = clusterDetails.get("baremetalMaasHost");
+ String maasApiKey = DBEncryptionUtil.decrypt(clusterDetails.get("baremetalMaaSKey"));
+
+ String[] maasAddress = maasUrl.split(":");
+ String maasScheme = null;
+ String maasIp = null;
+ Integer maasPort = -1;
+
+ try {
+ // scheme://ip_or_dns:port
+ if (maasAddress.length == 3) {
+ maasScheme = maasAddress[0];
+ maasIp = maasAddress[1].replace("/", "");
+ maasPort = Integer.parseInt(maasAddress[2].replace("/", ""));
+ }
+
+ // scheme://ip_or_dns OR ip_or_dns:port
+ else if (maasAddress.length == 2) {
+ if (maasAddress[0].equalsIgnoreCase("http") || maasAddress[0].equalsIgnoreCase("https")) {
+ maasScheme = maasAddress[0];
+ maasIp = maasAddress[1].replace("/", "");
+ } else {
+ maasIp = maasAddress[0].replace("/", "");
+ maasPort = Integer.parseInt(maasAddress[1].replace("/", ""));
+ }
+ }
+
+ // ip_or_dns
+ else if (maasAddress.length == 1) {
+ maasIp = maasAddress[0];
+ }
+
+ else {
+ throw new ConfigurationException(maasUrl + " is not a valid URL for MaaS server");
+ }
+ } catch (NumberFormatException e) {
+ if (maasAddress.length == 3) {
+ LOGGER.warn(maasAddress[2].replace("/", "") + " is not a valid port number", e);
+ } else if (maasAddress.length == 2) {
+ LOGGER.warn(maasAddress[1].replace("/", "") + " is not a valid port number", e);
+ }
+
+ throw e;
+ }
+
+ String[] maasSecrets = maasApiKey.split(":");
+
+ if (maasSecrets.length != 3) {
+ LOGGER.warn("MaaS API key is malformed");
+ throw new ConfigurationException("MaaS API key is malformed");
+ }
+
+ String maasConsumerKey = maasSecrets[0];
+ String maasKey = maasSecrets[1];
+ String maasSecret = maasSecrets[2];
+
+ int timeout = Integer.parseInt(configDao.getValue(Config.BaremetalProvisionDoneNotificationTimeout.key()));
+
+ return new MaasApiClient(maasScheme, maasIp, maasPort, maasKey, maasSecret, maasConsumerKey, timeout);
+ }
+
+ private String createSpecKey(String tags, int cpus, int speed, long memory) {
+ String key = String.format("%s,%s,%s", cpus, speed, memory);
+ if(tags != null && !tags.isEmpty()) {
+ String[] tagArray = tags.split(",");
+ key += String.join(",",
+ Arrays.stream(tagArray).
+ filter(tag -> tag.startsWith("bm")).
+ sorted(Comparator.naturalOrder()).
+ collect(Collectors.toList()));
+ }
+ return key;
+ }
+
+}
diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasObject.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasObject.java
new file mode 100644
index 000000000000..63790566cadc
--- /dev/null
+++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasObject.java
@@ -0,0 +1,342 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.compute.maas;
+import com.google.gson.annotations.SerializedName;
+
+import java.util.List;
+
+public class MaasObject {
+
+ private static final String ARCH_AMD64 = "amd64";
+ private static final String POWER_TYPE_IPMI = "ipmi";
+
+ enum MaasState {
+ Ready, Allocated, Deploying, Deployed;
+ }
+
+ enum InterfaceType {
+ physical, bond;
+ }
+
+ public static class MaasConnection {
+
+ public String scheme;
+ public String ip;
+ public int port;
+ public String key;
+ public String secret;
+ public String consumerKey;
+
+ public MaasConnection(String scheme, String ip, int port, String key, String secret, String consumerKey) {
+ this.scheme = scheme;
+ this.ip = ip;
+ this.port = port;
+ this.key = key;
+ this.secret = secret;
+ this.consumerKey = consumerKey;
+ }
+
+ public String getScheme() {
+ return scheme;
+ }
+
+ public String getIp() {
+ return ip;
+ }
+
+ public int getPort() {
+ return port;
+ }
+
+ public String getKey() {
+ return key;
+ }
+
+ public String getSecret() {
+ return secret;
+ }
+
+ public String getConsumerKey() {
+ return consumerKey;
+ }
+ }
+
+ public class MaasNode {
+
+ public String hostname;
+
+ @SerializedName("power_state")
+ public String powerState;
+
+ @SerializedName("power_type")
+ public String powerType;
+
+ @SerializedName("system_id")
+ public String systemId;
+
+ @SerializedName("status_name")
+ public String statusName;
+
+ @SerializedName("cpu_count")
+ public Integer cpuCount;
+
+ @SerializedName("cpu_speed")
+ public Long cpuSpeed;
+
+ @SerializedName("memory")
+ public Long memory;
+
+ @SerializedName("storage")
+ public Double storage;
+
+ @SerializedName("boot_interface")
+ public MaasInterface bootInterface;
+
+ @SerializedName("interface_set")
+ public MaasInterface[] interfaceSet;
+
+ public String getSystemId() {
+ return systemId;
+ }
+
+ public String getStatusName() {
+ return statusName;
+ }
+
+ public Integer getCpuCount() {
+ return cpuCount;
+ }
+
+ public Long getCpuSpeed() {
+ return cpuSpeed;
+ }
+
+ public Long getMemory() {
+ return memory;
+ }
+
+ public Double getStorage() {
+ return storage;
+ }
+
+ public MaasInterface getBootInterface() {
+ return bootInterface;
+ }
+
+ public MaasInterface[] getInterfaceSet() {
+ return interfaceSet;
+ }
+ }
+
+ public class MaasInterface {
+
+ public int id;
+
+ public String name;
+
+ public String type;
+
+ public MaasLink[] links;
+
+ public boolean enabled;
+
+ @SerializedName("mac_address")
+ public String macAddress;
+ }
+
+ public class MaasLink {
+ public int id;
+ public String mode;
+ public MaasSubnet subnet;
+ }
+
+ public class MaasSubnet {
+ public int id;
+ public String name;
+ public MaasVlan vlan;
+ }
+
+ public class MaasVlan {
+ public int id;
+
+ @SerializedName("dhcp_on")
+ public boolean dhcpOn;
+ }
+
+ public static class AddMachineParameters {
+
+ @SerializedName("mac_addresses") /* For now only one pxe mac address */
+ public String macAddress;
+
+ @SerializedName("power_type")
+ public String powerType;
+
+ @SerializedName("architecture")
+ public String arch;
+
+ @SerializedName("power_parameters_power_user")
+ public String powerUser;
+
+ @SerializedName("power_parameters_power_pass")
+ public String powerPassword;
+
+ @SerializedName("power_parameters_power_address")
+ public String powerAddress;
+
+ public String hostname;
+
+ public AddMachineParameters(String powerAddress, String macAddress, String powerUser, String powerPassword, String hostname) {
+ this.powerAddress = powerAddress;
+ this.macAddress = macAddress;
+ this.powerUser = powerUser;
+ this.powerPassword = powerPassword;
+ this.hostname = hostname;
+ this.arch = ARCH_AMD64;
+ this.powerType = POWER_TYPE_IPMI;
+ }
+
+ public String getMacAddress() {
+ return macAddress;
+ }
+
+ public String getPowerType() {
+ return powerType;
+ }
+
+ public String getArch() {
+ return arch;
+ }
+
+ public String getPowerUser() {
+ return powerUser;
+ }
+
+ public String getPowerPassword() {
+ return powerPassword;
+ }
+
+ public String getPowerAddress() {
+ return powerAddress;
+ }
+
+ public String getHostname() {
+ return hostname;
+ }
+ }
+
+ public static class DeployMachineParameters{
+
+ @SerializedName("distro_series")
+ String distroSeries;
+
+ public DeployMachineParameters(String distroSeries) {
+ this.distroSeries = distroSeries;
+ }
+
+ public String getDistroSeries() {
+ return distroSeries;
+ }
+ }
+
+ public static class AllocateMachineParameters {
+
+ @SerializedName("system_id")
+ String systemId;
+
+ public AllocateMachineParameters(String systemId) {
+ this.systemId = systemId;
+ }
+
+ public String getSystemId() {
+ return systemId;
+ }
+ }
+
+ public static class UnlinkSubnetParameters {
+ Integer id;
+
+ public UnlinkSubnetParameters(Integer id) {
+ this.id = id;
+ }
+ }
+
+ public static class UpdateHostnameParams {
+
+ String hostname;
+
+ public UpdateHostnameParams(String hostname) {
+ this.hostname = hostname;
+ }
+ }
+
+ public static class LinkSubnetParameters {
+ String mode;
+ Integer subnet;
+
+ public LinkSubnetParameters(String mode, Integer subnet) {
+ this.mode = mode;
+ this.subnet = subnet;
+ }
+ }
+
+ public static class ReleaseMachineParameters {
+ Boolean erase;
+
+ @SerializedName("secure_erase")
+ Boolean secureErase;
+
+ @SerializedName("quick_erase")
+ Boolean quickErase;
+
+ public ReleaseMachineParameters(Boolean erase, Boolean secureErase, Boolean quickErase) {
+ this.erase = erase;
+ this.secureErase = secureErase;
+ this.quickErase = quickErase;
+ }
+ }
+
+ public static class CreateBondInterfaceParameters {
+ String name;
+
+ List<String> parents;
+
+ @SerializedName("system_id")
+ String systemId;
+
+ public CreateBondInterfaceParameters(String name, List<String> parents, String systemId) {
+ this.name = name;
+ this.parents = parents;
+ this.systemId = systemId;
+ }
+ }
+
+ public static class RackController {
+ @SerializedName("system_id")
+ String systemId;
+ }
+
+ public static class BootImage {
+ String name;
+ }
+
+ public static class ListImagesResponse {
+ List<BootImage> images;
+ }
+}
diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasResourceProvider.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasResourceProvider.java
new file mode 100644
index 000000000000..d68eee8c3b6b
--- /dev/null
+++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasResourceProvider.java
@@ -0,0 +1,684 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.compute.maas;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import javax.annotation.PostConstruct;
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.compute.maas.MaasObject.MaasInterface;
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.log4j.Logger;
+import org.springframework.beans.factory.annotation.Configurable;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.PlugNicAnswer;
+import com.cloud.agent.api.PlugNicCommand;
+import com.cloud.agent.api.ReadyAnswer;
+import com.cloud.agent.api.ReadyCommand;
+import com.cloud.agent.api.StartAnswer;
+import com.cloud.agent.api.StartCommand;
+import com.cloud.agent.api.StartupCommand;
+import com.cloud.agent.api.StartupRoutingCommand;
+import com.cloud.agent.api.UnPlugNicAnswer;
+import com.cloud.agent.api.UnPlugNicCommand;
+import com.cloud.agent.api.baremetal.DestroyCommand;
+import com.cloud.agent.api.to.NicTO;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.api.query.dao.UserVmJoinDao;
+import com.cloud.api.query.vo.UserVmJoinVO;
+import com.cloud.baremetal.database.BaremetalRctDao;
+import com.cloud.baremetal.database.BaremetalRctVO;
+import com.cloud.baremetal.manager.BareMetalResource;
+import com.cloud.baremetal.manager.BaremetalManagerImpl;
+import com.cloud.baremetal.manager.BaremetalRct;
+import com.cloud.baremetal.manager.BaremetalVlanManager;
+import com.cloud.baremetal.manager.VlanType;
+import com.cloud.baremetal.networkservice.BareMetalResourceBase;
+import com.cloud.host.DetailVO;
+import com.cloud.host.Host.Type;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.host.dao.HostDetailsDao;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.network.Network;
+import com.cloud.network.Networks;
+import com.cloud.network.dao.NetworkDao;
+import com.cloud.network.dao.NetworkVO;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.script.OutputInterpreter;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.dao.VMInstanceDao;
+import com.google.gson.Gson;
+
+@Configurable
+public class MaasResourceProvider extends BareMetalResourceBase implements BareMetalResource {
+
+ private static final Logger s_logger = Logger.getLogger(MaasResourceProvider.class);
+ private static final String MAAS_ID_KEY = "MaasSystemId";
+
+ private String maasUniqueId = null;
+ private MaasObject.MaasNode maasNode = null;
+ private MaasApiClient maasApi;
+
+ protected static ConfigurationDao configDao;
+ protected static VMInstanceDao vmDao;
+
+ private static BaremetalVlanManager vlanMgr;
+ private static NetworkDao networkDao;
+ private static HostDao hostDao;
+ private static VMTemplateDao templateDao;
+ private static HostDetailsDao hostDetailsDao;
+ private static MaasManager maasManager;
+ private static BaremetalRctDao rctDao;
+ private static AgentManager agentMgr;
+ private static UserVmJoinDao userVmJoinDao;
+
+ @Inject protected ConfigurationDao _configDao;
+ @Inject protected VMInstanceDao _vmDao;
+
+ @Inject private BaremetalVlanManager _vlanMgr;
+ @Inject private NetworkDao _networkDao;
+ @Inject private HostDao _hostDao;
+ @Inject private VMTemplateDao _templateDao;
+ @Inject private HostDetailsDao _hostDetailsDao;
+ @Inject private MaasManager _maasManager;
+ @Inject private BaremetalRctDao _rctDao;
+ @Inject private AgentManager _agentMgr;
+ @Inject private UserVmJoinDao _userVmJoinDao;
+ private MaasHostListner hostListner;
+
+ private Gson gson = new Gson();
+
+ @PostConstruct
+ void init() {
+ if (_configDao != null) {
+ configDao = _configDao;
+ }
+ if (_vmDao != null) {
+ vmDao = _vmDao;
+ }
+ if (_vlanMgr != null) {
+ vlanMgr = _vlanMgr;
+ }
+ if (_networkDao != null) {
+ networkDao = _networkDao;
+ }
+ if (_hostDao != null) {
+ hostDao = _hostDao;
+ }
+ if (_templateDao != null) {
+ templateDao = _templateDao;
+ }
+ if (_hostDetailsDao != null) {
+ hostDetailsDao = _hostDetailsDao;
+ }
+ if (_maasManager != null) {
+ maasManager = _maasManager;
+ }
+ if (_rctDao != null) {
+ rctDao = _rctDao;
+ }
+ if (_agentMgr != null) {
+ agentMgr = _agentMgr;
+ }
+ if (_userVmJoinDao != null) {
+ userVmJoinDao = _userVmJoinDao;
+ }
+ }
+
+ @Override
+ public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
+ ipmiIface = "lanplus";
+ configure(name, params, configDao, vmDao);
+
+ if (params.keySet().size() == 0) {
+ return true;
+ }
+
+ // MaaS Import Node
+ if (ApiConstants.BAREMETAL_MAAS_ACTION_IMPORT.equals((String) params.get(ApiConstants.BAREMETAL_MAAS_ACTION))) {
+ maasUniqueId = (String) params.get(ApiConstants.BAREMETAL_MAAS_NODE_ID);
+
+ if (maasUniqueId == null) {
+ throw new ConfigurationException("Unable to get the host unique id");
+ }
+ }
+
+ if (StringUtils.isNotEmpty((String) params.get("MaasSystemId")) && StringUtils.isEmpty(maasUniqueId)) {
+ maasUniqueId = (String) params.get("MaasSystemId");
+ }
+
+ if (configDao == null) {
+ return true;
+ }
+
+ maasApi = maasManager.getMaasApiClient(Long.parseLong(_cluster));
+ hostListner = new MaasHostListner(this);
+ agentMgr.registerForHostEvents(hostListner, true, false, true);
+
+ return true;
+ }
+
+ @Override
+ public Type getType() {
+ return com.cloud.host.Host.Type.Routing;
+ }
+
+ @Override
+ public StartupCommand[] initialize() {
+ StartupRoutingCommand cmd = new StartupRoutingCommand(0, 0, 0, 0, null, Hypervisor.HypervisorType.BareMetal,
+ new HashMap<String, String>());
+
+ cmd.setDataCenter(_zone);
+ cmd.setPod(_pod);
+ cmd.setCluster(_cluster);
+ cmd.setGuid(_uuid);
+ cmd.setName(maasUniqueId);
+ cmd.setPrivateIpAddress(_ip);
+ cmd.setStorageIpAddress(_ip);
+ cmd.setVersion(BareMetalResourceBase.class.getPackage().getImplementationVersion());
+ cmd.setCpus((int) _cpuNum);
+ cmd.setSpeed(_cpuCapacity);
+ cmd.setMemory(_memCapacity);
+ cmd.setPrivateMacAddress(_mac);
+ cmd.setPublicMacAddress(_mac);
+ return new StartupCommand[] { cmd };
+ }
+
+ protected Answer execute(DestroyCommand cmd) {
+
+ try {
+ maasNode = maasApi.getMaasNode(maasNode.getSystemId());
+ assert maasNode != null;
+ } catch (IOException e) {
+ throw new CloudRuntimeException("Unable to get MAAS node", e);
+ }
+
+ try {
+ VirtualMachineTO vm = cmd.getVm();
+ VMInstanceVO vmvo = vmDao.findById(vm.getId());
+ vmvo.setHostId(hostId); //hostid is unset, set it here so we don't get NPE downstream
+
+ for (NicTO nic : vm.getNics()) {
+ Network nw = networkDao.findByUuid(nic.getNetworkUuid());
+ if (nw != null) {
+ int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(nw.getBroadcastUri()));
+ releaseVlan(vlan, nic.isDefaultNic()? VlanType.UNTAGGED: VlanType.TAGGED, false);
+ }
+ }
+
+ if (!doScript(_setPxeBootCommand)) {
+ throw new CloudRuntimeException("Set " + _ip + " boot dev to PXE failed");
+ }
+
+ if (!doScript(_powerOffCommand)) {
+ throw new CloudRuntimeException("Unable to power off " + _ip);
+ }
+
+ if (BaremetalManagerImpl.pxeVlan.value() != null) {
+ prepareVlan(BaremetalManagerImpl.pxeVlan.value(), VlanType.UNTAGGED);
+ }
+
+ UserVmJoinVO uservm = userVmJoinDao.findById(vmvo.getId());
+
+ maasApi.removeTagFromMachine(maasNode.getSystemId(), "accountid_" + uservm.getAccountUuid());
+ maasApi.removeTagFromMachine(maasNode.getSystemId(), "domainid_" + uservm.getDomainUuid());
+
+ if (StringUtils.isNotEmpty(uservm.getProjectUuid())) {
+ maasApi.removeTagFromMachine(maasNode.getSystemId(), "projectid_" + uservm.getProjectUuid());
+ }
+
+ if (!maasNode.getStatusName().equals(MaasObject.MaasState.Ready.toString())){
+ Integer eraseStrategy = BaremetalManagerImpl.diskEraseOnDestroy.value();
+ boolean eraseDisk = eraseStrategy == 1 || eraseStrategy == 2;
+ boolean fullErase = eraseStrategy == 2;
+ maasApi.releaseMachine(maasNode.getSystemId(), eraseDisk, fullErase);
+ }
+
+ String hostname = "HOST-" + Long.toString(hostId);
+ maasApi.updateHostname(maasNode.getSystemId(), hostname);
+
+ } catch (IOException e) {
+ s_logger.warn("Unable to destroy the node on MAAS " + maasNode.getSystemId(), e);
+ //TODO: Move the node back to the right VLAN
+ //TODO: Do we move the node to Broken state? Do we make the status as alert on Cloudstack?
+ return new Answer(cmd, false, e.getMessage());
+ }
+
+ return new Answer(cmd, true, "Success");
+ }
+
+ protected StartAnswer execute(StartCommand cmd) {
+
+ VirtualMachineTO vm = cmd.getVirtualMachine();
+ VMInstanceVO vmvo = vmDao.findById(vm.getId());
+
+ if (vmvo == null) {
+ throw new CloudRuntimeException("Unable to find VM in the DB " + vm.getName());
+ }
+
+ OutputInterpreter.AllLinesParser interpreter = new OutputInterpreter.AllLinesParser();
+ if (!doScript(_getStatusCommand, interpreter)) {
+ return new StartAnswer(cmd, "Cannot get current power status of " + getName());
+ }
+
+ NicTO defaultNic = getDefaultNic(vm);
+ if (defaultNic == null) {
+ throw new CloudRuntimeException("Unable to get the default nic for VM " + vm.getId());
+ }
+
+ HostVO host = hostDao.findById(vmvo.getHostId());
+ if (host == null) {
+ throw new CloudRuntimeException("Unable to get the host for VM " + vm.getId());
+ }
+
+ //find the switch which is responsible for this mac
+ Network nw = networkDao.findByUuid(defaultNic.getNetworkUuid());
+ if (nw == null) {
+ throw new CloudRuntimeException("Unable to get the network for VM " + vm.getId() + " With network ID " + defaultNic.getNetworkUuid());
+ }
+ int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(nw.getBroadcastUri()));
+
+ try {
+ maasNode = maasApi.getMaasNode(maasNode.getSystemId());
+ assert maasNode != null;
+ } catch (IOException e) {
+ throw new CloudRuntimeException("Unable to get info from maas node");
+ }
+
+ //if the host is already deployed, just start it
+ if (vmvo.getLastHostId() != null ) {
+ if (vmvo.getLastHostId().equals(hostId) && maasNode.getStatusName().equals(MaasObject.MaasState.Deployed.toString())) {
+ if (!doScript(_bootOrRebootCommand)) {
+ throw new CloudRuntimeException("IPMI reboot failed for host " + _ip);
+ }
+ return new StartAnswer(cmd);
+ } else {
+ s_logger.warn("Bad state, VM has lastHostId but MAAS is not in deployed state");
+ // XXX: Do something here
+ return new StartAnswer(cmd, "Unable to start VM because the baremetal is in bad state");
+ }
+ }
+
+ //deploy OS on the host using MAAS
+ long templateId = vmvo.getTemplateId();
+ VMTemplateVO template = templateDao.findById(templateId);
+ String templateUrl = template.getUrl();
+
+ assert templateUrl != null;
+
+ checkTemplateOnMaas(templateUrl);
+
+ if (VirtualMachine.State.Starting != vmvo.getState()) {
+ throw new CloudRuntimeException(String.format("baremetal instance[name:%s, state:%s] is not in state of Starting", vmvo.getInstanceName(), vmvo.getState()));
+ }
+
+ if (!maasNode.statusName.equals(MaasObject.MaasState.Ready.toString())) {
+ throw new CloudRuntimeException(String.format("Maas State is not in ready %s %s", vmvo.getInstanceName(), maasNode.systemId));
+ }
+
+ try {
+
+ // Before we prepare VLANs, we must be sure that there
+ // are no other VLANs on the ports just to be safe
+ if (BaremetalManagerImpl.pxeVlan.value() != null) {
+ releaseVlan(BaremetalManagerImpl.pxeVlan.value(), VlanType.UNTAGGED, true);
+ prepareVlan(BaremetalManagerImpl.pxeVlan.value(), VlanType.UNTAGGED);
+ }
+
+ maasApi.updateHostname(maasNode.getSystemId(), vm.getName());
+ setupMaasBonding(maasNode, defaultNic.getMac());
+
+ MaasObject.AllocateMachineParameters allocateMachineParameters = new MaasObject.AllocateMachineParameters(maasNode.getSystemId());
+ maasApi.allocateMachine(allocateMachineParameters);
+
+ UserVmJoinVO uservm = userVmJoinDao.findById(vmvo.getId());
+
+ maasApi.addTagToMachine(maasNode.getSystemId(), "accountid_" + uservm.getAccountUuid());
+ maasApi.addTagToMachine(maasNode.getSystemId(), "domainid_" + uservm.getDomainUuid());
+
+ if (StringUtils.isNotEmpty(uservm.getProjectUuid())) {
+ maasApi.addTagToMachine(maasNode.getSystemId(), "projectid_" + uservm.getProjectUuid());
+ }
+
+ MaasObject.DeployMachineParameters deployMachineParameters = new MaasObject.DeployMachineParameters(templateUrl);
+ maasNode = maasApi.deployMachine(maasNode.getSystemId(), deployMachineParameters);
+
+ if (!doScript(_setDiskBootCommand)) {
+ throw new CloudRuntimeException("Set " + _ip + " boot dev to Disk failed");
+ }
+
+ // Before we prepare the tenant VLAN, we must remove the
+ // default PXE VLAN from the ports just to be safe
+ if (BaremetalManagerImpl.pxeVlan.value() != null) {
+ releaseVlan(BaremetalManagerImpl.pxeVlan.value(), VlanType.UNTAGGED, false);
+ }
+ prepareVlan(vlan, VlanType.UNTAGGED);
+
+ // reboot the host so that it picks up the new config from VR DHCP
+ if (!doScript(_bootOrRebootCommand)) {
+ throw new CloudRuntimeException("IPMI reboot failed for host " + _ip);
+ }
+
+ } catch (Exception e) {
+ s_logger.error(e.getMessage(), e);
+
+ try {
+ releaseVlan(vlan, VlanType.UNTAGGED, false);
+ } catch (Exception ex) {
+ s_logger.error("Failed cleanup of VLANs ", ex);
+ }
+
+ try {
+ maasNode = maasApi.getMaasNode(maasNode.getSystemId());
+ Integer eraseStrategy = BaremetalManagerImpl.diskEraseOnDestroy.value();
+ boolean eraseDisk = eraseStrategy == 1 || eraseStrategy == 2;
+ boolean fullErase = eraseStrategy == 2;
+ maasApi.releaseMachine(maasNode.getSystemId(), eraseDisk, fullErase);
+ } catch (IOException ex) {
+ //XXX: put node into alert state, manual intervention required
+ s_logger.error("Unable to release node " + maasNode.getSystemId(), ex);
+ }
+
+ doScript(_powerOffCommand);
+ return new StartAnswer(cmd, e.getMessage());
+ }
+
+ vmvo.setState(VirtualMachine.State.Running);
+ vmvo.setLastHostId(vmvo.getHostId());
+ vmDao.update(vmvo.getId(), vmvo);
+
+ s_logger.debug(String.format("received baremetal provision done notification for vm[id:%s name:%s] running on host[mac:%s, ip:%s]",
+ vm.getId(), vmvo.getInstanceName(), vmvo.getPrivateMacAddress(), vmvo.getPrivateIpAddress()));
+
+ s_logger.debug("Start bare metal vm " + vm.getName() + " successfully");
+ _vmName = vm.getName();
+ return new StartAnswer(cmd);
+ }
+
+ private void checkTemplateOnMaas(String templateUrl) {
+ try {
+ boolean imgFound = false;
+ for (MaasObject.BootImage img: maasApi.listImages()) {
+ if (img.name.contains(templateUrl)) {
+ imgFound = true;
+ break;
+ }
+ }
+
+ if (!imgFound) {
+ throw new CloudRuntimeException("Template " + templateUrl + " Not found in MAAS");
+ }
+ } catch (IOException e) {
+ throw new CloudRuntimeException("Unable to list boot images for MAAS", e);
+ }
+ }
+
+ protected ReadyAnswer execute(ReadyCommand cmd) {
+ return new ReadyAnswer(cmd);
+ }
+
+ protected PlugNicAnswer execute(PlugNicCommand cmd) {
+
+ NicTO nic = cmd.getNic();
+ NetworkVO nw = networkDao.findByUuid(nic.getNetworkUuid());
+ int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(nw.getBroadcastUri()));
+
+ try {
+ prepareVlan(vlan, VlanType.TAGGED);
+ } catch (Exception e) {
+ String errMesg = "Unable to add Nic " + nic.getUuid() + " to network " + nw.getId();
+ s_logger.warn(errMesg, e);
+ releaseVlan(vlan, VlanType.TAGGED, false);
+ throw new CloudRuntimeException(errMesg, e);
+ }
+
+ return new PlugNicAnswer(cmd, true, "Nic " + nic.getUuid() + " Added to network " + nw.getId());
+ }
+
+ protected UnPlugNicAnswer execute(UnPlugNicCommand cmd) {
+
+ NicTO nic = cmd.getNic();
+ NetworkVO nw = networkDao.findByUuid(nic.getNetworkUuid());
+ int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(nw.getBroadcastUri()));
+
+ if (nic.isDefaultNic()) {
+ throw new CloudRuntimeException("Cannot unplug default NIC for baremetal");
+ }
+
+ try {
+ releaseVlan(vlan, VlanType.TAGGED, false);
+ } catch (Exception e) {
+ String errMesg = "Unable to remove Nic " + nic.getUuid() + " from network " + nw.getId();
+ s_logger.warn(errMesg, e);
+ prepareVlan(vlan, VlanType.TAGGED);
+ throw new CloudRuntimeException(errMesg, e);
+ }
+
+ return new UnPlugNicAnswer(cmd, true, "Nic " + nic.getUuid() + " Removed from network " + nw.getId());
+ }
+
+ @Override
+ public boolean start() {
+ if (_zone == null) {
+ return true;
+ }
+ if (configDao == null) {
+ return true;
+ }
+
+ // Node Create
+ if (StringUtils.isEmpty(maasUniqueId)) {
+ MaasObject.AddMachineParameters maasMachine = new MaasObject.AddMachineParameters(_ip, _mac, _username, _password, _uuid);
+
+ try {
+ if (hostId == null) {
+ addMassMachine(maasMachine);
+ } else {
+ DetailVO maasNodeId = hostDetailsDao.findDetail(hostId, MAAS_ID_KEY);
+ if (maasNodeId != null) {
+ maasNode = maasApi.getMaasNode(maasNodeId.getValue());
+ if (maasNode == null) {
+ addMassMachine(maasMachine);
+ } else
+ maasUniqueId = maasNode.getSystemId();
+ }
+ }
+ } catch (IOException e) {
+ String errMesg = "Error adding machine " + _ip + " Error: " + e.getMessage() + " Check MAAS and remove host if already added and retry again";
+ s_logger.warn(errMesg, e);
+ throw new CloudRuntimeException(errMesg, e);
+ }
+
+ HostVO host = hostDao.findByGuid(_uuid);
+ if (host != null) {
+ updateHostAddedDetails(host.getId());
+ }
+ }
+
+ // Node Import
+ else {
+ try {
+ maasNode = maasApi.getMaasNode(maasUniqueId);
+ if(maasNode != null) {
+ maasUniqueId = maasNode.getSystemId();
+ _cpuNum = maasNode.getCpuCount();
+ _cpuCapacity = maasNode.getCpuSpeed();
+ _memCapacity = maasNode.getMemory() * 1024 * 1024;
+
+ MaasInterface minterface = Arrays.asList(maasNode.getInterfaceSet())
+ .stream()
+ .filter(i -> i.type.equals("physical"))
+ .findFirst()
+ .orElse(null);
+
+ if (minterface != null) {
+ _mac = minterface.macAddress;
+ }
+ }
+ } catch (IOException e) {
+ String errMesg = "Error adding machine " + maasUniqueId + " Error: " + e.getMessage() + " Check MAAS and add the selected node.";
+ s_logger.warn(errMesg, e);
+ throw new CloudRuntimeException(errMesg, e);
+ }
+ }
+
+ return true;
+ }
+
+ private void addMassMachine(MaasObject.AddMachineParameters maasMachine) throws IOException {
+ if (BaremetalManagerImpl.pxeVlan.value() != null) {
+ vlanMgr.prepareVlan(BaremetalManagerImpl.pxeVlan.value(), _mac, VlanType.UNTAGGED);
+ }
+
+ maasNode = maasApi.addMachine(maasMachine);
+
+ //make the default NIC DHCP
+ MaasObject.MaasInterface bootInterface = maasNode.getBootInterface();
+ int interfaceId = bootInterface.id;
+ int linkId = bootInterface.links[0].id;
+ int subnetId = bootInterface.links[0].subnet.id;
+ maasApi.setInterface(maasNode.getSystemId(), interfaceId, linkId, subnetId, true);
+
+ //make sure all the other interfaces are on the same fabric/vlan to enable bonding
+ for (MaasObject.MaasInterface iface : maasNode.getInterfaceSet()) {
+ if (!iface.macAddress.equals(bootInterface.macAddress)) {
+ if (BaremetalManagerImpl.pxeVlan.value() != null) {
+ vlanMgr.prepareVlan(BaremetalManagerImpl.pxeVlan.value(), iface.macAddress, VlanType.UNTAGGED);
+ }
+ Integer lId = null;
+ if (iface.links != null && iface.links.length > 0) {
+ lId = iface.links[0].id;
+ }
+ maasApi.setInterface(maasNode.getSystemId(), iface.id, lId, subnetId, false);
+ }
+ }
+
+ //update maas node
+ maasNode = maasApi.getMaasNode(maasNode.getSystemId());
+ }
+
+ public void updateHostAddedDetails(long hostId) {
+ if (this.hostId == null) {
+ this.hostId = hostId;
+ DetailVO maasIdDetail = new DetailVO(hostId, MAAS_ID_KEY, maasNode.getSystemId());
+ hostDetailsDao.persist(maasIdDetail);
+ }
+ }
+
+ private NicTO getDefaultNic(VirtualMachineTO vm) {
+ for (NicTO nic : vm.getNics()) {
+ if (nic.isDefaultNic()) {
+ return nic;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Returns all the MACs that are connected to the switch for this host.
+ * @param node MaasNode
+ * @return
+ */
+ protected List<String> getAllConnectedMacs(MaasObject.MaasNode node) {
+ Set<String> rackMacs = new HashSet<>();
+ Set<String> maasMacs = new HashSet<>();
+
+ List<BaremetalRctVO> vos = rctDao.listAll();
+ if (vos.isEmpty()) {
+ throw new CloudRuntimeException("no rack configuration found, please call addBaremetalRct to add one");
+ }
+
+ BaremetalRctVO vo = vos.get(0);
+ BaremetalRct rct = gson.fromJson(vo.getRct(), BaremetalRct.class);
+
+ for (BaremetalRct.Rack rack : rct.getRacks()) {
+ for (BaremetalRct.HostEntry host : rack.getHosts()) {
+ rackMacs.add(host.getMac());
+ }
+ }
+
+ for (MaasObject.MaasInterface maasInterface : node.interfaceSet) {
+ maasMacs.add(maasInterface.macAddress);
+ }
+
+ maasMacs.retainAll(rackMacs);
+ return new ArrayList<>(maasMacs);
+ }
+
+ protected boolean isConnectedInterface(MaasObject.MaasNode node, String macAddress) {
+ return getAllConnectedMacs(node).contains(macAddress);
+ }
+
+ public void setupMaasBonding(MaasObject.MaasNode node, String mac) throws IOException {
+ MaasObject.MaasInterface bondInterface = null;
+ List<Integer> phyInterfaceIds = new ArrayList<>();
+
+ for (MaasObject.MaasInterface maasInterface: node.interfaceSet) {
+ if (maasInterface.type.equals(MaasObject.InterfaceType.bond.toString())) {
+ bondInterface = maasInterface;
+ } else if (maasInterface.type.equals(MaasObject.InterfaceType.physical.toString())
+ && isConnectedInterface(node, maasInterface.macAddress)) {
+ phyInterfaceIds.add(maasInterface.id);
+ }
+ }
+
+ if (bondInterface == null) {
+ assert phyInterfaceIds.size() >= 2;
+ bondInterface = maasApi.createBondInterface(node.systemId, phyInterfaceIds);
+ }
+
+ MaasObject.MaasSubnet dhcpSubnet = maasApi.getDhcpSubnet();
+ maasApi.setInterface(node.systemId, bondInterface.id, bondInterface.links[0].id, dhcpSubnet.id, true);
+ maasApi.updateInterfaceMac(node.systemId, bondInterface.id, mac);
+ }
+
+ private void releaseVlan(int vlan, VlanType type, boolean releaseAll) {
+ for (String mac : getAllConnectedMacs(maasNode)) {
+ if (releaseAll) {
+ vlanMgr.releaseAllVlan(mac, type);
+ } else {
+ vlanMgr.releaseVlan(vlan, mac, type);
+ }
+ }
+ }
+
+ private void prepareVlan(int vlan, VlanType type) {
+ for (String mac : getAllConnectedMacs(maasNode)) {
+ vlanMgr.prepareVlan(vlan, mac, type);
+ }
+ }
+}
diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasServiceOfferingsResponse.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasServiceOfferingsResponse.java
new file mode 100644
index 000000000000..fbeccd5cb7d0
--- /dev/null
+++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasServiceOfferingsResponse.java
@@ -0,0 +1,90 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Automatically generated by addcopyright.py at 01/29/2013
+package org.apache.cloudstack.compute.maas;
+
+import com.google.gson.annotations.SerializedName;
+
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseResponse;
+import org.apache.cloudstack.api.EntityReference;
+
+import com.cloud.serializer.Param;
+
+@EntityReference(value = MaasServiceOfferingsResponse.class)
+public class MaasServiceOfferingsResponse extends BaseResponse {
+ @SerializedName(ApiConstants.BAREMETAL_MAAS_OFFERING_ID)
+ @Param(description = "service offering id")
+ private String offeringId;
+
+ @SerializedName(ApiConstants.BAREMETAL_MAAS_OFFERING_NAME)
+ @Param(description = "TODO")
+ private String offeringName;
+
+ @SerializedName(ApiConstants.BAREMETAL_MAAS_AVIALBALE_COUNT)
+ @Param(description = "TODO")
+ private Integer available;
+
+ @SerializedName(ApiConstants.BAREMETAL_MAAS_TOTAL_COUNT)
+ @Param(description = "TODO")
+ private Integer total;
+
+ @SerializedName(ApiConstants.BAREMETAL_MAAS_ERASING_COUNT)
+ @Param(description = "TODO")
+ private Integer erasing;
+
+ public String getOfferingId() {
+ return offeringId;
+ }
+
+ public void setOfferingId(String offeringId) {
+ this.offeringId = offeringId;
+ }
+
+ public String getOfferingName() {
+ return offeringName;
+ }
+
+ public void setOfferingName(String offeringName) {
+ this.offeringName = offeringName;
+ }
+
+ public Integer getAvailable() {
+ return available;
+ }
+
+ public void setAvailable(Integer available) {
+ this.available = available;
+ }
+
+ public Integer getTotal() {
+ return total;
+ }
+
+ public void setTotal(Integer total) {
+ this.total = total;
+ }
+
+ public Integer getErasing() {
+ return erasing;
+ }
+
+ public void setErasing(Integer erasing) {
+ this.erasing = erasing;
+ }
+}
diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/api/ListMaasServiceOfferingsCmd.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/api/ListMaasServiceOfferingsCmd.java
new file mode 100644
index 000000000000..2474303ed968
--- /dev/null
+++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/api/ListMaasServiceOfferingsCmd.java
@@ -0,0 +1,99 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Automatically generated by addcopyright.py at 01/29/2013
+// Apache License, Version 2.0 (the "License"); you may not use this
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// Automatically generated by addcopyright.py at 04/03/2012
+package org.apache.cloudstack.compute.maas.api;
+
+import java.util.List;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseListCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.BaseCmd.CommandType;
+import org.apache.cloudstack.api.response.ClusterResponse;
+import org.apache.cloudstack.api.response.ListResponse;
+import org.apache.cloudstack.api.response.ZoneResponse;
+import org.apache.cloudstack.compute.maas.MaasServiceOfferingsResponse;
+import org.apache.cloudstack.compute.maas.MaasManager;
+import org.apache.log4j.Logger;
+
+@APICommand(
+ name = "listMaasServiceOfferings",
+ description = "list baremetal maas service offerings",
+ responseObject = MaasServiceOfferingsResponse.class,
+ requestHasSensitiveInfo = false,
+ responseHasSensitiveInfo = false,
+ authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}
+)
+public class ListMaasServiceOfferingsCmd extends BaseListCmd {
+ private static final Logger LOGGER = Logger.getLogger(ListMaasServiceOfferingsCmd.class);
+ private static final String NAME = "listmaasserviceofferingsresponse";
+
+ @Inject
+ private MaasManager manager;
+
+ // ///////////////////////////////////////////////////
+ // ////////////// API parameters /////////////////////
+ // ///////////////////////////////////////////////////
+ @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "The ID of the zone to lists maas service offerings for")
+ private Long zoneId;
+
+ @Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, description = "The ID of the cluster to lists maas service offerings for")
+ private Long clusterId;
+
+ /////////////////////////////////////////////////////
+ /////////////////// Accessors ///////////////////////
+ /////////////////////////////////////////////////////
+ public Long getZoneId() {
+ return zoneId;
+ }
+ public Long getClusterId() {
+ return clusterId;
+ }
+
+ /////////////////////////////////////////////////////
+ /////////////// API Implementation///////////////////
+ /////////////////////////////////////////////////////
+ @Override
+ public void execute() {
+ try {
+ List<MaasServiceOfferingsResponse> responses = manager.listMaasServiceOfferings(this);
+ ListResponse<MaasServiceOfferingsResponse> response = new ListResponse<>();
+ response.setResponses(responses, responses.size());
+ response.setResponseName(getCommandName());
+ this.setResponseObject(response);
+ } catch (Exception e) {
+ LOGGER.debug("Exception happened while executing ListMaasServiceOfferingsCmd", e);
+ throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
+ }
+ }
+
+ @Override
+ public String getCommandName() {
+ return NAME;
+ }
+}
diff --git a/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/baremetal-discoverer/spring-baremetal-discoverer-context.xml b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/baremetal-discoverer/spring-baremetal-discoverer-context.xml
index 12b287714a84..476d2ccdb589 100644
--- a/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/baremetal-discoverer/spring-baremetal-discoverer-context.xml
+++ b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/baremetal-discoverer/spring-baremetal-discoverer-context.xml
@@ -27,6 +27,12 @@
http://www.springframework.org/schema/context/spring-context.xsd"
>
+
+
+
+
+
+
diff --git a/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/core/spring-baremetal-core-context.xml b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/core/spring-baremetal-core-context.xml
index 993fe40458c0..dc6ef492888a 100755
--- a/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/core/spring-baremetal-core-context.xml
+++ b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/core/spring-baremetal-core-context.xml
@@ -27,15 +27,21 @@
http://www.springframework.org/schema/context/spring-context.xsd"
>
+
+
+
+
diff --git a/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/maas-compute/module.properties b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/maas-compute/module.properties
new file mode 100644
index 000000000000..22aaa235210c
--- /dev/null
+++ b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/maas-compute/module.properties
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+name=maas-compute
+parent=compute
diff --git a/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/maas-compute/spring-maas-compute-context.xml b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/maas-compute/spring-maas-compute-context.xml
new file mode 100644
index 000000000000..77c549f39741
--- /dev/null
+++ b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/maas-compute/spring-maas-compute-context.xml
@@ -0,0 +1,30 @@
+
+
+
+
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
index d75d03d85a43..b05a61638a8e 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
@@ -2319,9 +2319,12 @@ public String configureVPCNetworkUsage(final String privateIpAddress, final Stri
}
public long[] getVPCNetworkStats(final String privateIP, final String publicIp, final String option) {
- final String result = configureVPCNetworkUsage(privateIP, publicIp, option, null);
+ String result = configureVPCNetworkUsage(privateIP, publicIp, option, null);
final long[] stats = new long[2];
if (result != null) {
+ if (result.contains(",")) {
+ result = result.split(",")[0];
+ }
final String[] splitResult = result.split(":");
int i = 0;
while (i < splitResult.length - 1) {
diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VirtualRoutingSupport.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VirtualRoutingSupport.java
index a9d673958779..1bcd258874d9 100644
--- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VirtualRoutingSupport.java
+++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VirtualRoutingSupport.java
@@ -147,6 +147,9 @@ private NetworkUsageAnswer vpcNetworkUsage(NetworkUsageCommand cmd) {
if (result == null || result.isEmpty()) {
LOGGER.error(" vpc network usage get returns empty ");
}
+ if (result.contains(",")) {
+ result = result.split(",")[0];
+ }
long[] stats = new long[2];
if (result != null) {
String[] splitResult = result.split(":");
diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index 04e9dd4cc969..959ce101e112 100644
--- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -1133,6 +1133,9 @@ protected NetworkUsageAnswer VPCNetworkUsage(NetworkUsageCommand cmd) {
if (result == null || result.isEmpty()) {
s_logger.error(" vpc network usage get returns empty ");
}
+ if (result.contains(",")) {
+ result = result.split(",")[0];
+ }
long[] stats = new long[2];
if (result != null) {
String[] splitResult = result.split(":");
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java
index 49af92cd51d0..ce8d10d94ae4 100644
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java
@@ -54,6 +54,8 @@
import org.apache.cloudstack.diagnostics.CopyToSecondaryStorageAnswer;
import org.apache.cloudstack.diagnostics.CopyToSecondaryStorageCommand;
import org.apache.cloudstack.diagnostics.DiagnosticsService;
+import org.apache.cloudstack.framework.config.ConfigKey;
+import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.hypervisor.xenserver.ExtraConfigurationUtility;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
@@ -180,12 +182,12 @@
* before you do any changes in this code here.
*
*/
-public abstract class CitrixResourceBase extends ServerResourceBase implements ServerResource, HypervisorResource, VirtualRouterDeployer {
+public abstract class CitrixResourceBase extends ServerResourceBase implements ServerResource, HypervisorResource, VirtualRouterDeployer, Configurable {
/**
* used to describe what type of resource a storage device is of
*/
public enum SRType {
- EXT, ISO, LVM, LVMOHBA, LVMOISCSI,
+ EXT, ISO, LVM, LVMOHBA, LVMOISCSI, VHDOISCSI, VDILUN,
/**
* used for resigning metadata (like SR UUID and VDI UUID when a
* particular storage manager is installed on a XenServer host (for back-end snapshots to work))
@@ -298,6 +300,13 @@ private static boolean isAlienVm(final VM vm, final Connection conn) throws XenA
protected String _configDriveSRName = "ConfigDriveISOs";
public String _attachIsoDeviceNum = "3";
+ public static final ConfigKey<String> XenServerManagedStorageSrType = new ConfigKey<>("Advanced", String.class,
+ "xenserver.managedstorage.srtype",
+ "lvmoiscsi",
+ "The type of SR to use when using managed storage for VDI-per-LUN (lvmoiscsi or vdilun)",
+ true,
+ ConfigKey.Scope.Zone);
+
protected XenServerUtilitiesHelper xenServerUtilitiesHelper = new XenServerUtilitiesHelper();
protected int _wait;
@@ -1218,7 +1227,7 @@ public VBD createVbd(final Connection conn, final DiskTO volume, final String vm
return vbd;
}
- public VDI createVdi(final SR sr, final String vdiNameLabel, final Long volumeSize) throws Types.XenAPIException, XmlRpcException {
+ public VDI createVdi(final SR sr, final String vdiNameLabel, final Long volumeSize, Map smConfig) throws Types.XenAPIException, XmlRpcException {
final Connection conn = getConnection();
final VDI.Record vdir = new VDI.Record();
@@ -1231,6 +1240,10 @@ public VDI createVdi(final SR sr, final String vdiNameLabel, final Long volumeSi
final long unavailableSrSpace = sr.getPhysicalUtilisation(conn);
final long availableSrSpace = totalSrSpace - unavailableSrSpace;
+ if (smConfig != null) {
+ vdir.smConfig = smConfig;
+ }
+
if (availableSrSpace < volumeSize) {
throw new CloudRuntimeException("Available space for SR cannot be less than " + volumeSize + ".");
}
@@ -1240,6 +1253,31 @@ public VDI createVdi(final SR sr, final String vdiNameLabel, final Long volumeSi
return VDI.create(conn, vdir);
}
+ public VDI introduceVDI(final SR sr, final String vdiNameLabel, final Long volumeSize, String uuid, String iqn) throws Types.XenAPIException, XmlRpcException {
+
+ final Connection conn = getConnection();
+ Map smConfig = new HashMap<>();
+
+ smConfig.put("targetIQN", iqn);
+
+ if (uuid == null) {
+ uuid = UUID.randomUUID().toString();
+ }
+ try {
+ return VDI.introduce(conn, uuid, vdiNameLabel, vdiNameLabel, sr, Types.VdiType.USER,
+ false, false, new HashMap(), uuid, new HashMap(),
+ smConfig, false, volumeSize, volumeSize, null, false, new Date(0), null);
+ } catch (Types.XenAPIException e) {
+ if (e.shortDescription.contains("VDI could not be found") || ((e instanceof Types.InternalError) && ((Types.InternalError)e).message.contains("Vdi_does_not_exist"))) {
+ // We could not find a VDI, this can happen when we try to attach a newly created
+ // We return null here. For all other exceptions, we raise them
+ return null;
+ }
+ s_logger.error("Error introducing VDI " + e.getMessage());
+ throw new CloudRuntimeException(e.getMessage());
+ }
+ }
+
public void createVGPU(final Connection conn, final StartCommand cmd, final VM vm, final GPUDeviceTO gpuDevice) throws XenAPIException, XmlRpcException {
}
@@ -1369,6 +1407,10 @@ public VM createVmFromTemplate(final Connection conn, final VirtualMachineTO vmS
}
}
+ if(vmSpec.getFormat().equals(Storage.ImageFormat.PXEBOOT)) {
+ vmr.HVMBootParams.put("order", "ndc");
+ }
+
final VM vm = VM.create(conn, vmr);
s_logger.debug("Created VM " + vm.getUuid(conn) + " for " + vmSpec.getName());
@@ -1941,6 +1983,7 @@ protected void finalizeVmMetaData(final VM vm, final VM.Record vmr, final Connec
final Map platform = com.cloud.utils.StringUtils.stringToMap(platformstring);
syncPlatformAndCoresPerSocketSettings(coresPerSocket, platform);
vm.setPlatform(conn, platform);
+ calculateCorePerSocket(vm, conn, vmSpec);
} else {
final String timeoffset = details.get(VmDetailConstants.TIME_OFFSET);
if (timeoffset != null) {
@@ -1948,9 +1991,13 @@ protected void finalizeVmMetaData(final VM vm, final VM.Record vmr, final Connec
platform.put(VmDetailConstants.TIME_OFFSET, timeoffset);
vm.setPlatform(conn, platform);
}
- if (coresPerSocket != null) {
+
+ calculateCorePerSocket(vm, conn, vmSpec);
+
+ final String nestedHvm = details.get("nested.hvm");
+ if (nestedHvm != null) {
final Map platform = vm.getPlatform(conn);
- syncPlatformAndCoresPerSocketSettings(coresPerSocket, platform);
+ platform.put("exp-nested-hvm", nestedHvm);
vm.setPlatform(conn, platform);
}
}
@@ -1986,6 +2033,18 @@ protected void setVmBootDetails(final VM vm, final Connection conn, String bootT
vm.setPlatform(conn, platform);
}
+ private void calculateCorePerSocket(final VM vm, final Connection conn, final VirtualMachineTO vmSpec) throws XmlRpcException, XenAPIException {
+ int coresPerSocketCalculated;
+ int cpus = vmSpec.getCpus();
+ if (cpus % 2 == 1) {
+ coresPerSocketCalculated = 1;
+ } else {
+ coresPerSocketCalculated = cpus / 2;
+ }
+ final Map platform = vm.getPlatform(conn);
+ platform.put("cores-per-socket", Integer.toString(coresPerSocketCalculated));
+ vm.setPlatform(conn, platform);
+ }
/**
* This method just creates a XenServer network following the tunnel network
* naming convention
@@ -2497,6 +2556,61 @@ public SR getIscsiSR(final Connection conn, final String srNameLabel, final Stri
}
}
+ public SR getVdiLunSr(Connection conn, String storageHost) {
+ try {
+ final Map deviceConfig = new HashMap();
+ final Set srs = SR.getAll(conn);
+ for (final SR sr : srs) {
+ if (!(SRType.VDILUN.equals(sr.getType(conn)))) {
+ continue;
+ }
+ final Set pbds = sr.getPBDs(conn);
+ if (pbds.isEmpty()) {
+ continue;
+ }
+
+ final PBD pbd = pbds.iterator().next();
+ final Map dc = pbd.getDeviceConfig(conn);
+ if (dc == null) {
+ continue;
+ }
+ if (dc.get("target") == null) {
+ continue;
+ }
+
+ if (storageHost.equals(dc.get("target"))) {
+ return sr;
+ }
+ }
+
+ // came here, could not find an SR, create one
+ deviceConfig.put("target", storageHost);
+ String srNameLabel = "Cloudstack-VDILUN-SR-" + storageHost;
+ final Host host = Host.getByUuid(conn, _host.getUuid());
+ return SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, SRType.VDILUN.toString(),
+ "user", true, new HashMap());
+ } catch (Exception e) {
+ String mesg = "Unable to find/create VDILUN SR due to: " + e.getMessage();
+ s_logger.warn(mesg);
+ throw new CloudRuntimeException(mesg);
+ }
+ }
+
+ public String getTargetIqn(String iqnPath) {
+ if (iqnPath.endsWith("/")) {
+ iqnPath = iqnPath.substring(0, iqnPath.length() - 1);
+ }
+
+ final String tmp[] = iqnPath.split("/");
+ if (tmp.length != 3) {
+ final String msg = "Wrong iscsi path " + iqnPath + " it should be /targetIQN/LUN";
+ s_logger.warn(msg);
+ throw new CloudRuntimeException(msg);
+ }
+ final String targetiqn = tmp[1].trim();
+ return targetiqn;
+ }
+
private SR introduceAndPlugIscsiSr(Connection conn, String pooluuid, String srNameLabel, String type, Map smConfig, Map deviceConfig,
boolean ignoreIntroduceException) throws XmlRpcException, XenAPIException {
SR sr = null;
@@ -3586,10 +3700,24 @@ protected String getXMLNodeValue(final Node n) {
return n.getChildNodes().item(0).getNodeValue();
}
- public void handleSrAndVdiDetach(final String iqn, final Connection conn) throws Exception {
- final SR sr = getStorageRepository(conn, iqn);
+ public void handleManagedSrAndVdiDetach(final String iqn, final String storageHost, final Connection conn) throws Exception {
+ SR sr = null;
+ if (SRType.VDILUN.equals(XenServerManagedStorageSrType.value())) {
+ sr = getVdiLunSr(conn, storageHost);
+ String targetIqn = getTargetIqn(iqn);
+ VDI vdi = getVDIbyLocationandSR(conn, targetIqn, sr);
+ if (vdi != null){
+ vdi.forget(conn);
+ }
+
+ } else {
+ sr = getStorageRepository(conn, iqn);
+ removeSR(conn, sr);
+ }
+ }
+
+ public void handleManagedSrRemove() {
- removeSR(conn, sr);
}
protected void destroyUnattachedVBD(Connection conn, VM vm) {
@@ -4234,6 +4362,14 @@ public VDI prepareManagedDisk(final Connection conn, final DiskTO disk, final lo
return null;
}
+ // TODO for VDILUN sr, we need to first find the SR by the target IP (not by IQN)
+ // then if such an SR exists, we have to look at its sm_config map to see if
+ // a VDI exists which matches the given IQN. If we find such a VDI, we return it,
+ // else, we *introduce* that VDI into the SR, this will ensure that the data on
+ // the LUN is not zeroed out (VDI create does that). Now there is a caveat, if the
+ // volume is cloned, we need to introduce it, if it is a fresh volume, we need to
+ // create it (as the LUN will not have the VDI inside it yet)
+
final String iqn = details.get(DiskTO.IQN);
final Set srNameLabels = SR.getByNameLabel(conn, iqn);
@@ -4244,7 +4380,7 @@ public VDI prepareManagedDisk(final Connection conn, final DiskTO disk, final lo
final String vdiNameLabel = Volume.Type.ROOT.equals(disk.getType()) ? ("ROOT-" + vmId) : (vmName + "-DATA");
- return prepareManagedStorage(conn, details, null, vdiNameLabel);
+ return prepareManagedStorage(conn, details, disk.getPath(), vdiNameLabel);
}
protected SR prepareManagedSr(final Connection conn, final Map details) {
@@ -4261,6 +4397,8 @@ protected SR prepareManagedSr(final Connection conn, final Map d
final String volumedesc = storageHost + ":" + mountpoint;
return getNfsSR(conn, poolid, namelable, storageHost, mountpoint, volumedesc);
+ } else if (SRType.VDILUN.equals(XenServerManagedStorageSrType.value())) {
+ return getVdiLunSr(conn, storageHost);
} else {
return getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, false, SRType.LVMOISCSI.toString(), true);
}
@@ -4271,16 +4409,28 @@ protected VDI prepareManagedStorage(final Connection conn, final Map smConfig = new HashMap<>();
+ String iqn = getTargetIqn(details.get(DiskTO.IQN));
+ smConfig.put("targetIQN", iqn);
Set vdisInSr = sr.getVDIs(conn);
- // If a VDI already exists in the SR (in case we cloned from a template cache), use that.
- if (vdisInSr.size() == 1) {
- vdi = vdisInSr.iterator().next();
+ if (SRType.VDILUN.equals(XenServerManagedStorageSrType.value())) {
+
+ vdi = getVDIbyLocationandSR(conn, iqn, sr);
+ if (vdi == null) {
+ vdi = introduceVDI(sr, vdiNameLabel, volumeSize, path, iqn);
+ }
+
+ } else {
+ // If a VDI already exists in the SR (in case we cloned from a template cache), use that.
+ if (vdisInSr.size() == 1) {
+ vdi = vdisInSr.iterator().next();
+ }
}
if (vdi == null) {
- vdi = createVdi(sr, vdiNameLabel, volumeSize);
+ vdi = createVdi(sr, vdiNameLabel, volumeSize, smConfig);
} else {
// If vdi is not null, it must have already been created, so check whether a resize of the volume was performed.
// If true, resize the VDI to the volume size.
@@ -4707,6 +4857,14 @@ public void scaleVM(final Connection conn, final VM vm, final VirtualMachineTO v
// vm.addToVCPUsParamsLive(conn, "weight",
// Integer.toString(cpuWeight));
callHostPlugin(conn, "vmops", "add_to_VCPUs_params_live", "key", "weight", "value", Integer.toString(cpuWeight), "vmname", vmSpec.getName());
+
+ // Recalculating cores per socket
+ final VM.Record vmr = vm.getRecord(conn);
+ try {
+ finalizeVmMetaData(vm, vmr, conn, vmSpec);
+ } catch (final Exception e) {
+ throw new CloudRuntimeException("Unable to finalize VM MetaData: " + vmSpec);
+ }
}
}
@@ -5750,4 +5908,12 @@ private void umountNfs(Connection conn, String remoteDir, String localDir) {
s_logger.warn(errMsg);
}
}
+
+ public ConfigKey>[] getConfigKeys(){
+ return new ConfigKey>[] {XenServerManagedStorageSrType};
+ }
+
+ public String getConfigComponentName(){
+ return CitrixResourceBase.class.getSimpleName();
+ }
}
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java
index 7c289de80c75..0640c6a88208 100644
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java
@@ -75,6 +75,7 @@
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.resource.StorageProcessor;
+import com.cloud.utils.NumbersUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.storage.S3.ClientOptions;
import com.google.common.annotations.VisibleForTesting;
@@ -175,6 +176,14 @@ public ResignatureAnswer resignature(final ResignatureCommand cmd) {
final String storageHost = details.get(DiskTO.STORAGE_HOST);
final String chapInitiatorUsername = details.get(DiskTO.CHAP_INITIATOR_USERNAME);
final String chapInitiatorSecret = details.get(DiskTO.CHAP_INITIATOR_SECRET);
+ final ResignatureAnswer resignatureAnswer = new ResignatureAnswer();
+
+ if (SRType.VDILUN.equals(CitrixResourceBase.XenServerManagedStorageSrType.value())) {
+ resignatureAnswer.setSize(NumbersUtil.parseLong(details.get(DiskTO.VOLUME_SIZE), 0));
+ resignatureAnswer.setPath(details.get(DiskTO.PATH));
+ resignatureAnswer.setFormat(ImageFormat.VHD);
+ return resignatureAnswer;
+ }
newSr = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, true, false);
@@ -186,8 +195,6 @@ public ResignatureAnswer resignature(final ResignatureCommand cmd) {
VDI vdi = vdis.iterator().next();
- final ResignatureAnswer resignatureAnswer = new ResignatureAnswer();
-
resignatureAnswer.setSize(vdi.getVirtualSize(conn));
resignatureAnswer.setPath(vdi.getUuid(conn));
resignatureAnswer.setFormat(ImageFormat.VHD);
@@ -496,7 +503,11 @@ public Answer dettachVolume(final DettachCommand cmd) {
}
if (cmd.isManaged()) {
- hypervisorResource.handleSrAndVdiDetach(cmd.get_iScsiName(), conn);
+
+ final PrimaryDataStoreTO store = (PrimaryDataStoreTO) data.getDataStore();
+ String storageHost = store.getHost();
+
+ hypervisorResource.handleManagedSrAndVdiDetach(cmd.get_iScsiName(), storageHost, conn);
}
return new DettachAnswer(disk);
@@ -506,11 +517,14 @@ public Answer dettachVolume(final DettachCommand cmd) {
}
}
- protected VDI createVdi(final Connection conn, final String vdiName, final SR sr, final long size) throws BadServerResponse, XenAPIException, XmlRpcException {
+ protected VDI createVdi(final Connection conn, final String vdiName, final SR sr, final long size, Map smConfig) throws BadServerResponse, XenAPIException, XmlRpcException {
final VDI.Record vdir = new VDI.Record();
vdir.nameLabel = vdiName;
vdir.SR = sr;
vdir.type = Types.VdiType.USER;
+ if (smConfig != null) {
+ vdir.smConfig = smConfig;
+ }
vdir.virtualSize = size;
final VDI vdi = VDI.create(conn, vdir);
@@ -601,7 +615,7 @@ public Answer deleteVolume(final DeleteCommand cmd) {
}
protected boolean IsISCSI(final String type) {
- return SRType.LVMOHBA.equals(type) || SRType.LVMOISCSI.equals(type) || SRType.LVM.equals(type);
+ return SRType.LVMOHBA.equals(type) || SRType.LVMOISCSI.equals(type) || SRType.LVM.equals(type) || SRType.VDILUN.equals(type);
}
private String copy_vhd_from_secondarystorage(final Connection conn, final String mountpoint, final String sruuid, final int wait) {
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java
index 68236f92ac44..f6a3de30db30 100644
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java
@@ -259,6 +259,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) {
final Connection conn = hypervisorResource.getConnection();
SR srcSr = null;
SR destSr = null;
+ VDI destVdi = null;
boolean removeSrAfterCopy = false;
Task task = null;
@@ -329,6 +330,14 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) {
details.put(DiskTO.CHAP_INITIATOR_SECRET, chapInitiatorSecret);
destSr = hypervisorResource.prepareManagedSr(conn, details);
+ if (CitrixResourceBase.SRType.VDILUN.equals(CitrixResourceBase.XenServerManagedStorageSrType.value())) {
+ // we create a destination VDI as the SR is just a placeholder for LUNs
+ Map smConfig = new HashMap<>();
+ //TODO: Auth
+ smConfig.put("targetIQN", hypervisorResource.getTargetIqn(managedStoragePoolName));
+ destVdi = createVdi(conn, managedStoragePoolRootVolumeName, destSr,
+ Long.parseLong(managedStoragePoolRootVolumeSize), smConfig);
+ }
} else {
final String srName = CitrixHelper.getSRNameLabel(destStore.getUuid(), destStore.getPoolType(), destStore.getPath());
final Set srs = SR.getByNameLabel(conn, srName);
@@ -344,7 +353,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) {
}
}
- task = srcVdi.copyAsync(conn, destSr, null, null);
+ task = srcVdi.copyAsync(conn, destSr, null, destVdi);
// poll every 1 seconds ,
hypervisorResource.waitForTask(conn, task, 1000, wait * 1000);
@@ -409,6 +418,14 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) {
}
if (removeSrAfterCopy && destSr != null) {
+ if (CitrixResourceBase.SRType.VDILUN.equals(CitrixResourceBase.XenServerManagedStorageSrType.value()) &&
+ destVdi != null) {
+ try {
+ destVdi.dbForget(conn);
+ } catch (XenAPIException | XmlRpcException e) {
+ s_logger.warn(e);
+ }
+ }
hypervisorResource.removeSR(conn, destSr);
}
}
@@ -524,6 +541,8 @@ public Answer backupSnapshot(final CopyCommand cmd) {
String snapshotBackupUuid = null;
boolean fullbackup = Boolean.parseBoolean(options.get("fullSnapshot"));
Long physicalSize = null;
+ VDI srcVdi = null;
+
try {
SR primaryStorageSR = null;
@@ -536,11 +555,27 @@ public Answer backupSnapshot(final CopyCommand cmd) {
final String storageHost = srcDetails.get(DiskTO.STORAGE_HOST);
final String chapInitiatorUsername = srcDetails.get(DiskTO.CHAP_INITIATOR_USERNAME);
final String chapInitiatorSecret = srcDetails.get(DiskTO.CHAP_INITIATOR_SECRET);
- final String srType = CitrixResourceBase.SRType.LVMOISCSI.toString();
+ final String srType = CitrixResourceBase.XenServerManagedStorageSrType.value();
+
+ if (CitrixResourceBase.SRType.VDILUN.equals(srType)){
+ //introduce the IQN VDI
+ String targetIqn = hypervisorResource.getTargetIqn(iScsiName);
+ primaryStorageSR = hypervisorResource.getVdiLunSr(conn, storageHost);
+ srcVdi = hypervisorResource.getVDIbyLocationandSR(conn, targetIqn, primaryStorageSR);
+
+ if (srcVdi == null) {
+ String tempUuid = UUID.randomUUID().toString();
+ srcVdi = hypervisorResource.introduceVDI(primaryStorageSR, snapshotTO.getName(), snapshotTO.getPhysicalSize(),
+ tempUuid, targetIqn);
+ }
- primaryStorageSR = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, false, srType, true);
+ } else {
+
+ primaryStorageSR = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName,
+ chapInitiatorUsername, chapInitiatorSecret, false, srType, true);
+ srcVdi = primaryStorageSR.getVDIs(conn).iterator().next();
+ }
- final VDI srcVdi = primaryStorageSR.getVDIs(conn).iterator().next();
if (srcVdi == null) {
throw new InternalErrorException("Could not Find a VDI on the SR: " + primaryStorageSR.getNameLabel(conn));
}
@@ -565,6 +600,9 @@ public Answer backupSnapshot(final CopyCommand cmd) {
final String folder = destPath;
String finalPath = null;
+ // make sure if secondary storage is capable of doing partial backup or not
+ fullbackup = fullbackup || !destStore.isPartialBackupCapable();
+
final String localMountPoint = BaseMountPointOnHost + File.separator + UUID.nameUUIDFromBytes(secondaryStorageUrl.getBytes()).toString();
if (fullbackup) {
SR snapshotSr = null;
@@ -636,7 +674,12 @@ public Answer backupSnapshot(final CopyCommand cmd) {
}
if (primaryStore.isManaged()) {
- hypervisorResource.removeSR(conn, primaryStorageSR);
+ if (CitrixResourceBase.SRType.VDILUN.equals(CitrixResourceBase.XenServerManagedStorageSrType.value()) &&
+ srcVdi != null) {
+ srcVdi.forget(conn);
+ } else {
+ hypervisorResource.removeSR(conn, primaryStorageSR);
+ }
}
}
} else {
@@ -823,8 +866,10 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) {
}
SR srcSr = null;
VDI destVdi = null;
+ Map smConfig = null;
SR primaryStorageSR = null;
+ final String srType = CitrixResourceBase.XenServerManagedStorageSrType.value();
try {
if (pool.isManaged()) {
@@ -834,9 +879,18 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) {
final String storageHost = destDetails.get(DiskTO.STORAGE_HOST);
final String chapInitiatorUsername = destDetails.get(DiskTO.CHAP_INITIATOR_USERNAME);
final String chapInitiatorSecret = destDetails.get(DiskTO.CHAP_INITIATOR_SECRET);
- final String srType = CitrixResourceBase.SRType.LVMOISCSI.toString();
- primaryStorageSR = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, false, srType, true);
+
+ if (CitrixResourceBase.SRType.VDILUN.equals(srType)) {
+
+ primaryStorageSR = hypervisorResource.getVdiLunSr(conn, storageHost);
+ smConfig = new HashMap<>();
+ smConfig.put("targetIQN", hypervisorResource.getTargetIqn(iScsiName));
+
+ } else {
+ primaryStorageSR = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName,
+ chapInitiatorUsername, chapInitiatorSecret, false, srType, true);
+ }
} else {
primaryStorageSR = hypervisorResource.getSRByNameLabelandHost(conn, primaryStorageNameLabel);
@@ -847,7 +901,7 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) {
}
final String nameLabel = "cloud-" + UUID.randomUUID().toString();
- destVdi = createVdi(conn, nameLabel, primaryStorageSR, volume.getSize());
+ destVdi = createVdi(conn, nameLabel, primaryStorageSR, volume.getSize(), smConfig);
volumeUUID = destVdi.getUuid(conn);
final String snapshotInstallPath = snapshot.getPath();
final int index = snapshotInstallPath.lastIndexOf(File.separator);
@@ -899,7 +953,17 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) {
}
if (pool.isManaged()) {
- hypervisorResource.removeSR(conn, primaryStorageSR);
+ if (CitrixResourceBase.SRType.VDILUN.equals(srType)) {
+ if (destVdi != null) {
+ try {
+ destVdi.forget(conn);
+ } catch (Exception e) {
+ s_logger.warn("Error removing vdi after copy " + e.getMessage());
+ }
+ }
+ } else {
+ hypervisorResource.removeSR(conn, primaryStorageSR);
+ }
}
if (!result && destVdi != null) {
@@ -1059,7 +1123,7 @@ public Answer createTemplateFromSnapshot(final CopyCommand cmd) {
final DataTO destData = cmd.getDestTO();
if (srcData.getDataStore() instanceof PrimaryDataStoreTO && destData.getDataStore() instanceof NfsTO) {
- return createTemplateFromSnapshot2(cmd);
+ return createTemplateFromSnapshotManagedStorage(cmd);
}
final int wait = cmd.getWait();
@@ -1129,7 +1193,7 @@ public Answer createTemplateFromSnapshot(final CopyCommand cmd) {
final long templateVirtualSize = snapshotChains.get(0).getVirtualSize(conn);
- destVdi = createVdi(conn, nameLabel, destSr, templateVirtualSize);
+ destVdi = createVdi(conn, nameLabel, destSr, templateVirtualSize, null);
final String destVdiUuid = destVdi.getUuid(conn);
@@ -1198,7 +1262,7 @@ public Answer createTemplateFromSnapshot(final CopyCommand cmd) {
}
}
- private Answer createTemplateFromSnapshot2(final CopyCommand cmd) {
+ public Answer createTemplateFromSnapshotManagedStorage(final CopyCommand cmd) {
final Connection conn = hypervisorResource.getConnection();
final SnapshotObjectTO snapshotObjTO = (SnapshotObjectTO)cmd.getSrcTO();
@@ -1227,6 +1291,8 @@ private Answer createTemplateFromSnapshot2(final CopyCommand cmd) {
VDI destVdi = null;
boolean result = false;
+ String srType = CitrixResourceBase.XenServerManagedStorageSrType.value();
+ VDI srcVdi = null;
try {
final Map srcDetails = cmd.getOptions();
@@ -1235,11 +1301,23 @@ private Answer createTemplateFromSnapshot2(final CopyCommand cmd) {
final String storageHost = srcDetails.get(DiskTO.STORAGE_HOST);
final String chapInitiatorUsername = srcDetails.get(DiskTO.CHAP_INITIATOR_USERNAME);
final String chapInitiatorSecret = srcDetails.get(DiskTO.CHAP_INITIATOR_SECRET);
- String srType;
- srType = CitrixResourceBase.SRType.LVMOISCSI.toString();
+ if (CitrixResourceBase.SRType.VDILUN.equals(srType)) {
+ String iqn = hypervisorResource.getTargetIqn(iScsiName);
+ srcSr = hypervisorResource.getVdiLunSr(conn, storageHost);
+ srcVdi = hypervisorResource.getVDIbyLocationandSR(conn, iqn, srcSr);
- srcSr = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, false, srType, true);
+ if (srcVdi == null) {
+ String tempUuid = UUID.randomUUID().toString();
+ srcVdi = hypervisorResource.introduceVDI(srcSr, snapshotObjTO.getName(), snapshotObjTO.getPhysicalSize(),
+ tempUuid, iqn);
+ }
+
+ } else {
+ srcSr = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, false, srType, true);
+ // there should only be one VDI in this SR
+ srcVdi = srcSr.getVDIs(conn).iterator().next();
+ }
final String destNfsPath = destUri.getHost() + ":" + destUri.getPath();
final String localDir = BASE_MOUNT_POINT_ON_REMOTE + UUID.nameUUIDFromBytes(destNfsPath.getBytes());
@@ -1250,8 +1328,6 @@ private Answer createTemplateFromSnapshot2(final CopyCommand cmd) {
destSr = createFileSR(conn, localDir + "/" + destDir);
- // there should only be one VDI in this SR
- final VDI srcVdi = srcSr.getVDIs(conn).iterator().next();
destVdi = srcVdi.copy(conn, destSr);
@@ -1314,7 +1390,17 @@ private Answer createTemplateFromSnapshot2(final CopyCommand cmd) {
}
if (srcSr != null) {
- hypervisorResource.removeSR(conn, srcSr);
+ if (CitrixResourceBase.SRType.VDILUN.equals(srType)) {
+ if (srcVdi != null) {
+ try {
+ srcVdi.forget(conn);
+ } catch (Exception e) {
+ s_logger.warn("Error cleaning srcVdi for src snapshot " + snapshotObjTO.getId());
+ }
+ }
+ } else {
+ hypervisorResource.removeSR(conn, srcSr);
+ }
}
if (destSr != null) {
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56NetworkUsageCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56NetworkUsageCommandWrapper.java
index ad414a4ea321..dff8271d805b 100644
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56NetworkUsageCommandWrapper.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56NetworkUsageCommandWrapper.java
@@ -79,13 +79,17 @@ protected NetworkUsageAnswer executeNetworkUsage(final NetworkUsageCommand comma
}
final ExecutionResult result = xenServer56.executeInVR(command.getPrivateIP(), "vpc_netusage.sh", args);
- final String detail = result.getDetails();
+ String detail = result.getDetails();
if (!result.isSuccess()) {
throw new Exception(" vpc network usage plugin call failed ");
}
+
if (option.equals("get") || option.equals("vpn")) {
final long[] stats = new long[2];
if (detail != null) {
+ if (detail.contains(",")) {
+ detail = detail.split(",")[0];
+ }
final String[] splitResult = detail.split(":");
int i = 0;
while (i < splitResult.length - 1) {
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java
index 766335812390..9f2cdccb84f2 100644
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java
@@ -64,12 +64,18 @@ public Answer execute(final DeleteStoragePoolCommand command, final CitrixResour
return answer;
} catch (final Exception e) {
- final String msg = "DeleteStoragePoolCommand XenAPIException:" + e.getMessage() + " host:" + citrixResourceBase.getHost().getUuid() +
- " pool: " + poolTO.getHost() + poolTO.getPath();
+ // if error is "Can not see storage pool" return "success" it most
+ // probably has been already removed, otherwise throw an actual error.
+ if (e.getMessage().contains("Can not see storage pool")) {
+ return new Answer(command, true, "success");
+ } else {
+ final String msg = "DeleteStoragePoolCommand XenAPIException:" + e.getMessage() + " host:" + citrixResourceBase.getHost().getUuid() +
+ " pool: " + poolTO.getHost() + poolTO.getPath();
- s_logger.error(msg, e);
+ s_logger.error(msg, e);
- return new Answer(command, false, msg);
+ return new Answer(command, false, msg);
+ }
}
}
}
\ No newline at end of file
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java
index 888aa81d3746..83e3116472b1 100644
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java
@@ -48,12 +48,25 @@ public Answer execute(final ModifyStoragePoolCommand command, final CitrixResour
final Connection conn = citrixResourceBase.getConnection();
final StorageFilerTO pool = command.getPool();
final boolean add = command.getAdd();
+ final Map tInfo = new HashMap();
+
+
if (add) {
try {
String srName = command.getStoragePath();
if (srName == null) {
srName = CitrixHelper.getSRNameLabel(pool.getUuid(), pool.getType(), pool.getPath());
}
+
+ if(CitrixResourceBase.SRType.VDILUN.equals(CitrixResourceBase.XenServerManagedStorageSrType.value()) &&
+ pool.isManaged()){
+
+ final SR sr = citrixResourceBase.getVdiLunSr(conn, pool.getHost());
+ long capacity = sr.getPhysicalSize(conn); // TODO handle this gracefully
+
+ return new ModifyStoragePoolAnswer(command, capacity, capacity, tInfo);
+ }
+
final SR sr = citrixResourceBase.getStorageRepository(conn, srName);
citrixResourceBase.setupHeartbeatSr(conn, sr, false);
final long capacity = sr.getPhysicalSize(conn);
@@ -63,7 +76,6 @@ public Answer execute(final ModifyStoragePoolCommand command, final CitrixResour
s_logger.warn(msg);
return new Answer(command, false, msg);
}
- final Map tInfo = new HashMap();
final ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(command, capacity, available, tInfo);
return answer;
} catch (final XenAPIException e) {
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java
index e7505cc2f34e..711ee88ae24c 100755
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java
@@ -59,9 +59,11 @@ public Answer execute(final ResizeVolumeCommand command, final CitrixResourceBas
resizeSr(conn, command);
}
- VDI vdi = citrixResourceBase.getVDIbyUuid(conn, volId);
+ VDI vdi = citrixResourceBase.getVDIbyUuid(conn, volId, false);
- vdi.resize(conn, newSize);
+ if (vdi != null) {
+ vdi.resize(conn, newSize);
+ }
return new ResizeVolumeAnswer(command, true, "success", newSize);
} catch (Exception ex) {
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStopCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStopCommandWrapper.java
index 1a74ff4385bf..bf9867cd05fc 100644
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStopCommandWrapper.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStopCommandWrapper.java
@@ -43,6 +43,8 @@
import com.xensource.xenapi.SR;
import com.xensource.xenapi.Types.VmPowerState;
import com.xensource.xenapi.Types.XenAPIException;
+import com.xensource.xenapi.VBD;
+import com.xensource.xenapi.VDI;
import com.xensource.xenapi.VGPU;
import com.xensource.xenapi.VIF;
import com.xensource.xenapi.VM;
@@ -138,6 +140,19 @@ public Answer execute(final StopCommand command, final CitrixResourceBase citrix
command.setGpuDevice(new GPUDeviceTO(null, null, groupDetails));
}
+ if (CitrixResourceBase.SRType.VDILUN.equals(CitrixResourceBase.XenServerManagedStorageSrType.value())) {
+ Set vbds = vm.getVBDs(conn);
+ for (VBD vbd : vbds) {
+ VDI vdi = vbd.getVDI(conn);
+ if (!vdi.isNull()) {
+ SR sr = vdi.getSR(conn);
+ if (sr.getType(conn).equals(CitrixResourceBase.SRType.VDILUN.toString())) {
+ vdi.forget(conn);
+ }
+ }
+ }
+ }
+
final Set vifs = vm.getVIFs(conn);
final List networks = new ArrayList();
for (final VIF vif : vifs) {
diff --git a/plugins/network-elements/nuage-vsp/pom.xml b/plugins/network-elements/nuage-vsp/pom.xml
new file mode 100644
index 000000000000..a54414c233a4
--- /dev/null
+++ b/plugins/network-elements/nuage-vsp/pom.xml
@@ -0,0 +1,46 @@
+
+
+ 4.0.0
+ cloud-plugin-network-vsp
+ Apache CloudStack Plugin - Nuage VSP
+
+ org.apache.cloudstack
+ cloudstack-plugins
+ 4.13.2.0-SNAPSHOT
+ ../../pom.xml
+
+
+
+ nuage-vsp
+ http://cs.mv.nuagenetworks.net/releases/
+
+
+
+ 1.0.8
+
+
+
+ net.nuagenetworks.vsp
+ nuage-vsp-acs-client
+ ${nuage.vsp.client.version}
+
+
+
diff --git a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java
index 9bae4bd19e6f..658e691d19e5 100644
--- a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java
+++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java
@@ -19,19 +19,6 @@
package org.apache.cloudstack.network.opendaylight;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import javax.inject.Inject;
-import javax.naming.ConfigurationException;
-
-import org.apache.log4j.Logger;
-import org.springframework.stereotype.Component;
-
-import org.apache.cloudstack.network.opendaylight.agent.commands.StartupOpenDaylightControllerCommand;
-
import com.cloud.agent.api.StartupCommand;
import com.cloud.deploy.DeployDestination;
import com.cloud.exception.ConcurrentOperationException;
@@ -55,6 +42,16 @@
import com.cloud.vm.NicProfile;
import com.cloud.vm.ReservationContext;
import com.cloud.vm.VirtualMachineProfile;
+import org.apache.cloudstack.network.opendaylight.agent.commands.StartupOpenDaylightControllerCommand;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
@Component
public class OpendaylightElement extends AdapterBase implements ConnectivityProvider, ResourceStateAdapter {
@@ -156,7 +153,7 @@ public HostVO createHostVOForDirectConnectAgent(HostVO host, StartupCommand[] st
@Override
public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, boolean isForceDeleteStorage) throws UnableDeleteHostException {
- return new DeleteHostAnswer(true);
+ return null;
}
private static Map> setCapabilities() {
diff --git a/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackImageStoreDriverImpl.java b/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackImageStoreDriverImpl.java
index 8abf802d9de6..5017bf90d7cb 100644
--- a/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackImageStoreDriverImpl.java
+++ b/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackImageStoreDriverImpl.java
@@ -22,11 +22,6 @@
import javax.inject.Inject;
-import com.cloud.agent.api.storage.DeleteEntityDownloadURLCommand;
-import com.cloud.host.dao.HostDao;
-import com.cloud.storage.Upload;
-import org.apache.log4j.Logger;
-
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
@@ -35,13 +30,17 @@
import org.apache.cloudstack.storage.image.NfsImageStoreDriverImpl;
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
import org.apache.cloudstack.storage.image.store.ImageStoreImpl;
+import org.apache.log4j.Logger;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.storage.CreateEntityDownloadURLCommand;
+import com.cloud.agent.api.storage.DeleteEntityDownloadURLCommand;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.configuration.Config;
+import com.cloud.host.dao.HostDao;
import com.cloud.storage.Storage.ImageFormat;
+import com.cloud.storage.Upload;
import com.cloud.utils.exception.CloudRuntimeException;
public class CloudStackImageStoreDriverImpl extends NfsImageStoreDriverImpl {
@@ -73,6 +72,7 @@ public String createEntityExtractUrl(DataStore store, String installPath, ImageF
String uuid = UUID.randomUUID().toString() + "." + format.getFileExtension();
CreateEntityDownloadURLCommand cmd = new CreateEntityDownloadURLCommand(((ImageStoreEntity)store).getMountPoint(),
path, uuid, dataObject == null ? null: dataObject.getTO());
+ cmd.setSecUrl(((ImageStoreEntity) store).getUrl());
Answer ans = null;
if (ep == null) {
String errMsg = "No remote endpoint to send command, check if host or ssvm is down?";
diff --git a/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java b/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java
index 7e1486214bcf..6afb4a052af0 100644
--- a/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java
+++ b/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java
@@ -18,17 +18,16 @@
*/
package org.apache.cloudstack.storage.datastore.driver;
-import java.net.URL;
-import java.util.Map;
-import java.util.UUID;
-
-import javax.inject.Inject;
-
+import com.cloud.agent.api.storage.DownloadAnswer;
+import com.cloud.agent.api.to.DataObjectType;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.SwiftTO;
import com.cloud.configuration.Config;
+import com.cloud.storage.RegisterVolumePayload;
+import com.cloud.storage.Storage.ImageFormat;
+import com.cloud.storage.download.DownloadMonitor;
import com.cloud.utils.SwiftUtil;
-import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
-import org.apache.log4j.Logger;
-
+import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
@@ -36,21 +35,22 @@
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.StorageCacheManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.command.DownloadCommand;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao;
import org.apache.cloudstack.storage.image.BaseImageStoreDriverImpl;
import org.apache.cloudstack.storage.image.store.ImageStoreImpl;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+import org.apache.log4j.Logger;
-import com.cloud.agent.api.storage.DownloadAnswer;
-import com.cloud.agent.api.to.DataObjectType;
-import com.cloud.agent.api.to.DataStoreTO;
-import com.cloud.agent.api.to.SwiftTO;
-import com.cloud.storage.Storage.ImageFormat;
-import com.cloud.template.VirtualMachineTemplate;
-import com.cloud.utils.exception.CloudRuntimeException;
+import javax.inject.Inject;
+import java.net.URL;
+import java.util.Map;
+import java.util.UUID;
public class SwiftImageStoreDriverImpl extends BaseImageStoreDriverImpl {
private static final Logger s_logger = Logger.getLogger(SwiftImageStoreDriverImpl.class);
@@ -63,6 +63,8 @@ public class SwiftImageStoreDriverImpl extends BaseImageStoreDriverImpl {
StorageCacheManager cacheManager;
@Inject
ConfigurationDao _configDao;
+ @Inject
+ private DownloadMonitor _downloadMonitor;
@Override
public DataStoreTO getStoreTO(DataStore store) {
@@ -100,12 +102,28 @@ public String createEntityExtractUrl(DataStore store, String installPath, ImageF
@Override
public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback callback) {
- Long maxTemplateSizeInBytes = getMaxTemplateSizeInBytes();
- VirtualMachineTemplate tmpl = _templateDao.findById(data.getId());
+
+ DownloadCommand downloadCommand = null;
+ if (data.getType() == DataObjectType.TEMPLATE) {
+ Long maxTemplateSizeInBytes = getMaxTemplateSizeInBytes();
+ downloadCommand = new DownloadCommand((TemplateObjectTO) (data.getTO()), maxTemplateSizeInBytes);
+ }else if (data.getType() == DataObjectType.VOLUME){
+ Long maxDownloadSizeInBytes = getMaxVolumeSizeInBytes();
+ VolumeInfo volumeInfo = (VolumeInfo) data;
+ RegisterVolumePayload payload = (RegisterVolumePayload) volumeInfo.getpayload();
+ ImageFormat format = ImageFormat.valueOf(payload.getFormat());
+ downloadCommand = new DownloadCommand((VolumeObjectTO) (data.getTO()), maxDownloadSizeInBytes, payload.getChecksum(), payload.getUrl(), format);
+ }
+
+ if (downloadCommand == null){
+ String errMsg = "Unable to build download command, DataObject is of neither VOLUME or TEMPLATE type";
+ s_logger.error(errMsg);
+ throw new CloudRuntimeException(errMsg);
+ }
+
DataStore cacheStore = cacheManager.getCacheStorage(dataStore.getScope());
- DownloadCommand dcmd = new DownloadCommand((TemplateObjectTO)(data.getTO()), maxTemplateSizeInBytes);
- dcmd.setCacheStore(cacheStore.getTO());
- dcmd.setProxy(getHttpProxy());
+ downloadCommand.setCacheStore(cacheStore.getTO());
+ downloadCommand.setProxy(getHttpProxy());
EndPoint ep = _epSelector.select(data);
if (ep == null) {
@@ -120,11 +138,11 @@ public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCal
if (data.getType() == DataObjectType.TEMPLATE) {
caller.setCallback(caller.getTarget().createTemplateAsyncCallback(null, null));
+ ep.sendMessageAsync(downloadCommand, caller);
} else if (data.getType() == DataObjectType.VOLUME) {
caller.setCallback(caller.getTarget().createVolumeAsyncCallback(null, null));
+ _downloadMonitor.downloadVolumeToStorage(data,caller);
}
- ep.sendMessageAsync(dcmd, caller);
-
}
}
diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java
index 703f9a1e4e43..14a61e028577 100644
--- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java
+++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java
@@ -17,6 +17,8 @@
package org.apache.cloudstack.storage.datastore.driver;
+import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
+
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.HashMap;
@@ -37,6 +39,8 @@
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+import org.apache.cloudstack.framework.config.ConfigKey;
+import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
@@ -61,7 +65,6 @@
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
-import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.ResizeVolumePayload;
import com.cloud.storage.Snapshot;
import com.cloud.storage.SnapshotVO;
@@ -82,47 +85,38 @@
import com.google.common.base.Preconditions;
import com.google.common.primitives.Ints;
-import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
-
-public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
+public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver, Configurable {
private static final Logger s_logger = Logger.getLogger(DateraPrimaryDataStoreDriver.class);
private static final int s_lockTimeInSeconds = 300;
private static final int s_lowestHypervisorSnapshotReserve = 10;
-
- @Inject
- private ClusterDao _clusterDao;
- @Inject
- private ClusterDetailsDao _clusterDetailsDao;
- @Inject
- private HostDao _hostDao;
- @Inject
- private SnapshotDao _snapshotDao;
- @Inject
- private SnapshotDetailsDao _snapshotDetailsDao;
- @Inject
- private PrimaryDataStoreDao _storagePoolDao;
- @Inject
- private StoragePoolDetailsDao _storagePoolDetailsDao;
- @Inject
- private VolumeDao _volumeDao;
- @Inject
- private VMTemplatePoolDao tmpltPoolDao;
- @Inject
- private PrimaryDataStoreDao storagePoolDao;
- @Inject
- private VolumeDetailsDao volumeDetailsDao;
- @Inject
- private SnapshotDetailsDao snapshotDetailsDao;
- @Inject
- private VolumeDataFactory volumeDataFactory;
+ private static final int KBPS_MULTIPLIER = 4; //4k blocks
+ private static final String SEPERATOR_SNAPSHOT = "::";
+
+ @Inject private ClusterDao _clusterDao;
+ @Inject private ClusterDetailsDao _clusterDetailsDao;
+ @Inject private HostDao _hostDao;
+ @Inject private SnapshotDao _snapshotDao;
+ @Inject private SnapshotDetailsDao _snapshotDetailsDao;
+ @Inject private PrimaryDataStoreDao _storagePoolDao;
+ @Inject private StoragePoolDetailsDao _storagePoolDetailsDao;
+ @Inject private VolumeDao _volumeDao;
+ @Inject private VMTemplatePoolDao tmpltPoolDao;
+ @Inject private PrimaryDataStoreDao storagePoolDao;
+ @Inject private VolumeDetailsDao volumeDetailsDao;
+ @Inject private SnapshotDetailsDao snapshotDetailsDao;
+ @Inject private VolumeDataFactory volumeDataFactory;
+
+ private static final ConfigKey MaxIopsScalingFactor = new ConfigKey("Advanced", Float.class, "storage.managedstorage.datera.iops.factor", "1.0",
+ "The amount by which to scale the bandwidth when applying Datera.", true, ConfigKey.Scope.Zone);
/**
- * Returns a map which lists the capabilities that this storage device can
- * offer. Currently supported STORAGE_SYSTEM_SNAPSHOT: Has the ability to create
- * native snapshots CAN_CREATE_VOLUME_FROM_SNAPSHOT: Can create new volumes from
- * native snapshots. CAN_CREATE_VOLUME_FROM_VOLUME: Device can clone volumes.
- * This is used for template caching.
+ * Returns a map which lists the capabilities that this storage device can offer. Currently supported
+ * STORAGE_SYSTEM_SNAPSHOT: Has the ability to create native snapshots
+ * CAN_CREATE_VOLUME_FROM_SNAPSHOT: Can create new volumes from native snapshots.
+ * CAN_CREATE_VOLUME_FROM_VOLUME: Device can clone volumes. This is used for template caching.
+ *
* @return a Map which determines the capabilities of the driver
+ *
*/
@Override
public Map getCapabilities() {
@@ -131,7 +125,6 @@ public Map getCapabilities() {
mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString());
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString());
- mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());
return mapCapabilities;
}
@@ -154,7 +147,7 @@ public ChapInfo getChapInfo(DataObject dataObject) {
/**
* Fetches an App Instance from Datera, throws exception if it doesn't find it
- * @param conn Datera Connection
+ * @param conn Datera Connection
* @param appInstanceName Name of the Aplication Instance
* @return application instance
*/
@@ -168,7 +161,7 @@ public DateraObject.AppInstance getDateraAppInstance(DateraObject.DateraConnecti
throw new CloudRuntimeException(dateraError.getMessage());
}
- if (appInstance == null) {
+ if (appInstance == null){
throw new CloudRuntimeException("App instance not found " + appInstanceName);
}
@@ -176,22 +169,19 @@ public DateraObject.AppInstance getDateraAppInstance(DateraObject.DateraConnecti
}
/**
- * Given a {@code dataObject} this function makes sure that the {@code host} has
- * access to it. All hosts which are in the same cluster are added to an
- * initiator group and that group is assigned to the appInstance. If an
- * initiator group does not exist, it is created. If the host does not have an
- * initiator registered on dataera, that is created and added to the initiator
- * group
+ * Given a {@code dataObject} this function makes sure that the {@code host} has access to it.
+ * All hosts which are in the same cluster are added to an initiator group and that group is assigned
+ * to the appInstance. If an initiator group does not exist, it is created. If the host does not have
+ * an initiator registered on dataera, that is created and added to the initiator group
+ *
* @param dataObject The volume that needs to be accessed
- * @param host The host which needs to access the volume
- * @param dataStore Identifies which primary storage the volume resides in
+ * @param host The host which needs to access the volume
+ * @param dataStore Identifies which primary storage the volume resides in
* @return True if access is granted. False otherwise
*/
@Override
public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) {
- s_logger.debug("grantAccess() called");
-
Preconditions.checkArgument(dataObject != null, "'dataObject' should not be 'null'");
Preconditions.checkArgument(host != null, "'host' should not be 'null'");
Preconditions.checkArgument(dataStore != null, "'dataStore' should not be 'null'");
@@ -223,20 +213,18 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore
List hosts = _hostDao.findByClusterId(clusterId);
if (!DateraUtil.hostsSupport_iScsi(hosts)) {
- s_logger.debug("hostsSupport_iScsi() :Host does NOT support iscsci");
return false;
}
// We don't have the initiator group, create one
- String initiatorGroupName = DateraUtil.INITIATOR_GROUP_PREFIX + "-" + cluster.getUuid();
- s_logger.debug("Will use initiator group " + String.valueOf(initiatorGroupName));
+ String initiatorGroupName = DateraUtil.INITIATOR_GROUP_PREFIX + "-" + cluster.getUuid();
initiatorGroup = DateraUtil.getInitiatorGroup(conn, initiatorGroupName);
if (initiatorGroup == null) {
- s_logger.debug("create initiator group " + String.valueOf(initiatorGroupName));
+
initiatorGroup = DateraUtil.createInitiatorGroup(conn, initiatorGroupName);
- // Save it to the DB
+ //Save it to the DB
ClusterDetailsVO clusterDetail = new ClusterDetailsVO(clusterId, initiatorGroupKey, initiatorGroupName);
_clusterDetailsDao.persist(clusterDetail);
@@ -244,36 +232,20 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore
initiatorGroup = DateraUtil.getInitiatorGroup(conn, initiatorGroupName);
}
- Preconditions.checkNotNull(initiatorGroup, "initiatorGroup should not be Null");
+ Preconditions.checkNotNull(initiatorGroup);
- // We create an initiator for every host in this cluster and add it to the
- // initator group
+ // We create an initiator for every host in this cluster and add it to the initator group
addClusterHostsToInitiatorGroup(conn, clusterId, initiatorGroupName);
- // assgin the initiatorgroup to appInstance
-
+ //assgin the initiatorgroup to appInstance
if (!isInitiatorGroupAssignedToAppInstance(conn, initiatorGroup, appInstance)) {
DateraUtil.assignGroupToAppInstance(conn, initiatorGroupName, appInstanceName);
- int retries = DateraUtil.DEFAULT_RETRIES;
- while (!isInitiatorGroupAssignedToAppInstance(conn, initiatorGroup, appInstance) && retries > 0) {
- Thread.sleep(DateraUtil.POLL_TIMEOUT_MS);
- retries--;
- }
-
- Preconditions.checkArgument(isInitiatorGroupAssignedToAppInstance(conn, initiatorGroup, appInstance),
- "Initgroup is not assigned to appinstance");
- // FIXME: Sleep anyways
- s_logger.debug("sleep " + String.valueOf(DateraUtil.POLL_TIMEOUT_MS) + " msec for ACL to be applied");
-
- Thread.sleep(DateraUtil.POLL_TIMEOUT_MS); // ms
- s_logger.debug(
- "Initiator group " + String.valueOf(initiatorGroupName) + " is assigned to " + appInstanceName);
-
+ DateraUtil.pollAppInstanceAvailable(conn, appInstanceName);
}
return true;
- } catch (DateraObject.DateraError | UnsupportedEncodingException | InterruptedException dateraError) {
- s_logger.warn(dateraError.getMessage(), dateraError);
+ } catch (DateraObject.DateraError | UnsupportedEncodingException dateraError) {
+ s_logger.warn(dateraError.getMessage(), dateraError );
throw new CloudRuntimeException("Unable to grant access to volume " + dateraError.getMessage());
} finally {
lock.unlock();
@@ -281,31 +253,28 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore
}
}
- private void addClusterHostsToInitiatorGroup(DateraObject.DateraConnection conn, long clusterId,
- String initiatorGroupName) throws DateraObject.DateraError, UnsupportedEncodingException {
+ private void addClusterHostsToInitiatorGroup(DateraObject.DateraConnection conn, long clusterId, String initiatorGroupName) throws DateraObject.DateraError, UnsupportedEncodingException {
List clusterHosts = _hostDao.findByClusterId(clusterId);
DateraObject.InitiatorGroup initiatorGroup = DateraUtil.getInitiatorGroup(conn, initiatorGroupName);
for (HostVO host : clusterHosts) {
- // check if we have an initiator for the host
+ //check if we have an initiator for the host
String iqn = host.getStorageUrl();
DateraObject.Initiator initiator = DateraUtil.getInitiator(conn, iqn);
- String initiatorName = "";
- // initiator can not be found, create it
+
+ //initiator not found, create it
if (initiator == null) {
- initiatorName = DateraUtil.INITIATOR_PREFIX + "-" + host.getUuid();
+ String initiatorName = DateraUtil.INITIATOR_PREFIX + "-" + host.getUuid();
initiator = DateraUtil.createInitiator(conn, initiatorName, iqn);
- s_logger.debug("Initiator " + initiatorName + " with " + iqn + "added ");
-
}
+
Preconditions.checkNotNull(initiator);
if (!DateraUtil.isInitiatorPresentInGroup(initiator, initiatorGroup)) {
- s_logger.debug("Add " + initiatorName + " to " + initiatorGroupName);
DateraUtil.addInitiatorToGroup(conn, initiator.getPath(), initiatorGroupName);
}
}
@@ -313,23 +282,21 @@ private void addClusterHostsToInitiatorGroup(DateraObject.DateraConnection conn,
/**
* Checks if an initiator group is assigned to an appInstance
- * @param conn Datera connection
+ * @param conn Datera connection
* @param initiatorGroup Initiator group to check
- * @param appInstance App Instance
- * @return True if initiator group is assigned to app instnace, false otherwise
+ * @param appInstance App Instance
+ * @return True if initiator group is assigned to app instnace, false otherwise
+ *
* @throws DateraObject.DateraError
*/
- private boolean isInitiatorGroupAssignedToAppInstance(DateraObject.DateraConnection conn,
- DateraObject.InitiatorGroup initiatorGroup, DateraObject.AppInstance appInstance)
- throws DateraObject.DateraError {
+ private boolean isInitiatorGroupAssignedToAppInstance(DateraObject.DateraConnection conn, DateraObject.InitiatorGroup initiatorGroup, DateraObject.AppInstance appInstance) throws DateraObject.DateraError {
- Map assignedInitiatorGroups = DateraUtil
- .getAppInstanceInitiatorGroups(conn, appInstance.getName());
+ List assignedInitiatorGroups = DateraUtil.getAppInstanceInitiatorGroups(conn, appInstance.getName());
Preconditions.checkNotNull(assignedInitiatorGroups);
- for (DateraObject.InitiatorGroup ig : assignedInitiatorGroups.values()) {
+ for (DateraObject.InitiatorGroup ig : assignedInitiatorGroups) {
if (initiatorGroup.getName().equals(ig.getName())) {
return true;
}
@@ -338,16 +305,17 @@ private boolean isInitiatorGroupAssignedToAppInstance(DateraObject.DateraConnect
return false;
}
+
/**
- * Removes access of the initiator group to which {@code host} belongs from the
- * appInstance given by {@code dataObject}
+ * Removes access of the initiator group to which {@code host} belongs from the appInstance
+ * given by {@code dataObject}
+ *
* @param dataObject Datera volume
- * @param host the host which is currently having access to the volume
- * @param dataStore The primary store to which volume belongs
+ * @param host the host which is currently having access to the volume
+ * @param dataStore The primary store to which volume belongs
*/
@Override
public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) {
- s_logger.debug("revokeAccess() called");
Preconditions.checkArgument(dataObject != null, "'dataObject' should not be 'null'");
Preconditions.checkArgument(host != null, "'host' should not be 'null'");
@@ -367,7 +335,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore)
try {
- String initiatorGroupName = DateraUtil.INITIATOR_GROUP_PREFIX + "-" + cluster.getUuid();
+ String initiatorGroupName = DateraUtil.INITIATOR_GROUP_PREFIX + "-" + cluster.getUuid();
DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao);
@@ -375,16 +343,16 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore)
DateraObject.InitiatorGroup initiatorGroup = DateraUtil.getInitiatorGroup(conn, initiatorGroupName);
if (initiatorGroup != null && appInstance != null) {
-
DateraUtil.removeGroupFromAppInstance(conn, initiatorGroupName, appInstanceName);
- int retries = DateraUtil.DEFAULT_RETRIES;
- while (isInitiatorGroupAssignedToAppInstance(conn, initiatorGroup, appInstance) && retries > 0) {
- Thread.sleep(DateraUtil.POLL_TIMEOUT_MS);
- retries--;
- }
+ DateraUtil.pollAppInstanceAvailable(conn, appInstanceName);
+ }
+
+ if (dataObject.getType().equals(DataObjectType.TEMPLATE)) {
+ //Having the template offline reduces the time taken to clone
+ DateraUtil.updateAppInstanceAdminState(conn, appInstanceName, DateraObject.AppState.OFFLINE);
}
- } catch (DateraObject.DateraError | UnsupportedEncodingException | InterruptedException dateraError) {
+ } catch (DateraObject.DateraError | UnsupportedEncodingException dateraError) {
String errMesg = "Error revoking access for Volume : " + dataObject.getId();
s_logger.warn(errMesg, dateraError);
throw new CloudRuntimeException(errMesg);
@@ -395,10 +363,11 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore)
}
/**
- * Returns the size of template on this primary storage. If we already have a
- * template on this storage, we return 0
+ * Returns the size of template on this primary storage. If we already have a template on this
+ * storage, we return 0
+ *
* @param templateInfo Information about the template
- * @param storagePool The pool where we want to store the template
+ * @param storagePool The pool where we want to store the template
* @return Size in bytes
*/
@Override
@@ -409,72 +378,53 @@ public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool s
if (lstTemplatePoolRefs != null) {
for (VMTemplateStoragePoolVO templatePoolRef : lstTemplatePoolRefs) {
if (templatePoolRef.getTemplateId() == templateInfo.getId()) {
- // This indicates that we already have this template stored on this primary
- // storage, so
+ // This indicates that we already have this template stored on this primary storage, so
// we do not require additional space.
return 0;
}
}
}
- // This indicates that we do not have a copy of this template on this primary
- // storage, so
- // we need to take it into consideration from a space standpoint (ex. when a new
- // VM is spun
+ // This indicates that we do not have a copy of this template on this primary storage, so
+ // we need to take it into consideration from a space standpoint (ex. when a new VM is spun
// up and wants to use this particular template for its root disk).
return getDataObjectSizeIncludingHypervisorSnapshotReserve(templateInfo, storagePool);
}
- /**
- * Returns Datera appInstanceName
- * @param dataObject volume or template
- * @return Derived Datera appInstanceName based on dataObject, Eg.
- * CS-V-ROOT-123-6db58e3f-14c4-45ac-95e9-60e3a00ce7d0
- */
private String getAppInstanceName(DataObject dataObject) {
-
ArrayList name = new ArrayList<>();
- name.add(DateraUtil.APPINSTANCE_PREFIX); // CS
+ name.add(DateraUtil.APPINSTANCE_PREFIX);
+ name.add(dataObject.getType().toString());
+ name.add(dataObject.getUuid());
- String dataObjectTypeString = dataObject.getType().name(); // TEMPLATE, VOLUME, SNAPSHOT
- String dataObjectTypeBrief;
- dataObjectTypeBrief = StringUtils.substring(dataObjectTypeString, 0, 1);
- name.add(dataObjectTypeBrief); // T, V
-
- switch (dataObject.getType()) {
- case TEMPLATE:
- TemplateInfo templateInfo = (TemplateInfo) dataObject;
-
- name.add(dataObject.getUuid()); // 6db58e3f-14c4-45ac-95e9-60e3a00ce7d0
-
- // For cached templates, we will also add the storage pool ID
+ if (dataObject.getType() == DataObjectType.TEMPLATE){
+ //For cached templates, we will also add the pool ID
name.add(String.valueOf(dataObject.getDataStore().getId()));
- break;
-
- case VOLUME:
- VolumeInfo volumeInfo = (VolumeInfo) dataObject;
- String volumeName = volumeInfo.getName();
- name.add(String.valueOf(volumeName));
- name.add(dataObject.getUuid()); // 6db58e3f-14c4-45ac-95e9-60e3a00ce7d0
-
- VolumeVO volumeVo = _volumeDao.findById(dataObject.getId());
- s_logger.debug("volumeName : " + volumeName);
- break;
+ }
- case SNAPSHOT:
- name.add(dataObject.getUuid()); // 6db58e3f-14c4-45ac-95e9-60e3a00ce7d0
+ return StringUtils.join("-", name.toArray());
+ }
+ private String getDescription(DataObject dataObject) {
+ String desc = "CSAccountId-";
+ switch (dataObject.getType()) {
+ case VOLUME:
+ desc += Long.toString(((VolumeInfo) (dataObject)).getAccountId());
+ break;
+ case TEMPLATE:
+ desc+= Long.toString(((TemplateInfo)(dataObject)).getAccountId());
+ break;
+ case SNAPSHOT:
+ desc+= Long.toString(((SnapshotInfo)(dataObject)).getAccountId());
+ break;
}
-
- String appInstanceName = StringUtils.join("-", name.toArray());
- return StringUtils.substring(appInstanceName, 0, DateraUtil.APPINSTANCE_MAX_LENTH);
+ return desc;
}
// Not being used right now as Datera doesn't support min IOPS
private long getDefaultMinIops(long storagePoolId) {
- StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId,
- DateraUtil.CLUSTER_DEFAULT_MIN_IOPS);
+ StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, DateraUtil.CLUSTER_DEFAULT_MIN_IOPS);
String clusterDefaultMinIops = storagePoolDetail.getValue();
@@ -484,12 +434,10 @@ private long getDefaultMinIops(long storagePoolId) {
/**
* If user doesn't specify the IOPS, use this IOPS
* @param storagePoolId the primary storage
- * @return default max IOPS for this storage configured when the storage is
- * added
+ * @return default max IOPS for this storage configured when the storage is added
*/
private long getDefaultMaxIops(long storagePoolId) {
- StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId,
- DateraUtil.CLUSTER_DEFAULT_MAX_IOPS);
+ StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, DateraUtil.CLUSTER_DEFAULT_MAX_IOPS);
String clusterDefaultMaxIops = storagePoolDetail.getValue();
@@ -497,14 +445,12 @@ private long getDefaultMaxIops(long storagePoolId) {
}
/**
- * Return the default number of replicas to use (configured at storage addition
- * time)
+ * Return the default number of replicas to use (configured at storage addition time)
* @param storagePoolId the primary storage
* @return the number of replicas to use
*/
private int getNumReplicas(long storagePoolId) {
- StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId,
- DateraUtil.NUM_REPLICAS);
+ StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, DateraUtil.NUM_REPLICAS);
String clusterDefaultReplicas = storagePoolDetail.getValue();
@@ -512,38 +458,6 @@ private int getNumReplicas(long storagePoolId) {
}
- /**
- * Return the default volume placement to use (configured at storage addition
- * time)
- * @param storagePoolId the primary storage
- * @return volume placement string
- */
- private String getVolPlacement(long storagePoolId) {
- StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId,
- DateraUtil.VOL_PLACEMENT);
-
- String clusterDefaultVolPlacement = storagePoolDetail.getValue();
-
- return clusterDefaultVolPlacement;
-
- }
-
- /**
- * Return the default IP pool name to use (configured at storage addition time)
- * @param storagePoolId the primary storage
- * @return IP pool name
- */
- private String getIpPool(long storagePoolId) {
- String ipPool = DateraUtil.DEFAULT_IP_POOL;
- StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, DateraUtil.IP_POOL);
- if (storagePoolDetail != null) {
- ipPool = storagePoolDetail.getValue();
- }
- s_logger.debug("ipPool: " + ipPool);
- return ipPool;
-
- }
-
@Override
public long getUsedBytes(StoragePool storagePool) {
return getUsedBytes(storagePool, Long.MIN_VALUE);
@@ -551,14 +465,15 @@ public long getUsedBytes(StoragePool storagePool) {
/**
* Get the total space used by all the entities on the storage.
+ *
* Total space = volume space + snapshot space + template space
- * @param storagePool Primary storage
- * @param volumeIdToIgnore Ignore this volume (used when we delete a volume and
- * want to update the space)
+ *
+ * @param storagePool Primary storage
+ * @param volumeIdToIgnore Ignore this volume (used when we delete a volume and want to update the space)
* @return size in bytes
*/
private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) {
- long usedSpaceBytes = 0;
+ long usedSpace = 0;
List lstVolumes = _volumeDao.findByPoolId(storagePool.getId(), null);
@@ -571,18 +486,20 @@ private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) {
VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volume.getId(), DateraUtil.VOLUME_SIZE);
if (volumeDetail != null && volumeDetail.getValue() != null) {
- long volumeSizeGib = Long.parseLong(volumeDetail.getValue());
- long volumeSizeBytes = DateraUtil.gibToBytes((int) (volumeSizeGib));
- usedSpaceBytes += volumeSizeBytes;
- } else {
- DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePool.getId(),
- _storagePoolDetailsDao);
+ long volumeSize = Long.parseLong(volumeDetail.getValue());
+
+ usedSpace += volumeSize;
+ }
+ else {
try {
+ DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePool.getId(), _storagePoolDetailsDao);
String appInstanceName = getAppInstanceName(volumeDataFactory.getVolume(volume.getId()));
DateraObject.AppInstance appInstance = DateraUtil.getAppInstance(conn, appInstanceName);
if (appInstance != null) {
- usedSpaceBytes += DateraUtil.gibToBytes(appInstance.getSize());
+ long size = DateraUtil.gbToBytes(appInstance.getSize());
+ usedSpace += size;
+ updateVolumeDetails(volume.getId(), size);
}
} catch (DateraObject.DateraError dateraError) {
String errMesg = "Error getting used bytes for storage pool : " + storagePool.getId();
@@ -593,22 +510,21 @@ private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) {
}
}
+
List lstSnapshots = _snapshotDao.listAll();
if (lstSnapshots != null) {
for (SnapshotVO snapshot : lstSnapshots) {
- SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(),
- DateraUtil.STORAGE_POOL_ID);
+ SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(), DateraUtil.STORAGE_POOL_ID);
// if this snapshot belongs to the storagePool that was passed in
- if (snapshotDetails != null && snapshotDetails.getValue() != null
- && Long.parseLong(snapshotDetails.getValue()) == storagePool.getId()) {
+ if (snapshotDetails != null && snapshotDetails.getValue() != null && Long.parseLong(snapshotDetails.getValue()) == storagePool.getId()) {
snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(), DateraUtil.VOLUME_SIZE);
if (snapshotDetails != null && snapshotDetails.getValue() != null) {
long snapshotSize = Long.parseLong(snapshotDetails.getValue());
- usedSpaceBytes += snapshotSize;
+ usedSpace += snapshotSize;
}
}
}
@@ -618,17 +534,17 @@ private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) {
if (lstTemplatePoolRefs != null) {
for (VMTemplateStoragePoolVO templatePoolRef : lstTemplatePoolRefs) {
- usedSpaceBytes += templatePoolRef.getTemplateSize();
+ usedSpace += templatePoolRef.getTemplateSize();
}
}
- s_logger.debug("usedSpaceBytes: " + toHumanReadableSize(usedSpaceBytes));
+ s_logger.debug("usedSpaceBytes: " + toHumanReadableSize(usedSpace));
- return usedSpaceBytes;
+ return usedSpace;
}
/**
- * Get total IOPS used by the storage array. Since Datera doesn't support min
- * IOPS, return zero for now
+ * Get total IOPS used by the storage array. Since Datera doesn't support min IOPS,
+ * return zero for now
* @param storagePool primary storage
* @return total IOPS used
*/
@@ -639,10 +555,9 @@ public long getUsedIops(StoragePool storagePool) {
}
/**
- * Rreturns the size of the volume including the hypervisor snapshot reserve
- * (HSR).
+ * Rreturns the size of the volume including the hypervisor snapshot reserve (HSR).
* @param dataObject Volume or a Template
- * @param pool primary storage where it resides
+ * @param pool primary storage where it resides
* @return size in bytes
*/
@@ -652,41 +567,33 @@ public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataO
long volumeSize = 0;
switch (dataObject.getType()) {
- case VOLUME:
+ case VOLUME:
+ VolumeInfo volume = (VolumeInfo) dataObject;
+ volumeSize = volume.getSize();
+ Integer hypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve();
- VolumeInfo volume = (VolumeInfo) dataObject;
- volumeSize = volume.getSize();
- Integer hypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve();
+ if (hypervisorSnapshotReserve != null) {
+ hypervisorSnapshotReserve = Math.max(hypervisorSnapshotReserve, s_lowestHypervisorSnapshotReserve);
+ } else {
+ hypervisorSnapshotReserve = s_lowestHypervisorSnapshotReserve;
+ }
- if (hypervisorSnapshotReserve != null) {
- hypervisorSnapshotReserve = Math.max(hypervisorSnapshotReserve, s_lowestHypervisorSnapshotReserve);
volumeSize += volumeSize * (hypervisorSnapshotReserve / 100f);
- }
- s_logger.debug("Volume size: " + toHumanReadableSize(volumeSize));
- break;
+ break;
- case TEMPLATE:
-
- TemplateInfo templateInfo = (TemplateInfo) dataObject;
- long templateSize = templateInfo.getSize() != null ? templateInfo.getSize() : 0;
-
- if (templateInfo.getHypervisorType() == Hypervisor.HypervisorType.KVM) {
- volumeSize = templateSize;
- } else {
- volumeSize = (long) (templateSize + templateSize * (s_lowestHypervisorSnapshotReserve / 100f));
- }
- s_logger.debug("Template volume size:" + toHumanReadableSize(volumeSize));
+ case TEMPLATE:
- break;
+ TemplateInfo templateInfo = (TemplateInfo)dataObject;
+ volumeSize = (long)(templateInfo.getSize() + templateInfo.getSize() * (s_lowestHypervisorSnapshotReserve / 100f));
+ break;
}
return volumeSize;
}
/**
- * Deletes a volume from Datera. If we are using native snapshots, we first
- * check if the volume is holding a native snapshot, if it does, then we don't
- * delete it from Datera but instead mark it so that when the snapshot is
- * deleted, we delete the volume
+ * Deletes a volume from Datera. If we are using native snapshots, we first check if the volume is holding
+ * a native snapshot, if it does, then we don't delete it from Datera but instead mark it so that when
+ * the snapshot is deleted, we delete the volume
*
* @param volumeInfo The volume which needs to be deleted
* @param storagePoolId Primary storage where volume resides
@@ -694,19 +601,17 @@ public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataO
private void deleteVolume(VolumeInfo volumeInfo, long storagePoolId) {
DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao);
- Long volumeStoragePoolId = volumeInfo.getPoolId();
+ Long volumeStoragePoolId = volumeInfo.getPoolId();
long volumeId = volumeInfo.getId();
if (volumeStoragePoolId == null) {
- return; // this volume was never assigned to a storage pool, so no SAN volume should
- // exist for it
+ return; // this volume was never assigned to a storage pool, so no SAN volume should exist for it
}
try {
- // If there are native snapshots on this appInstance, we want to keep it on
- // Datera
- // but remove it from cloudstack
+ //If there are native snapshots on this appInstance, we want to keep it on Datera
+ //but remove it from cloudstack
if (shouldDeleteVolume(volumeId, null)) {
DateraUtil.deleteAppInstance(conn, getAppInstanceName(volumeInfo));
}
@@ -727,28 +632,30 @@ private void deleteVolume(VolumeInfo volumeInfo, long storagePoolId) {
}
/**
- * given a {@code volumeInfo} and {@code storagePoolId}, creates an App instance
- * on Datera. Updates the usedBytes count in the DB for this storage pool. A
- * volume could be created in 3 ways
+ * given a {@code volumeInfo} and {@code storagePoolId}, creates an App instance on Datera.
+ * Updates the usedBytes count in the DB for this storage pool. A volume could be created in
+ * 3 ways
*
- * 1) A fresh volume with no data: New volume created from Cloudstack
+ * 1) A fresh volume with no data:
+ * New volume created from Cloudstack
*
- * 2) A volume created from a native snapshot. This is used when creating volume
- * from snapshot and native snapshots are supported
+ * 2) A volume created from a native snapshot.
+ * This is used when creating volume from
+ * snapshot and native snapshots are supported
*
- * 3) A volume created by cloning from another volume: This is used when
- * creating volume from template or volume from snapshot stored as another
- * volume when native snapshots are not supported by the hypervisor
+ * 3) A volume created by cloning from another volume:
+ * This is used when creating volume from template or
+ * volume from snapshot stored as another volume when
+ * native snapshots are not supported by the hypervisor
*
*
- * @param volumeInfo Info about the volume like size,QoS
+ * @param volumeInfo Info about the volume like size,QoS
* @param storagePoolId The pool to create the vo
- * @return returns the IQN path which will be used by storage substem
+ * @return returns the IQN path which will be used by storage substem
*
*/
private String createVolume(VolumeInfo volumeInfo, long storagePoolId) {
- s_logger.debug("createVolume() called");
Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null");
Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0");
@@ -761,79 +668,68 @@ private String createVolume(VolumeInfo volumeInfo, long storagePoolId) {
long csSnapshotId = getCsIdForCloning(volumeInfo.getId(), "cloneOfSnapshot");
long csTemplateId = getCsIdForCloning(volumeInfo.getId(), "cloneOfTemplate");
- s_logger.debug("csTemplateId is " + String.valueOf(csTemplateId));
try {
if (csSnapshotId > 0) {
- // creating volume from snapshot. The snapshot could either be a native snapshot
- // or another volume.
- s_logger.debug("Creating volume from snapshot ");
+ //creating volume from snapshot. The snapshot could either be a native snapshot
+ //or another volume.
appInstance = createDateraClone(conn, csSnapshotId, volumeInfo, storagePoolId, DataObjectType.SNAPSHOT);
} else if (csTemplateId > 0) {
// create volume from template. Invoked when creating new ROOT volume
- s_logger.debug("Creating volume from template ");
-
appInstance = createDateraClone(conn, csTemplateId, volumeInfo, storagePoolId, DataObjectType.TEMPLATE);
String appInstanceName = appInstance.getName();
- long volumeSize = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo,
- storagePoolDao.findById(storagePoolId));
+ long volumeSize = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, storagePoolDao.findById(storagePoolId));
- // expand the template
- if (volumeSize > DateraUtil.gibToBytes(appInstance.getSize())) {
+ //expand the template
+ if (volumeSize > DateraUtil.gbToBytes(appInstance.getSize())) {
// Expand the volume to include HSR depending on the volume's service offering
- DateraUtil.updateAppInstanceSize(conn, appInstanceName, DateraUtil.bytesToGib(volumeSize));
+ DateraUtil.updateAppInstanceSize(conn, appInstanceName, DateraUtil.bytesToGb(volumeSize));
// refresh appInstance
appInstance = DateraUtil.getAppInstance(conn, appInstanceName);
Preconditions.checkNotNull(appInstance);
- // update IOPS
- if ((volumeInfo.getMaxIops() != null) && (volumeInfo.getMaxIops() != appInstance.getTotalIops())) {
+
+ //update IOPS
+ if (volumeInfo.getMaxIops() != null && volumeInfo.getMaxIops() != toIops(appInstance.getTotalBandwidthKiBps())) {
int newIops = Ints.checkedCast(volumeInfo.getMaxIops());
- DateraUtil.updateAppInstanceIops(conn, appInstanceName, newIops);
+ DateraUtil.updateAppInstanceIops(conn, appInstanceName, toBandwidthKiBps(newIops));
}
+
// refresh appInstance
appInstance = DateraUtil.getAppInstance(conn, appInstanceName);
}
} else {
- // Just create a standard volume
- s_logger.debug("Creating a standard volume ");
+ //Just create a standard volume
appInstance = createDateraVolume(conn, volumeInfo, storagePoolId);
}
- } catch (UnsupportedEncodingException | DateraObject.DateraError e) {
+ } catch(UnsupportedEncodingException| DateraObject.DateraError e) {
String errMesg = "Unable to create Volume Error: " + e.getMessage();
s_logger.warn(errMesg);
throw new CloudRuntimeException(errMesg, e);
}
- if (appInstance == null) {
- String errMesg = "appInstance returned null";
- s_logger.warn(errMesg);
- throw new CloudRuntimeException(errMesg);
- }
-
Preconditions.checkNotNull(appInstance);
+
String iqn = appInstance.getIqn();
String iqnPath = DateraUtil.generateIqnPath(iqn);
- VolumeVO volumeVo = _volumeDao.findById(volumeInfo.getId());
- s_logger.debug("volume ID : " + volumeInfo.getId());
- s_logger.debug("volume uuid : " + volumeInfo.getUuid());
+ VolumeVO volume = _volumeDao.findById(volumeInfo.getId());
- volumeVo.set_iScsiName(iqnPath);
- volumeVo.setFolder(appInstance.getName());
- volumeVo.setPoolType(Storage.StoragePoolType.IscsiLUN);
- volumeVo.setPoolId(storagePoolId);
+ volume.set_iScsiName(iqnPath);
+ volume.setFolder(appInstance.getName());
+ volume.setPoolType(Storage.StoragePoolType.IscsiLUN);
+ volume.setPoolId(storagePoolId);
- _volumeDao.update(volumeVo.getId(), volumeVo);
+ _volumeDao.update(volume.getId(), volume);
- updateVolumeDetails(volumeVo.getId(), appInstance.getSize());
+ updateVolumeDetails(volume.getId(), appInstance.getSize());
StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
@@ -847,116 +743,99 @@ private String createVolume(VolumeInfo volumeInfo, long storagePoolId) {
return appInstance.getIqn();
}
+ private Long toIops(Integer totalBandwidthKiBps) {
+
+ if (totalBandwidthKiBps == null) {
+ return null;
+ }
+
+ return (long) (Math.round(totalBandwidthKiBps / (KBPS_MULTIPLIER * MaxIopsScalingFactor.value())) + 1);
+ }
+
+ private int toBandwidthKiBps(int iops) {
+ return Math.round(iops * KBPS_MULTIPLIER * MaxIopsScalingFactor.value());
+ }
+
/**
- * Helper function to create a Datera app instance. Throws an exception if
- * unsuccessful
- * @param conn Datera connection
+ * Helper function to create a Datera app instance. Throws an exception if unsuccessful
+ * @param conn Datera connection
* @param volumeInfo Volume information
* @param storagePoolId primary storage
- * @return The AppInstance which is created
+ * @return The AppInstance which is created
* @throws UnsupportedEncodingException
- * @throws DateraObject.DateraError
+ * @throws DateraObject.DateraError
*/
- private DateraObject.AppInstance createDateraVolume(DateraObject.DateraConnection conn, VolumeInfo volumeInfo,
- long storagePoolId) throws UnsupportedEncodingException, DateraObject.DateraError {
+ private DateraObject.AppInstance createDateraVolume(DateraObject.DateraConnection conn, VolumeInfo volumeInfo, long storagePoolId) throws UnsupportedEncodingException, DateraObject.DateraError {
- s_logger.debug("createDateraVolume() called");
- DateraObject.AppInstance appInstance = null;
- try {
+ int minIops = Ints.checkedCast(getDefaultMinIops(storagePoolId));
+ int maxIops = Ints.checkedCast(getDefaultMaxIops(storagePoolId));
- int minIops = Ints.checkedCast(
- volumeInfo.getMinIops() != null ? volumeInfo.getMinIops() : getDefaultMinIops(storagePoolId));
-
- // int minIops = Ints.checkedCast(volumeInfo.getMinIops());
-
- int maxIops = Ints.checkedCast(
- volumeInfo.getMaxIops() != null ? volumeInfo.getMaxIops() : getDefaultMaxIops(storagePoolId));
+ if (volumeInfo.getMinIops() != null) {
+ minIops = Ints.checkedCast(volumeInfo.getMinIops());
+ }
- // int maxIops = Ints.checkedCast(volumeInfo.getMaxIops());
+ if (volumeInfo.getMaxIops() != null) {
+ maxIops = Ints.checkedCast(Math.max(minIops, Ints.checkedCast(volumeInfo.getMaxIops())));
+ }
- if (maxIops <= 0) { // We don't care about min iops for now
- maxIops = Ints.checkedCast(getDefaultMaxIops(storagePoolId));
- }
+ int replicas = getNumReplicas(storagePoolId);
- int replicas = getNumReplicas(storagePoolId);
- String volumePlacement = getVolPlacement(storagePoolId);
- String ipPool = getIpPool(storagePoolId);
+ long volumeSizeBytes = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, _storagePoolDao.findById(storagePoolId));
+ int volumeSizeGb = DateraUtil.bytesToGb(volumeSizeBytes);
- long volumeSizeBytes = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo,
- _storagePoolDao.findById(storagePoolId));
- int volumeSizeGib = DateraUtil.bytesToGib(volumeSizeBytes);
- if (volumePlacement == null) {
- appInstance = DateraUtil.createAppInstance(conn, getAppInstanceName(volumeInfo), volumeSizeGib, maxIops,
- replicas);
- } else {
- appInstance = DateraUtil.createAppInstance(conn, getAppInstanceName(volumeInfo), volumeSizeGib, maxIops,
- replicas, volumePlacement, ipPool);
- }
- } catch (Exception ex) {
- s_logger.debug("createDateraVolume() failed");
- s_logger.error(ex);
- }
- return appInstance;
+ return DateraUtil.createAppInstance(conn, getAppInstanceName(volumeInfo), getDescription(volumeInfo), volumeSizeGb, toBandwidthKiBps(maxIops), replicas);
}
/**
- * This function creates a new AppInstance on datera by cloning. We can clone
- * either from a volume snapshot (in case of native snapshots) or clone from
- * another app Instance in case of templates or snapshots as volumes
+ * This function creates a new AppInstance on datera by cloning. We can clone either from a volume snapshot (in case of native snapshots)
+ * or clone from another app Instance in case of templates or snapshots as volumes
*
- * @param conn Datera Connection
- * @param dataObjectId The ID of the clone, used to fetch details on how to
- * clone
- * @param volumeInfo Information about the clone
+ * @param conn Datera Connection
+ * @param dataObjectId The ID of the clone, used to fetch details on how to clone
+ * @param volumeInfo Information about the clone
* @param storagePoolId Primary store to create the clone on
- * @param dataType Type of the source (snapshot or template)
+ * @param dataType Type of the source (snapshot or template)
* @return The cloned AppInstance
*/
- private DateraObject.AppInstance createDateraClone(DateraObject.DateraConnection conn, long dataObjectId,
- VolumeInfo volumeInfo, long storagePoolId, DataObjectType dataType)
- throws UnsupportedEncodingException, DateraObject.DateraError {
-
- s_logger.debug("createDateraClone() called");
+ private DateraObject.AppInstance createDateraClone(DateraObject.DateraConnection conn, long dataObjectId, VolumeInfo volumeInfo, long storagePoolId, DataObjectType dataType) throws UnsupportedEncodingException, DateraObject.DateraError {
String clonedAppInstanceName = getAppInstanceName(volumeInfo);
String baseAppInstanceName = null;
DateraObject.AppInstance appInstance = null;
- String ipPool = getIpPool(storagePoolId);
if (dataType == DataObjectType.SNAPSHOT) {
SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(dataObjectId, DateraUtil.SNAPSHOT_ID);
// Clone volume from a snapshot
if (snapshotDetails != null && snapshotDetails.getValue() != null) {
- s_logger.debug("Clone volume from a snapshot");
- appInstance = DateraUtil.cloneAppInstanceFromSnapshot(conn, clonedAppInstanceName,
- snapshotDetails.getValue(), ipPool);
+ String[] tokens = snapshotDetails.getValue().split(SEPERATOR_SNAPSHOT);
+ Preconditions.checkArgument(tokens.length == 2);
+ String srcAppInstanceName = tokens[0];
+ String snapshotTime = tokens[1];
+
+ appInstance = DateraUtil.cloneAppInstanceFromSnapshot(conn, clonedAppInstanceName, getDescription(volumeInfo), srcAppInstanceName, snapshotTime);
if (volumeInfo.getMaxIops() != null) {
int totalIops = Math.min(DateraUtil.MAX_IOPS, Ints.checkedCast(volumeInfo.getMaxIops()));
- DateraUtil.updateAppInstanceIops(conn, clonedAppInstanceName, totalIops);
+ DateraUtil.updateAppInstanceIops(conn, clonedAppInstanceName, toBandwidthKiBps(totalIops));
appInstance = DateraUtil.getAppInstance(conn, clonedAppInstanceName);
}
- if (appInstance == null) {
- throw new CloudRuntimeException("Unable to create an app instance from snapshot "
- + volumeInfo.getId() + " type " + dataType);
+ if (appInstance == null){
+ throw new CloudRuntimeException("Unable to create an app instance from snapshot " + volumeInfo.getId() + " type " + dataType);
}
return appInstance;
} else {
- // Clone volume from an appInstance
- s_logger.debug("Clone volume from an appInstance");
-
+ //Clone volume from an appInstance
snapshotDetails = snapshotDetailsDao.findDetail(dataObjectId, DateraUtil.VOLUME_ID);
baseAppInstanceName = snapshotDetails.getValue();
}
} else if (dataType == DataObjectType.TEMPLATE) {
- s_logger.debug("Clone volume from a template");
VMTemplateStoragePoolVO templatePoolRef = tmpltPoolDao.findByPoolTemplate(storagePoolId, dataObjectId, null);
@@ -966,68 +845,53 @@ private DateraObject.AppInstance createDateraClone(DateraObject.DateraConnection
}
if (baseAppInstanceName == null) {
- throw new CloudRuntimeException(
- "Unable to find a base volume to clone " + volumeInfo.getId() + " type " + dataType);
+ throw new CloudRuntimeException("Unable to find a base volume to clone " + volumeInfo.getId() + " type " + dataType);
}
- // Clone the app Instance
- appInstance = DateraUtil.cloneAppInstanceFromVolume(conn, clonedAppInstanceName, baseAppInstanceName, ipPool);
+ //Clone the app Instance
+ appInstance = DateraUtil.cloneAppInstanceFromVolume(conn, clonedAppInstanceName, getDescription(volumeInfo), baseAppInstanceName);
- if (dataType == DataObjectType.TEMPLATE) {
- // Only update volume parameters if clone from cached template
- // Update maxIops
- if (volumeInfo.getMaxIops() != null) {
- int totalIops = Math.min(DateraUtil.MAX_IOPS, Ints.checkedCast(volumeInfo.getMaxIops()));
+ if (volumeInfo.getMaxIops() != null) {
- DateraUtil.updateAppInstanceIops(conn, clonedAppInstanceName, totalIops);
- appInstance = DateraUtil.getAppInstance(conn, clonedAppInstanceName);
- }
- // Update placementMode
- String newPlacementMode = getVolPlacement(storagePoolId);
- if (newPlacementMode != null) {
- DateraUtil.updateAppInstancePlacement(conn, clonedAppInstanceName, newPlacementMode);
- }
+ int totalIops = Math.min(DateraUtil.MAX_IOPS, Ints.checkedCast(volumeInfo.getMaxIops()));
+
+ DateraUtil.updateAppInstanceIops(conn, clonedAppInstanceName, toBandwidthKiBps(totalIops));
appInstance = DateraUtil.getAppInstance(conn, clonedAppInstanceName);
}
- if (appInstance == null) {
- throw new CloudRuntimeException("Unable to create an app instance from snapshot or template "
- + volumeInfo.getId() + " type " + dataType);
+
+ if (appInstance == null){
+ throw new CloudRuntimeException("Unable to create an app instance from snapshot " + volumeInfo.getId() + " type " + dataType);
}
- s_logger.debug("Datera - Cloned " + baseAppInstanceName + " to " + clonedAppInstanceName);
return appInstance;
}
/**
- * This function gets invoked when you want to do operations on a snapshot. The
- * snapshot could be a native snapshot and you want to create a template out of
- * it. Since snapshots don't have an IQN, we create a temp volume for this
- * snapshot which will be used to carry out further operations. This function
- * also handles deletion of temp volumes. A flag in the snapshot details table
- * decides which action is performed.
+ * This function gets invoked when you want to do operations on a snapshot.
+ * The snapshot could be a native snapshot and you want to create a template out of it.
+ * Since snapshots don't have an IQN, we create a temp volume for this snapshot
+ * which will be used to carry out further operations. This function also handles deletion of
+ * temp volumes. A flag in the snapshot details table decides which action is performed.
*
- * @param snapshotInfo snapshot on Datera
+ * @param snapshotInfo snapshot on Datera
* @param storagePoolId primary store ID
*/
private void createTempVolume(SnapshotInfo snapshotInfo, long storagePoolId) {
- s_logger.debug("createTempVolume() from snapshot called");
- String ipPool = getIpPool(storagePoolId);
+
long csSnapshotId = snapshotInfo.getId();
SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DateraUtil.SNAPSHOT_ID);
if (snapshotDetails == null || snapshotDetails.getValue() == null) {
- throw new CloudRuntimeException("'createTempVolume(SnapshotInfo, long)' should not be invoked unless "
- + DateraUtil.SNAPSHOT_ID + " exists.");
+ throw new CloudRuntimeException("'createTempVolume(SnapshotInfo, long)' should not be invoked unless " + DateraUtil.SNAPSHOT_ID + " exists.");
}
DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao);
snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, "tempVolume");
- if (snapshotDetails != null && snapshotDetails.getValue() != null
- && snapshotDetails.getValue().equalsIgnoreCase("create")) {
+ if (snapshotDetails != null && snapshotDetails.getValue() != null && snapshotDetails.getValue().equalsIgnoreCase("create")) {
snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DateraUtil.SNAPSHOT_ID);
String snapshotName = snapshotDetails.getValue();
@@ -1036,11 +900,14 @@ private void createTempVolume(SnapshotInfo snapshotInfo, long storagePoolId) {
DateraObject.AppInstance clonedAppInstance;
try {
- clonedAppInstance = DateraUtil.cloneAppInstanceFromSnapshot(conn, clonedAppInstanceName, snapshotName,
- ipPool);
- DateraUtil.pollAppInstanceAvailable(conn, clonedAppInstanceName);
+
+ // split the snapshot name to appInstanceName and the snapshot timestamp
+ String[] tokens = snapshotName.split(SEPERATOR_SNAPSHOT);
+ Preconditions.checkArgument(tokens.length == 2);
+
+ clonedAppInstance = DateraUtil.cloneAppInstanceFromSnapshot(conn, clonedAppInstanceName, getDescription(snapshotInfo), tokens[0], tokens[1]);
} catch (DateraObject.DateraError | UnsupportedEncodingException e) {
- String errMesg = "Unable to create temp volume " + csSnapshotId + "Error:" + e.getMessage();
+ String errMesg = "Unable to create temp volume " + csSnapshotId + "Error:" + e.getMessage();
s_logger.error(errMesg, e);
throw new CloudRuntimeException(errMesg, e);
}
@@ -1048,16 +915,14 @@ private void createTempVolume(SnapshotInfo snapshotInfo, long storagePoolId) {
if (clonedAppInstance == null) {
throw new CloudRuntimeException("Unable to clone volume for snapshot " + snapshotName);
}
- s_logger.debug("Temp app_instance " + clonedAppInstanceName + " created");
+
addTempVolumeToDb(csSnapshotId, clonedAppInstanceName);
handleSnapshotDetails(csSnapshotId, DiskTO.IQN, DateraUtil.generateIqnPath(clonedAppInstance.getIqn()));
- } else if (snapshotDetails != null && snapshotDetails.getValue() != null
- && snapshotDetails.getValue().equalsIgnoreCase("delete")) {
+ } else if (snapshotDetails != null && snapshotDetails.getValue() != null && snapshotDetails.getValue().equalsIgnoreCase("delete")) {
snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DateraUtil.VOLUME_ID);
try {
- s_logger.debug("Deleting temp app_instance " + snapshotDetails.getValue());
DateraUtil.deleteAppInstance(conn, snapshotDetails.getValue());
} catch (UnsupportedEncodingException | DateraObject.DateraError dateraError) {
String errMesg = "Error deleting temp volume " + dateraError.getMessage();
@@ -1068,29 +933,28 @@ private void createTempVolume(SnapshotInfo snapshotInfo, long storagePoolId) {
snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DiskTO.IQN);
snapshotDetailsDao.remove(snapshotDetails.getId());
- } else {
+ }
+ else {
throw new CloudRuntimeException("Invalid state in 'createTempVolume(SnapshotInfo, long)'");
}
}
/**
- * This function gets invoked when we want to create a volume that caches the
- * template on the primary storage. This 'template volume' will then be cloned
- * to create new ROOT volumes.
+ * This function gets invoked when we want to create a volume that caches the template on the primary
+ * storage. This 'template volume' will then be cloned to create new ROOT volumes.
*
- * @param templateInfo Information about the template like id, size
+ * @param templateInfo Information about the template like id, size
* @param storagePoolId the primary store to create this volume on
* @return IQN of the template volume
*/
public String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId) {
- s_logger.debug("createTemplateVolume() as cache template called");
verifySufficientBytesForStoragePool(templateInfo, storagePoolId);
DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao);
- String iqn = null;
- String appInstanceName = null;
+ String iqn;
+
try {
long templateSizeBytes = getDataObjectSizeIncludingHypervisorSnapshotReserve(templateInfo,
@@ -1098,17 +962,12 @@ public String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId
s_logger.debug("cached VM template sizeBytes: " + toHumanReadableSize(templateSizeBytes));
- int templateSizeGib = DateraUtil.bytesToGib(templateSizeBytes);
-
+ int templateSizeGb = DateraUtil.bytesToGb(templateSizeBytes);
int templateIops = DateraUtil.MAX_IOPS;
int replicaCount = getNumReplicas(storagePoolId);
- appInstanceName = getAppInstanceName(templateInfo);
- String volumePlacement = getVolPlacement(storagePoolId);
- String ipPool = getIpPool(storagePoolId);
- s_logger.debug("cached VM template app_instance: " + appInstanceName + " ipPool: " + ipPool + " sizeGib: " + String.valueOf(templateSizeGib));
- DateraObject.AppInstance appInstance = DateraUtil.createAppInstance(conn, appInstanceName, templateSizeGib,
- templateIops, replicaCount, volumePlacement, ipPool);
+ DateraObject.AppInstance appInstance = DateraUtil.createAppInstance(conn, getAppInstanceName(templateInfo),
+ getDescription(templateInfo), templateSizeGb, toBandwidthKiBps(templateIops), replicaCount);
if (appInstance == null) {
throw new CloudRuntimeException("Unable to create Template volume " + templateInfo.getId());
@@ -1121,7 +980,7 @@ public String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId
templatePoolRef.setInstallPath(DateraUtil.generateIqnPath(iqn));
templatePoolRef.setLocalDownloadPath(appInstance.getName());
- templatePoolRef.setTemplateSize(DateraUtil.gibToBytes(appInstance.getSize()));
+ templatePoolRef.setTemplateSize(DateraUtil.bytesToGb(appInstance.getSize()));
tmpltPoolDao.update(templatePoolRef.getId(), templatePoolRef);
@@ -1135,43 +994,35 @@ public String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId
storagePoolDao.update(storagePoolId, storagePool);
- } catch (UnsupportedEncodingException | DateraObject.DateraError dateraError) {
- if (DateraObject.DateraErrorTypes.ConflictError.equals(dateraError)) {
- String errMesg = "template app Instance " + appInstanceName + " exists";
- s_logger.debug(errMesg, dateraError);
- } else {
- String errMesg = "Unable to create template app Instance " + dateraError.getMessage();
- s_logger.error(errMesg, dateraError);
- throw new CloudRuntimeException(errMesg, dateraError);
- }
+ } catch (UnsupportedEncodingException | DateraObject.DateraError e) {
+ String errMesg = "Unable to create app Instance " + e.getMessage();
+ s_logger.error(errMesg, e);
+ throw new CloudRuntimeException(errMesg, e);
}
+
return DateraUtil.generateIqnPath(iqn);
}
/**
- * Entry point into the create logic. The storage subsystem call this method to
- * create various data objects (volume/snapshot/template)
+ * Entry point into the create logic. The storage subsystem call this method to create various
+ * data objects (volume/snapshot/template)
*
* @param dataStore
* @param dataObject
* @param callback
*/
@Override
- public void createAsync(DataStore dataStore, DataObject dataObject,
- AsyncCompletionCallback callback) {
+ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) {
String iqn = null;
String errMsg = null;
try {
if (dataObject.getType() == DataObjectType.VOLUME) {
- s_logger.debug("createAsync - creating volume");
- iqn = createVolume((VolumeInfo) dataObject, dataStore.getId());
+ iqn = createVolume((VolumeInfo)dataObject, dataStore.getId());
} else if (dataObject.getType() == DataObjectType.SNAPSHOT) {
- s_logger.debug("createAsync - creating snapshot");
createTempVolume((SnapshotInfo) dataObject, dataStore.getId());
} else if (dataObject.getType() == DataObjectType.TEMPLATE) {
- s_logger.debug("createAsync - creating template");
- iqn = createTemplateVolume((TemplateInfo) dataObject, dataStore.getId());
+ iqn = createTemplateVolume((TemplateInfo)dataObject, dataStore.getId());
} else {
errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync";
s_logger.error(errMsg);
@@ -1187,19 +1038,16 @@ public void createAsync(DataStore dataStore, DataObject dataObject,
}
if (callback != null) {
-
CreateCmdResult result = new CreateCmdResult(iqn, new Answer(null, errMsg == null, errMsg));
-
result.setResult(errMsg);
-
callback.complete(result);
}
}
/**
* Helper function which updates volume size in the volume_details table
- * @param volumeId Volume information
- * @param volumeSize Size in GB
+ * @param volumeId Volume information
+ * @param volumeSize Size in GB
*/
private void updateVolumeDetails(long volumeId, long volumeSize) {
VolumeDetailVO volumeDetailVo = volumeDetailsDao.findDetail(volumeId, DateraUtil.VOLUME_SIZE);
@@ -1214,46 +1062,41 @@ private void updateVolumeDetails(long volumeId, long volumeSize) {
/**
* Entrypoint for delete operations.
*
- * @param dataStore Primary storage
+ * @param dataStore Primary storage
* @param dataObject object to delete
- * @param callback used for async, complete the callback after the operation
- * is done.
+ * @param callback used for async, complete the callback after the operation is done.
*/
@Override
- public void deleteAsync(DataStore dataStore, DataObject dataObject,
- AsyncCompletionCallback callback) {
+ public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) {
String errMsg = null;
try {
if (dataObject.getType() == DataObjectType.VOLUME) {
- s_logger.debug("deleteAsync - deleting volume");
- deleteVolume((VolumeInfo) dataObject, dataStore.getId());
+ deleteVolume((VolumeInfo)dataObject, dataStore.getId());
} else if (dataObject.getType() == DataObjectType.SNAPSHOT) {
- s_logger.debug("deleteAsync - deleting snapshot");
- deleteSnapshot((SnapshotInfo) dataObject, dataStore.getId());
+ deleteSnapshot((SnapshotInfo)dataObject, dataStore.getId());
} else if (dataObject.getType() == DataObjectType.TEMPLATE) {
- s_logger.debug("deleteAsync - deleting template");
- deleteTemplate((TemplateInfo) dataObject, dataStore.getId());
+ deleteTemplate((TemplateInfo)dataObject, dataStore.getId());
} else {
errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync";
}
- } catch (Exception ex) {
+ }
+ catch (Exception ex) {
errMsg = ex.getMessage();
s_logger.error(errMsg);
}
- CommandResult result = new CommandResult();
-
- result.setResult(errMsg);
-
- callback.complete(result);
+ if (callback != null) {
+ CommandResult result = new CommandResult();
+ result.setResult(errMsg);
+ callback.complete(result);
+ }
}
@Override
- public void copyAsync(DataObject srcData, DataObject destData,
- AsyncCompletionCallback callback) {
+ public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback) {
throw new UnsupportedOperationException();
}
@@ -1269,17 +1112,15 @@ public boolean canCopy(DataObject srcData, DataObject destData) {
}
/**
- * Entry point for taking a snapshot. A native snpashot is taken if the
- * hypervisor supports it, otherwise a volume is created and the data is copied
- * via the hypervisor and Cloudstack will treat this volume as a snapshot.
+ * Entry point for taking a snapshot. A native snapshot is taken if the hypervisor supports it, otherwise
+ * a volume is created and the data is copied via the hypervisor and CloudStack will treat this volume as
+ * a snapshot.
*
* @param snapshotInfo Snapshot information
- * @param callback Async context
+ * @param callback Async context
*/
@Override
public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback callback) {
- s_logger.debug("takeSnapshot() called");
-
CreateCmdResult result;
try {
@@ -1297,64 +1138,59 @@ public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback capacityBytes) {
+ throw new CloudRuntimeException("Insufficient amount of space remains in this primary storage to take a snapshot");
+ }
+
+ storagePool.setUsedBytes(usedBytes);
+
+ SnapshotObjectTO snapshotObjectTo = (SnapshotObjectTO)snapshotInfo.getTO();
if (shouldTakeSnapshot(snapshotInfo.getId())) {
DateraObject.VolumeSnapshot volumeSnapshot = DateraUtil.takeVolumeSnapshot(conn, baseAppInstanceName);
if (volumeSnapshot == null) {
- s_logger.error("Unable to take native snapshot appInstance name:" + baseAppInstanceName
- + " volume ID " + volumeInfo.getId());
- throw new CloudRuntimeException("Unable to take native snapshot for volume " + volumeInfo.getId());
+ s_logger.error("Unable to take native snapshot appInstance name:" + baseAppInstanceName + " volume ID " + volumeInfo.getId());
+ throw new CloudRuntimeException("Unable to take native snapshot for volume " + volumeInfo.getId());
}
- String snapshotName = baseAppInstanceName + ":" + volumeSnapshot.getTimestamp();
- updateSnapshotDetails(snapshotInfo.getId(), baseAppInstanceName, snapshotName, storagePoolId,
- baseAppInstance.getSize());
+ String snapshotName = baseAppInstanceName + SEPERATOR_SNAPSHOT + volumeSnapshot.getTimestamp();
+ updateSnapshotDetails(snapshotInfo.getId(), baseAppInstanceName, snapshotName, storagePoolId, baseAppInstance.getSize());
snapshotObjectTo.setPath("DateraSnapshotId=" + snapshotName);
- s_logger.info(" snapshot taken: " + snapshotName);
} else {
- StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
-
- long capacityBytes = storagePool.getCapacityBytes();
- long usedBytes = getUsedBytes(storagePool);
- int volumeSizeGib = baseAppInstance.getSize();
- long volumeSizeBytes = DateraUtil.gibToBytes(volumeSizeGib);
- String volumePlacement = getVolPlacement(storagePoolId);
- String ipPool = getIpPool(storagePoolId);
-
- usedBytes += volumeSizeBytes;
-
- if (usedBytes > capacityBytes) {
- throw new CloudRuntimeException(
- "Insufficient amount of space remains in this primary storage to create a snapshot volume");
- }
-
String appInstanceName = getAppInstanceName(snapshotInfo);
DateraObject.AppInstance snapshotAppInstance = DateraUtil.createAppInstance(conn, appInstanceName,
- volumeSizeGib, DateraUtil.MAX_IOPS, getNumReplicas(storagePoolId), volumePlacement, ipPool);
+ getDescription(snapshotInfo), volumeSizeGb, toBandwidthKiBps(DateraUtil.MAX_IOPS), getNumReplicas(storagePoolId));
snapshotObjectTo.setPath(snapshotAppInstance.getName());
String iqnPath = DateraUtil.generateIqnPath(snapshotAppInstance.getIqn());
- updateSnapshotDetails(snapshotInfo.getId(), snapshotAppInstance.getName(), storagePoolId,
- snapshotAppInstance.getSize(), iqnPath);
+ updateSnapshotDetails(snapshotInfo.getId(), snapshotAppInstance.getName(), storagePoolId, snapshotAppInstance.getSize(), iqnPath);
snapshotObjectTo.setPath("DateraVolumeId=" + snapshotAppInstance.getName());
-
- storagePool.setUsedBytes(usedBytes);
- // update size in storage pool
- _storagePoolDao.update(storagePoolId, storagePool);
}
+ //update size in storage pool
+ _storagePoolDao.update(storagePoolId, storagePool);
+
CreateObjectAnswer createObjectAnswer = new CreateObjectAnswer(snapshotObjectTo);
result = new CreateCmdResult(null, createObjectAnswer);
result.setResult(null);
- } catch (Exception ex) {
+ }
+ catch (Exception ex) {
s_logger.debug("Failed to take CloudStack snapshot: " + snapshotInfo.getId(), ex);
result = new CreateCmdResult(null, new CreateObjectAnswer(ex.toString()));
@@ -1362,72 +1198,87 @@ public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback:
- * @param storagePoolId primary storage
- * @param snapshotSizeGb snapshotSize
- * @param snapshotIqn IQN of snapshot
+ * @param csSnapshotId Snapshot ID on Cloudstack
+ * @param snapshotAppInstanceName snapshot name on Datera (appInstanceName:timestamp)
+ * @param storagePoolId primary storage
+ * @param snapshotSizeGb snapshotSize
+ * @param snapshotIqn IQN of snapshot
*/
- private void updateSnapshotDetails(long csSnapshotId, String snapshotAppInstanceName, long storagePoolId,
- long snapshotSizeGb, String snapshotIqn) {
- SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(csSnapshotId, DateraUtil.VOLUME_ID,
- String.valueOf(snapshotAppInstanceName), false);
+ private void updateSnapshotDetails(long csSnapshotId, String snapshotAppInstanceName, long storagePoolId, long snapshotSizeGb, String snapshotIqn) {
+ SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(csSnapshotId,
+ DateraUtil.VOLUME_ID,
+ String.valueOf(snapshotAppInstanceName),
+ false);
_snapshotDetailsDao.persist(snapshotDetail);
- snapshotDetail = new SnapshotDetailsVO(csSnapshotId, DateraUtil.STORAGE_POOL_ID, String.valueOf(storagePoolId),
+ snapshotDetail = new SnapshotDetailsVO(csSnapshotId,
+ DateraUtil.STORAGE_POOL_ID,
+ String.valueOf(storagePoolId),
false);
_snapshotDetailsDao.persist(snapshotDetail);
- snapshotDetail = new SnapshotDetailsVO(csSnapshotId, DateraUtil.VOLUME_SIZE, String.valueOf(snapshotSizeGb),
+ snapshotDetail = new SnapshotDetailsVO(csSnapshotId,
+ DateraUtil.VOLUME_SIZE,
+ String.valueOf(snapshotSizeGb),
false);
_snapshotDetailsDao.persist(snapshotDetail);
- snapshotDetail = new SnapshotDetailsVO(csSnapshotId, DiskTO.IQN, snapshotIqn, false);
+ snapshotDetail = new SnapshotDetailsVO(csSnapshotId,
+ DiskTO.IQN,
+ snapshotIqn,
+ false);
_snapshotDetailsDao.persist(snapshotDetail);
}
@@ -1437,10 +1288,9 @@ private void updateSnapshotDetails(long csSnapshotId, String snapshotAppInstance
* @param snapshotInfo snapshot information
* @param storagePoolId primary storage
* @throws UnsupportedEncodingException
- * @throws DateraObject.DateraError
+ * @throws DateraObject.DateraError
*/
- private void deleteSnapshot(SnapshotInfo snapshotInfo, long storagePoolId)
- throws UnsupportedEncodingException, DateraObject.DateraError {
+ private void deleteSnapshot(SnapshotInfo snapshotInfo, long storagePoolId) throws UnsupportedEncodingException, DateraObject.DateraError {
long csSnapshotId = snapshotInfo.getId();
@@ -1453,19 +1303,22 @@ private void deleteSnapshot(SnapshotInfo snapshotInfo, long storagePoolId)
// Native snapshot being used, delete that
String snapshotName = snapshotDetails.getValue();
+ // split the snapshot name to appInstanceName and the snapshot timestamp
+ String[] tokens = snapshotName.split(SEPERATOR_SNAPSHOT);
+ Preconditions.checkArgument(tokens.length == 2);
- DateraUtil.deleteVolumeSnapshot(conn, snapshotName);
+ DateraUtil.deleteVolumeSnapshot(conn, tokens[0], tokens[1]);
- // check if the underlying volume needs to be deleted
+ //check if the underlying volume needs to be deleted
SnapshotVO snapshot = _snapshotDao.findById(csSnapshotId);
VolumeVO volume = _volumeDao.findById(snapshot.getVolumeId());
if (volume == null) {
- // deleted from Cloudstack. Check if other snapshots are using this volume
+ //deleted from Cloudstack. Check if other snapshots are using this volume
volume = _volumeDao.findByIdIncludingRemoved(snapshot.getVolumeId());
- if (shouldDeleteVolume(snapshot.getVolumeId(), snapshot.getId())) {
+ if(shouldDeleteVolume(snapshot.getVolumeId(), snapshot.getId())) {
DateraUtil.deleteAppInstance(conn, volume.getFolder());
}
}
@@ -1483,29 +1336,27 @@ private void deleteSnapshot(SnapshotInfo snapshotInfo, long storagePoolId)
StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
- // getUsedBytes(StoragePool) will not include the snapshot to delete because it
- // has already been deleted by this point
+ // getUsedBytes(StoragePool) will not include the snapshot to delete because it has already been deleted by this point
long usedBytes = getUsedBytes(storagePool);
storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes);
storagePoolDao.update(storagePoolId, storagePool);
- } catch (Exception ex) {
- s_logger.debug("Error in 'deleteSnapshot(SnapshotInfo, long)'. CloudStack snapshot ID: " + csSnapshotId,
- ex);
+ }
+ catch (Exception ex) {
+ s_logger.debug("Error in 'deleteSnapshot(SnapshotInfo, long)'. CloudStack snapshot ID: " + csSnapshotId, ex);
throw ex;
}
}
/**
* Deletes a template from Datera
- * @param templateInfo Information about Template
- * @param storagePoolId Primary storage
+ * @param templateInfo Information about Template
+ * @param storagePoolId Primary storage
* @throws UnsupportedEncodingException
- * @throws DateraObject.DateraError
+ * @throws DateraObject.DateraError
*/
- private void deleteTemplate(TemplateInfo templateInfo, long storagePoolId)
- throws UnsupportedEncodingException, DateraObject.DateraError {
+ private void deleteTemplate(TemplateInfo templateInfo, long storagePoolId) throws UnsupportedEncodingException, DateraObject.DateraError {
try {
DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao);
@@ -1520,14 +1371,14 @@ private void deleteTemplate(TemplateInfo templateInfo, long storagePoolId)
StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
- // getUsedBytes(StoragePool) will not include the template to delete because the
- // "template_spool_ref" table has already been updated by this point
+ // getUsedBytes(StoragePool) will not include the template to delete because the "template_spool_ref" table has already been updated by this point
long usedBytes = getUsedBytes(storagePool);
storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes);
storagePoolDao.update(storagePoolId, storagePool);
- } catch (Exception ex) {
+ }
+ catch (Exception ex) {
s_logger.debug("Failed to delete template volume. CloudStack template ID: " + templateInfo.getId(), ex);
throw ex;
@@ -1542,67 +1393,14 @@ private void deleteTemplate(TemplateInfo templateInfo, long storagePoolId)
* @throws CloudRuntimeException
*/
@Override
- public void revertSnapshot(SnapshotInfo snapshotInfo, SnapshotInfo snapshotOnPrimaryStore,
- AsyncCompletionCallback callback) {
-
- VolumeInfo volumeInfo = snapshotInfo.getBaseVolume();
- VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId());
-
- long storagePoolId = volumeVO.getPoolId();
- long csSnapshotId = snapshotInfo.getId();
- s_logger.info("Datera - restoreVolumeSnapshot from snapshotId " + String.valueOf(csSnapshotId) + " to volume"
- + volumeVO.getName());
-
- DateraObject.AppInstance appInstance;
-
- try {
-
- if (volumeVO == null || volumeVO.getRemoved() != null) {
- String errMsg = "The volume that the snapshot belongs to no longer exists.";
-
- CommandResult commandResult = new CommandResult();
-
- commandResult.setResult(errMsg);
-
- callback.complete(commandResult);
-
- return;
- }
-
- DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao);
-
- SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DateraUtil.SNAPSHOT_ID);
-
- if (snapshotDetails != null && snapshotDetails.getValue() != null) {
- // Native snapshot being used, restore snapshot from Datera AppInstance
-
- String snapshotName = snapshotDetails.getValue();
-
- s_logger.info("Datera - restoreVolumeSnapshot: " + snapshotName);
-
- appInstance = DateraUtil.restoreVolumeSnapshot(conn, snapshotName);
-
- Preconditions.checkNotNull(appInstance);
-
- updateVolumeDetails(volumeInfo.getId(), appInstance.getSize());
- }
-
- CommandResult commandResult = new CommandResult();
-
- callback.complete(commandResult);
-
- } catch (Exception ex) {
- s_logger.debug("Error in 'revertSnapshot()'. CloudStack snapshot ID: " + csSnapshotId, ex);
- throw new CloudRuntimeException(ex.getMessage());
- }
-
+ public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback callback) {
+ throw new UnsupportedOperationException("Reverting not supported. Create a template or volume based on the snapshot instead.");
}
/**
- * Resizes a volume on Datera, shrinking is not allowed. Resize also takes into
- * account the HSR
+ * Resizes a volume on Datera, shrinking is not allowed. Resize also takes into account the HSR
* @param dataObject volume to resize
- * @param callback async context
+ * @param callback async context
*/
@Override
public void resize(DataObject dataObject, AsyncCompletionCallback callback) {
@@ -1610,12 +1408,12 @@ public void resize(DataObject dataObject, AsyncCompletionCallback lstSnapshots = getNonDestroyedSnapshots(csVolumeId);
for (SnapshotVO snapshot : lstSnapshots) {
- if (snapshotToIgnoreId != null && snapshot.getId() == snapshotToIgnoreId) {
+ if (snapshotToIgnoreId != null && snapshot.getId() == snapshotToIgnoreId){
continue;
}
SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshot.getId(), DateraUtil.SNAPSHOT_ID);
@@ -1879,4 +1674,14 @@ public boolean isVmTagsNeeded(String tagKey) {
@Override
public void provideVmTags(long vmId, long volumeId, String tagValue) {
}
-}
+
+ @Override
+ public String getConfigComponentName() {
+ return DateraPrimaryDataStoreDriver.class.getSimpleName();
+ }
+
+ @Override
+ public ConfigKey>[] getConfigKeys() {
+ return new ConfigKey>[] {MaxIopsScalingFactor};
+ }
+}
\ No newline at end of file
diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java
index ff253fc8d181..821dd36fd0d0 100644
--- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java
+++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java
@@ -20,14 +20,9 @@
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.capacity.CapacityManager;
-import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
-import com.cloud.dc.ClusterDetailsDao;
-import com.cloud.dc.dao.ClusterDao;
-import com.cloud.host.Host;
import com.cloud.host.HostVO;
-import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceManager;
import com.cloud.storage.SnapshotVO;
@@ -38,7 +33,6 @@
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
-import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
@@ -46,7 +40,6 @@
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
-import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.DateraUtil;
@@ -57,102 +50,40 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
+import java.util.UUID;
public class DateraPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
private static final Logger s_logger = Logger.getLogger(DateraPrimaryDataStoreLifeCycle.class);
- @Inject
- private CapacityManager _capacityMgr;
- @Inject
- private DataCenterDao zoneDao;
- @Inject
- private ClusterDao _clusterDao;
- @Inject
- private ClusterDetailsDao _clusterDetailsDao;
- @Inject
- private PrimaryDataStoreDao storagePoolDao;
- @Inject
- private HostDao _hostDao;
- @Inject
- private PrimaryDataStoreHelper dataStoreHelper;
- @Inject
- private ResourceManager _resourceMgr;
- @Inject
- private SnapshotDao _snapshotDao;
- @Inject
- private SnapshotDetailsDao _snapshotDetailsDao;
- @Inject
- private StorageManager _storageMgr;
- @Inject
- private StoragePoolHostDao _storagePoolHostDao;
- @Inject
- private StoragePoolAutomation storagePoolAutomation;
-
+ @Inject private CapacityManager _capacityMgr;
+ @Inject private DataCenterDao zoneDao;
+ @Inject private PrimaryDataStoreDao storagePoolDao;
+ @Inject private PrimaryDataStoreHelper dataStoreHelper;
+ @Inject private ResourceManager _resourceMgr;
+ @Inject private SnapshotDao _snapshotDao;
+ @Inject private SnapshotDetailsDao _snapshotDetailsDao;
+ @Inject private StorageManager _storageMgr;
+ @Inject private StoragePoolAutomation storagePoolAutomation;
+
+ // invoked to add primary storage that is based on the Datera plug-in
@Override
public DataStore initialize(Map dsInfos) {
- String url = (String) dsInfos.get("url");
- Long zoneId = (Long) dsInfos.get("zoneId");
- Long podId = (Long) dsInfos.get("podId");
- Long clusterId = (Long) dsInfos.get("clusterId");
- String storagePoolName = (String) dsInfos.get("name");
- String providerName = (String) dsInfos.get("providerName");
- Long capacityBytes = (Long) dsInfos.get("capacityBytes");
- Long capacityIops = (Long) dsInfos.get("capacityIops");
- String tags = (String) dsInfos.get("tags");
+ String url = (String)dsInfos.get("url");
+ Long zoneId = (Long)dsInfos.get("zoneId");
+ String storagePoolName = (String)dsInfos.get("name");
+ String providerName = (String)dsInfos.get("providerName");
+ Long capacityBytes = (Long)dsInfos.get("capacityBytes");
+ Long capacityIops = (Long)dsInfos.get("capacityIops");
+ String tags = (String)dsInfos.get("tags");
@SuppressWarnings("unchecked")
- Map details = (Map) dsInfos.get("details");
- String domainName = details.get("domainname");
+ Map details = (Map)dsInfos.get("details");
String storageVip = DateraUtil.getStorageVip(url);
-
int storagePort = DateraUtil.getStoragePort(url);
- int numReplicas = DateraUtil.getNumReplicas(url);
- String volPlacement = DateraUtil.getVolPlacement(url);
- String clusterAdminUsername = DateraUtil.getValue(DateraUtil.CLUSTER_ADMIN_USERNAME, url);
- String clusterAdminPassword = DateraUtil.getValue(DateraUtil.CLUSTER_ADMIN_PASSWORD, url);
- String uuid;
- String randomString;
-
- PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
- // checks if primary datastore is clusterwide. If so, uses the clusterId to set
- // the uuid and then sets the podId and clusterId parameters
- if (clusterId != null) {
- if (podId == null) {
- throw new CloudRuntimeException("The Pod ID must be specified.");
- }
- if (zoneId == null) {
- throw new CloudRuntimeException("The Zone ID must be specified.");
- }
- ClusterVO cluster = _clusterDao.findById(clusterId);
- String clusterUuid = cluster.getUuid();
- randomString = DateraUtil.generateUUID(clusterUuid);
- // uuid = DateraUtil.PROVIDER_NAME + "_" + cluster.getUuid() + "_" + storageVip
- // + "_" + clusterAdminUsername + "_" + numReplicas + "_" + volPlacement;
- uuid = DateraUtil.PROVIDER_NAME + "_" + clusterUuid + "_" + randomString;
- s_logger.debug("Datera - Setting Datera cluster-wide primary storage uuid to " + uuid);
- parameters.setPodId(podId);
- parameters.setClusterId(clusterId);
-
- HypervisorType hypervisorType = getHypervisorTypeForCluster(clusterId);
-
- if (!isSupportedHypervisorType(hypervisorType)) {
- throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
- }
+ DataCenterVO zone = zoneDao.findById(zoneId);
- }
- // sets the uuid with zoneid in it
- else {
- DataCenterVO zone = zoneDao.findById(zoneId);
- String zoneUuid = zone.getUuid();
- randomString = DateraUtil.generateUUID(zoneUuid);
- // uuid = DateraUtil.PROVIDER_NAME + "_" + zone.getUuid() + "_" + storageVip +
- // "_" + clusterAdminUsername + "_" + numReplicas + "_" + volPlacement;
- uuid = DateraUtil.PROVIDER_NAME + "_" + zoneUuid + "_" + randomString;
-
- s_logger.debug("Datera - Setting Datera zone-wide primary storage uuid to " + uuid);
- }
if (capacityBytes == null || capacityBytes <= 0) {
throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
}
@@ -161,17 +92,13 @@ public DataStore initialize(Map dsInfos) {
throw new IllegalArgumentException("'capacityIops' must be present and greater than 0.");
}
- if (domainName == null) {
- domainName = "ROOT";
- s_logger.debug("setting the domain to ROOT");
- }
- s_logger.debug("Datera - domainName: " + domainName);
+ PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
parameters.setHost(storageVip);
parameters.setPort(storagePort);
parameters.setPath(DateraUtil.getModifiedUrl(url));
parameters.setType(StoragePoolType.Iscsi);
- parameters.setUuid(uuid);
+ parameters.setUuid(UUID.randomUUID().toString());
parameters.setZoneId(zoneId);
parameters.setName(storagePoolName);
parameters.setProviderName(providerName);
@@ -188,6 +115,10 @@ public DataStore initialize(Map dsInfos) {
details.put(DateraUtil.MANAGEMENT_VIP, managementVip);
details.put(DateraUtil.MANAGEMENT_PORT, String.valueOf(managementPort));
+
+ String clusterAdminUsername = DateraUtil.getValue(DateraUtil.CLUSTER_ADMIN_USERNAME, url);
+ String clusterAdminPassword = DateraUtil.getValue(DateraUtil.CLUSTER_ADMIN_PASSWORD, url);
+
details.put(DateraUtil.CLUSTER_ADMIN_USERNAME, clusterAdminUsername);
details.put(DateraUtil.CLUSTER_ADMIN_PASSWORD, clusterAdminPassword);
@@ -201,8 +132,9 @@ public DataStore initialize(Map dsInfos) {
lClusterDefaultMinIops = Long.parseLong(clusterDefaultMinIops);
}
} catch (NumberFormatException ex) {
- s_logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MIN_IOPS
- + ", using default value: " + lClusterDefaultMinIops + ". Exception: " + ex);
+ s_logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MIN_IOPS +
+ ", using default value: " + lClusterDefaultMinIops +
+ ". Exception: " + ex);
}
try {
@@ -212,27 +144,30 @@ public DataStore initialize(Map dsInfos) {
lClusterDefaultMaxIops = Long.parseLong(clusterDefaultMaxIops);
}
} catch (NumberFormatException ex) {
- s_logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS
- + ", using default value: " + lClusterDefaultMaxIops + ". Exception: " + ex);
+ s_logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS +
+ ", using default value: " + lClusterDefaultMaxIops +
+ ". Exception: " + ex);
}
+
if (lClusterDefaultMinIops > lClusterDefaultMaxIops) {
- throw new CloudRuntimeException("The parameter '" + DateraUtil.CLUSTER_DEFAULT_MIN_IOPS
- + "' must be less than or equal to the parameter '" + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + "'.");
+ throw new CloudRuntimeException("The parameter '" + DateraUtil.CLUSTER_DEFAULT_MIN_IOPS + "' must be less than or equal to the parameter '" +
+ DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + "'.");
}
+ int numReplicas = DateraUtil.getNumReplicas(url);
+
if (numReplicas < DateraUtil.MIN_NUM_REPLICAS || numReplicas > DateraUtil.MAX_NUM_REPLICAS) {
- throw new CloudRuntimeException("The parameter '" + DateraUtil.NUM_REPLICAS + "' must be between "
- + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + "' and " + DateraUtil.MAX_NUM_REPLICAS);
+ throw new CloudRuntimeException("The parameter '" + DateraUtil.NUM_REPLICAS + "' must be between " +
+ DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + "' and " + DateraUtil.MAX_NUM_REPLICAS);
}
+ details.put(DateraUtil.NUM_REPLICAS, String.valueOf(DateraUtil.getNumReplicas(url)));
+
details.put(DateraUtil.CLUSTER_DEFAULT_MIN_IOPS, String.valueOf(lClusterDefaultMinIops));
details.put(DateraUtil.CLUSTER_DEFAULT_MAX_IOPS, String.valueOf(lClusterDefaultMaxIops));
- details.put(DateraUtil.NUM_REPLICAS, String.valueOf(DateraUtil.getNumReplicas(url)));
- details.put(DateraUtil.VOL_PLACEMENT, String.valueOf(DateraUtil.getVolPlacement(url)));
- details.put(DateraUtil.IP_POOL, String.valueOf(DateraUtil.getIpPool(url)));
-
+ // this adds a row in the cloud.storage_pool table for this Datera cluster
return dataStoreHelper.createPrimaryDataStore(parameters);
}
@@ -242,59 +177,17 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis
}
@Override
- public boolean attachCluster(DataStore datastore, ClusterScope scope) {
- PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) datastore;
-
- // check if there is at least one host up in this cluster
- List allHosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing,
- primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(),
- primaryDataStoreInfo.getDataCenterId());
-
- if (allHosts.isEmpty()) {
- storagePoolDao.expunge(primaryDataStoreInfo.getId());
-
- throw new CloudRuntimeException(
- "No host up to associate a storage pool with in cluster " + primaryDataStoreInfo.getClusterId());
- }
-
- List poolHosts = new ArrayList();
-
- for (HostVO host : allHosts) {
- try {
- _storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId());
-
- poolHosts.add(host);
- } catch (Exception e) {
- s_logger.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e);
- }
- }
-
- if (poolHosts.isEmpty()) {
- s_logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '"
- + primaryDataStoreInfo.getClusterId() + "'.");
-
- storagePoolDao.expunge(primaryDataStoreInfo.getId());
-
- throw new CloudRuntimeException("Failed to access storage pool");
- }
-
- dataStoreHelper.attachCluster(datastore);
-
- return true;
- // throw new UnsupportedOperationException("Only Zone-wide scope is supported
- // with the Datera Storage driver");
+ public boolean attachCluster(DataStore store, ClusterScope scope) {
+ return true; // should be ignored for zone-wide-only plug-ins
}
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
dataStoreHelper.attachZone(dataStore);
- List